sentence1
stringlengths
52
3.87M
sentence2
stringlengths
1
47.2k
label
stringclasses
1 value
def FromString(s, **kwargs):
    """Like FromFile, but takes a string.

    Args:
      s: template contents as a string
      **kwargs: forwarded to FromFile

    Returns:
      Whatever FromFile returns (a Template instance by default).
    """
    # The original used the Python 2-only StringIO.StringIO module, which
    # does not exist on Python 3 (the rest of the file uses six for 2/3
    # compatibility).  io.StringIO works on Python 3 and on Python 2 for
    # unicode input.
    from io import StringIO
    f = StringIO(s)
    return FromFile(f, **kwargs)
Like FromFile, but takes a string.
entailment
def FromFile(f, more_formatters=lambda x: None, more_predicates=lambda x: None,
             _constructor=None):
    """Parse a template from a file, using a simple file format.

    This is useful when you want to include template options in a data file,
    rather than in the source code.

    The format is similar to HTTP or E-mail headers.  The first lines of the
    file can specify template options, such as the metacharacters to use.  One
    blank line must separate the options from the template body.

    Example:

      default-formatter: none
      meta: {{}}
      format-char: :
      <blank line required>
      Template goes here: {{variable:html}}

    Args:
      f: A file handle to read from.  Caller is responsible for opening and
        closing it.
    """
    _constructor = _constructor or Template

    options = {}

    # Parse lines until the first one that doesn't look like an option
    while 1:
        line = f.readline()
        match = _OPTION_RE.match(line)
        if match:
            name, value = match.group(1), match.group(2)

            # Accept something like 'Default-Formatter: raw'.  This syntax is
            # like HTTP/E-mail headers.
            name = name.lower()

            # In Python 2.4, kwargs must be plain strings
            name = name.encode('utf-8')

            if name in _OPTION_NAMES:
                name = name.replace('-', '_')
                value = value.strip()
                if name == 'default_formatter' and value.lower() == 'none':
                    value = None
                options[name] = value
            else:
                # Looked like an option but isn't a known one: treat the rest
                # of the file as the body.
                break
        else:
            break

    if options:
        # Options were present, so the separating blank line is mandatory.
        if line.strip():
            raise CompilationError(
                'Must be one blank line between template options and body '
                '(got %r)' % line)
        body = f.read()
    else:
        # There were no options, so no blank line is necessary.
        body = line + f.read()

    return _constructor(body,
                        more_formatters=more_formatters,
                        more_predicates=more_predicates,
                        **options)
Parse a template from a file, using a simple file format. This is useful when you want to include template options in a data file, rather than in the source code. The format is similar to HTTP or E-mail headers. The first lines of the file can specify template options, such as the metacharacters to use. One blank line must separate the options from the template body. Example: default-formatter: none meta: {{}} format-char: : <blank line required> Template goes here: {{variable:html}} Args: f: A file handle to read from. Caller is responsible for opening and closing it.
entailment
def _MakeGroupFromRootSection(root_section, undefined_str):
    """Construct a dictionary { template name -> Template() instance }.

    Args:
      root_section: _Section instance -- root of the original parse tree
      undefined_str: value to substitute for undefined variables
    """
    group = {}
    for statement in root_section.Statements():
        if isinstance(statement, six.string_types):
            continue
        func, args = statement
        # The function acts as an ID for the block type.
        if func is _DoDef and isinstance(args, _Section):
            # Construct a Template instance from this _Section subtree.
            group[args.section_name] = Template._FromSection(
                args, group, undefined_str)
    return group
Construct a dictionary { template name -> Template() instance } Args: root_section: _Section instance -- root of the original parse tree
entailment
def JoinTokens(tokens):
    """Join tokens (which may be a mix of unicode and str values).

    See notes on unicode at the top.  Encoded utf-8 byte-string tokens may be
    mixed with unicode tokens (Python's default encoding is ASCII, and we
    don't want to change that).  Pure byte strings must also be supported, so
    the try/except cannot be removed.  Anyone who really wanted another
    encoding could monkey-patch jsontemplate.JoinTokens (this function).
    """
    try:
        return ''.join(tokens)
    except UnicodeDecodeError:
        # This can still raise UnicodeDecodeError if the data isn't utf-8.
        decoded = [tok.decode('utf-8') for tok in tokens]
        return ''.join(decoded)
Join tokens (which may be a mix of unicode and str values). See notes on unicode at the top. This function allows mixing encoded utf-8 byte string tokens with unicode tokens. (Python's default encoding is ASCII, and we don't want to change that.) We also want to support pure byte strings, so we can't get rid of the try/except. Two tries necessary. If someone really wanted to use another encoding, they could monkey patch jsontemplate.JoinTokens (this function).
entailment
def _DoRepeatedSection(args, context, callback, trace):
    """{.repeated section foo}

    Renders the block once per item in the list named by the section.  If the
    value is missing or falsy, the {.or} clause is rendered instead.
    """
    block = args

    items = context.PushSection(block.section_name, block.pre_formatters)
    if items:
        if not isinstance(items, list):
            raise EvaluationError('Expected a list; got %s' % type(items))

        last_index = len(items) - 1
        statements = block.Statements()
        alt_statements = block.Statements('alternates with')

        # context.Next() raises StopIteration when the list is exhausted;
        # that terminates the loop.
        try:
            i = 0
            while True:
                context.Next()
                # Execute the statements in the block for every item in the
                # list.  Execute the alternate block on every iteration except
                # the last.  Each item could be an atom (string, integer,
                # etc.) or a dictionary.
                _Execute(statements, context, callback, trace)
                if i != last_index:
                    _Execute(alt_statements, context, callback, trace)
                i += 1
        except StopIteration:
            pass
    else:
        _Execute(block.Statements('or'), context, callback, trace)

    context.Pop()
{.repeated section foo}
entailment
def _DoSection(args, context, callback, trace):
    """{.section foo}

    If the section is present and "true", push its value onto the context
    stack and render the block; otherwise render the {.or} clause.
    """
    section = args
    shown = context.PushSection(section.section_name, section.pre_formatters)
    if shown:
        _Execute(section.Statements(), context, callback, trace)
        context.Pop()
    else:
        # Missing or "false" -- pop first, then show the {.or} section.
        context.Pop()
        _Execute(section.Statements('or'), context, callback, trace)
{.section foo}
entailment
def _DoPredicates(args, context, callback, trace):
    """{.predicate?}

    Execute the first clause whose predicate evaluates to true, then stop.
    """
    block = args
    value = context.Lookup('@')
    for (predicate, pred_args, func_type), statements in block.clauses:
        if func_type == ENHANCED_FUNC:
            matched = predicate(value, context, pred_args)
        else:
            matched = predicate(value)
        if not matched:
            continue
        if trace:
            trace.Push(predicate)
        _Execute(statements, context, callback, trace)
        if trace:
            trace.Pop()
        break
{.predicate?} Here we execute the first clause that evaluates to true, and then stop.
entailment
def _DoSubstitute(args, context, callback, trace):
    """Variable substitution, i.e. {foo}

    We also implement template formatters here, i.e. {foo|template bar} as
    well as {.template FOO} for templates that operate on the root of the
    data dict rather than a subtree.
    """
    name, formatters = args

    # A None name means {.template FOO}: operate on the data root.
    if name is None:
        value = context.Root()  # don't use the cursor
    else:
        try:
            value = context.Lookup(name)
        except TypeError as e:
            raise EvaluationError(
                'Error evaluating %r in context %r: %r' % (name, context, e))

    last_index = len(formatters) - 1
    for i, (f, args, formatter_type) in enumerate(formatters):
        try:
            if formatter_type == TEMPLATE_FORMATTER:
                template = f.Resolve(context)
                if i == last_index:
                    # In order to keep less template output in memory, we can
                    # just let the other template write to our callback
                    # directly, and then stop.
                    template.execute(value, callback, trace=trace)
                    return  # EARLY RETURN
                else:
                    # We have more formatters to apply, so explicitly
                    # construct 'value'
                    tokens = []
                    template.execute(value, tokens.append, trace=trace)
                    value = JoinTokens(tokens)
            elif formatter_type == ENHANCED_FUNC:
                value = f(value, context, args)
            elif formatter_type == SIMPLE_FUNC:
                value = f(value)
            else:
                assert False, 'Invalid formatter type %r' % formatter_type
        except (KeyboardInterrupt, EvaluationError):
            # Don't "wrap" recursive EvaluationErrors
            raise
        except Exception as e:
            if formatter_type == TEMPLATE_FORMATTER:
                raise  # in this case we want to see the original exception
            raise EvaluationError(
                'Formatting name %r, value %r with formatter %s raised '
                'exception: %r '
                '-- see e.original_exc_info' % (name, value, f, e),
                original_exc_info=sys.exc_info())

    # TODO: Require a string/unicode instance here?
    if value is None:
        raise EvaluationError('Evaluating %r gave None value' % name)
    callback(value)
Variable substitution, i.e. {foo} We also implement template formatters here, i.e. {foo|template bar} as well as {.template FOO} for templates that operate on the root of the data dict rather than a subtree.
entailment
def _Execute(statements, context, callback, trace): """Execute a bunch of template statements in a ScopedContext. Args: callback: Strings are "written" to this callback function. trace: Trace object, or None This is called in a mutually recursive fashion. """ # Every time we call _Execute, increase this depth if trace: trace.exec_depth += 1 for i, statement in enumerate(statements): if isinstance(statement, six.string_types): callback(statement) else: # In the case of a substitution, args is a pair (name, formatters). # In the case of a section, it's a _Section instance. try: func, args = statement func(args, context, callback, trace) except UndefinedVariable as e: # Show context for statements start = max(0, i - 3) end = i + 3 e.near = statements[start:end] e.trace = trace # Attach caller's trace (could be None) raise
Execute a bunch of template statements in a ScopedContext. Args: callback: Strings are "written" to this callback function. trace: Trace object, or None This is called in a mutually recursive fashion.
entailment
def expand(template_str, dictionary, **kwargs):
    """Free function to expand a template string with a data dictionary.

    Useful when you don't care about saving the result of compilation
    (similar to re.match('.*', s) vs DOT_STAR.match(s)).
    """
    return Template(template_str, **kwargs).expand(dictionary)
Free function to expand a template string with a data dictionary. This is useful for cases where you don't care about saving the result of compilation (similar to re.match('.*', s) vs DOT_STAR.match(s))
entailment
def _FlattenToCallback(tokens, callback):
    """Takes a nested list structure and flattens it.

    ['a', ['b', 'c']] -> callback('a'); callback('b'); callback('c');
    """
    for item in tokens:
        if not isinstance(item, six.string_types):
            _FlattenToCallback(item, callback)  # recurse into sublists
        else:
            callback(item)
Takes a nested list structure and flattens it. ['a', ['b', 'c']] -> callback('a'); callback('b'); callback('c');
entailment
def execute_with_style_LEGACY(template, style, data, callback,
                              body_subtree='body'):
    """OBSOLETE old API.

    Expands `template` against data[body_subtree], splices the result back
    into `data`, then expands `style` over the whole dictionary.
    """
    try:
        body_data = data[body_subtree]
    except KeyError:
        raise EvaluationError('Data dictionary has no subtree %r'
                              % body_subtree)

    body_tokens = []
    template.execute(body_data, body_tokens.append)
    data[body_subtree] = body_tokens

    style_tokens = []
    style.execute(data, style_tokens.append)
    _FlattenToCallback(style_tokens, callback)
OBSOLETE old API.
entailment
def expand_with_style(template, style, data, body_subtree='body'):
    """Expand a data dictionary with a template AND a style.

    DEPRECATED -- Remove this entire function in favor of
    expand(d, style=style).

    A style is a Template instance that factors out the common strings in
    several "body" templates.

    Args:
      template: Template instance for the inner "page content"
      style: Template instance for the outer "page style"
      data: Data dictionary, with a 'body' key (or `body_subtree`)
    """
    if template.has_defines:
        return template.expand(data, style=style)
    out = []
    execute_with_style_LEGACY(template, style, data, out.append,
                              body_subtree=body_subtree)
    return JoinTokens(out)
Expand a data dictionary with a template AND a style. DEPRECATED -- Remove this entire function in favor of expand(d, style=style) A style is a Template instance that factors out the common strings in several "body" templates. Args: template: Template instance for the inner "page content" style: Template instance for the outer "page style" data: Data dictionary, with a 'body' key (or `body_subtree`)
entailment
def LookupWithType(self, user_str):
    """Resolve a 'template ...' formatter string.

    Returns:
      ref: Either a template instance (itself) or _TemplateRef, or None
        when `user_str` is not a template formatter (failed lookup).
    """
    prefix = 'template '
    if not user_str.startswith(prefix):
        # Fail the lookup by default.
        return None, (), TEMPLATE_FORMATTER

    name = user_str[len(prefix):]
    if name == 'SELF':
        # We can resolve this right away -- special value.
        ref = _TemplateRef(template=self.owner)
    else:
        ref = _TemplateRef(name)
    return ref, (), TEMPLATE_FORMATTER
Returns: ref: Either a template instance (itself) or _TemplateRef
entailment
def _GetFormatter(self, format_str):
    """Resolve a formatter string to (formatter, args, func_type).

    The user's formatters are consulted first, then the default formatters.

    Raises:
      BadFormatter: if the string names no known formatter.
    """
    result = self.formatters.LookupWithType(format_str)
    if not result[0]:
        raise BadFormatter('%r is not a valid formatter' % format_str)
    return result
The user's formatters are consulted first, then the default formatters.
entailment
def _GetPredicate(self, pred_str, test_attr=False):
    """Resolve a predicate string to (predicate, args, func_type).

    The user's predicates are consulted first, then the default predicates.
    """
    predicate, args, func_type = self.predicates.LookupWithType(pred_str)
    if predicate:
        return predicate, args, func_type

    if not test_attr:
        raise BadPredicate('%r is not a valid predicate' % pred_str)

    # Nicer syntax: {.debug?} is shorthand for {.if test debug}.
    # Currently there is no if/elif chain; just use
    # {.if test debug} {.or test release} {.or} {.end}
    assert pred_str.endswith('?')
    # func, args, func_type
    return _TestAttribute, (pred_str[:-1],), ENHANCED_FUNC
The user's predicates are consulted first, then the default predicates.
entailment
def NewSection(self, token_type, section_name, pre_formatters):
    """For sections or repeated sections."""
    pre_formatters = [self._GetFormatter(f) for f in pre_formatters]

    # TODO: Consider getting rid of this dispatching, and turn _Do* into
    # methods.
    if token_type == REPEATED_SECTION_TOKEN:
        new_block = _RepeatedSection(section_name, pre_formatters)
        handler = _DoRepeatedSection
    elif token_type == SECTION_TOKEN:
        new_block = _Section(section_name, pre_formatters)
        handler = _DoSection
    elif token_type == DEF_TOKEN:
        # {.define} blocks take no pre-formatters.
        new_block = _Section(section_name, [])
        handler = _DoDef
    else:
        raise AssertionError('Invalid token type %s' % token_type)

    self._NewSection(handler, new_block)
For sections or repeated sections.
entailment
def NewOrClause(self, pred_str):
    """{.or ...}

    Can appear inside predicate blocks or section blocks, with slightly
    different meaning.
    """
    pred = self._GetPredicate(pred_str, test_attr=False) if pred_str else None
    self.current_section.NewOrClause(pred)
{.or ...} Can appear inside predicate blocks or section blocks, with slightly different meaning.
entailment
def NewPredicateSection(self, pred_str, test_attr=False):
    """For chains of predicate clauses."""
    section = _PredicateSection()
    section.NewOrClause(self._GetPredicate(pred_str, test_attr=test_attr))
    self._NewSection(_DoPredicates, section)
For chains of predicate clauses.
entailment
def PushSection(self, name, pre_formatters):
    """Given a section name, push its value on the top of the stack.

    Args:
      name: section name, or '@' for the current cursor
      pre_formatters: list of (func, args, formatter_type) tuples applied to
        the value before it is pushed

    Returns:
      The value that was pushed (None/falsy if there is no such section).
    """
    if name == '@':
        value = self.stack[-1].context
    else:
        value = self.stack[-1].context.get(name)

    # Apply pre-formatters.  (The original used enumerate() here but never
    # used the index.)
    for f, args, formatter_type in pre_formatters:
        if formatter_type == ENHANCED_FUNC:
            value = f(value, self, args)
        elif formatter_type == SIMPLE_FUNC:
            value = f(value)
        else:
            assert False, 'Invalid formatter type %r' % formatter_type

    self.stack.append(_Frame(value))
    return value
Given a section name, push it on the top of the stack. Returns: The new section, or None if there is no such section.
entailment
def Next(self):
    """Advance to the next item in a repeated section.

    Raises:
      StopIteration if there are no more elements
    """
    stacktop = self.stack[-1]

    # index == -1 means we haven't started iterating yet.
    # Now we're iterating -- push a new mutable object onto the stack.
    if stacktop.index == -1:
        stacktop = _Frame(None, index=0)
        self.stack.append(stacktop)

    # The list being iterated lives one frame below the iteration frame.
    context_array = self.stack[-2].context

    if stacktop.index == len(context_array):
        # Pop the iteration frame before signalling exhaustion.
        self.stack.pop()
        raise StopIteration

    stacktop.context = context_array[stacktop.index]
    stacktop.index += 1

    return True
Advance to the next item in a repeated section. Raises: StopIteration if there are no more elements
entailment
def _LookUpStack(self, name):
    """Look up the stack for the given name."""
    i = len(self.stack) - 1
    while 1:
        frame = self.stack[i]
        if name == '@index':
            if frame.index != -1:  # -1 is undefined
                # @index is 1-based (Next() increments after assigning
                # the context).
                return frame.index
        else:
            context = frame.context
            if hasattr(context, 'get'):  # Can't look up names in a list or atom
                try:
                    return context[name]
                except KeyError:
                    pass

        i -= 1  # Next frame

        if i <= -1:  # Couldn't find it anywhere
            return self._Undefined(name)
Look up the stack for the given name.
entailment
def Lookup(self, name):
    """Get the value associated with a name in the current context.

    The current context could be a dictionary in a list, or a dictionary
    outside a list.

    Args:
      name: name to lookup, e.g. 'foo' or 'foo.bar.baz'

    Returns:
      The value, or self.undefined_str

    Raises:
      UndefinedVariable if self.undefined_str is not set
    """
    if name == '@':
        return self.stack[-1].context

    path = name.split('.')
    value = self._LookUpStack(path[0])

    # Simple lookups of the remaining dotted parts.
    for attr in path[1:]:
        try:
            value = value[attr]
        except (KeyError, TypeError):  # TypeError for non-dictionaries
            return self._Undefined(attr)

    return value
Get the value associated with a name in the current context. The current context could be an dictionary in a list, or a dictionary outside a list. Args: name: name to lookup, e.g. 'foo' or 'foo.bar.baz' Returns: The value, or self.undefined_str Raises: UndefinedVariable if self.undefined_str is not set
entailment
def execute(self, data_dict, callback, group=None, trace=None):
    """Low level method to expand the template piece by piece.

    Args:
      data_dict: The JSON data dictionary.
      callback: A callback which should be called with each expanded token.
      group: Dictionary of name -> Template instance (for styles)

    Example: you can pass 'f.write' as the callback to write directly to a
    file handle.
    """
    # Prefer the passed-in group, then the one set by _SetTemplateGroup.
    # May be None.  Only one of these should be set.
    group = group if group else self.group
    context = _ScopedContext(data_dict, self.undefined_str, group=group)
    _Execute(self._program.Statements(), context, callback, trace)
Low level method to expand the template piece by piece. Args: data_dict: The JSON data dictionary. callback: A callback which should be called with each expanded token. group: Dictionary of name -> Template instance (for styles) Example: You can pass 'f.write' as the callback to write directly to a file handle.
entailment
def expand(self, *args, **kwargs):
    """Expands the template with the given data dictionary, returning a string.

    This is a small wrapper around execute(), and is the most convenient
    interface.

    Args:
      data_dict: The JSON data dictionary.  Like the builtin dict()
        constructor, it can take a single dictionary as a positional
        argument, or arbitrary keyword arguments.
      trace: Trace object for debugging
      style: Template instance to be treated as a style for this template
        (the "outside")

    Returns:
      The return value could be a str() or unicode() instance, depending on
      the type of the template string passed in, and what the types of the
      strings in the dictionary are.
    """
    if args:
        if len(args) == 1:
            # dict()-style call: single positional data dict, options as
            # keywords.
            data_dict = args[0]
            trace = kwargs.get('trace')
            style = kwargs.get('style')
        else:
            raise TypeError(
                'expand() only takes 1 positional argument (got %s)' % args)
    else:
        # All keyword arguments form the data dict itself.
        data_dict = kwargs
        trace = None  # Can't use trace= with the kwargs style
        style = None

    tokens = []
    group = _MakeGroupFromRootSection(self._program, self.undefined_str)
    if style:
        style.execute(data_dict, tokens.append, group=group, trace=trace)
    else:
        # Needs a group to reference its OWN {.define}s
        self.execute(data_dict, tokens.append, group=group, trace=trace)

    return JoinTokens(tokens)
Expands the template with the given data dictionary, returning a string. This is a small wrapper around execute(), and is the most convenient interface. Args: data_dict: The JSON data dictionary. Like the builtin dict() constructor, it can take a single dictionary as a positional argument, or arbitrary keyword arguments. trace: Trace object for debugging style: Template instance to be treated as a style for this template (the "outside") Returns: The return value could be a str() or unicode() instance, depending on the the type of the template string passed in, and what the types the strings in the dictionary are.
entailment
def tokenstream(self, data_dict):
    """Yields the tokens resulting from expansion.

    This may be useful for WSGI apps.

    NOTE: In the current implementation, the entire expanded template must
    be stored in memory.

    NOTE: This is a generator, but JavaScript doesn't have generators.
    """
    expanded = []
    self.execute(data_dict, expanded.append)
    for tok in expanded:
        yield tok
Yields a list of tokens resulting from expansion. This may be useful for WSGI apps. NOTE: In the current implementation, the entire expanded template must be stored memory. NOTE: This is a generator, but JavaScript doesn't have generators.
entailment
def align_unwrapped(sino):
    """Align an unwrapped phase array to zero-phase.

    All operations are performed in-place.

    NOTE(review): relies on module-level names `mode` (presumably
    scipy.stats.mode -- confirm import) and `divmod_neg`.
    """
    samples = []
    if len(sino.shape) == 2:  # 2D
        # take 1D samples at beginning and end of array
        samples.append(sino[:, 0])
        samples.append(sino[:, 1])
        samples.append(sino[:, 2])
        samples.append(sino[:, -1])
        samples.append(sino[:, -2])
    elif len(sino.shape) == 3:  # 3D
        # take 1D samples at beginning and end of array
        samples.append(sino[:, 0, 0])
        samples.append(sino[:, 0, -1])
        samples.append(sino[:, -1, 0])
        samples.append(sino[:, -1, -1])
        samples.append(sino[:, 0, 1])

    # find discontinuities in the samples
    steps = np.zeros((len(samples), samples[0].shape[0]))
    for i in range(len(samples)):
        t = np.unwrap(samples[i])
        steps[i] = samples[i] - t

    # if the majority believes so, add a step of PI
    remove = mode(steps, axis=0)[0][0]

    # obtain divmod min
    twopi = 2*np.pi
    minimum = divmod_neg(np.min(sino), twopi)[0]
    remove += minimum*twopi

    # Subtract the per-angle offset from every slice, in-place.
    for i in range(len(sino)):
        sino[i] -= remove[i]
Align an unwrapped phase array to zero-phase All operations are performed in-place.
entailment
def divmod_neg(a, b):
    """Return the divmod pair whose remainder is closest to zero."""
    quot, rem = divmod(a, b)
    # If the remainder is more than half of b away from zero, shift the
    # quotient by one step so the remainder moves closer to zero.
    if np.abs(rem) > b / 2:
        step = np.sign(rem)
        quot += step
        rem -= b * step
    return quot, rem
Return divmod with closest result to zero
entailment
def sinogram_as_radon(uSin, align=True): r"""Compute the phase from a complex wave field sinogram This step is essential when using the ray approximation before computation of the refractive index with the inverse Radon transform. Parameters ---------- uSin: 2d or 3d complex ndarray The background-corrected sinogram of the complex scattered wave :math:`u(\mathbf{r})/u_0(\mathbf{r})`. The first axis iterates through the angles :math:`\phi_0`. align: bool Tries to correct for a phase offset in the phase sinogram. Returns ------- phase: 2d or 3d real ndarray The unwrapped phase array corresponding to `uSin`. See Also -------- skimage.restoration.unwrap_phase: phase unwrapping radontea.backproject_3d: e.g. reconstruction via backprojection """ ndims = len(uSin.shape) if ndims == 2: # unwrapping is very important phiR = np.unwrap(np.angle(uSin), axis=-1) else: # Unwrap gets the dimension of the problem from the input # data. Since we have a sinogram, we need to pass it the # slices one by one. phiR = np.angle(uSin) for ii in range(len(phiR)): phiR[ii] = unwrap_phase(phiR[ii], seed=47) if align: align_unwrapped(phiR) return phiR
r"""Compute the phase from a complex wave field sinogram This step is essential when using the ray approximation before computation of the refractive index with the inverse Radon transform. Parameters ---------- uSin: 2d or 3d complex ndarray The background-corrected sinogram of the complex scattered wave :math:`u(\mathbf{r})/u_0(\mathbf{r})`. The first axis iterates through the angles :math:`\phi_0`. align: bool Tries to correct for a phase offset in the phase sinogram. Returns ------- phase: 2d or 3d real ndarray The unwrapped phase array corresponding to `uSin`. See Also -------- skimage.restoration.unwrap_phase: phase unwrapping radontea.backproject_3d: e.g. reconstruction via backprojection
entailment
def sinogram_as_rytov(uSin, u0=1, align=True): r"""Convert the complex wave field sinogram to the Rytov phase This method applies the Rytov approximation to the recorded complex wave sinogram. To achieve this, the following filter is applied: .. math:: u_\mathrm{B}(\mathbf{r}) = u_\mathrm{0}(\mathbf{r}) \ln\!\left( \frac{u_\mathrm{R}(\mathbf{r})}{u_\mathrm{0}(\mathbf{r})} +1 \right) This filter step effectively replaces the Born approximation :math:`u_\mathrm{B}(\mathbf{r})` with the Rytov approximation :math:`u_\mathrm{R}(\mathbf{r})`, assuming that the scattered field is equal to :math:`u(\mathbf{r})\approx u_\mathrm{R}(\mathbf{r})+ u_\mathrm{0}(\mathbf{r})`. Parameters ---------- uSin: 2d or 3d complex ndarray The sinogram of the complex wave :math:`u_\mathrm{R}(\mathbf{r}) + u_\mathrm{0}(\mathbf{r})`. The first axis iterates through the angles :math:`\phi_0`. u0: ndarray of dimension as `uSin` or less, or int. The incident plane wave :math:`u_\mathrm{0}(\mathbf{r})` at the detector. If `u0` is "1", it is assumed that the data is already background-corrected ( `uSin` :math:`= \frac{u_\mathrm{R}(\mathbf{r})}{ u_\mathrm{0}(\mathbf{r})} + 1` ). Note that if the reconstruction distance :math:`l_\mathrm{D}` of the original experiment is non-zero and `u0` is set to 1, then the reconstruction will be wrong; the field is not focused to the center of the reconstruction volume. align: bool Tries to correct for a phase offset in the phase sinogram. Returns ------- uB: 2d or 3d real ndarray The Rytov-filtered complex sinogram :math:`u_\mathrm{B}(\mathbf{r})`. See Also -------- skimage.restoration.unwrap_phase: phase unwrapping """ ndims = len(uSin.shape) # imaginary part of the complex Rytov phase phiR = np.angle(uSin / u0) # real part of the complex Rytov phase lna = np.log(np.absolute(uSin / u0)) if ndims == 2: # unwrapping is very important phiR[:] = np.unwrap(phiR, axis=-1) else: # Unwrap gets the dimension of the problem from the input # data. 
Since we have a sinogram, we need to pass it the # slices one by one. for ii in range(len(phiR)): phiR[ii] = unwrap_phase(phiR[ii], seed=47) if align: align_unwrapped(phiR) # rytovSin = u0*(np.log(a/a0) + 1j*phiR) # u0 is one - we already did background correction # complex rytov phase: rytovSin = 1j * phiR + lna return u0 * rytovSin
r"""Convert the complex wave field sinogram to the Rytov phase This method applies the Rytov approximation to the recorded complex wave sinogram. To achieve this, the following filter is applied: .. math:: u_\mathrm{B}(\mathbf{r}) = u_\mathrm{0}(\mathbf{r}) \ln\!\left( \frac{u_\mathrm{R}(\mathbf{r})}{u_\mathrm{0}(\mathbf{r})} +1 \right) This filter step effectively replaces the Born approximation :math:`u_\mathrm{B}(\mathbf{r})` with the Rytov approximation :math:`u_\mathrm{R}(\mathbf{r})`, assuming that the scattered field is equal to :math:`u(\mathbf{r})\approx u_\mathrm{R}(\mathbf{r})+ u_\mathrm{0}(\mathbf{r})`. Parameters ---------- uSin: 2d or 3d complex ndarray The sinogram of the complex wave :math:`u_\mathrm{R}(\mathbf{r}) + u_\mathrm{0}(\mathbf{r})`. The first axis iterates through the angles :math:`\phi_0`. u0: ndarray of dimension as `uSin` or less, or int. The incident plane wave :math:`u_\mathrm{0}(\mathbf{r})` at the detector. If `u0` is "1", it is assumed that the data is already background-corrected ( `uSin` :math:`= \frac{u_\mathrm{R}(\mathbf{r})}{ u_\mathrm{0}(\mathbf{r})} + 1` ). Note that if the reconstruction distance :math:`l_\mathrm{D}` of the original experiment is non-zero and `u0` is set to 1, then the reconstruction will be wrong; the field is not focused to the center of the reconstruction volume. align: bool Tries to correct for a phase offset in the phase sinogram. Returns ------- uB: 2d or 3d real ndarray The Rytov-filtered complex sinogram :math:`u_\mathrm{B}(\mathbf{r})`. See Also -------- skimage.restoration.unwrap_phase: phase unwrapping
entailment
def json_encoder_default(obj):
    """Handle more data types than the default JSON encoder.

    Specifically, it treats a `set` and a `numpy.array` like a `list`.

    Example usage: ``json.dumps(obj, default=json_encoder_default)``
    """
    numpy_like = (np is not None
                  and hasattr(obj, 'size') and hasattr(obj, 'dtype'))
    if numpy_like and obj.size == 1:
        # Scalar numpy values become plain Python numbers.
        if np.issubdtype(obj.dtype, np.integer):
            return int(obj)
        if np.issubdtype(obj.dtype, np.floating):
            return float(obj)
    if isinstance(obj, set):
        return list(obj)
    if hasattr(obj, 'to_native'):  # DatastoreList, DatastoreDict
        return obj.to_native()
    if hasattr(obj, 'tolist') and hasattr(obj, '__iter__'):  # for np.array
        return obj.tolist()
    return obj
Handle more data types than the default JSON encoder. Specifically, it treats a `set` and a `numpy.array` like a `list`. Example usage: ``json.dumps(obj, default=json_encoder_default)``
entailment
def fig_to_src(figure, image_format='png', dpi=80):
    """Convert a matplotlib figure to an inline HTML image.

    :param matplotlib.figure.Figure figure: Figure to display.
    :param str image_format: png (default) or svg
    :param int dpi: dots-per-inch for raster graphics.
    :rtype: str
    :raises ValueError: if `image_format` is not 'png' or 'svg'.
    """
    if image_format == 'png':
        buf = io.BytesIO()
        figure.savefig(buf, format=image_format, dpi=dpi)
        buf.seek(0)
        return png_to_src(buf.read())
    elif image_format == 'svg':
        buf = io.StringIO()
        figure.savefig(buf, format=image_format, dpi=dpi)
        buf.seek(0)
        return svg_to_src(buf.read())
    # The original silently returned None for any other format; fail loudly
    # instead so the caller sees the mistake.
    raise ValueError('Unsupported image_format: %r' % (image_format,))
Convert a matplotlib figure to an inline HTML image. :param matplotlib.figure.Figure figure: Figure to display. :param str image_format: png (default) or svg :param int dpi: dots-per-inch for raster graphics. :rtype: str
entailment
def Reset(self):
    """Reset Axis and set default parameters for H-bridge.

    Writes the motor driver's register setup over SPI.  Each write sends the
    same byte twice -- presumably two daisy-chained drivers share the bus
    (TODO confirm against the board schematic).
    """
    spi.SPI_write(self.CS, [0xC0, 0x60])  # reset
    # spi.SPI_write(self.CS, [0x14, 0x14])  # Stall Treshold setup
    # spi.SPI_write(self.CS, [0xFF, 0xFF])
    # spi.SPI_write(self.CS, [0x13, 0x13])  # Over Current Treshold setup
    # spi.SPI_write(self.CS, [0xFF, 0xFF])
    spi.SPI_write(self.CS, [0x15, 0xFF])  # Full Step speed
    spi.SPI_write(self.CS, [0xFF, 0xFF])
    spi.SPI_write(self.CS, [0xFF, 0xFF])
    spi.SPI_write(self.CS, [0x05, 0x05])  # ACC
    spi.SPI_write(self.CS, [0x01, 0x01])
    spi.SPI_write(self.CS, [0xF5, 0xF5])
    spi.SPI_write(self.CS, [0x06, 0x06])  # DEC
    spi.SPI_write(self.CS, [0x01, 0x01])
    spi.SPI_write(self.CS, [0xF5, 0xF5])
    spi.SPI_write(self.CS, [0x0A, 0x0A])  # KVAL_RUN
    spi.SPI_write(self.CS, [0x10, 0x10])
    spi.SPI_write(self.CS, [0x0B, 0x0B])  # KVAL_ACC
    spi.SPI_write(self.CS, [0x20, 0x20])
    spi.SPI_write(self.CS, [0x0C, 0x0C])  # KVAL_DEC
    spi.SPI_write(self.CS, [0x20, 0x20])
    spi.SPI_write(self.CS, [0x18, 0x18])  # CONFIG
    spi.SPI_write(self.CS, [0b00111000, 0b00111000])
    spi.SPI_write(self.CS, [0b00000000, 0b00000000])
Reset Axis and set default parameters for H-bridge
entailment
def MaxSpeed(self, speed):
    """Setup of maximum speed."""
    # Max Speed setup: register address followed by the value bytes.
    for word in ([0x07, 0x07], [0x00, 0x00], [speed, speed]):
        spi.SPI_write(self.CS, word)
Setup of maximum speed
entailment
def ReleaseSW(self):
    """Go away from Limit Switch.

    Repeats the release-switch command (0x92 with the direction bit, as in
    Move()) until the limit switch reads OFF, then moves 10 more units away.
    """
    while self.ReadStatusBit(2) == 1:  # is Limit Switch ON ?
        # BUG FIX: the original OR-ed the direction bit into the *list*
        # ([0x92, 0x92] | bit), which raises TypeError at runtime.  The
        # bit belongs in each command byte, matching Move().
        cmd = 0x92 | (~self.Dir & 1)
        spi.SPI_write(self.CS, [cmd, cmd])  # release SW
        while self.IsBusy():
            pass
    self.MoveWait(10)
Go away from Limit Switch
entailment
def Move(self, units):
    """Move some distance units from current position."""
    steps = units * self.SPU  # translate units to steps

    # The sign of `steps` picks the direction bit for the move command.
    if steps > 0:
        dir_bit = ~self.Dir & 1
    else:
        dir_bit = self.Dir & 1
    cmd = 0x40 | dir_bit
    spi.SPI_write(self.CS, [cmd, cmd])

    # Send the absolute step count, most significant byte first.
    steps = int(abs(steps))
    for shift in (16, 8, 0):
        byte = (steps >> shift) & 0xFF
        spi.SPI_write(self.CS, [byte, byte])
Move some distance units from current position
entailment
def bbox(coordinates, crs, outname=None, format='ESRI Shapefile', overwrite=True): """ create a bounding box vector object or shapefile from coordinates and coordinate reference system. The CRS can be in either WKT, EPSG or PROJ4 format Parameters ---------- coordinates: dict a dictionary containing numerical variables with keys `xmin`, `xmax`, `ymin` and `ymax` crs: int, str or :osgeo:class:`osr.SpatialReference` the CRS of the `coordinates`. See :func:`~spatialist.auxil.crsConvert` for options. outname: str the file to write to. If `None`, the bounding box is returned as :class:`~spatialist.vector.Vector` object format: str the output file format overwrite: bool overwrite an existing file? Returns ------- Vector or None the bounding box Vector object """ srs = crsConvert(crs, 'osr') ring = ogr.Geometry(ogr.wkbLinearRing) ring.AddPoint(coordinates['xmin'], coordinates['ymin']) ring.AddPoint(coordinates['xmin'], coordinates['ymax']) ring.AddPoint(coordinates['xmax'], coordinates['ymax']) ring.AddPoint(coordinates['xmax'], coordinates['ymin']) ring.CloseRings() geom = ogr.Geometry(ogr.wkbPolygon) geom.AddGeometry(ring) geom.FlattenTo2D() bbox = Vector(driver='Memory') bbox.addlayer('bbox', srs, geom.GetGeometryType()) bbox.addfield('area', ogr.OFTReal) bbox.addfeature(geom, fields={'area': geom.Area()}) geom = None if outname is None: return bbox else: bbox.write(outname, format, overwrite)
create a bounding box vector object or shapefile from coordinates and coordinate reference system. The CRS can be in either WKT, EPSG or PROJ4 format Parameters ---------- coordinates: dict a dictionary containing numerical variables with keys `xmin`, `xmax`, `ymin` and `ymax` crs: int, str or :osgeo:class:`osr.SpatialReference` the CRS of the `coordinates`. See :func:`~spatialist.auxil.crsConvert` for options. outname: str the file to write to. If `None`, the bounding box is returned as :class:`~spatialist.vector.Vector` object format: str the output file format overwrite: bool overwrite an existing file? Returns ------- Vector or None the bounding box Vector object
entailment
def dissolve(infile, outfile, field, layername=None): """ dissolve the polygons of a vector file by an attribute field Parameters ---------- infile: str the input vector file outfile: str the output shapefile field: str the field name to merge the polygons by layername: str the name of the output vector layer; If set to None the layername will be the basename of infile without extension Returns ------- """ with Vector(infile) as vec: srs = vec.srs feat = vec.layer[0] d = feat.GetFieldDefnRef(field) width = d.width type = d.type feat = None layername = layername if layername is not None else os.path.splitext(os.path.basename(infile))[0] # the following can be used if GDAL was compiled with the spatialite extension # not tested, might need some additional/different lines # with Vector(infile) as vec: # vec.vector.ExecuteSQL('SELECT ST_Union(geometry), {0} FROM {1} GROUP BY {0}'.format(field, vec.layername), # dialect='SQLITE') # vec.write(outfile) conn = sqlite_setup(extensions=['spatialite', 'gdal']) conn.execute('CREATE VIRTUAL TABLE merge USING VirtualOGR("{}");'.format(infile)) select = conn.execute('SELECT {0},asText(ST_Union(geometry)) as geometry FROM merge GROUP BY {0};'.format(field)) fetch = select.fetchall() with Vector(driver='Memory') as merge: merge.addlayer(layername, srs, ogr.wkbPolygon) merge.addfield(field, type=type, width=width) for i in range(len(fetch)): merge.addfeature(ogr.CreateGeometryFromWkt(fetch[i][1]), {field: fetch[i][0]}) merge.write(outfile) conn.close()
dissolve the polygons of a vector file by an attribute field Parameters ---------- infile: str the input vector file outfile: str the output shapefile field: str the field name to merge the polygons by layername: str the name of the output vector layer; If set to None the layername will be the basename of infile without extension Returns -------
entailment
def feature2vector(feature, ref, layername=None): """ create a Vector object from ogr features Parameters ---------- feature: list of :osgeo:class:`ogr.Feature` or :osgeo:class:`ogr.Feature` a single feature or a list of features ref: Vector a reference Vector object to retrieve geo information from layername: str or None the name of the output layer; retrieved from `ref` if `None` Returns ------- Vector the new Vector object """ features = feature if isinstance(feature, list) else [feature] layername = layername if layername is not None else ref.layername vec = Vector(driver='Memory') vec.addlayer(layername, ref.srs, ref.geomType) feat_def = features[0].GetDefnRef() fields = [feat_def.GetFieldDefn(x) for x in range(0, feat_def.GetFieldCount())] vec.layer.CreateFields(fields) for feat in features: vec.layer.CreateFeature(feat) vec.init_features() return vec
create a Vector object from ogr features Parameters ---------- feature: list of :osgeo:class:`ogr.Feature` or :osgeo:class:`ogr.Feature` a single feature or a list of features ref: Vector a reference Vector object to retrieve geo information from layername: str or None the name of the output layer; retrieved from `ref` if `None` Returns ------- Vector the new Vector object
entailment
def intersect(obj1, obj2): """ intersect two Vector objects Parameters ---------- obj1: Vector the first vector object; this object is reprojected to the CRS of obj2 if necessary obj2: Vector the second vector object Returns ------- Vector the intersect of obj1 and obj2 """ if not isinstance(obj1, Vector) or not isinstance(obj2, Vector): raise RuntimeError('both objects must be of type Vector') obj1 = obj1.clone() obj2 = obj2.clone() obj1.reproject(obj2.srs) ####################################################### # create basic overlap union1 = ogr.Geometry(ogr.wkbMultiPolygon) # union all the geometrical features of layer 1 for feat in obj1.layer: union1.AddGeometry(feat.GetGeometryRef()) obj1.layer.ResetReading() union1.Simplify(0) # same for layer2 union2 = ogr.Geometry(ogr.wkbMultiPolygon) for feat in obj2.layer: union2.AddGeometry(feat.GetGeometryRef()) obj2.layer.ResetReading() union2.Simplify(0) # intersection intersect_base = union1.Intersection(union2) union1 = None union2 = None ####################################################### # compute detailed per-geometry overlaps if intersect_base.GetArea() > 0: intersection = Vector(driver='Memory') intersection.addlayer('intersect', obj1.srs, ogr.wkbPolygon) fieldmap = [] for index, fielddef in enumerate([obj1.fieldDefs, obj2.fieldDefs]): for field in fielddef: name = field.GetName() i = 2 while name in intersection.fieldnames: name = '{}_{}'.format(field.GetName(), i) i += 1 fieldmap.append((index, field.GetName(), name)) intersection.addfield(name, type=field.GetType(), width=field.GetWidth()) for feature1 in obj1.layer: geom1 = feature1.GetGeometryRef() if geom1.Intersects(intersect_base): for feature2 in obj2.layer: geom2 = feature2.GetGeometryRef() # select only the intersections if geom2.Intersects(intersect_base): intersect = geom2.Intersection(geom1) fields = {} for item in fieldmap: if item[0] == 0: fields[item[2]] = feature1.GetField(item[1]) else: fields[item[2]] = feature2.GetField(item[1]) 
intersection.addfeature(intersect, fields) intersect_base = None return intersection
intersect two Vector objects Parameters ---------- obj1: Vector the first vector object; this object is reprojected to the CRS of obj2 if necessary obj2: Vector the second vector object Returns ------- Vector the intersect of obj1 and obj2
entailment
def addfeature(self, geometry, fields=None): """ add a feature to the vector object from a geometry Parameters ---------- geometry: :osgeo:class:`ogr.Geometry` the geometry to add as a feature fields: dict or None the field names and values to assign to the new feature Returns ------- """ feature = ogr.Feature(self.layerdef) feature.SetGeometry(geometry) if fields is not None: for fieldname, value in fields.items(): if fieldname not in self.fieldnames: raise IOError('field "{}" is missing'.format(fieldname)) try: feature.SetField(fieldname, value) except NotImplementedError as e: fieldindex = self.fieldnames.index(fieldname) fieldtype = feature.GetFieldDefnRef(fieldindex).GetTypeName() message = str(e) + '\ntrying to set field {} (type {}) to value {} (type {})' message = message.format(fieldname, fieldtype, value, type(value)) raise(NotImplementedError(message)) self.layer.CreateFeature(feature) feature = None self.init_features()
add a feature to the vector object from a geometry Parameters ---------- geometry: :osgeo:class:`ogr.Geometry` the geometry to add as a feature fields: dict or None the field names and values to assign to the new feature Returns -------
entailment
def addfield(self, name, type, width=10): """ add a field to the vector layer Parameters ---------- name: str the field name type: int the OGR Field Type (OFT), e.g. ogr.OFTString. See `Module ogr <https://gdal.org/python/osgeo.ogr-module.html>`_. width: int the width of the new field (only for ogr.OFTString fields) Returns ------- """ fieldDefn = ogr.FieldDefn(name, type) if type == ogr.OFTString: fieldDefn.SetWidth(width) self.layer.CreateField(fieldDefn)
add a field to the vector layer Parameters ---------- name: str the field name type: int the OGR Field Type (OFT), e.g. ogr.OFTString. See `Module ogr <https://gdal.org/python/osgeo.ogr-module.html>`_. width: int the width of the new field (only for ogr.OFTString fields) Returns -------
entailment
def addlayer(self, name, srs, geomType): """ add a layer to the vector layer Parameters ---------- name: str the layer name srs: int, str or :osgeo:class:`osr.SpatialReference` the spatial reference system. See :func:`spatialist.auxil.crsConvert` for options. geomType: int an OGR well-known binary data type. See `Module ogr <https://gdal.org/python/osgeo.ogr-module.html>`_. Returns ------- """ self.vector.CreateLayer(name, srs, geomType) self.init_layer()
add a layer to the vector layer Parameters ---------- name: str the layer name srs: int, str or :osgeo:class:`osr.SpatialReference` the spatial reference system. See :func:`spatialist.auxil.crsConvert` for options. geomType: int an OGR well-known binary data type. See `Module ogr <https://gdal.org/python/osgeo.ogr-module.html>`_. Returns -------
entailment
def addvector(self, vec): """ add a vector object to the layer of the current Vector object Parameters ---------- vec: Vector the vector object to add merge: bool merge overlapping polygons? Returns ------- """ vec.layer.ResetReading() for feature in vec.layer: self.layer.CreateFeature(feature) self.init_features() vec.layer.ResetReading()
add a vector object to the layer of the current Vector object Parameters ---------- vec: Vector the vector object to add merge: bool merge overlapping polygons? Returns -------
entailment
def bbox(self, outname=None, format='ESRI Shapefile', overwrite=True): """ create a bounding box from the extent of the Vector object Parameters ---------- outname: str or None the name of the vector file to be written; if None, a Vector object is returned format: str the name of the file format to write overwrite: bool overwrite an already existing file? Returns ------- Vector or None if outname is None, the bounding box Vector object """ if outname is None: return bbox(self.extent, self.srs) else: bbox(self.extent, self.srs, outname=outname, format=format, overwrite=overwrite)
create a bounding box from the extent of the Vector object Parameters ---------- outname: str or None the name of the vector file to be written; if None, a Vector object is returned format: str the name of the file format to write overwrite: bool overwrite an already existing file? Returns ------- Vector or None if outname is None, the bounding box Vector object
entailment
def convert2wkt(self, set3D=True): """ export the geometry of each feature as a wkt string Parameters ---------- set3D: bool keep the third (height) dimension? Returns ------- """ features = self.getfeatures() for feature in features: try: feature.geometry().Set3D(set3D) except AttributeError: dim = 3 if set3D else 2 feature.geometry().SetCoordinateDimension(dim) return [feature.geometry().ExportToWkt() for feature in features]
export the geometry of each feature as a wkt string Parameters ---------- set3D: bool keep the third (height) dimension? Returns -------
entailment
def getFeatureByAttribute(self, fieldname, attribute): """ get features by field attribute Parameters ---------- fieldname: str the name of the queried field attribute: int or str the field value of interest Returns ------- list of :osgeo:class:`ogr.Feature` or :osgeo:class:`ogr.Feature` the feature(s) matching the search query """ attr = attribute.strip() if isinstance(attribute, str) else attribute if fieldname not in self.fieldnames: raise KeyError('invalid field name') out = [] self.layer.ResetReading() for feature in self.layer: field = feature.GetField(fieldname) field = field.strip() if isinstance(field, str) else field if field == attr: out.append(feature.Clone()) self.layer.ResetReading() if len(out) == 0: return None elif len(out) == 1: return out[0] else: return out
get features by field attribute Parameters ---------- fieldname: str the name of the queried field attribute: int or str the field value of interest Returns ------- list of :osgeo:class:`ogr.Feature` or :osgeo:class:`ogr.Feature` the feature(s) matching the search query
entailment
def getFeatureByIndex(self, index): """ get features by numerical (positional) index Parameters ---------- index: int the queried index Returns ------- :osgeo:class:`ogr.Feature` the requested feature """ feature = self.layer[index] if feature is None: feature = self.getfeatures()[index] return feature
get features by numerical (positional) index Parameters ---------- index: int the queried index Returns ------- :osgeo:class:`ogr.Feature` the requested feature
entailment
def init_layer(self): """ initialize a layer object Returns ------- """ self.layer = self.vector.GetLayer() self.__features = [None] * self.nfeatures
initialize a layer object Returns -------
entailment
def load(self): """ load all feature into memory Returns ------- """ self.layer.ResetReading() for i in range(self.nfeatures): if self.__features[i] is None: self.__features[i] = self.layer[i]
load all feature into memory Returns -------
entailment
def reproject(self, projection): """ in-memory reprojection Parameters ---------- projection: int, str or :osgeo:class:`osr.SpatialReference` the target CRS. See :func:`spatialist.auxil.crsConvert`. Returns ------- """ srs_out = crsConvert(projection, 'osr') if self.srs.IsSame(srs_out) == 0: # create the CoordinateTransformation coordTrans = osr.CoordinateTransformation(self.srs, srs_out) layername = self.layername geomType = self.geomType features = self.getfeatures() feat_def = features[0].GetDefnRef() fields = [feat_def.GetFieldDefn(x) for x in range(0, feat_def.GetFieldCount())] self.__init__() self.addlayer(layername, srs_out, geomType) self.layer.CreateFields(fields) for feature in features: geom = feature.GetGeometryRef() geom.Transform(coordTrans) newfeature = feature.Clone() newfeature.SetGeometry(geom) self.layer.CreateFeature(newfeature) newfeature = None self.init_features()
in-memory reprojection Parameters ---------- projection: int, str or :osgeo:class:`osr.SpatialReference` the target CRS. See :func:`spatialist.auxil.crsConvert`. Returns -------
entailment
def setCRS(self, crs): """ directly reset the spatial reference system of the vector object. This is not going to reproject the Vector object, see :meth:`reproject` instead. Parameters ---------- crs: int, str or :osgeo:class:`osr.SpatialReference` the input CRS Returns ------- Example ------- >>> site = Vector('shape.shp') >>> site.setCRS('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs ') """ # try to convert the input crs to osr.SpatialReference srs_out = crsConvert(crs, 'osr') # save all relevant info from the existing vector object layername = self.layername geomType = self.geomType layer_definition = ogr.Feature(self.layer.GetLayerDefn()) fields = [layer_definition.GetFieldDefnRef(x) for x in range(layer_definition.GetFieldCount())] features = self.getfeatures() # initialize a new vector object and create a layer self.__init__() self.addlayer(layername, srs_out, geomType) # add the fields to new layer self.layer.CreateFields(fields) # add the features to the newly created layer for feat in features: self.layer.CreateFeature(feat) self.init_features()
directly reset the spatial reference system of the vector object. This is not going to reproject the Vector object, see :meth:`reproject` instead. Parameters ---------- crs: int, str or :osgeo:class:`osr.SpatialReference` the input CRS Returns ------- Example ------- >>> site = Vector('shape.shp') >>> site.setCRS('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs ')
entailment
def write(self, outfile, format='ESRI Shapefile', overwrite=True): """ write the Vector object to a file Parameters ---------- outfile: the name of the file to write format: str the output file format overwrite: bool overwrite an already existing file? Returns ------- """ (outfilepath, outfilename) = os.path.split(outfile) basename = os.path.splitext(outfilename)[0] driver = ogr.GetDriverByName(format) if os.path.exists(outfile): if overwrite: driver.DeleteDataSource(outfile) else: raise RuntimeError('target file already exists') outdataset = driver.CreateDataSource(outfile) outlayer = outdataset.CreateLayer(self.layername, geom_type=self.geomType) outlayerdef = outlayer.GetLayerDefn() for fieldDef in self.fieldDefs: outlayer.CreateField(fieldDef) self.layer.ResetReading() for feature in self.layer: outFeature = ogr.Feature(outlayerdef) outFeature.SetGeometry(feature.GetGeometryRef()) for name in self.fieldnames: outFeature.SetField(name, feature.GetField(name)) # add the feature to the shapefile outlayer.CreateFeature(outFeature) outFeature = None self.layer.ResetReading() if format == 'ESRI Shapefile': srs_out = self.srs.Clone() srs_out.MorphToESRI() with open(os.path.join(outfilepath, basename + '.prj'), 'w') as prj: prj.write(srs_out.ExportToWkt()) outdataset = None
write the Vector object to a file Parameters ---------- outfile: the name of the file to write format: str the output file format overwrite: bool overwrite an already existing file? Returns -------
entailment
def write_byte_data(self, address, register, value): """ SMBus Read Byte: i2c_smbus_read_byte_data() ============================================ This reads a single byte from a device, from a designated register. The register is specified through the Comm byte. S Addr Wr [A] Comm [A] S Addr Rd [A] [Data] NA P Functionality flag: I2C_FUNC_SMBUS_READ_BYTE_DATA """ return self.smbus.write_byte_data(address, register, value)
SMBus Read Byte: i2c_smbus_read_byte_data() ============================================ This reads a single byte from a device, from a designated register. The register is specified through the Comm byte. S Addr Wr [A] Comm [A] S Addr Rd [A] [Data] NA P Functionality flag: I2C_FUNC_SMBUS_READ_BYTE_DATA
entailment
def write_word_data(self, address, register, value): """ SMBus Write Word: i2c_smbus_write_word_data() ============================================== This is the opposite of the Read Word operation. 16 bits of data is written to a device, to the designated register that is specified through the Comm byte. S Addr Wr [A] Comm [A] DataLow [A] DataHigh [A] P Functionality flag: I2C_FUNC_SMBUS_WRITE_WORD_DATA Note the convenience function i2c_smbus_write_word_swapped is available for writes where the two data bytes are the other way around (not SMBus compliant, but very popular.) """ return self.smbus.write_word_data(address, register, value)
SMBus Write Word: i2c_smbus_write_word_data() ============================================== This is the opposite of the Read Word operation. 16 bits of data is written to a device, to the designated register that is specified through the Comm byte. S Addr Wr [A] Comm [A] DataLow [A] DataHigh [A] P Functionality flag: I2C_FUNC_SMBUS_WRITE_WORD_DATA Note the convenience function i2c_smbus_write_word_swapped is available for writes where the two data bytes are the other way around (not SMBus compliant, but very popular.)
entailment
def write_block_data(self, address, register, value): """ SMBus Block Write: i2c_smbus_write_block_data() ================================================ The opposite of the Block Read command, this writes up to 32 bytes to a device, to a designated register that is specified through the Comm byte. The amount of data is specified in the Count byte. S Addr Wr [A] Comm [A] Count [A] Data [A] Data [A] ... [A] Data [A] P Functionality flag: I2C_FUNC_SMBUS_WRITE_BLOCK_DATA """ return self.smbus.write_block_data(address, register, value)
SMBus Block Write: i2c_smbus_write_block_data() ================================================ The opposite of the Block Read command, this writes up to 32 bytes to a device, to a designated register that is specified through the Comm byte. The amount of data is specified in the Count byte. S Addr Wr [A] Comm [A] Count [A] Data [A] Data [A] ... [A] Data [A] P Functionality flag: I2C_FUNC_SMBUS_WRITE_BLOCK_DATA
entailment
def block_process_call(self, address, register, value): """ SMBus Block Write - Block Read Process Call =========================================== SMBus Block Write - Block Read Process Call was introduced in Revision 2.0 of the specification. This command selects a device register (through the Comm byte), sends 1 to 31 bytes of data to it, and reads 1 to 31 bytes of data in return. S Addr Wr [A] Comm [A] Count [A] Data [A] ... S Addr Rd [A] [Count] A [Data] ... A P Functionality flag: I2C_FUNC_SMBUS_BLOCK_PROC_CALL """ return self.smbus.block_process_call(address, register, value)
SMBus Block Write - Block Read Process Call =========================================== SMBus Block Write - Block Read Process Call was introduced in Revision 2.0 of the specification. This command selects a device register (through the Comm byte), sends 1 to 31 bytes of data to it, and reads 1 to 31 bytes of data in return. S Addr Wr [A] Comm [A] Count [A] Data [A] ... S Addr Rd [A] [Count] A [Data] ... A P Functionality flag: I2C_FUNC_SMBUS_BLOCK_PROC_CALL
entailment
def write_i2c_block_data(self, address, register, value): """ I2C block transactions do not limit the number of bytes transferred but the SMBus layer places a limit of 32 bytes. I2C Block Write: i2c_smbus_write_i2c_block_data() ================================================== The opposite of the Block Read command, this writes bytes to a device, to a designated register that is specified through the Comm byte. Note that command lengths of 0, 2, or more bytes are seupported as they are indistinguishable from data. S Addr Wr [A] Comm [A] Data [A] Data [A] ... [A] Data [A] P Functionality flag: I2C_FUNC_SMBUS_WRITE_I2C_BLOCK """ return self.smbus.write_i2c_block_data(address, register, value)
I2C block transactions do not limit the number of bytes transferred but the SMBus layer places a limit of 32 bytes. I2C Block Write: i2c_smbus_write_i2c_block_data() ================================================== The opposite of the Block Read command, this writes bytes to a device, to a designated register that is specified through the Comm byte. Note that command lengths of 0, 2, or more bytes are seupported as they are indistinguishable from data. S Addr Wr [A] Comm [A] Data [A] Data [A] ... [A] Data [A] P Functionality flag: I2C_FUNC_SMBUS_WRITE_I2C_BLOCK
entailment
def read_i2c_block_data(self, address, register, length): """ I2C block transactions do not limit the number of bytes transferred but the SMBus layer places a limit of 32 bytes. I2C Block Read: i2c_smbus_read_i2c_block_data() ================================================ This command reads a block of bytes from a device, from a designated register that is specified through the Comm byte. S Addr Wr [A] Comm [A] S Addr Rd [A] [Data] A [Data] A ... A [Data] NA P Functionality flag: I2C_FUNC_SMBUS_READ_I2C_BLOCK """ return self.smbus.read_i2c_block_data(address, register, length)
I2C block transactions do not limit the number of bytes transferred but the SMBus layer places a limit of 32 bytes. I2C Block Read: i2c_smbus_read_i2c_block_data() ================================================ This command reads a block of bytes from a device, from a designated register that is specified through the Comm byte. S Addr Wr [A] Comm [A] S Addr Rd [A] [Data] A [Data] A ... A [Data] NA P Functionality flag: I2C_FUNC_SMBUS_READ_I2C_BLOCK
entailment
def _init_zmq(self, port_publish, port_subscribe): """Initialize zmq messaging. Listen on sub_port. This port might at some point receive the message to start publishing on a certain port, but until then, no publishing. """ log.debug('kernel {} publishing on port {}' ''.format(self.analysis.id_, port_publish)) self.zmq_publish = zmq.Context().socket(zmq.PUB) self.zmq_publish.connect('tcp://127.0.0.1:{}'.format(port_publish)) log.debug('kernel {} subscribed on port {}' ''.format(self.analysis.id_, port_subscribe)) self.zmq_sub_ctx = zmq.Context() self.zmq_sub = self.zmq_sub_ctx.socket(zmq.SUB) self.zmq_sub.setsockopt(zmq.SUBSCRIBE, self.analysis.id_.encode('utf-8')) self.zmq_sub.connect('tcp://127.0.0.1:{}'.format(port_subscribe)) self.zmq_stream_sub = zmq.eventloop.zmqstream.ZMQStream(self.zmq_sub) self.zmq_stream_sub.on_recv(self.zmq_listener) # send zmq handshakes until a zmq ack is received self.zmq_ack = False self.send_handshake()
Initialize zmq messaging. Listen on sub_port. This port might at some point receive the message to start publishing on a certain port, but until then, no publishing.
entailment
def run_process(self, analysis, action_name, message='__nomessagetoken__'): """Executes an process in the analysis with the given message. It also handles the start and stop signals in case an process_id is given. This method is similar to the method in databench.Analysis. """ # detect process_id process_id = None if isinstance(message, dict) and '__process_id' in message: process_id = message['__process_id'] del message['__process_id'] if process_id: analysis.emit('__process', {'id': process_id, 'status': 'start'}) fns = [ functools.partial(class_fn, analysis) for class_fn in (analysis._action_handlers.get(action_name, []) + analysis._action_handlers.get('*', [])) ] if fns: args, kwargs = [], {} # Check whether this is a list (positional arguments) # or a dictionary (keyword arguments). if isinstance(message, list): args = message elif isinstance(message, dict): kwargs = message elif message == '__nomessagetoken__': pass else: args = [message] for fn in fns: log.debug('kernel calling {}'.format(fn)) fn(*args, **kwargs) else: # default is to store action name and data as key and value # in analysis.data # # TODO(sven): deprecate this in favor of set_state() in Analysis # with new Datastore value = message if message != '__nomessagetoken__' else None if hasattr(analysis.data, 'set_state'): # TODO(sven): add deprecation warning here? analysis.data.set_state({action_name: value}) else: # TODO(sven): add deprecation warning here? analysis.data[action_name] = value log.debug('kernel done {}'.format(action_name)) if process_id: analysis.emit('__process', {'id': process_id, 'status': 'end'}) if action_name == 'disconnected': log.debug('kernel {} shutting down'.format(analysis.id_)) self.zmq_publish.close() self.zmq_stream_sub.close() self.zmq_sub.close() self.zmq_sub_ctx.destroy()
Executes an process in the analysis with the given message. It also handles the start and stop signals in case an process_id is given. This method is similar to the method in databench.Analysis.
entailment
def event_loop(self): """Event loop.""" try: zmq.eventloop.ioloop.IOLoop.current().start() except KeyboardInterrupt: zmq.eventloop.ioloop.IOLoop.current().stop()
Event loop.
entailment
def emit(self, signal, message, analysis_id): """Emit signal to main. Args: signal: Name of the signal to be emitted. message: Message to be sent. analysis_id: Identifies the instance of this analysis. """ log.debug('kernel {} zmq send ({}): {}' ''.format(analysis_id, signal, message)) self.zmq_publish.send(json.dumps({ 'analysis_id': analysis_id, 'frame': {'signal': signal, 'load': message}, }, default=json_encoder_default).encode('utf-8'))
Emit signal to main. Args: signal: Name of the signal to be emitted. message: Message to be sent. analysis_id: Identifies the instance of this analysis.
entailment
def set_ports(self, port0 = 0x00, port1 = 0x00): 'Writes specified value to the pins defined as output by config_ports() method. Writing to input pins has no effect.' self.bus.write_byte_data(self.address, self.OUTPUT_PORT0, port0) self.bus.write_byte_data(self.address, self.OUTPUT_PORT1, port1) return True
Writes specified value to the pins defined as output by config_ports() method. Writing to input pins has no effect.
entailment
def get_ports(self): 'Reads logical values at pins.' return (self.bus.read_byte_data(self.address, self.STATUS_PORT0), self.bus.read_byte_data(self.address, self.STATUS_PORT1));
Reads logical values at pins.
entailment
def get_config(self): 'Reads logical values at pins.' return (self.bus.read_byte_data(self.address, self.CONTROL_PORT0), self.bus.read_byte_data(self.address, self.CONTROL_PORT1));
Reads logical values at pins.
entailment
def set_pullups(self, port0 = 0x00, port1 = 0x00): 'Sets INPUT (1) or OUTPUT (0) direction on pins. Inversion setting is applicable for input pins 1-inverted 0-noninverted input polarity.' self.bus.write_byte_data(self.address, self.PULLUP_PORT0, port0) self.bus.write_byte_data(self.address, self.PULLUP_PORT1, port1) return
Sets INPUT (1) or OUTPUT (0) direction on pins. Inversion setting is applicable for input pins 1-inverted 0-noninverted input polarity.
entailment
def set_ports(self, port0 = 0x00, port1 = 0x00): 'Writes specified value to the pins defined as output by method. Writing to input pins has no effect.' self.bus.write_byte_data(self.address, self.CONTROL_PORT0, port0) self.bus.write_byte_data(self.address, self.CONTROL_PORT0, port1) return
Writes specified value to the pins defined as output by method. Writing to input pins has no effect.
entailment
def get_gtf_db(gtf, in_memory=False): """ create a gffutils DB """ db_file = gtf + '.db' if gtf.endswith('.gz'): db_file = gtf[:-3] + '.db' if file_exists(db_file): return gffutils.FeatureDB(db_file) db_file = ':memory:' if in_memory else db_file if in_memory or not file_exists(db_file): debug('GTF database does not exist, creating...') infer_extent = guess_infer_extent(gtf) db = gffutils.create_db(gtf, dbfn=db_file, infer_gene_extent=infer_extent) return db else: return gffutils.FeatureDB(db_file)
create a gffutils DB
entailment
def gtf_to_bed(gtf, alt_out_dir=None): """ create a BED file of transcript-level features with attached gene name or gene ids """ out_file = os.path.splitext(gtf)[0] + '.bed' if file_exists(out_file): return out_file if not os.access(os.path.dirname(out_file), os.W_OK | os.X_OK): if not alt_out_dir: raise IOError('Cannot write transcript BED output file %s' % out_file) else: out_file = os.path.join(alt_out_dir, os.path.basename(out_file)) with open(out_file, "w") as out_handle: db = get_gtf_db(gtf) for feature in db.features_of_type('transcript', order_by=("seqid", "start", "end")): chrom = feature.chrom start = feature.start end = feature.end attributes = feature.attributes.keys() strand = feature.strand name = (feature['gene_name'][0] if 'gene_name' in attributes else feature['gene_id'][0]) line = "\t".join([str(x) for x in [chrom, start, end, name, ".", strand]]) out_handle.write(line + "\n") return out_file
create a BED file of transcript-level features with attached gene name or gene ids
entailment
def tx2genefile(gtf, out_file=None):
    """Write a ``transcript_id,gene_id`` mapping CSV.

    Reuses an installed ``tx2gene.csv`` next to the GTF when present,
    otherwise derives the mapping from ``gtf`` and writes it to
    ``out_file``.  Returns the path of the mapping file.
    """
    installed_tx2gene = os.path.join(os.path.dirname(gtf), "tx2gene.csv")
    if file_exists(installed_tx2gene):
        return installed_tx2gene
    if file_exists(out_file):
        return out_file
    with file_transaction(out_file) as tx_out_file:
        with open(tx_out_file, "w") as out_handle:
            for tx_id, gene_id in transcript_to_gene(gtf).items():
                out_handle.write(",".join([tx_id, gene_id]) + "\n")
    return out_file
write out a file of transcript->gene mappings. use the installed tx2gene.csv if it exists, else write a new one out
entailment
def transcript_to_gene(gtf):
    """Return a dict mapping each transcript_id in ``gtf`` to its gene_id."""
    # Missing attributes map to None, mirroring dict.get on the attribute
    # lists; last occurrence wins, as with repeated assignment.
    return {
        feature.attributes.get('transcript_id', [None])[0]:
            feature.attributes.get('gene_id', [None])[0]
        for feature in complete_features(get_gtf_db(gtf))
    }
return a dictionary keyed by transcript_id of the associated gene_id
entailment
def set_freq(self, fout, freq):
    """Program the synthesizer to a new output frequency.

    :param fout: the real current output frequency
    :param freq: the requested new output frequency
    """
    hs_dividers = (4, 5, 6, 7, 9, 11)
    n1_dividers = (1,) + tuple(range(2, 129, 2))
    fdco_min = 5670.0              # start the search from the DCO maximum
    hsdiv = self.get_hs_div()      # current dividers (fallback values)
    n1div = self.get_n1_div()
    if abs((freq - fout) * 1e6 / fout) > 3500:
        # Large change of frequency: recompute the dividers from scratch.
        fdco = fout * hsdiv * n1div          # current DCO frequency
        fxtal = fdco / self.get_rfreq()      # crystal ref; ~114.285 expected
        # Pick the divider pair yielding the lowest valid DCO frequency
        # (minimal power consumption).
        for hs_cand in hs_dividers:
            for n1_cand in n1_dividers:
                fdco_new = freq * hs_cand * n1_cand
                if 4850 <= fdco_new <= 5670 and fdco_new <= fdco_min:
                    fdco_min = fdco_new
                    hsdiv = hs_cand
                    n1div = n1_cand
        rfreq = fdco_min / fxtal
        self.freeze_dco()          # write the new divider registers
        self.set_hs_div(hsdiv)
        self.set_n1_div(n1div)
        self.set_rfreq(rfreq)
        self.unfreeze_dco()
        self.new_freq()
    else:
        # Small change of frequency: only scale RFREQ.
        rfreq = self.get_rfreq() * (freq / fout)
        self.freeze_m()
        self.set_rfreq(rfreq)
        self.unfreeze_m()
Sets new output frequency, required parameters are real current frequency at output and new required frequency.
entailment
def _get_file_alignment_for_new_binary_file(self, file: File) -> int:
    """Detect the alignment requirement of a binary file that starts with a
    new-style ``nn::util::BinaryFileHeader``; returns 0 when the file does
    not match that layout."""
    data = file.data
    if len(data) <= 0x20:
        return 0
    bom = data[0xc:0xe]
    if bom not in (b'\xff\xfe', b'\xfe\xff'):
        return 0
    endian = _get_unpack_endian_character(bom == b'\xfe\xff')
    declared_size: int = struct.unpack_from(endian + 'I', data, 0x1c)[0]
    # The header's file-size field must match the actual size.
    if len(data) != declared_size:
        return 0
    # Alignment is stored as a power-of-two exponent at offset 0xe.
    return 1 << data[0xe]
Detects alignment requirements for binary files with new nn::util::BinaryFileHeader.
entailment
def check_folders(name):
    """Interactively confirm the target location; nothing is written to disk.

    :return: False as soon as the user declines any prompt, else True.
    """
    def confirmed(question):
        # Only an explicit 'y' counts as confirmation.
        return input(question) == 'y'

    if os.getcwd().endswith('analyses'):
        if not confirmed('You are in an analyses folder. This will create '
                         'another analyses folder inside this one. Do '
                         'you want to continue? (y/N)'):
            return False
    if not os.path.exists(os.path.join(os.getcwd(), 'analyses')):
        if not confirmed('This is the first analysis here. Do '
                         'you want to continue? (y/N)'):
            return False
    if os.path.exists(os.path.join(os.getcwd(), 'analyses', name)):
        if not confirmed('An analysis with this name exists already. Do '
                         'you want to continue? (y/N)'):
            return False
    return True
Only checks and asks questions. Nothing is written to disk.
entailment
def create_analyses(name, kernel=None):
    """Create the top-level ``analyses`` package for a new analysis.

    Ensures ``analyses/``, its ``__init__.py`` and ``index.yaml`` exist,
    and (when ``kernel`` is None) appends an entry for ``name`` to the
    index.

    :param name: name of the new analysis
    :param kernel: if not None, no index entry is appended
    """
    analyses_dir = os.path.join(os.getcwd(), 'analyses')
    # Fixed: use os.makedirs instead of shelling out to ``mkdir`` --
    # portable, no shell dependency, and fails loudly on real errors.
    if not os.path.exists(analyses_dir):
        os.makedirs(analyses_dir)

    # __init__.py (empty package marker)
    init_path = os.path.join(analyses_dir, '__init__.py')
    if not os.path.exists(init_path):
        with open(init_path, 'w'):
            pass

    # index.yaml with a default header
    index_path = os.path.join(analyses_dir, 'index.yaml')
    if not os.path.exists(index_path):
        with open(index_path, 'w') as f:
            f.write('title: Analyses\n')
            f.write('description: A short description.\n')
            f.write('version: 0.1.0\n')
            f.write('\n')
            f.write('analyses:\n')

    if kernel is None:
        # NOTE(review): the leading whitespace of these YAML lines was lost
        # in the original formatting; restored to standard 2-space nesting.
        with open(index_path, 'a') as f:
            f.write('  # automatically inserted by scaffold-databench\n')
            f.write('  - name: {}\n'.format(name))
            f.write('    title: {}\n'.format(name.title()))
            f.write('    description: A new analysis.\n')
            f.write('    watch:\n')
            f.write('      - {}/*.js\n'.format(name))
            f.write('      - {}/*.html\n'.format(name))
Create an analysis with given name and suffix. If it does not exist already, it creates the top level analyses folder and it's __init__.py and index.yaml file.
entailment
def create_analysis(name, kernel, src_dir, scaffold_name):
    """Create the analysis folder and populate it from the scaffold files."""
    folder = os.path.join(os.getcwd(), 'analyses', name)
    if os.path.exists(folder):
        log.warning('Analysis folder {} already exists.'.format(folder))
    else:
        os.makedirs(folder)

    # Copy scaffold files, skipping caches and compiled artifacts.
    for entry in os.listdir(src_dir):
        if entry == '__pycache__' or entry.endswith('.pyc'):
            continue
        copy_scaffold_file(os.path.join(src_dir, entry),
                           os.path.join(folder, entry),
                           name, scaffold_name)
Create analysis files.
entailment
def crsConvert(crsIn, crsOut):
    """
    convert between different types of spatial references

    Parameters
    ----------
    crsIn: int, str or :osgeo:class:`osr.SpatialReference`
        the input CRS
    crsOut: {'wkt', 'proj4', 'epsg', 'osr', 'opengis' or 'prettyWkt'}
        the output CRS type

    Returns
    -------
    int, str or :osgeo:class:`osr.SpatialReference`
        the output CRS

    Examples
    --------
    convert an integer EPSG code to PROJ4:

    >>> crsConvert(4326, 'proj4')
    '+proj=longlat +datum=WGS84 +no_defs '

    convert a PROJ4 string to an opengis URL:

    >>> crsConvert('+proj=longlat +datum=WGS84 +no_defs ', 'opengis')
    'http://www.opengis.net/def/crs/EPSG/0/4326'

    convert the opengis URL back to EPSG:

    >>> crsConvert('http://www.opengis.net/def/crs/EPSG/0/4326', 'epsg')
    4326

    convert an EPSG compound CRS (WGS84 horizontal + EGM96 vertical)

    >>> crsConvert('EPSG:4326+5773', 'proj4')
    '+proj=longlat +datum=WGS84 +geoidgrids=egm96_15.gtx +vunits=m +no_defs '
    """
    if isinstance(crsIn, osr.SpatialReference):
        srs = crsIn.Clone()
    else:
        srs = osr.SpatialReference()
        if isinstance(crsIn, int):
            crsIn = 'EPSG:{}'.format(crsIn)
        if isinstance(crsIn, str):
            try:
                srs.SetFromUserInput(crsIn)
            except RuntimeError:
                raise TypeError('crsIn not recognized; must be of type WKT, PROJ4 or EPSG')
        else:
            raise TypeError('crsIn must be of type int, str or osr.SpatialReference')
    if crsOut == 'wkt':
        return srs.ExportToWkt()
    elif crsOut == 'prettyWkt':
        return srs.ExportToPrettyWkt()
    elif crsOut == 'proj4':
        return srs.ExportToProj4()
    elif crsOut == 'epsg':
        # NOTE(review): AutoIdentifyEPSG can fail to find a code, in which
        # case GetAuthorityCode returns None and int() raises -- confirm
        # whether callers rely on that behaviour.
        srs.AutoIdentifyEPSG()
        return int(srs.GetAuthorityCode(None))
    elif crsOut == 'opengis':
        srs.AutoIdentifyEPSG()
        return 'http://www.opengis.net/def/crs/EPSG/0/{}'.format(srs.GetAuthorityCode(None))
    elif crsOut == 'osr':
        return srs
    else:
        # Fixed: the message previously omitted the valid 'prettyWkt' and
        # 'osr' options.
        raise ValueError('crsOut not recognized; '
                         'must be either wkt, prettyWkt, proj4, epsg, opengis or osr')
convert between different types of spatial references Parameters ---------- crsIn: int, str or :osgeo:class:`osr.SpatialReference` the input CRS crsOut: {'wkt', 'proj4', 'epsg', 'osr', 'opengis' or 'prettyWkt'} the output CRS type Returns ------- int, str or :osgeo:class:`osr.SpatialReference` the output CRS Examples -------- convert an integer EPSG code to PROJ4: >>> crsConvert(4326, 'proj4') '+proj=longlat +datum=WGS84 +no_defs ' convert a PROJ4 string to an opengis URL: >>> crsConvert('+proj=longlat +datum=WGS84 +no_defs ', 'opengis') 'http://www.opengis.net/def/crs/EPSG/0/4326' convert the opengis URL back to EPSG: >>> crsConvert('http://www.opengis.net/def/crs/EPSG/0/4326', 'epsg') 4326 convert an EPSG compound CRS (WGS84 horizontal + EGM96 vertical) >>> crsConvert('EPSG:4326+5773', 'proj4') '+proj=longlat +datum=WGS84 +geoidgrids=egm96_15.gtx +vunits=m +no_defs '
entailment
def haversine(lat1, lon1, lat2, lon2):
    """Great-circle distance in meters between two points given in degrees.

    Parameters
    ----------
    lat1, lon1: int or float
        latitude and longitude of point 1
    lat2, lon2: int or float
        latitude and longitude of point 2

    Returns
    -------
    float
        the distance between point 1 and point 2 in meters
    """
    earth_radius = 6371000
    phi1, lam1, phi2, lam2 = (math.radians(v) for v in (lat1, lon1, lat2, lon2))
    half_dphi_sin = math.sin((phi2 - phi1) / 2)
    half_dlam_sin = math.sin((lam2 - lam1) / 2)
    a = half_dphi_sin ** 2 + math.cos(phi1) * math.cos(phi2) * half_dlam_sin ** 2
    c = 2 * math.asin(math.sqrt(a))
    return earth_radius * c
compute the distance in meters between two points in latlon Parameters ---------- lat1: int or float the latitude of point 1 lon1: int or float the longitude of point 1 lat2: int or float the latitude of point 2 lon2: int or float the longitude of point 2 Returns ------- float the distance between point 1 and point 2 in meters
entailment
def gdalwarp(src, dst, options):
    """Thin wrapper around :osgeo:func:`gdal.Warp`.

    Re-raises any RuntimeError with src/dst/options appended for context.

    Parameters
    ----------
    src: str, :osgeo:class:`ogr.DataSource` or :osgeo:class:`gdal.Dataset`
        the input data set
    dst: str
        the output data set
    options: dict
        additional parameters passed to gdal.Warp
    """
    try:
        out = gdal.Warp(dst, src, options=gdal.WarpOptions(**options))
    except RuntimeError as e:
        message = '{}:\n src: {}\n dst: {}\n options: {}'.format(
            str(e), src, dst, options)
        raise RuntimeError(message)
    # Dereference the dataset so GDAL flushes and closes it.
    out = None
a simple wrapper for :osgeo:func:`gdal.Warp` Parameters ---------- src: str, :osgeo:class:`ogr.DataSource` or :osgeo:class:`gdal.Dataset` the input data set dst: str the output data set options: dict additional parameters passed to gdal.Warp; see :osgeo:func:`gdal.WarpOptions` Returns -------
entailment
def gdalbuildvrt(src, dst, options=None, void=True):
    """Thin wrapper around :osgeo:func:`gdal.BuildVRT`.

    Parameters
    ----------
    src: str, list, :osgeo:class:`ogr.DataSource` or :osgeo:class:`gdal.Dataset`
        the input data set(s)
    dst: str
        the output data set
    options: dict
        additional parameters passed to gdal.BuildVRT
    void: bool
        just write the result? If False, the dataset object is returned.
    """
    if options is None:
        options = {}
    # Subsetted VRTs are broken before GDAL 2.4.0; warn loudly.
    if 'outputBounds' in options and gdal.__version__ < '2.4.0':
        warnings.warn('\ncreating VRT files with subsetted extent is very likely to cause problems. '
                      'Please use GDAL version >= 2.4.0, which fixed the problem.\n'
                      'see here for a description of the problem:\n'
                      '  https://gis.stackexchange.com/questions/314333/'
                      'sampling-error-using-gdalwarp-on-a-subsetted-vrt\n'
                      'and here for the release note of GDAL 2.4.0:\n'
                      '  https://trac.osgeo.org/gdal/wiki/Release/2.4.0-News')
    vrt = gdal.BuildVRT(dst, src, options=gdal.BuildVRTOptions(**options))
    if not void:
        return vrt
    vrt = None
a simple wrapper for :osgeo:func:`gdal.BuildVRT` Parameters ---------- src: str, list, :osgeo:class:`ogr.DataSource` or :osgeo:class:`gdal.Dataset` the input data set(s) dst: str the output data set options: dict additional parameters passed to gdal.BuildVRT; see :osgeo:func:`gdal.BuildVRTOptions` void: bool just write the results and don't return anything? If not, the spatial object is returned Returns -------
entailment
def gdal_translate(src, dst, options):
    """Thin wrapper around gdal.Translate.

    Parameters
    ----------
    src: str, :osgeo:class:`ogr.DataSource` or :osgeo:class:`gdal.Dataset`
        the input data set
    dst: str
        the output data set
    options: dict
        additional parameters passed to gdal.Translate
    """
    translated = gdal.Translate(dst, src, options=gdal.TranslateOptions(**options))
    # Dereference so GDAL flushes and closes the output.
    translated = None
a simple wrapper for `gdal.Translate <https://gdal.org/python/osgeo.gdal-module.html#Translate>`_ Parameters ---------- src: str, :osgeo:class:`ogr.DataSource` or :osgeo:class:`gdal.Dataset` the input data set dst: str the output data set options: dict additional parameters passed to gdal.Translate; see `gdal.TranslateOptions <http://gdal.org/python/osgeo.gdal-module.html#TranslateOptions>`_ Returns -------
entailment
def ogr2ogr(src, dst, options):
    """Thin wrapper around gdal.VectorTranslate (aka the ogr2ogr tool).

    Parameters
    ----------
    src: str or :osgeo:class:`ogr.DataSource`
        the input data set
    dst: str
        the output data set
    options: dict
        additional parameters passed to gdal.VectorTranslate
    """
    result = gdal.VectorTranslate(dst, src,
                                  options=gdal.VectorTranslateOptions(**options))
    # Dereference so GDAL flushes and closes the output.
    result = None
a simple wrapper for gdal.VectorTranslate aka `ogr2ogr <https://www.gdal.org/ogr2ogr.html>`_ Parameters ---------- src: str or :osgeo:class:`ogr.DataSource` the input data set dst: str the output data set options: dict additional parameters passed to gdal.VectorTranslate; see `gdal.VectorTranslateOptions <http://gdal.org/python/osgeo.gdal-module.html#VectorTranslateOptions>`_ Returns -------
entailment
def gdal_rasterize(src, dst, options):
    """Thin wrapper around gdal.Rasterize.

    Parameters
    ----------
    src: str or :osgeo:class:`ogr.DataSource`
        the input data set
    dst: str
        the output data set
    options: dict
        additional parameters passed to gdal.Rasterize
    """
    rasterized = gdal.Rasterize(dst, src, options=gdal.RasterizeOptions(**options))
    # Dereference so GDAL flushes and closes the output.
    rasterized = None
a simple wrapper for gdal.Rasterize Parameters ---------- src: str or :osgeo:class:`ogr.DataSource` the input data set dst: str the output data set options: dict additional parameters passed to gdal.Rasterize; see :osgeo:func:`gdal.RasterizeOptions` Returns -------
entailment
def coordinate_reproject(x, y, s_crs, t_crs):
    """Reproject a single (x, y) coordinate between two CRSs.

    Parameters
    ----------
    x: int or float
        the X coordinate component
    y: int or float
        the Y coordinate component
    s_crs: int, str or :osgeo:class:`osr.SpatialReference`
        the source CRS; see :func:`~spatialist.auxil.crsConvert` for options
    t_crs: int, str or :osgeo:class:`osr.SpatialReference`
        the target CRS; see :func:`~spatialist.auxil.crsConvert` for options

    Returns
    -------
    tuple
        the reprojected (x, y) pair
    """
    transform = osr.CoordinateTransformation(crsConvert(s_crs, 'osr'),
                                             crsConvert(t_crs, 'osr'))
    # TransformPoint returns (x, y, z); drop the z component.
    return transform.TransformPoint(x, y)[:2]
reproject a coordinate from one CRS to another Parameters ---------- x: int or float the X coordinate component y: int or float the Y coordinate component s_crs: int, str or :osgeo:class:`osr.SpatialReference` the source CRS. See :func:`~spatialist.auxil.crsConvert` for options. t_crs: int, str or :osgeo:class:`osr.SpatialReference` the target CRS. See :func:`~spatialist.auxil.crsConvert` for options. Returns -------
entailment
def set_up_dirs(proc_name, output_dir=None, work_dir=None, log_dir=None):
    """Create the output and work directories and set up the log file.

    :return: tuple of (output_dir, work_dir, log_fpath)
    """
    if not output_dir:
        output_dir = join(os.getcwd(), proc_name)
    output_dir = safe_mkdir(adjust_path(output_dir), 'output_dir')
    debug('Saving results into ' + output_dir)

    if not work_dir:
        work_dir = join(output_dir, 'work')
    work_dir = safe_mkdir(work_dir, 'working directory')
    info('Using work directory ' + work_dir)

    if not log_dir:
        log_dir = safe_mkdir(join(work_dir, 'log'))
    log_fpath = set_up_log(log_dir, proc_name + '.log')
    return output_dir, work_dir, log_fpath
Creates output_dir, work_dir, and sets up log
entailment
def _iter_content_generator(response, decode_unicode): """Generator used to yield 100 KiB chunks for a given response.""" for chunk in response.iter_content(100 * 1024, decode_unicode=decode_unicode): if decode_unicode: # Replace CRLF newlines with LF, Python will handle # platform specific newlines if written to file. chunk = chunk.replace('\r\n', '\n') # Chunk could be ['...\r', '\n...'], stril trailing \r chunk = chunk.rstrip('\r') yield chunk
Generator used to yield 100 KiB chunks for a given response.
entailment
def _iter_lines_generator(response, decode_unicode):
    """Yield the response one line at a time, without loading it all.

    Adapted from :meth:`requests.models.Response.iter_lines`, but built on
    :func:`_iter_content_generator` because Space-Track uses CRLF newlines:
    splitting raw chunks could otherwise yield blank lines when one chunk
    ends with CR and the next starts with LF.

    .. note:: This generator is not reentrant safe.
    """
    pending = None
    for chunk in _iter_content_generator(response,
                                         decode_unicode=decode_unicode):
        if pending is not None:
            chunk = pending + chunk
        lines = chunk.splitlines()
        # If the chunk does not end with a newline, its last "line" may be
        # incomplete; hold it back and prepend it to the next chunk.
        if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
            pending = lines.pop()
        else:
            pending = None
        yield from lines
    if pending is not None:
        yield pending
Iterates over the response data, one line at a time. When stream=True is set on the request, this avoids reading the content at once into memory for large responses. The function is taken from :meth:`requests.models.Response.iter_lines`, but modified to use our :func:`~spacetrack.base._iter_content_generator`. This is because Space-Track uses CRLF newlines, so :meth:`str.splitlines` can cause us to yield blank lines if one chunk ends with CR and the next one starts with LF. .. note:: This method is not reentrant safe.
entailment
def _raise_for_status(response):
    """Raise :class:`requests.HTTPError` for 4xx/5xx responses.

    Same as :meth:`requests.models.Response.raise_for_status`, but appends
    the error message returned by Space-Track when one is available.
    """
    if 400 <= response.status_code < 500:
        category = 'Client'
    elif 500 <= response.status_code < 600:
        category = 'Server'
    else:
        return

    http_error_msg = '%s %s Error: %s for url: %s' % (
        response.status_code, category, response.reason, response.url)

    # Prefer the structured 'error' field from a JSON body; fall back to
    # the raw response text.
    spacetrack_error_msg = None
    try:
        payload = response.json()
        if isinstance(payload, Mapping):
            spacetrack_error_msg = payload['error']
    except (ValueError, KeyError):
        pass
    if not spacetrack_error_msg:
        spacetrack_error_msg = response.text
    if spacetrack_error_msg:
        http_error_msg += '\nSpace-Track response:\n' + spacetrack_error_msg

    raise requests.HTTPError(http_error_msg, response=response)
Raises stored :class:`HTTPError`, if one occurred. This is the :meth:`requests.models.Response.raise_for_status` method, modified to add the response from Space-Track, if given.
entailment
def generic_request(self, class_, iter_lines=False, iter_content=False,
                    controller=None, parse_types=False, **kwargs):
    r"""Generic Space-Track query; backs all request-class shorthands.

    ``st.tle_publish(...)`` / ``st.basicspacedata.tle_publish(...)`` etc.
    resolve to calls of this method with the class name (and optionally
    the controller) filled in.

    Parameters:
        class\_: Space-Track request class name
        iter_lines: Yield result line by line
        iter_content: Yield result in 100 KiB chunks.
        controller: Optionally specify request controller to use.
        parse_types: Parse string values in response according to type
            given in predicate information, e.g. ``'2017-01-01'`` ->
            ``datetime.date(2017, 1, 1)``.
        **kwargs: Keywords matching the predicate fields on Space-Track
            (query with ``get_predicates``); values are stringified via
            :func:`~spacetrack.operators._stringify_predicate_value`.

    Yields:
        Lines (without newlines) if ``iter_lines=True``, or 100 KiB
        chunks if ``iter_content=True``.

    Returns:
        Parsed JSON object, unless a ``format`` keyword argument is
        passed.

    .. warning::

        Passing ``format='json'`` will return the JSON **unparsed**. Do
        not set ``format`` if you want the parsed JSON object returned!
    """
    if iter_lines and iter_content:
        raise ValueError('iter_lines and iter_content cannot both be True')
    if 'format' in kwargs and parse_types:
        raise ValueError('parse_types can only be used if format is unset.')

    # Resolve (and validate) the controller for this request class.
    if controller is None:
        controller = self._find_controller(class_)
    else:
        classes = self.request_controllers.get(controller, None)
        if classes is None:
            raise ValueError(
                'Unknown request controller {!r}'.format(controller))
        if class_ not in classes:
            raise ValueError(
                'Unknown request class {!r} for controller {!r}'
                .format(class_, controller))

    # Binary downloads are left undecoded; everything else is decoded to
    # text with CRLF newlines converted to LF.
    decode = (class_ != 'download')
    if not decode and iter_lines:
        error = (
            'iter_lines disabled for binary data, since CRLF newlines '
            'split over chunk boundaries would yield extra blank lines. '
            'Use iter_content=True instead.')
        raise ValueError(error)

    self.authenticate()

    url = ('{0}{1}/query/class/{2}'
           .format(self.base_url, controller, class_))

    # Collect the set of keyword names valid for this class, either from
    # the offline predicate tables or by querying Space-Track.
    offline_check = (class_, controller) in self.offline_predicates
    valid_fields = {p.name for p in self.rest_predicates}
    predicates = None
    if not offline_check:
        predicates = self.get_predicates(class_, controller)
        valid_fields |= {p.name for p in predicates}
    else:
        valid_fields |= self.offline_predicates[(class_, controller)]

    for key, value in kwargs.items():
        if key not in valid_fields:
            raise TypeError(
                "'{class_}' got an unexpected argument '{key}'"
                .format(class_=class_, key=key))
        if class_ == 'upload' and key == 'file':
            # The file goes in the POST body, not the URL.
            continue
        value = _stringify_predicate_value(value)
        url += '/{key}/{value}'.format(key=key, value=value)

    logger.debug(requests.utils.requote_uri(url))

    if class_ == 'upload':
        if 'file' not in kwargs:
            raise TypeError("missing keyword argument: 'file'")
        resp = self.session.post(url, files={'file': kwargs['file']})
    else:
        resp = self._ratelimited_get(url,
                                     stream=iter_lines or iter_content)

    _raise_for_status(resp)

    if resp.encoding is None:
        resp.encoding = 'UTF-8'

    if iter_lines:
        return _iter_lines_generator(resp, decode_unicode=decode)
    if iter_content:
        return _iter_content_generator(resp, decode_unicode=decode)

    # If an explicit format was requested, return it unparsed; otherwise
    # parse the default JSON response.
    if 'format' in kwargs:
        if decode:
            # Normalize CRLF; Python handles platform newlines on write.
            data = resp.text.replace('\r\n', '\n')
        else:
            data = resp.content
        return data

    data = resp.json()
    if predicates is None or not parse_types:
        return data
    return self._parse_types(data, predicates)
r"""Generic Space-Track query. The request class methods use this method internally; the public API is as follows: .. code-block:: python st.tle_publish(*args, **kw) st.basicspacedata.tle_publish(*args, **kw) st.file(*args, **kw) st.fileshare.file(*args, **kw) st.spephemeris.file(*args, **kw) They resolve to the following calls respectively: .. code-block:: python st.generic_request('tle_publish', *args, **kw) st.generic_request('tle_publish', *args, controller='basicspacedata', **kw) st.generic_request('file', *args, **kw) st.generic_request('file', *args, controller='fileshare', **kw) st.generic_request('file', *args, controller='spephemeris', **kw) Parameters: class\_: Space-Track request class name iter_lines: Yield result line by line iter_content: Yield result in 100 KiB chunks. controller: Optionally specify request controller to use. parse_types: Parse string values in response according to type given in predicate information, e.g. ``'2017-01-01'`` -> ``datetime.date(2017, 1, 1)``. **kwargs: These keywords must match the predicate fields on Space-Track. You may check valid keywords with the following snippet: .. code-block:: python spacetrack = SpaceTrackClient(...) spacetrack.tle.get_predicates() # or spacetrack.get_predicates('tle') See :func:`~spacetrack.operators._stringify_predicate_value` for which Python objects are converted appropriately. Yields: Lines—stripped of newline characters—if ``iter_lines=True`` Yields: 100 KiB chunks if ``iter_content=True`` Returns: Parsed JSON object, unless ``format`` keyword argument is passed. .. warning:: Passing ``format='json'`` will return the JSON **unparsed**. Do not set ``format`` if you want the parsed JSON object returned!
entailment
def _ratelimited_get(self, *args, **kwargs):
    """Perform a GET request while honouring the query rate limit.

    Space-Track can reply with HTTP 500 for a rate limit violation (e.g.
    when a script is cancelled before finishing its rate-limit sleep and
    then restarted); in that specific case we sleep out the period and
    retry once.
    """
    with self._ratelimiter:
        resp = self.session.get(*args, **kwargs)

    if resp.status_code == 500 and 'violated your query rate limit' in resp.text:
        # Mimic the RateLimiter callback behaviour, then wait out the
        # period and retry once.
        until = time.time() + self._ratelimiter.period
        notifier = threading.Thread(target=self._ratelimit_callback,
                                    args=(until,))
        notifier.daemon = True
        notifier.start()
        time.sleep(self._ratelimiter.period)
        with self._ratelimiter:
            resp = self.session.get(*args, **kwargs)
    return resp
Perform get request, handling rate limiting.
entailment
def _find_controller(self, class_):
    """Return the first controller whose class set contains ``class_``.

    Iteration order follows the keys of
    ``SpaceTrackClient.request_controllers``
    (:class:`~collections.OrderedDict`).

    :raises ValueError: if no controller knows the request class
    """
    for controller, classes in self.request_controllers.items():
        if class_ in classes:
            return controller
    raise ValueError('Unknown request class {!r}'.format(class_))
Find first controller that matches given request class. Order is specified by the keys of ``SpaceTrackClient.request_controllers`` (:class:`~collections.OrderedDict`)
entailment
def _download_predicate_data(self, class_, controller):
    """Fetch the raw predicate (model definition) data for a request class.

    Caching of the parsed result is handled by the caller
    (:meth:`get_predicates`).
    """
    self.authenticate()
    url = ('{0}{1}/modeldef/class/{2}'
           .format(self.base_url, controller, class_))
    logger.debug(requests.utils.requote_uri(url))
    resp = self._ratelimited_get(url)
    _raise_for_status(resp)
    return resp.json()['data']
Get raw predicate information for given request class, and cache for subsequent calls.
entailment
def get_predicates(self, class_, controller=None):
    """Return parsed predicate objects for ``class_``.

    Results are cached in ``self._predicates`` for subsequent calls.
    """
    if class_ in self._predicates:
        return self._predicates[class_]

    if controller is None:
        controller = self._find_controller(class_)
    else:
        classes = self.request_controllers.get(controller, None)
        if classes is None:
            raise ValueError(
                'Unknown request controller {!r}'.format(controller))
        if class_ not in classes:
            raise ValueError(
                'Unknown request class {!r}'.format(class_))

    raw = self._download_predicate_data(class_, controller)
    self._predicates[class_] = self._parse_predicates_data(raw)
    return self._predicates[class_]
Get full predicate information for given request class, and cache for subsequent calls.
entailment
def get_predicates(self, class_):
    """Proxy ``get_predicates`` to the client, fixing the controller to
    the one this object wraps."""
    client = self.client
    return client.get_predicates(class_=class_, controller=self.controller)
Proxy ``get_predicates`` to client with stored request controller.
entailment
def on_install(self, editor):
    """Add the folding context menu to the editor, on install.

    :param editor: editor instance on which the mode has been installed.
    """
    super(FoldingPanel, self).on_install(editor)
    self.context_menu = QtWidgets.QMenu(_('Folding'), self.editor)

    def add_action(text, shortcut, slot):
        # Create, wire and register one menu action.
        action = QtWidgets.QAction(text, self.context_menu)
        action.setShortcut(shortcut)
        action.triggered.connect(slot)
        self.context_menu.addAction(action)
        return action

    self.action_collapse = add_action(
        _('Collapse'), 'Shift+-', self._on_action_toggle)
    self.action_expand = add_action(
        _('Expand'), 'Shift++', self._on_action_toggle)
    self.context_menu.addSeparator()
    self.action_collapse_all = add_action(
        _('Collapse all'), 'Ctrl+Shift+-',
        self._on_action_collapse_all_triggered)
    self.action_expand_all = add_action(
        _('Expand all'), 'Ctrl+Shift++',
        self._on_action_expand_all_triggered)
    self.editor.add_menu(self.context_menu)
Add the folding menu to the editor, on install. :param editor: editor instance on which the mode has been installed to.
entailment
def _draw_rect(self, rect, painter):
    """Draw the fold-zone background rectangle.

    Uses the custom color, or a system-derived color when the native
    indicator style is enabled.

    :param rect: the fold zone rect to draw
    :param painter: the widget's painter
    """
    base = self._custom_color
    if self._native:
        base = self.get_system_bck_color()
    grad = QtGui.QLinearGradient(rect.topLeft(), rect.topRight())
    if sys.platform == 'darwin':
        grad.setColorAt(0, base.lighter(100))
        grad.setColorAt(1, base.lighter(110))
        outline = base.darker(110)
    else:
        grad.setColorAt(0, base.lighter(110))
        grad.setColorAt(1, base.lighter(130))
        outline = base.darker(100)
    painter.fillRect(rect, grad)
    painter.setPen(QtGui.QPen(outline))
    # Draw the outline, inset one pixel at each corner.
    one_right = QtCore.QPointF(1, 0)
    one_down = QtCore.QPointF(0, 1)
    painter.drawLine(rect.topLeft() + one_right,
                     rect.topRight() - one_right)
    painter.drawLine(rect.bottomLeft() + one_right,
                     rect.bottomRight() - one_right)
    painter.drawLine(rect.topRight() + one_down,
                     rect.bottomRight() - one_down)
    painter.drawLine(rect.topLeft() + one_down,
                     rect.bottomLeft() - one_down)
Draw the background rectangle using the current style primitive color or foldIndicatorBackground if nativeFoldingIndicator is true. :param rect: The fold zone rect to draw :param painter: The widget's painter.
entailment
def get_system_bck_color():
    """Return a system palette color for the fold scope background
    (window color blended 50/50 with the highlight color)."""
    def blend(color_a, color_b, factor):
        # Channel-wise weighted mix of two colors; factor is out of 100.
        max_factor = 100
        color_a = QtGui.QColor(color_a)
        color_b = QtGui.QColor(color_b)
        mixed = color_a
        mixed.setRed((mixed.red() * factor) / max_factor +
                     (color_b.red() * (max_factor - factor)) / max_factor)
        mixed.setGreen((mixed.green() * factor) / max_factor +
                       (color_b.green() * (max_factor - factor)) / max_factor)
        mixed.setBlue((mixed.blue() * factor) / max_factor +
                      (color_b.blue() * (max_factor - factor)) / max_factor)
        return mixed

    pal = QtWidgets.QApplication.instance().palette()
    return blend(pal.window().color(), pal.highlight().color(), 50)
Gets a system color for drawing the fold scope background.
entailment
def _draw_fold_indicator(self, top, mouse_over, collapsed, painter):
    """Draw the fold indicator/trigger (arrow).

    :param top: top position
    :param mouse_over: whether the mouse is over the indicator
    :param collapsed: whether the trigger is collapsed or not
    :param painter: QPainter
    """
    rect = QtCore.QRect(0, top, self.sizeHint().width(),
                        self.sizeHint().height())
    if not self._native:
        # Custom pixmap indicators: index 0/2 for collapsed/expanded,
        # +1 when hovered.
        index = 0 if collapsed else 2
        if mouse_over:
            index += 1
        QtGui.QIcon(self._custom_indicators[index]).paint(painter, rect)
        return
    # Native branch indicator drawn through the widget style.
    if os.environ['QT_API'].lower() not in PYQT5_API:
        opt = QtGui.QStyleOptionViewItemV2()
    else:
        opt = QtWidgets.QStyleOptionViewItem()
    opt.rect = rect
    opt.state = (QtWidgets.QStyle.State_Active |
                 QtWidgets.QStyle.State_Item |
                 QtWidgets.QStyle.State_Children)
    if not collapsed:
        opt.state |= QtWidgets.QStyle.State_Open
    if mouse_over:
        opt.state |= (QtWidgets.QStyle.State_MouseOver |
                      QtWidgets.QStyle.State_Enabled |
                      QtWidgets.QStyle.State_Selected)
        opt.palette.setBrush(QtGui.QPalette.Window,
                             self.palette().highlight())
    opt.rect.translate(-2, 0)
    self.style().drawPrimitive(QtWidgets.QStyle.PE_IndicatorBranch,
                               opt, painter, self)
Draw the fold indicator/trigger (arrow). :param top: Top position :param mouse_over: Whether the mouse is over the indicator :param collapsed: Whether the trigger is collapsed or not. :param painter: QPainter
entailment
def _get_scope_highlight_color(self):
    """Return the base scope highlight color, derived by drifting the
    editor background (stronger drift for dark themes)."""
    bg = self.editor.background
    drift = 130 if bg.lightness() < 128 else 105
    return drift_color(bg, drift)
Gets the base scope highlight color (derivated from the editor background)
entailment