| sentence1 (string) | sentence2 (string) | label (class) |
|---|---|---|
def select_entry(self, *arguments):
"""
Select a password from the available choices.
:param arguments: Refer to :func:`smart_search()`.
:returns: The name of a password (a string) or :data:`None`
(when no password matched the given `arguments`).
"""
matches = self.smart_search(*arguments)
if len(matches) > 1:
logger.info("More than one match, prompting for choice ..")
labels = [entry.name for entry in matches]
return matches[labels.index(prompt_for_choice(labels))]
else:
logger.info("Matched one entry: %s", matches[0].name)
return matches[0]
|
Select a password from the available choices.
:param arguments: Refer to :func:`smart_search()`.
:returns: The name of a password (a string) or :data:`None`
(when no password matched the given `arguments`).
|
entailment
|
def simple_search(self, *keywords):
"""
Perform a simple search for case insensitive substring matches.
:param keywords: The string(s) to search for.
:returns: The matched password names (a generator of strings).
Only passwords whose names match *all* of the given keywords are
returned.
"""
matches = []
keywords = [kw.lower() for kw in keywords]
logger.verbose(
"Performing simple search on %s (%s) ..",
pluralize(len(keywords), "keyword"),
concatenate(map(repr, keywords)),
)
for entry in self.filtered_entries:
normalized = entry.name.lower()
if all(kw in normalized for kw in keywords):
matches.append(entry)
logger.log(
logging.INFO if matches else logging.VERBOSE,
"Matched %s using simple search.",
pluralize(len(matches), "password"),
)
return matches
|
Perform a simple search for case insensitive substring matches.
:param keywords: The string(s) to search for.
:returns: The matched password names (a generator of strings).
Only passwords whose names match *all* of the given keywords are
returned.
|
entailment
|
def smart_search(self, *arguments):
"""
Perform a smart search on the given keywords or patterns.
:param arguments: The keywords or patterns to search for.
:returns: The matched password names (a list of strings).
:raises: The following exceptions can be raised:
- :exc:`.NoMatchingPasswordError` when no matching passwords are found.
- :exc:`.EmptyPasswordStoreError` when the password store is empty.
This method first tries :func:`simple_search()` and if that doesn't
produce any matches it will fall back to :func:`fuzzy_search()`. If no
matches are found an exception is raised (see above).
"""
matches = self.simple_search(*arguments)
if not matches:
logger.verbose("Falling back from substring search to fuzzy search ..")
matches = self.fuzzy_search(*arguments)
if not matches:
if len(self.filtered_entries) > 0:
raise NoMatchingPasswordError(
format("No passwords matched the given arguments! (%s)", concatenate(map(repr, arguments)))
)
else:
msg = "You don't have any passwords yet! (no *.gpg files found)"
raise EmptyPasswordStoreError(msg)
return matches
|
Perform a smart search on the given keywords or patterns.
:param arguments: The keywords or patterns to search for.
:returns: The matched password names (a list of strings).
:raises: The following exceptions can be raised:
- :exc:`.NoMatchingPasswordError` when no matching passwords are found.
- :exc:`.EmptyPasswordStoreError` when the password store is empty.
This method first tries :func:`simple_search()` and if that doesn't
produce any matches it will fall back to :func:`fuzzy_search()`. If no
matches are found an exception is raised (see above).
|
entailment
|
def entries(self):
"""A list of :class:`PasswordEntry` objects."""
passwords = []
for store in self.stores:
passwords.extend(store.entries)
return natsort(passwords, key=lambda e: e.name)
|
A list of :class:`PasswordEntry` objects.
|
entailment
|
def context(self):
"""
An execution context created using :mod:`executor.contexts`.
The value of :attr:`context` defaults to a
:class:`~executor.contexts.LocalContext` object with the following
characteristics:
- The working directory of the execution context is set to the
value of :attr:`directory`.
- The environment variable given by :data:`DIRECTORY_VARIABLE` is set
to the value of :attr:`directory`.
:raises: :exc:`.MissingPasswordStoreError` when :attr:`directory`
doesn't exist.
"""
# Make sure the directory exists.
self.ensure_directory_exists()
# Prepare the environment variables.
environment = {DIRECTORY_VARIABLE: self.directory}
try:
# Try to enable the GPG agent in headless sessions.
environment.update(get_gpg_variables())
except Exception:
# If we failed then let's at least make sure that the
# $GPG_TTY environment variable is set correctly.
environment.update(GPG_TTY=execute("tty", capture=True, check=False, tty=True, silent=True))
return LocalContext(directory=self.directory, environment=environment)
|
An execution context created using :mod:`executor.contexts`.
The value of :attr:`context` defaults to a
:class:`~executor.contexts.LocalContext` object with the following
characteristics:
- The working directory of the execution context is set to the
value of :attr:`directory`.
- The environment variable given by :data:`DIRECTORY_VARIABLE` is set
to the value of :attr:`directory`.
:raises: :exc:`.MissingPasswordStoreError` when :attr:`directory`
doesn't exist.
|
entailment
|
def directory(self, value):
"""Normalize the value of :attr:`directory` when it's set."""
# Normalize the value of `directory'.
set_property(self, "directory", parse_path(value))
# Clear the computed values of `context' and `entries'.
clear_property(self, "context")
clear_property(self, "entries")
|
Normalize the value of :attr:`directory` when it's set.
|
entailment
|
def entries(self):
"""A list of :class:`PasswordEntry` objects."""
timer = Timer()
passwords = []
logger.info("Scanning %s ..", format_path(self.directory))
listing = self.context.capture("find", "-type", "f", "-name", "*.gpg", "-print0")
for filename in split(listing, "\0"):
basename, extension = os.path.splitext(filename)
if extension == ".gpg":
# We use os.path.normpath() to remove the leading `./' prefixes
# that `find' adds because it searches the working directory.
passwords.append(PasswordEntry(name=os.path.normpath(basename), store=self))
logger.verbose("Found %s in %s.", pluralize(len(passwords), "password"), timer)
return natsort(passwords, key=lambda e: e.name)
|
A list of :class:`PasswordEntry` objects.
|
entailment
|
def ensure_directory_exists(self):
"""
Make sure :attr:`directory` exists.
:raises: :exc:`.MissingPasswordStoreError` when the password storage
directory doesn't exist.
"""
if not os.path.isdir(self.directory):
msg = "The password storage directory doesn't exist! (%s)"
raise MissingPasswordStoreError(msg % self.directory)
|
Make sure :attr:`directory` exists.
:raises: :exc:`.MissingPasswordStoreError` when the password storage
directory doesn't exist.
|
entailment
|
def format_text(self, include_password=True, use_colors=None, padding=True, filters=()):
"""
Format :attr:`text` for viewing on a terminal.
:param include_password: :data:`True` to include the password in the
formatted text, :data:`False` to exclude the
password from the formatted text.
:param use_colors: :data:`True` to use ANSI escape sequences,
:data:`False` otherwise. When this is :data:`None`
:func:`~humanfriendly.terminal.terminal_supports_colors()`
will be used to detect whether ANSI escape sequences
are supported.
:param padding: :data:`True` to add empty lines before and after the
entry and indent the entry's text with two spaces,
:data:`False` to skip the padding.
:param filters: An iterable of regular expression patterns (defaults to
an empty tuple). If a line in the entry's text matches
one of these patterns it won't be shown on the
terminal.
:returns: The formatted entry (a string).
"""
# Determine whether we can use ANSI escape sequences.
if use_colors is None:
use_colors = terminal_supports_colors()
# Extract the password (first line) from the entry.
lines = self.text.splitlines()
password = lines.pop(0).strip()
# Compile the given patterns to case insensitive regular expressions
# and use them to ignore lines that match any of the given filters.
patterns = [coerce_pattern(f, re.IGNORECASE) for f in filters]
lines = [l for l in lines if not any(p.search(l) for p in patterns)]
text = trim_empty_lines("\n".join(lines))
# Include the password in the formatted text?
if include_password:
text = "Password: %s\n%s" % (password, text)
# Add the name to the entry (only when there's something to show).
if text and not text.isspace():
title = " / ".join(split(self.name, "/"))
if use_colors:
title = ansi_wrap(title, bold=True)
text = "%s\n\n%s" % (title, text)
# Highlight the entry's text using ANSI escape sequences.
lines = []
for line in text.splitlines():
# Check for a "Key: Value" line.
match = KEY_VALUE_PATTERN.match(line)
if match:
key = "%s:" % match.group(1).strip()
value = match.group(2).strip()
if use_colors:
# Highlight the key.
key = ansi_wrap(key, color=HIGHLIGHT_COLOR)
# Underline hyperlinks in the value.
tokens = value.split()
for i in range(len(tokens)):
if "://" in tokens[i]:
tokens[i] = ansi_wrap(tokens[i], underline=True)
# Replace the line with a highlighted version.
line = key + " " + " ".join(tokens)
if padding:
line = " " + line
lines.append(line)
text = "\n".join(lines)
text = trim_empty_lines(text)
if text and padding:
text = "\n%s\n" % text
return text
|
Format :attr:`text` for viewing on a terminal.
:param include_password: :data:`True` to include the password in the
formatted text, :data:`False` to exclude the
password from the formatted text.
:param use_colors: :data:`True` to use ANSI escape sequences,
:data:`False` otherwise. When this is :data:`None`
:func:`~humanfriendly.terminal.terminal_supports_colors()`
will be used to detect whether ANSI escape sequences
are supported.
:param padding: :data:`True` to add empty lines before and after the
entry and indent the entry's text with two spaces,
:data:`False` to skip the padding.
:param filters: An iterable of regular expression patterns (defaults to
an empty tuple). If a line in the entry's text matches
one of these patterns it won't be shown on the
terminal.
:returns: The formatted entry (a string).
|
entailment
|
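The highlighting step in `format_text()` above matches lines of the form `Key: Value` via `KEY_VALUE_PATTERN`, which is not included in this snippet. A minimal sketch of what such a pattern could look like (the regex below is an assumption for illustration, not the library's actual definition; it only mirrors how `match.group(1)` and `match.group(2)` are used above):

```python
import re

# Hypothetical stand-in for KEY_VALUE_PATTERN (assumption, not the real definition):
# group(1) captures the key, group(2) captures the value.
KEY_VALUE_PATTERN = re.compile(r'^\s*([^:\s][^:]*):\s+(\S.*)$')

match = KEY_VALUE_PATTERN.match("Username: alice@example.com")
if match:
    key = "%s:" % match.group(1).strip()    # 'Username:'
    value = match.group(2).strip()          # 'alice@example.com'
    print(key, value)
```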
def get_diaginfo(diaginfo_file):
"""
Read an output's diaginfo.dat file and parse into a DataFrame for
use in selecting and parsing categories.
Parameters
----------
diaginfo_file : str
Path to diaginfo.dat
Returns
-------
DataFrame containing the category information.
"""
widths = [rec.width for rec in diag_recs]
col_names = [rec.name for rec in diag_recs]
dtypes = [rec.type for rec in diag_recs]
usecols = [name for name in col_names if not name.startswith('-')]
diag_df = pd.read_fwf(diaginfo_file, widths=widths, names=col_names,
dtypes=dtypes, comment="#", header=None,
usecols=usecols)
diag_desc = {diag.name: diag.desc for diag in diag_recs
if not diag.name.startswith('-')}
return diag_df, diag_desc
|
Read an output's diaginfo.dat file and parse into a DataFrame for
use in selecting and parsing categories.
Parameters
----------
diaginfo_file : str
Path to diaginfo.dat
Returns
-------
DataFrame containing the category information.
|
entailment
|
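As a quick illustration of the `pd.read_fwf` call used in `get_diaginfo()` above, here is a small self-contained sketch; the column widths, names, and sample rows are made up for the example and do not reflect the real diaginfo.dat layout:

```python
import io
import pandas as pd

# Two fixed-width columns: a name field followed by a free-text description.
sample = (
    "IJ-AVG-$   Tracer concentration\n"
    "CHEM-L=$   Chemistry diagnostics\n"
)
df = pd.read_fwf(
    io.StringIO(sample),
    widths=[11, 30],          # character width of each column
    names=["name", "desc"],   # column names, analogous to diag_recs above
    header=None,
)
print(df)
```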
def get_tracerinfo(tracerinfo_file):
"""
Read an output's tracerinfo.dat file and parse into a DataFrame for
use in selecting and parsing categories.
Parameters
----------
tracerinfo_file : str
Path to tracerinfo.dat
Returns
-------
DataFrame containing the tracer information.
"""
widths = [rec.width for rec in tracer_recs]
col_names = [rec.name for rec in tracer_recs]
dtypes = [rec.type for rec in tracer_recs]
usecols = [name for name in col_names if not name.startswith('-')]
tracer_df = pd.read_fwf(tracerinfo_file, widths=widths, names=col_names,
dtypes=dtypes, comment="#", header=None,
usecols=usecols)
# Check an edge case related to a bug in GEOS-Chem v12.0.3 which
# erroneously dropped short/long tracer names in certain tracerinfo.dat outputs.
# What we do here is figure out which rows were erroneously processed (they'll
# have NaNs in them) and raise a warning if there are any
na_free = tracer_df.dropna(subset=['tracer', 'scale'])
only_na = tracer_df[~tracer_df.index.isin(na_free.index)]
if len(only_na) > 0:
warn("At least one row in {} wasn't decoded correctly; we strongly"
" recommend you manually check that file to see that all"
" tracers are properly recorded."
.format(tracerinfo_file))
tracer_desc = {tracer.name: tracer.desc for tracer in tracer_recs
if not tracer.name.startswith('-')}
# Process some of the information about which variables are hydrocarbons
# and chemical tracers versus other diagnostics.
def _assign_hydrocarbon(row):
if row['C'] != 1:
row['hydrocarbon'] = True
row['molwt'] = C_MOLECULAR_WEIGHT
else:
row['hydrocarbon'] = False
return row
tracer_df = (
tracer_df
.apply(_assign_hydrocarbon, axis=1)
.assign(chemical=lambda x: x['molwt'].astype(bool))
)
return tracer_df, tracer_desc
|
Read an output's tracerinfo.dat file and parse into a DataFrame for
use in selecting and parsing categories.
Parameters
----------
tracerinfo_file : str
Path to tracerinfo.dat
Returns
-------
DataFrame containing the tracer information.
|
entailment
|
def read_from_bpch(filename, file_position, shape, dtype, endian,
use_mmap=False):
""" Read a chunk of data from a bpch output file.
Parameters
----------
filename : str
Path to file on disk containing the data
file_position : int
Position (bytes) where desired data chunk begins
shape : tuple of ints
Resultant (n-dimensional) shape of requested data; the chunk
will be read sequentially from disk and then re-shaped
dtype : dtype
Dtype of data; for best results, pass a dtype which includes
an endian indicator, e.g. `dtype = np.dtype('>f4')`
endian : str
Endianness of data; should be consistent with `dtype`
use_mmap : bool
Memory map the chunk of data to the file on disk, else read
immediately
Returns
-------
Array with shape `shape` and dtype `dtype` containing the requested
chunk of data from `filename`.
"""
offset = file_position + 4
if use_mmap:
d = np.memmap(filename, dtype=dtype, mode='r', shape=shape,
offset=offset, order='F')
else:
with FortranFile(filename, 'rb', endian) as ff:
ff.seek(file_position)
d = np.array(ff.readline('*f'))
d = d.reshape(shape, order='F')
# As a sanity check, *be sure* that the resulting data block has the
# correct shape, and fail early if it doesn't.
if (d.shape != shape):
raise IOError("Data chunk read from {} does not have the right shape,"
" (expected {} but got {})"
.format(filename, shape, d.shape))
return d
|
Read a chunk of data from a bpch output file.
Parameters
----------
filename : str
Path to file on disk containing the data
file_position : int
Position (bytes) where desired data chunk begins
shape : tuple of ints
Resultant (n-dimensional) shape of requested data; the chunk
will be read sequentially from disk and then re-shaped
dtype : dtype
Dtype of data; for best results, pass a dtype which includes
an endian indicator, e.g. `dtype = np.dtype('>f4')`
endian : str
Endianness of data; should be consistent with `dtype`
use_mmap : bool
Memory map the chunk of data to the file on disk, else read
immediately
Returns
-------
Array with shape `shape` and dtype `dtype` containing the requested
chunk of data from `filename`.
|
entailment
|
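To illustrate the memory-mapping branch of `read_from_bpch()`, here is a minimal, self-contained sketch that writes a small Fortran-ordered block to disk and maps it back with the same `np.memmap` arguments. The file name and shape are arbitrary for the example; the real bpch layout also contains Fortran record markers, which is what the `file_position + 4` offset skips:

```python
import os
import tempfile
import numpy as np

# Write a small big-endian float32 block in Fortran (column-major) order.
data = np.arange(24, dtype='>f4').reshape((2, 3, 4))
path = os.path.join(tempfile.mkdtemp(), 'chunk.bin')
data.ravel(order='F').tofile(path)

# Map it back lazily, as the use_mmap branch does, instead of reading eagerly.
mapped = np.memmap(path, dtype='>f4', mode='r', shape=(2, 3, 4),
                   offset=0, order='F')
assert np.array_equal(np.asarray(mapped), data)
```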
def _read(self):
""" Helper function to load the data referenced by this bundle. """
if self._dask:
d = da.from_delayed(
delayed(read_from_bpch, )(
self.filename, self.file_position, self.shape,
self.dtype, self.endian, use_mmap=self._mmap
),
self.shape, self.dtype
)
else:
d = read_from_bpch(
self.filename, self.file_position, self.shape,
self.dtype, self.endian, use_mmap=self._mmap
)
return d
|
Helper function to load the data referenced by this bundle.
|
entailment
|
def close(self):
""" Close this bpch file.
"""
if not self.fp.closed:
for v in list(self.var_data):
del self.var_data[v]
self.fp.close()
|
Close this bpch file.
|
entailment
|
def _read_metadata(self):
""" Read the main metadata packaged within a bpch file, indicating
the output filetype and its title.
"""
filetype = self.fp.readline().strip()
filetitle = self.fp.readline().strip()
# Decode to UTF string, if possible
try:
filetype = str(filetype, 'utf-8')
filetitle = str(filetitle, 'utf-8')
except:
# TODO: Handle this edge-case of converting file metadata more elegantly.
pass
self.__setattr__('filetype', filetype)
self.__setattr__('filetitle', filetitle)
|
Read the main metadata packaged within a bpch file, indicating
the output filetype and its title.
|
entailment
|
def _read_header(self):
""" Process the header information (data model / grid spec) """
self._header_pos = self.fp.tell()
line = self.fp.readline('20sffii')
modelname, res0, res1, halfpolar, center180 = line
self._attributes.update({
"modelname": str(modelname, 'utf-8').strip(),
"halfpolar": halfpolar,
"center180": center180,
"res": (res0, res1)
})
self.__setattr__('modelname', modelname)
self.__setattr__('res', (res0, res1))
self.__setattr__('halfpolar', halfpolar)
self.__setattr__('center180', center180)
# Re-wind the file
self.fp.seek(self._header_pos)
|
Process the header information (data model / grid spec)
|
entailment
|
def _read_var_data(self):
""" Iterate over the block of this bpch file and return handlers
in the form of `BPCHDataBundle`s for access to the data contained
therein.
"""
var_bundles = OrderedDict()
var_attrs = OrderedDict()
n_vars = 0
while self.fp.tell() < self.fsize:
var_attr = OrderedDict()
# read first and second header lines
line = self.fp.readline('20sffii')
modelname, res0, res1, halfpolar, center180 = line
line = self.fp.readline('40si40sdd40s7i')
category_name, number, unit, tau0, tau1, reserved = line[:6]
dim0, dim1, dim2, dim3, dim4, dim5, skip = line[6:]
var_attr['number'] = number
# Decode byte-strings to utf-8
category_name = str(category_name, 'utf-8')
var_attr['category'] = category_name.strip()
unit = str(unit, 'utf-8')
# get additional metadata from tracerinfo / diaginfo
try:
cat_df = self.diaginfo_df[
self.diaginfo_df.name == category_name.strip()
]
# TODO: Safer logic for handling case where more than one
# tracer metadata match was made
# if len(cat_df > 1):
# raise ValueError(
# "More than one category matching {} found in "
# "diaginfo.dat".format(
# category_name.strip()
# )
# )
# Safe now to select the only row in the DataFrame
cat = cat_df.T.squeeze()
tracer_num = int(cat.offset) + int(number)
diag_df = self.tracerinfo_df[
self.tracerinfo_df.tracer == tracer_num
]
# TODO: Safer logic for handling case where more than one
# tracer metadata match was made
# if len(diag_df > 1):
# raise ValueError(
# "More than one tracer matching {:d} found in "
# "tracerinfo.dat".format(tracer_num)
# )
# Safe now to select only row in the DataFrame
diag = diag_df.T.squeeze()
diag_attr = diag.to_dict()
if not unit.strip(): # unit may be empty in bpch
unit = diag_attr['unit'] # but not in tracerinfo
var_attr.update(diag_attr)
except:
diag = {'name': '', 'scale': 1}
var_attr.update(diag)
var_attr['unit'] = unit
vname = diag['name']
fullname = category_name.strip() + "_" + vname
# parse metadata, get data or set a data proxy
if dim2 == 1:
data_shape = (dim0, dim1) # 2D field
else:
data_shape = (dim0, dim1, dim2)
var_attr['original_shape'] = data_shape
# Add proxy time dimension to shape
data_shape = tuple([1, ] + list(data_shape))
origin = (dim3, dim4, dim5)
var_attr['origin'] = origin
timelo, timehi = cf.tau2time(tau0), cf.tau2time(tau1)
pos = self.fp.tell()
# Note that we don't pass a dtype, and assume everything is
# single-fp floats with the correct endian, as hard-coded
var_bundle = BPCHDataBundle(
data_shape, self.endian, self.filename, pos, [timelo, timehi],
metadata=var_attr,
use_mmap=self.use_mmap, dask_delayed=self.dask_delayed
)
self.fp.skipline()
# Save the data as a "bundle" for concatenating in the final step
if fullname in var_bundles:
var_bundles[fullname].append(var_bundle)
else:
var_bundles[fullname] = [var_bundle, ]
var_attrs[fullname] = var_attr
n_vars += 1
self.var_data = var_bundles
self.var_attrs = var_attrs
|
Iterate over the block of this bpch file and return handlers
in the form of `BPCHDataBundle`s for access to the data contained
therein.
|
entailment
|
def broadcast_1d_array(arr, ndim, axis=1):
"""
Broadcast 1-d array `arr` to `ndim` dimensions on the first axis
(`axis`=0) or on the last axis (`axis`=1).
Useful for 'outer' calculations involving 1-d arrays that are related to
different axes on a multidimensional grid.
"""
ext_arr = arr
for i in range(ndim - 1):
ext_arr = np.expand_dims(ext_arr, axis=axis)
return ext_arr
|
Broadcast 1-d array `arr` to `ndim` dimensions on the first axis
(`axis`=0) or on the last axis (`axis`=1).
Useful for 'outer' calculations involving 1-d arrays that are related to
different axes on a multidimensional grid.
|
entailment
|
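A short usage sketch of `broadcast_1d_array` for an "outer" product of two axis-aligned 1-d arrays, assuming the function as defined above with numpy imported as `np` (the weight values are arbitrary):

```python
import numpy as np

lat_weights = np.array([0.5, 1.0, 0.5])       # varies along the first grid axis
lev_weights = np.array([1.0, 0.9, 0.8, 0.7])  # varies along the last grid axis

# Expand each to 2-d so they broadcast against a (3, 4) grid:
a = broadcast_1d_array(lat_weights, 2, axis=1)  # shape (3, 1)
b = broadcast_1d_array(lev_weights, 2, axis=0)  # shape (1, 4)
grid = a * b                                    # shape (3, 4)
print(grid.shape)
```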
def get_timestamp(time=True, date=True, fmt=None):
""" Return the current timestamp in machine local time.
Parameters:
-----------
time, date : Boolean
Flag to include the time or date components, respectively,
in the output.
fmt : str, optional
If passed, will override the time/date choice and use as
the format string passed to `strftime`.
"""
time_format = "%H:%M:%S"
date_format = "%m-%d-%Y"
if fmt is None:
if time and date:
fmt = time_format + " " + date_format
elif time:
fmt = time_format
elif date:
fmt = date_format
else:
raise ValueError("One of `date` or `time` must be True!")
return datetime.now().strftime(fmt)
|
Return the current timestamp in machine local time.
Parameters:
-----------
time, date : Boolean
Flag to include the time or date components, respectively,
in the output.
fmt : str, optional
If passed, will override the time/date choice and use as
the format string passed to `strftime`.
|
entailment
|
def fix_attr_encoding(ds):
""" This is a temporary hot-fix to handle the way metadata is encoded
when we read data directly from bpch files. It removes the 'scale_factor'
and 'units' attributes we encode with the data we ingest, converts the
'hydrocarbon' and 'chemical' attribute to a binary integer instead of a
boolean, and removes the 'units' attribute from the "time" dimension since
that too is implicitly encoded.
In future versions of this library, when upstream issues in decoding
data wrapped in dask arrays is fixed, this won't be necessary and will be
removed.
"""
def _maybe_del_attr(da, attr):
""" Possibly delete an attribute on a DataArray if it's present """
if attr in da.attrs:
del da.attrs[attr]
return da
def _maybe_decode_attr(da, attr):
# TODO: Fix this so that bools get written as attributes just fine
""" Possibly coerce an attribute on a DataArray to an easier type
to write to disk. """
# bool -> int
if (attr in da.attrs) and isinstance(da.attrs[attr], bool):
da.attrs[attr] = int(da.attrs[attr])
return da
for v in ds.data_vars:
da = ds[v]
da = _maybe_del_attr(da, 'scale_factor')
da = _maybe_del_attr(da, 'units')
da = _maybe_decode_attr(da, 'hydrocarbon')
da = _maybe_decode_attr(da, 'chemical')
# Also delete attributes on time.
if hasattr(ds, 'time'):
times = ds.time
times = _maybe_del_attr(times, 'units')
return ds
|
This is a temporary hot-fix to handle the way metadata is encoded
when we read data directly from bpch files. It removes the 'scale_factor'
and 'units' attributes we encode with the data we ingest, converts the
'hydrocarbon' and 'chemical' attribute to a binary integer instead of a
boolean, and removes the 'units' attribute from the "time" dimension since
that too is implicitly encoded.
In future versions of this library, when upstream issues in decoding
data wrapped in dask arrays is fixed, this won't be necessary and will be
removed.
|
entailment
|
def after_output(command_status):
"""
Shell sequence to be run after the command output.
The ``command_status`` should be in the range 0-255.
"""
if command_status not in range(256):
raise ValueError("command_status must be an integer in the range 0-255")
sys.stdout.write(AFTER_OUTPUT.format(command_status=command_status))
# Flushing is important as the command timing feature may be based on
# AFTER_OUTPUT in the future.
sys.stdout.flush()
|
Shell sequence to be run after the command output.
The ``command_status`` should be in the range 0-255.
|
entailment
|
def forwards(self, orm):
"Write your forwards methods here."
# Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..."
for entry_title in orm.NewsEntryTitle.objects.all():
entry = NewsEntry.objects.get(pk=entry_title.entry.pk)
entry.translate(entry_title.language)
entry.title = entry_title.title
entry.slug = entry_title.slug
entry.is_published = entry_title.is_published
entry.save()
|
Write your forwards methods here.
|
entailment
|
def get_cfcompliant_units(units, prefix='', suffix=''):
"""
Get equivalent units that are compatible with the udunits2 library
(thus CF-compliant).
Parameters
----------
units : string
A string representation of the units.
prefix : string
Will be added at the beginning of the returned string
(must be a valid udunits2 expression).
suffix : string
Will be added at the end of the returned string
(must be a valid udunits2 expression).
Returns
-------
A string representation of the conforming units.
References
----------
The udunits2 package : http://www.unidata.ucar.edu/software/udunits/
Notes
-----
This function only relies on the table stored in :attr:`UNITS_MAP_CTM2CF`.
Therefore, the units string returned by this function is not certified to
be compatible with udunits2.
Examples
--------
>>> get_cfcompliant_units('molec/cm2')
'count/cm2'
>>> get_cfcompliant_units('v/v')
'1'
>>> get_cfcompliant_units('ppbC', prefix='3')
'3ppb'
"""
compliant_units = units
for gcunits, udunits in UNITS_MAP_CTM2CF:
compliant_units = str.replace(compliant_units, gcunits, udunits)
return prefix + compliant_units + suffix
|
Get equivalent units that are compatible with the udunits2 library
(thus CF-compliant).
Parameters
----------
units : string
A string representation of the units.
prefix : string
Will be added at the beginning of the returned string
(must be a valid udunits2 expression).
suffix : string
Will be added at the end of the returned string
(must be a valid udunits2 expression).
Returns
-------
A string representation of the conforming units.
References
----------
The udunits2 package : http://www.unidata.ucar.edu/software/udunits/
Notes
-----
This function only relies on the table stored in :attr:`UNITS_MAP_CTM2CF`.
Therefore, the units string returned by this function is not certified to
be compatible with udunits2.
Examples
--------
>>> get_cfcompliant_units('molec/cm2')
'count/cm2'
>>> get_cfcompliant_units('v/v')
'1'
>>> get_cfcompliant_units('ppbC', prefix='3')
'3ppb'
|
entailment
|
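`get_cfcompliant_units()` relies entirely on the `UNITS_MAP_CTM2CF` substitution table, which is not shown in this excerpt. A hypothetical subset consistent with the doctest examples above would look like this (an assumption inferred from the doctests; the actual table in the library may contain more and different entries):

```python
# Hypothetical subset of UNITS_MAP_CTM2CF (assumption, inferred from the doctests):
# each entry is (GEOS-Chem/CTM spelling, udunits2-friendly spelling).
UNITS_MAP_CTM2CF = [
    ('molec', 'count'),  # 'molec/cm2' -> 'count/cm2'
    ('v/v', '1'),        # volume mixing ratio -> dimensionless
    ('ppbC', 'ppb'),     # carbon-normalized ppb -> plain ppb
]

def _apply(units):
    for gcunits, udunits in UNITS_MAP_CTM2CF:
        units = units.replace(gcunits, udunits)
    return units

print(_apply('molec/cm2'))  # count/cm2
print(_apply('v/v'))        # 1
print(_apply('ppbC'))       # ppb
```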
def get_valid_varname(varname):
"""
Replace characters (e.g., ':', '$', '=', '-') of a variable name, which
may cause problems when using with (CF-)netCDF based packages.
Parameters
----------
varname : string
variable name.
Notes
-----
Characters replacement is based on the table stored in
:attr:`VARNAME_MAP_CHAR`.
"""
vname = varname
for s, r in VARNAME_MAP_CHAR:
vname = vname.replace(s, r)
return vname
|
Replace characters (e.g., ':', '$', '=', '-') of a variable name, which
may cause problems when using with (CF-)netCDF based packages.
Parameters
----------
varname : string
variable name.
Notes
-----
Characters replacement is based on the table stored in
:attr:`VARNAME_MAP_CHAR`.
|
entailment
|
def enforce_cf_variable(var, mask_and_scale=True):
""" Given a Variable constructed from GEOS-Chem output, enforce
CF-compliant metadata and formatting.
Until a bug with lazily-loaded data and masking/scaling is resolved in
xarray, you have the option to manually mask and scale the data here.
Parameters
----------
var : xarray.Variable
A variable holding information decoded from GEOS-Chem output.
mask_and_scale : bool
Flag to scale and mask the data given the unit conversions provided
Returns
-------
out : xarray.Variable
The original variable processed to conform to CF standards
.. note::
This method borrows heavily from the ideas in ``xarray.decode_cf_variable``
"""
var = as_variable(var)
data = var._data # avoid loading by accessing _data instead of data
dims = var.dims
attrs = var.attrs.copy()
encoding = var.encoding.copy()
orig_dtype = data.dtype
# Process masking/scaling coordinates. We only expect a "scale" value
# for the units with this output.
if 'scale' in attrs:
scale = attrs.pop('scale')
attrs['scale_factor'] = scale
encoding['scale_factor'] = scale
# TODO: Once the xr.decode_cf bug is fixed, we won't need to manually
# handle masking/scaling
if mask_and_scale:
data = scale*data
# Process units
# TODO: How do we want to handle parts-per-* units? These are not part of
# the udunits standard, and the CF conventions suggest using units
# like 1e-6 for parts-per-million. But we potentially mix mass and
# volume/molar mixing ratios in GEOS-Chem output, so we need a way
# to handle that edge case.
if 'unit' in attrs:
unit = attrs.pop('unit')
unit = get_cfcompliant_units(unit)
attrs['units'] = unit
# TODO: Once the xr.decode_cf bug is fixed, we won't need to manually
# handle masking/scaling
return Variable(dims, data, attrs, encoding=encoding)
|
Given a Variable constructed from GEOS-Chem output, enforce
CF-compliant metadata and formatting.
Until a bug with lazily-loaded data and masking/scaling is resolved in
xarray, you have the option to manually mask and scale the data here.
Parameters
----------
var : xarray.Variable
A variable holding information decoded from GEOS-Chem output.
mask_and_scale : bool
Flag to scale and mask the data given the unit conversions provided
Returns
-------
out : xarray.Variable
The original variable processed to conform to CF standards
.. note::
This method borrows heavily from the ideas in ``xarray.decode_cf_variable``
|
entailment
|
def published(self, check_language=True, language=None, kwargs=None,
exclude_kwargs=None):
"""
Returns all entries whose publication date has been reached or which have
no date, and whose language matches the current language.
"""
if check_language:
qs = NewsEntry.objects.language(language or get_language()).filter(
is_published=True)
else:
qs = self.get_queryset()
qs = qs.filter(
models.Q(pub_date__lte=now()) | models.Q(pub_date__isnull=True)
)
if kwargs is not None:
qs = qs.filter(**kwargs)
if exclude_kwargs is not None:
qs = qs.exclude(**exclude_kwargs)
return qs.distinct().order_by('-pub_date')
|
Returns all entries whose publication date has been reached or which have
no date, and whose language matches the current language.
|
entailment
|
def recent(self, check_language=True, language=None, limit=3, exclude=None,
kwargs=None, category=None):
"""
Returns recently published news entries.
"""
if category:
if not kwargs:
kwargs = {}
kwargs['categories__in'] = [category]
qs = self.published(check_language=check_language, language=language,
kwargs=kwargs)
if exclude:
qs = qs.exclude(pk=exclude.pk)
return qs[:limit]
|
Returns recently published news entries.
|
entailment
|
def get_newsentry_meta_description(newsentry):
"""Returns the meta description for the given entry."""
if newsentry.meta_description:
return newsentry.meta_description
# If there is no seo addon found, take the info from the placeholders
text = newsentry.get_description()
if len(text) > 160:
return u'{}...'.format(text[:160])
return text
|
Returns the meta description for the given entry.
|
entailment
|
def render_news_placeholder(context, obj, name=False, truncate=False): # pragma: nocover # NOQA
"""
DEPRECATED: Template tag to render a placeholder from a NewsEntry object.
We don't need this any more because we don't have a placeholders M2M field
on the model any more. Just use the default ``render_placeholder`` tag.
"""
warnings.warn(
"render_news_placeholder is deprecated. Use render_placeholder"
" instead", DeprecationWarning, stacklevel=2)
result = ''
if context.get('request'):
if isinstance(name, int):
# If the user doesn't want to use a placeholder name, but a cut, we
# need to check if the user has used the name as a number
truncate = name
name = False
if name:
# If the name of the placeholder slot is given, get, render and
# return it!
try:
result = safe(getattr(obj, name).render(context, None))
except AttributeError:
pass
else:
# If no name is provided get the first placeholder with content
for name in ['excerpt', 'content']:
rendered = ''
try:
rendered = safe(getattr(obj, name).render(context, None))
except AttributeError:
pass
if rendered:
result = rendered
break
if truncate:
return truncatewords_html(result, truncate)
return result
|
DEPRECATED: Template tag to render a placeholder from a NewsEntry object.
We don't need this any more because we don't have a placeholders M2M field
on the model any more. Just use the default ``render_placeholder`` tag.
|
entailment
|
def _requirement_filter_by_marker(req):
# type: (pkg_resources.Requirement) -> bool
"""Check if the requirement is satisfied by the marker.
This function checks for a given Requirement whether its environment marker
is satisfied on the current platform. Currently only the python version and
system platform are checked.
"""
if hasattr(req, 'marker') and req.marker:
marker_env = {
'python_version': '.'.join(map(str, sys.version_info[:2])),
'sys_platform': sys.platform
}
if not req.marker.evaluate(environment=marker_env):
return False
return True
|
Check if the requirement is satisfied by the marker.
This function checks for a given Requirement whether its environment marker
is satisfied on the current platform. Currently only the python version and
system platform are checked.
|
entailment
|
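A small usage sketch of the marker check performed above, assuming a reasonably recent setuptools where `pkg_resources.Requirement` exposes PEP 508 markers (the requirement string is an arbitrary example):

```python
import sys
import pkg_resources

req = pkg_resources.Requirement.parse("monotonic>=1.0; python_version < '3'")
marker_env = {
    'python_version': '.'.join(map(str, sys.version_info[:2])),
    'sys_platform': sys.platform,
}
# On Python 3 this evaluates to False, so the requirement would be filtered out.
print(req.marker.evaluate(environment=marker_env))
```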
def _requirement_find_lowest_possible(req):
# type: (pkg_resources.Requirement) -> List[str]
"""Find lowest required version.
Given a single Requirement, this function calculates the lowest required
version to satisfy it. If the requirement excludes a specific version, then
this version will not be used as the minimal supported version.
Examples
--------
>>> req = pkg_resources.Requirement.parse("foobar>=1.0,>2")
>>> _requirement_find_lowest_possible(req)
['foobar', '>=', '1.0']
>>> req = pkg_resources.Requirement.parse("baz>=1.3,>3,!=1.5")
>>> _requirement_find_lowest_possible(req)
['baz', '>=', '1.3']
"""
version_dep = None # type: Optional[str]
version_comp = None # type: Optional[str]
for dep in req.specs:
version = pkg_resources.parse_version(dep[1])
# we don't want to have a not supported version as minimal version
if dep[0] == '!=':
continue
# try to use the lowest version available
# i.e. for ">=0.8.4,>=0.9.7", select "0.8.4"
if (not version_dep or
version < pkg_resources.parse_version(version_dep)):
version_dep = dep[1]
version_comp = dep[0]
assert (version_dep is None and version_comp is None) or \
(version_dep is not None and version_comp is not None)
return [
x for x in (req.unsafe_name, version_comp, version_dep)
if x is not None]
|
Find lowest required version.
Given a single Requirement, this function calculates the lowest required
version to satisfy it. If the requirement excludes a specific version, then
this version will not be used as the minimal supported version.
Examples
--------
>>> req = pkg_resources.Requirement.parse("foobar>=1.0,>2")
>>> _requirement_find_lowest_possible(req)
['foobar', '>=', '1.0']
>>> req = pkg_resources.Requirement.parse("baz>=1.3,>3,!=1.5")
>>> _requirement_find_lowest_possible(req)
['baz', '>=', '1.3']
|
entailment
|
def _requirements_sanitize(req_list):
# type: (List[str]) -> List[str]
"""
Cleanup a list of requirement strings (e.g. from requirements.txt) to only
contain entries valid for this platform and with the lowest required version
only.
Example
-------
>>> from sys import version_info
>>> _requirements_sanitize([
... 'foo>=3.0',
... "monotonic>=1.0,>0.1;python_version=='2.4'",
... "bar>1.0;python_version=='{}.{}'".format(version_info[0], version_info[1])
... ])
['foo >= 3.0', 'bar > 1.0']
"""
filtered_req_list = (
_requirement_find_lowest_possible(req) for req in
(pkg_resources.Requirement.parse(s) for s in req_list)
if _requirement_filter_by_marker(req)
)
return [" ".join(req) for req in filtered_req_list]
|
Cleanup a list of requirement strings (e.g. from requirements.txt) to only
contain entries valid for this platform and with the lowest required version
only.
Example
-------
>>> from sys import version_info
>>> _requirements_sanitize([
... 'foo>=3.0',
... "monotonic>=1.0,>0.1;python_version=='2.4'",
... "bar>1.0;python_version=='{}.{}'".format(version_info[0], version_info[1])
... ])
['foo >= 3.0', 'bar > 1.0']
|
entailment
|
def _ensure_coroutine_function(func):
"""Return a coroutine function.
func: either a coroutine function or a regular function
Note a coroutine function is not a coroutine!
"""
if asyncio.iscoroutinefunction(func):
return func
else:
@asyncio.coroutine
def coroutine_function(evt):
func(evt)
yield
return coroutine_function
|
Return a coroutine function.
func: either a coroutine function or a regular function
Note a coroutine function is not a coroutine!
|
entailment
|
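Note that `@asyncio.coroutine` is deprecated and removed in Python 3.11+. On newer interpreters, an equivalent sketch of the same helper written with `async def` might look like this (an adaptation for illustration, not the original library's code):

```python
import asyncio

def _ensure_coroutine_function(func):
    """Return a coroutine function wrapping func if it is a plain function."""
    if asyncio.iscoroutinefunction(func):
        return func

    async def coroutine_function(evt):
        # Call the synchronous callback; there is nothing to await here.
        return func(evt)

    return coroutine_function
```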
def location(self):
"""Return a string uniquely identifying the event.
This string can be used to find the event in the event store UI (cf. id
attribute, which is the UUID that at time of writing doesn't let you
easily find the event).
"""
if self._location is None:
self._location = "{}/{}-{}".format(
self.stream,
self.type,
self.sequence,
)
return self._location
|
Return a string uniquely identifying the event.
This string can be used to find the event in the event store UI (cf. id
attribute, which is the UUID that at time of writing doesn't let you
easily find the event).
|
entailment
|
async def find_backwards(self, stream_name, predicate, predicate_label='predicate'):
"""Return first event matching predicate, or None if none exists.
Note: 'backwards', both here and in Event Store, means 'towards the
event emitted furthest in the past'.
"""
logger = self._logger.getChild(predicate_label)
logger.info('Fetching first matching event')
uri = self._head_uri
try:
page = await self._fetcher.fetch(uri)
except HttpNotFoundError as e:
raise StreamNotFoundError() from e
while True:
evt = next(page.iter_events_matching(predicate), None)
if evt is not None:
return evt
uri = page.get_link("next")
if uri is None:
logger.warning("No matching event found")
return None
page = await self._fetcher.fetch(uri)
|
Return first event matching predicate, or None if none exists.
Note: 'backwards', both here and in Event Store, means 'towards the
event emitted furthest in the past'.
|
entailment
|
def main():
"""Command line interface for the ``qpass`` program."""
# Initialize logging to the terminal.
coloredlogs.install()
# Prepare for command line argument parsing.
action = show_matching_entry
program_opts = dict(exclude_list=[])
show_opts = dict(filters=[], use_clipboard=is_clipboard_supported())
verbosity = 0
# Parse the command line arguments.
try:
options, arguments = getopt.gnu_getopt(
sys.argv[1:],
"elnp:f:x:vqh",
["edit", "list", "no-clipboard", "password-store=", "filter=", "exclude=", "verbose", "quiet", "help"],
)
for option, value in options:
if option in ("-e", "--edit"):
action = edit_matching_entry
elif option in ("-l", "--list"):
action = list_matching_entries
elif option in ("-n", "--no-clipboard"):
show_opts["use_clipboard"] = False
elif option in ("-p", "--password-store"):
stores = program_opts.setdefault("stores", [])
stores.append(PasswordStore(directory=value))
elif option in ("-f", "--filter"):
show_opts["filters"].append(value)
elif option in ("-x", "--exclude"):
program_opts["exclude_list"].append(value)
elif option in ("-v", "--verbose"):
coloredlogs.increase_verbosity()
verbosity += 1
elif option in ("-q", "--quiet"):
coloredlogs.decrease_verbosity()
verbosity -= 1
elif option in ("-h", "--help"):
usage(__doc__)
return
else:
raise Exception("Unhandled option! (programming error)")
if not (arguments or action == list_matching_entries):
usage(__doc__)
return
except Exception as e:
warning("Error: %s", e)
sys.exit(1)
# Execute the requested action.
try:
show_opts["quiet"] = verbosity < 0
kw = show_opts if action == show_matching_entry else {}
action(QuickPass(**program_opts), arguments, **kw)
except PasswordStoreError as e:
# Known issues don't get a traceback.
logger.error("%s", e)
sys.exit(1)
except KeyboardInterrupt:
# If the user interrupted an interactive prompt they most likely did so
# intentionally, so there's no point in generating more output here.
sys.exit(1)
|
Command line interface for the ``qpass`` program.
|
entailment
|
def edit_matching_entry(program, arguments):
"""Edit the matching entry."""
entry = program.select_entry(*arguments)
entry.context.execute("pass", "edit", entry.name)
|
Edit the matching entry.
|
entailment
|
def list_matching_entries(program, arguments):
"""List the entries matching the given keywords/patterns."""
output("\n".join(entry.name for entry in program.smart_search(*arguments)))
|
List the entries matching the given keywords/patterns.
|
entailment
|
def show_matching_entry(program, arguments, use_clipboard=True, quiet=False, filters=()):
"""Show the matching entry on the terminal (and copy the password to the clipboard)."""
entry = program.select_entry(*arguments)
if not quiet:
formatted_entry = entry.format_text(include_password=not use_clipboard, filters=filters)
if formatted_entry and not formatted_entry.isspace():
output(formatted_entry)
if use_clipboard:
entry.copy_password()
|
Show the matching entry on the terminal (and copy the password to the clipboard).
|
entailment
|
def parse_parameters(payflowpro_response_data):
"""
Parses a set of Payflow Pro response parameter name and value pairs into
a list of PayflowProObjects, and returns a tuple containing the object
list and a dictionary containing any unconsumed data.
The first item in the object list will always be the Response object, and
the RecurringPayments object (if any) will be last.
The presence of any unconsumed data in the resulting dictionary probably
indicates an error or oversight in the PayflowProObject definitions.
"""
def build_class(klass, unconsumed_data):
known_att_names_set = set(klass.base_fields.keys())
available_atts_set = known_att_names_set.intersection(unconsumed_data)
if available_atts_set:
available_atts = dict()
for name in available_atts_set:
available_atts[name] = unconsumed_data[name]
del unconsumed_data[name]
return klass(**available_atts)
return None
unconsumed_data = payflowpro_response_data.copy()
# Parse the response data first
response = build_class(Response, unconsumed_data)
result_objects = [response]
# Parse the remaining data
for klass in object.__class__.__subclasses__(PayflowProObject):
obj = build_class(klass, unconsumed_data)
if obj:
result_objects.append(obj)
# Special handling of RecurringPayments
p_count = 1
payments = []
while ("p_result%d" % p_count) in unconsumed_data:
payments.append(RecurringPayment(
p_result = unconsumed_data.pop("p_result%d" % p_count, None),
p_pnref = unconsumed_data.pop("p_pnref%d" % p_count, None),
p_transtate = unconsumed_data.pop("p_transtate%d" % p_count, None),
p_tender = unconsumed_data.pop("p_tender%d" % p_count, None),
p_transtime = unconsumed_data.pop("p_transtime%d" % p_count, None),
p_amt = unconsumed_data.pop("p_amt%d" % p_count, None)))
p_count += 1
if payments:
result_objects.append(RecurringPayments(payments=payments))
return (result_objects, unconsumed_data,)
|
Parses a set of Payflow Pro response parameter name and value pairs into
a list of PayflowProObjects, and returns a tuple containing the object
list and a dictionary containing any unconsumed data.
The first item in the object list will always be the Response object, and
the RecurringPayments object (if any) will be last.
The presence of any unconsumed data in the resulting dictionary probably
indicates an error or oversight in the PayflowProObject definitions.
|
entailment
|
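The recurring-payments loop in `parse_parameters()` consumes numbered parameters (`p_result1`, `p_result2`, ...) until a number is missing. A minimal sketch of that pattern on a plain dict, with abbreviated field names and no PayflowPro classes required:

```python
data = {
    'p_result1': '0', 'p_pnref1': 'V00000001',
    'p_result2': '0', 'p_pnref2': 'V00000002',
    'other_field': 'left over',
}

payments = []
count = 1
while ('p_result%d' % count) in data:
    payments.append({
        'p_result': data.pop('p_result%d' % count, None),
        'p_pnref': data.pop('p_pnref%d' % count, None),
    })
    count += 1

print(payments)  # two payment dicts, in order
print(data)      # {'other_field': 'left over'} remains unconsumed
```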
def convert(document, canvas, items=None, tounicode=None):
"""
Convert 'items' stored in 'canvas' to SVG 'document'.
If 'items' is None, then all items are converted.
tounicode is a function that gets text and returns
its unicode representation. It should be used when
national characters are used on canvas.
Return list of XML elements
"""
tk = canvas.tk
global segment
if items is None: # default: all items
items = canvas.find_all()
supported_item_types = \
set(["line", "oval", "polygon", "rectangle", "text", "arc"])
if tounicode is None:
try:
# python3
bytes
tounicode = lambda x: x
except NameError:
# python2
tounicode = lambda text: str(text).encode("utf-8")
elements = []
for item in items:
# skip unsupported items
itemtype = canvas.type(item)
if itemtype not in supported_item_types:
emit_warning("Items of type '%s' are not supported." % itemtype)
continue
# get item coords
coords = canvas.coords(item)
# get item options;
# options is a dict: opt. name -> opt. actual value
tmp = canvas.itemconfigure(item)
options = dict((v0, v4) for v0, v1, v2, v3, v4 in tmp.values())
# get state of item
state = options['state']
if 'current' in options['tags']:
options['state'] = ACTIVE
elif options['state'] == '':
options['state'] = 'normal'
else:
# left state unchanged
assert options['state'] in ['normal', DISABLED, 'hidden']
# skip hidden items
if options['state'] == 'hidden': continue
def get(name, default=""):
if state == ACTIVE and options.get(state + name):
return options.get(state + name)
if state == DISABLED and options.get(state + name):
return options.get(state + name)
if options.get(name):
return options.get(name)
else:
return default
if itemtype == 'line':
options['outline'] = ''
options['activeoutline'] = ''
options['disabledoutline'] = ''
elif itemtype == 'arc' and options['style'] == ARC:
options['fill'] = ''
options['activefill'] = ''
options['disabledfill'] = ''
style = {}
style["stroke"] = HTMLcolor(canvas, get("outline"))
if get("fill"):
style["fill"] = HTMLcolor(canvas, get("fill"))
else:
style["fill"] = "none"
width = float(options['width'])
if state == ACTIVE:
width = max(float(options['activewidth']), width)
elif state == DISABLED:
try:
disabledwidth = options['disabledwidth']
except KeyError:
# Text item might not have 'disabledwidth' option. This raises
# the exception in course of processing of such item.
# Default value is 0. Hence, it shall not affect width.
pass
else:
if float(disabledwidth) > 0:
width = disabledwidth
if width != 1.0:
style['stroke-width'] = width
if width:
dash = canvas.itemcget(item, 'dash')
if state == DISABLED and canvas.itemcget(item, 'disableddash'):
dash = canvas.itemcget(item, 'disableddash')
elif state == ACTIVE and canvas.itemcget(item, 'activedash'):
dash = canvas.itemcget(item, 'activedash')
if dash != '':
try:
dash = tuple(map(int, dash.split()))
except ValueError:
# int can't parse literal, dash defined with -.,_
linewidth = float(get('width'))
dash = parse_dash(dash, linewidth)
style['stroke-dasharray'] = ",".join(map(str, dash))
style['stroke-dashoffset'] = options['dashoffset']
if itemtype == 'line':
# in this case, outline is set with fill property
style["fill"], style["stroke"] = "none", style["fill"]
style['stroke-linecap'] = cap_style[options['capstyle']]
if options['smooth'] in ['1', 'bezier', 'true']:
element = smoothline(document, coords)
elif options['smooth'] == 'raw':
element = cubic_bezier(document, coords)
elif options['smooth'] == '0':
if len(coords) == 4:
# segment
element = segment(document, coords)
else:
# polyline
element = polyline(document, coords)
style['fill'] = "none"
style['stroke-linejoin'] = join_style[options['joinstyle']]
else:
emit_warning("Unknown smooth type: %s. Falling back to smooth=0" % options['smooth'])
element = polyline(document, coords)
style['stroke-linejoin'] = join_style[options['joinstyle']]
elements.append(element)
if options['arrow'] in [FIRST, BOTH]:
arrow = arrow_head(document, coords[2], coords[3], coords[0], coords[1], options['arrowshape'])
arrow.setAttribute('fill', style['stroke'])
elements.append(arrow)
if options['arrow'] in [LAST, BOTH]:
arrow = arrow_head(document, coords[-4], coords[-3], coords[-2], coords[-1], options['arrowshape'])
arrow.setAttribute('fill', style['stroke'])
elements.append(arrow)
elif itemtype == 'polygon':
if options['smooth'] in ['1', 'bezier', 'true']:
element = smoothpolygon(document, coords)
elif options['smooth'] == '0':
element = polygon(document, coords)
else:
emit_warning("Unknown smooth type: %s. Falling back to smooth=0" % options['smooth'])
element = polygon(document, coords)
elements.append(element)
style['fill-rule'] = 'evenodd'
style['stroke-linejoin'] = join_style[options['joinstyle']]
elif itemtype == 'oval':
element = oval(document, coords)
elements.append(element)
elif itemtype == 'rectangle':
element = rectangle(document, coords)
elements.append(element)
elif itemtype == 'arc':
element = arc(document, coords, options['start'], options['extent'], options['style'])
if options['style'] == ARC:
style['fill'] = "none"
elements.append(element)
elif itemtype == 'text':
style['stroke'] = '' # no stroke
# setup geometry
xmin, ymin, xmax, ymax = canvas.bbox(item)
x = coords[0]
# set y at 'dominant-baseline'
y = ymin + font_metrics(tk, options['font'], 'ascent')
element = setattribs(
document.createElement('text'),
x = x, y = y
)
elements.append(element)
element.appendChild(document.createTextNode(
tounicode(canvas.itemcget(item, 'text'))
))
# 2. Setup style
actual = font_actual(tk, options['font'])
style['fill'] = HTMLcolor(canvas, get('fill'))
style["text-anchor"] = text_anchor[options["anchor"]]
style['font-family'] = actual['family']
# size
size = float(actual['size'])
if size > 0: # size in points
style['font-size'] = "%spt" % size
else: # size in pixels
style['font-size'] = "%s" % (-size)
style['font-style'] = font_style[actual['slant']]
style['font-weight'] = font_weight[actual['weight']]
# overstrike/underline
if actual['overstrike'] and actual['underline']:
style['text-decoration'] = 'underline line-through'
elif actual['overstrike']:
style['text-decoration'] = 'line-through'
elif actual['underline']:
style['text-decoration'] = 'underline'
for attr, value in style.items():
if value != '': # create only nonempty attributes
element.setAttribute(attr, str(value))
return elements
|
Convert 'items' stored in 'canvas' to SVG 'document'.
If 'items' is None, then all items are converted.
tounicode is a function that gets text and returns
its unicode representation. It should be used when
national characters are used on canvas.
Return list of XML elements
|
entailment
|
def SVGdocument():
"Create default SVG document"
import xml.dom.minidom
implementation = xml.dom.minidom.getDOMImplementation()
doctype = implementation.createDocumentType(
"svg", "-//W3C//DTD SVG 1.1//EN",
"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"
)
document= implementation.createDocument(None, "svg", doctype)
document.documentElement.setAttribute(
'xmlns', 'http://www.w3.org/2000/svg'
)
return document
|
Create default SVG document
|
entailment
|
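A short usage sketch of `SVGdocument` as defined above, using only `xml.dom.minidom` calls (attribute values are arbitrary):

```python
document = SVGdocument()
svg = document.documentElement
svg.setAttribute('width', '100')
svg.setAttribute('height', '50')

# Append a single line element, the same way convert() appends shapes.
line = document.createElement('line')
for name, value in (('x1', '0'), ('y1', '0'), ('x2', '100'), ('y2', '50')):
    line.setAttribute(name, value)
svg.appendChild(line)

print(document.toprettyxml(indent='  '))
```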
def segment_to_line(document, coords):
"polyline with 2 vertices using <line> tag"
return setattribs(
document.createElement('line'),
x1 = coords[0],
y1 = coords[1],
x2 = coords[2],
y2 = coords[3],
)
|
polyline with 2 vertices using <line> tag
|
entailment
|
def polyline(document, coords):
"polyline with more then 2 vertices"
points = []
for i in range(0, len(coords), 2):
points.append("%s,%s" % (coords[i], coords[i+1]))
return setattribs(
document.createElement('polyline'),
points = ' '.join(points),
)
|
polyline with more than 2 vertices
|
entailment
|
def smoothline(document, coords):
"smoothed polyline"
element = document.createElement('path')
path = []
points = [(coords[i], coords[i+1]) for i in range(0, len(coords), 2)]
def pt(points):
x0, y0 = points[0]
x1, y1 = points[1]
p0 = (2*x0-x1, 2*y0-y1)
x0, y0 = points[-1]
x1, y1 = points[-2]
pn = (2*x0-x1, 2*y0-y1)
p = [p0] + points[1:-1] + [pn]
for i in range(1, len(points)-1):
a = p[i-1]
b = p[i]
c = p[i+1]
yield lerp(a, b, 0.5), b, lerp(b, c, 0.5)
for i, (A, B, C) in enumerate(pt(points)):
if i == 0:
path.append("M%s,%s Q%s,%s %s,%s" % (A[0], A[1], B[0], B[1], C[0], C[1]))
else:
path.append("T%s,%s" % (C[0], C[1]))
element.setAttribute('d', ' '.join(path))
return element
|
smoothed polyline
|
entailment
|
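The smoothing helpers above (and `arrow_head` further down) call a `lerp` function that is not included in this excerpt. A plausible definition consistent with how it is used, where `lerp(a, b, 0.5)` yields a midpoint and `lerp(P1, P0, t)` a point a fraction `t` of the way from `P1` toward `P0`, would be (an assumption, not the library's actual code):

```python
def lerp(a, b, t):
    """Linear interpolation between 2-d points a and b at parameter t."""
    (xa, ya), (xb, yb) = a, b
    return (xa + (xb - xa) * t, ya + (yb - ya) * t)

print(lerp((0, 0), (10, 4), 0.5))  # (5.0, 2.0)
```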
def cubic_bezier(document, coords):
"cubic bezier polyline"
element = document.createElement('path')
points = [(coords[i], coords[i+1]) for i in range(0, len(coords), 2)]
path = ["M%s %s" %points[0]]
for n in xrange(1, len(points), 3):
A, B, C = points[n:n+3]
path.append("C%s,%s %s,%s %s,%s" % (A[0], A[1], B[0], B[1], C[0], C[1]))
element.setAttribute('d', ' '.join(path))
return element
|
cubic bezier polyline
|
entailment
|
def smoothpolygon(document, coords):
"smoothed filled polygon"
element = document.createElement('path')
path = []
points = [(coords[i], coords[i+1]) for i in range(0, len(coords), 2)]
def pt(points):
p = points
n = len(points)
for i in range(0, len(points)):
a = p[(i-1) % n]
b = p[i]
c = p[(i+1) % n]
yield lerp(a, b, 0.5), b, lerp(b, c, 0.5)
for i, (A, B, C) in enumerate(pt(points)):
if i == 0:
path.append("M%s,%s Q%s,%s %s,%s" % (A[0], A[1], B[0], B[1], C[0], C[1]))
else:
path.append("T%s,%s" % (C[0], C[1]))
path.append("z")
element.setAttribute('d', ' '.join(path))
return element
|
smoothed filled polygon
|
entailment
|
def oval(document, coords):
"circle/ellipse"
x1, y1, x2, y2 = coords
# circle
if x2-x1 == y2-y1:
return setattribs(document.createElement('circle'),
cx = (x1+x2)/2,
cy = (y1+y2)/2,
r = abs(x2-x1)/2,
)
# ellipse
else:
return setattribs(document.createElement('ellipse'),
cx = (x1+x2)/2,
cy = (y1+y2)/2,
rx = abs(x2-x1)/2,
ry = abs(y2-y1)/2,
)
|
circle/ellipse
|
entailment
|
def arc(document, bounding_rect, start, extent, style):
"arc, pieslice (filled), arc with chord (filled)"
(x1, y1, x2, y2) = bounding_rect
import math
cx = (x1 + x2)/2.0
cy = (y1 + y2)/2.0
rx = (x2 - x1)/2.0
ry = (y2 - y1)/2.0
start = math.radians(float(start))
extent = math.radians(float(extent))
# from SVG spec:
# http://www.w3.org/TR/SVG/implnote.html#ArcImplementationNotes
x1 = rx * math.cos(start) + cx
y1 = -ry * math.sin(start) + cy # XXX: ry is negated here
x2 = rx * math.cos(start + extent) + cx
y2 = -ry * math.sin(start + extent) + cy # XXX: ry is negated here
if abs(extent) > math.pi:
fa = 1
else:
fa = 0
if extent > 0.0:
fs = 0
else:
fs = 1
path = []
# common: arc
path.append('M%s,%s' % (x1, y1))
path.append('A%s,%s 0 %d %d %s,%s' % (rx, ry, fa, fs, x2, y2))
if style == ARC:
pass
elif style == CHORD:
path.append('z')
else: # default: pieslice
path.append('L%s,%s' % (cx, cy))
path.append('z')
return setattribs(document.createElement('path'), d = ''.join(path))
|
arc, pieslice (filled), arc with chord (filled)
|
entailment
|
def HTMLcolor(canvas, color):
"returns Tk color in form '#rrggbb' or '#rgb'"
if color:
# r, g, b \in [0..2**16]
r, g, b = ["%02x" % (c // 256) for c in canvas.winfo_rgb(color)]
if (r[0] == r[1]) and (g[0] == g[1]) and (b[0] == b[1]):
# shorter form #rgb
return "#" + r[0] + g[0] + b[0]
else:
return "#" + r + g + b
else:
return color
|
returns Tk color in form '#rrggbb' or '#rgb'
|
entailment
|
def arrow_head(document, x0, y0, x1, y1, arrowshape):
"make arrow head at (x1,y1), arrowshape is tuple (d1, d2, d3)"
import math
dx = x1 - x0
dy = y1 - y0
poly = document.createElement('polygon')
d = math.sqrt(dx*dx + dy*dy)
if d == 0.0: # XXX: equal, no "close enough"
return poly
try:
d1, d2, d3 = list(map(float, arrowshape))
except ValueError:
d1, d2, d3 = map(float, arrowshape.split())
P0 = (x0, y0)
P1 = (x1, y1)
xa, ya = lerp(P1, P0, d1/d)
xb, yb = lerp(P1, P0, d2/d)
t = d3/d
xc, yc = dx*t, dy*t
points = [
x1, y1,
xb - yc, yb + xc,
xa, ya,
xb + yc, yb - xc,
]
poly.setAttribute('points', ' '.join(map(str, points)))
return poly
|
make arrow head at (x1,y1), arrowshape is tuple (d1, d2, d3)
|
entailment
|
def font_actual(tkapp, font):
"actual font parameters"
tmp = tkapp.call('font', 'actual', font)
return dict(
(tmp[i][1:], tmp[i+1]) for i in range(0, len(tmp), 2)
)
|
actual font parameters
|
entailment
|
def parse_dash(string, width):
"parse dash pattern specified with string"
# DashConvert from {tk-sources}/generic/tkCanvUtil.c
w = max(1, int(width + 0.5))
n = len(string)
result = []
for i, c in enumerate(string):
if c == " " and len(result):
result[-1] += w + 1
elif c == "_":
result.append(8*w)
result.append(4*w)
elif c == "-":
result.append(6*w)
result.append(4*w)
elif c == ",":
result.append(4*w)
result.append(4*w)
elif c == ".":
result.append(2*w)
result.append(4*w)
return result
|
parse dash pattern specified with string
|
entailment
|
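A quick worked example of `parse_dash` as defined above: with `width=2` the effective unit is `w = 2`, so a `"-."` pattern expands to a 6w dash, 4w gap, 2w dot, 4w gap:

```python
print(parse_dash("-.", 2))  # [12, 8, 4, 8]
print(parse_dash("_", 1))   # [8, 4]
```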
def prof_altitude(pressure, p_coef=(-0.028389, -0.0493698, 0.485718, 0.278656,
-17.5703, 48.0926)):
"""
Return altitude for given pressure.
This function evaluates a polynomial at log10(pressure) values.
Parameters
----------
pressure : array-like
pressure values [hPa].
p_coef : array-like
coefficients of the polynomial (default values are for the US
Standard Atmosphere).
Returns
-------
altitude : array-like
        altitude values [km] (same shape as the pressure input array).
See Also
--------
prof_pressure : Returns pressure for
given altitude.
prof_temperature : Returns air temperature for
given altitude.
Notes
-----
Default coefficient values represent a 5th degree polynomial which had
been fitted to USSA data from 0-100 km. Accuracy is on the order of 1% for
0-100 km and 0.5% below 30 km. This function, with default values, may thus
produce bad results with pressure less than about 3e-4 hPa.
Examples
--------
>>> prof_altitude([1000, 800, 600])
array([ 0.1065092 , 1.95627858, 4.2060627 ])
"""
pressure = np.asarray(pressure)
altitude = np.polyval(p_coef, np.log10(pressure.flatten()))
return altitude.reshape(pressure.shape)
|
Return altitude for given pressure.
This function evaluates a polynomial at log10(pressure) values.
Parameters
----------
pressure : array-like
pressure values [hPa].
p_coef : array-like
coefficients of the polynomial (default values are for the US
Standard Atmosphere).
Returns
-------
altitude : array-like
altitude values [km] (same shape as the pressure input array).
See Also
--------
prof_pressure : Returns pressure for
given altitude.
prof_temperature : Returns air temperature for
given altitude.
Notes
-----
Default coefficient values represent a 5th degree polynomial which had
been fitted to USSA data from 0-100 km. Accuracy is on the order of 1% for
0-100 km and 0.5% below 30 km. This function, with default values, may thus
produce bad results with pressure less than about 3e-4 hPa.
Examples
--------
>>> prof_altitude([1000, 800, 600])
array([ 0.1065092 , 1.95627858, 4.2060627 ])
|
entailment
|
def prof_pressure(altitude, z_coef=(1.94170e-9, -5.14580e-7, 4.57018e-5,
-1.55620e-3, -4.61994e-2, 2.99955)):
"""
Return pressure for given altitude.
This function evaluates a polynomial at altitudes values.
Parameters
----------
altitude : array-like
altitude values [km].
z_coef : array-like
coefficients of the polynomial (default values are for the US
Standard Atmosphere).
Returns
-------
pressure : array-like
        pressure values [hPa] (same shape as the altitude input array).
See Also
--------
prof_altitude : Returns altitude for
given pressure.
prof_temperature : Returns air temperature for
given altitude.
Notes
-----
Default coefficient values represent a 5th degree polynomial which had
    been fitted to USSA data from 0-100 km. Accuracy is on the order of 1% for
0-100 km and 0.5% below 30 km. This function, with default values, may thus
produce bad results with altitude > 100 km.
Examples
--------
>>> prof_pressure([0, 10, 20])
array([ 998.96437334, 264.658697 , 55.28114631])
"""
altitude = np.asarray(altitude)
pressure = np.power(10, np.polyval(z_coef, altitude.flatten()))
return pressure.reshape(altitude.shape)
|
Return pressure for given altitude.
This function evaluates a polynomial at altitudes values.
Parameters
----------
altitude : array-like
altitude values [km].
z_coef : array-like
coefficients of the polynomial (default values are for the US
Standard Atmosphere).
Returns
-------
pressure : array-like
pressure values [hPa] (same shape as the altitude input array).
See Also
--------
prof_altitude : Returns altitude for
given pressure.
prof_temperature : Returns air temperature for
given altitude.
Notes
-----
Default coefficient values represent a 5th degree polynomial which had
been fitted to USSA data from 0-100 km. Accuracy is on the order of 1% for
0-100 km and 0.5% below 30 km. This function, with default values, may thus
produce bad results with altitude > 100 km.
Examples
--------
>>> prof_pressure([0, 10, 20])
array([ 998.96437334, 264.658697 , 55.28114631])
|
entailment
|
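An illustrative consistency check using the two functions above: converting altitudes to pressure and back should roughly reproduce the input, within the quoted fit accuracy.

import numpy as np

z = np.array([0.0, 10.0, 20.0])
z_back = prof_altitude(prof_pressure(z))
print(z_back)  # expected to stay close to [0, 10, 20] (both fits are accurate to about 1%)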
def _find_references(model_name, references=None):
"""
Iterate over model references for `model_name`
and return a list of parent model specifications (including those of
`model_name`, ordered from parent to child).
"""
references = references or []
references.append(model_name)
ref = MODELS[model_name].get('reference')
if ref is not None:
_find_references(ref, references)
parent_models = [m for m in references]
parent_models.reverse()
return parent_models
|
Iterate over model references for `model_name`
and return a list of parent model specifications (including those of
`model_name`, ordered from parent to child).
|
entailment
|
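An illustration of the MODELS registry shape assumed by _find_references(); the names and fields below are made up, the real MODELS dict lives elsewhere in the module. Each entry may name a parent via 'reference', and the chain is followed until no parent is left.

MODELS = {
    'GEOS5_NATIVE': {'reference': 'GEOS5', 'Nlayers': 72},
    'GEOS5': {'reference': 'GEOS', 'resolution': (2.0, 2.5)},
    'GEOS': {'reference': None, 'model_family': 'GEOS'},
}
# With such a registry, _find_references('GEOS5_NATIVE') would return
# ['GEOS', 'GEOS5', 'GEOS5_NATIVE'], ordered from parent to child.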
def _get_model_info(model_name):
"""
Get the grid specifications for a given model.
Parameters
----------
model_name : string
Name of the model. Supports multiple formats
(e.g., 'GEOS5', 'GEOS-5' or 'GEOS_5').
Returns
-------
specifications : dict
Grid specifications as a dictionary.
Raises
------
ValueError
If the model is not supported (see `models`) or if the given
`model_name` corresponds to several entries in the list of
supported models.
"""
# trying to get as much as possible a valid model name from the given
# `model_name`, using regular expressions.
split_name = re.split(r'[\-_\s]', model_name.strip().upper())
sep_chars = ('', ' ', '-', '_')
gen_seps = itertools.combinations_with_replacement(
sep_chars, len(split_name) - 1
)
    test_names = ("".join(itertools.chain(*zip(split_name, s + ('',))))
                  for s in gen_seps)
    match_names = [name for name in test_names
                   if name in _get_supported_models()]
if not len(match_names):
raise ValueError("Model '{0}' is not supported".format(model_name))
elif len(match_names) > 1:
raise ValueError("Multiple matched models for given model name '{0}'"
.format(model_name))
valid_model_name = match_names[0]
parent_models = _find_references(valid_model_name)
model_spec = dict()
for m in parent_models:
model_spec.update(MODELS[m])
model_spec.pop('reference')
model_spec['model_family'] = parent_models[0]
model_spec['model_name'] = valid_model_name
return model_spec
|
Get the grid specifications for a given model.
Parameters
----------
model_name : string
Name of the model. Supports multiple formats
(e.g., 'GEOS5', 'GEOS-5' or 'GEOS_5').
Returns
-------
specifications : dict
Grid specifications as a dictionary.
Raises
------
ValueError
If the model is not supported (see `models`) or if the given
`model_name` corresponds to several entries in the list of
supported models.
|
entailment
|
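A sketch of the name-normalisation step in _get_model_info() above: the input is split on '-', '_' and whitespace, then rejoined with every separator combination so that all common spellings are tried against the supported-model list.

import itertools
import re

split_name = re.split(r'[\-_\s]', 'geos 5'.strip().upper())
sep_chars = ('', ' ', '-', '_')
gen_seps = itertools.combinations_with_replacement(sep_chars, len(split_name) - 1)
candidates = {"".join(itertools.chain(*zip(split_name, s + ('',)))) for s in gen_seps}
print(sorted(candidates))  # ['GEOS 5', 'GEOS-5', 'GEOS5', 'GEOS_5']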
def _get_archive_filelist(filename):
# type: (str) -> List[str]
"""Extract the list of files from a tar or zip archive.
Args:
filename: name of the archive
Returns:
Sorted list of files in the archive, excluding './'
Raises:
ValueError: when the file is neither a zip nor a tar archive
FileNotFoundError: when the provided file does not exist (for Python 3)
IOError: when the provided file does not exist (for Python 2)
"""
names = [] # type: List[str]
if tarfile.is_tarfile(filename):
with tarfile.open(filename) as tar_file:
names = sorted(tar_file.getnames())
elif zipfile.is_zipfile(filename):
with zipfile.ZipFile(filename) as zip_file:
names = sorted(zip_file.namelist())
else:
raise ValueError("Can not get filenames from '{!s}'. "
"Not a tar or zip file".format(filename))
if "./" in names:
names.remove("./")
return names
|
Extract the list of files from a tar or zip archive.
Args:
filename: name of the archive
Returns:
Sorted list of files in the archive, excluding './'
Raises:
ValueError: when the file is neither a zip nor a tar archive
FileNotFoundError: when the provided file does not exist (for Python 3)
IOError: when the provided file does not exist (for Python 2)
|
entailment
|
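A usage sketch for _get_archive_filelist() above, building a tiny zip archive first (the path is illustrative):

import zipfile

with zipfile.ZipFile('/tmp/example.zip', 'w') as zf:
    zf.writestr('b.txt', 'hello')
    zf.writestr('a/c.txt', 'world')

print(_get_archive_filelist('/tmp/example.zip'))
# -> ['a/c.txt', 'b.txt'] (sorted; a top-level './' entry would be dropped)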
def _augment_book(self, uuid, event):
"""
Checks if the newly created object is a book and only has an ISBN.
If so, tries to fetch the book data off the internet.
:param uuid: uuid of book to augment
:param client: requesting client
"""
try:
if not isbnmeta:
self.log(
"No isbntools found! Install it to get full "
"functionality!",
lvl=warn)
return
new_book = objectmodels['book'].find_one({'uuid': uuid})
try:
if len(new_book.isbn) != 0:
self.log('Got a lookup candidate: ', new_book._fields)
try:
meta = isbnmeta(
new_book.isbn,
service=self.config.isbnservice
)
mapping = libraryfieldmapping[
self.config.isbnservice
]
new_meta = {}
for key in meta.keys():
if key in mapping:
if isinstance(mapping[key], tuple):
name, conv = mapping[key]
try:
new_meta[name] = conv(meta[key])
except ValueError:
self.log(
'Bad value from lookup:',
name, conv, key
)
else:
new_meta[mapping[key]] = meta[key]
new_book.update(new_meta)
new_book.save()
self._notify_result(event, new_book)
self.log("Book successfully augmented from ",
self.config.isbnservice)
except Exception as e:
self.log("Error during meta lookup: ", e, type(e),
new_book.isbn, lvl=error, exc=True)
error_response = {
'component': 'hfos.alert.manager',
'action': 'notify',
'data': {
'type': 'error',
'message': 'Could not look up metadata, sorry:' + str(e)
}
}
self.log(event, event.client, pretty=True)
self.fireEvent(send(event.client.uuid, error_response))
except Exception as e:
self.log("Error during book update.", e, type(e),
exc=True, lvl=error)
except Exception as e:
self.log("Book creation notification error: ", uuid, e, type(e),
lvl=error, exc=True)
|
Checks if the newly created object is a book and only has an ISBN.
If so, tries to fetch the book data off the internet.
:param uuid: uuid of book to augment
:param client: requesting client
|
entailment
|
def serial_ports():
""" Lists serial port names
:raises EnvironmentError:
On unsupported or unknown platforms
:returns:
A list of the serial ports available on the system
Courtesy: Thomas ( http://stackoverflow.com/questions/12090503
/listing-available-com-ports-with-python )
"""
if sys.platform.startswith('win'):
ports = ['COM%s' % (i + 1) for i in range(256)]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
# this excludes your current terminal "/dev/tty"
ports = glob.glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
result = []
for port in ports:
try:
s = serial.Serial(port)
s.close()
result.append(port)
except (OSError, serial.SerialException):
pass
return result
|
Lists serial port names
:raises EnvironmentError:
On unsupported or unknown platforms
:returns:
A list of the serial ports available on the system
Courtesy: Thomas ( http://stackoverflow.com/questions/12090503
/listing-available-com-ports-with-python )
|
entailment
|
def opened(self, *args):
"""Initiates communication with the remote controlled device.
:param args:
"""
self._serial_open = True
self.log("Opened: ", args, lvl=debug)
self._send_command(b'l,1') # Saying hello, shortly
self.log("Turning off engine, pump and neutralizing rudder")
self._send_command(b'v')
self._handle_servo(self._machine_channel, 0)
self._handle_servo(self._rudder_channel, 127)
self._set_digital_pin(self._pump_channel, 0)
# self._send_command(b'h')
self._send_command(b'l,0')
self._send_command(b'm,HFOS Control')
|
Initiates communication with the remote controlled device.
:param args:
|
entailment
|
def on_machinerequest(self, event):
"""
Sets a new machine speed.
:param event:
"""
self.log("Updating new machine power: ", event.controlvalue)
self._handle_servo(self._machine_channel, event.controlvalue)
|
Sets a new machine speed.
:param event:
|
entailment
|
def on_rudderrequest(self, event):
"""
Sets a new rudder angle.
:param event:
"""
self.log("Updating new rudder angle: ", event.controlvalue)
self._handle_servo(self._rudder_channel, event.controlvalue)
|
Sets a new rudder angle.
:param event:
|
entailment
|
def on_pumprequest(self, event):
"""
Activates or deactivates a connected pump.
:param event:
"""
self.log("Updating pump status: ", event.controlvalue)
self._set_digital_pin(self._pump_channel, event.controlvalue)
|
Activates or deactivates a connected pump.
:param event:
|
entailment
|
def provisionList(items, database_name, overwrite=False, clear=False, skip_user_check=False):
"""Provisions a list of items according to their schema
:param items: A list of provisionable items.
    :param database_name: Name of the warmongo database model to use
:param overwrite: Causes existing items to be overwritten
:param clear: Clears the collection first (Danger!)
:param skip_user_check: Skips checking if a system user is existing already (for user provisioning)
:return:
"""
log('Provisioning', items, database_name, lvl=debug)
system_user = None
def get_system_user():
"""Retrieves the node local system user"""
user = objectmodels['user'].find_one({'name': 'System'})
try:
log('System user uuid: ', user.uuid, lvl=verbose)
return user.uuid
except AttributeError as e:
log('No system user found:', e, lvl=warn)
log('Please install the user provision to setup a system user or check your database configuration',
lvl=error)
return False
# TODO: Do not check this on specific objects but on the model (i.e. once)
def needs_owner(obj):
"""Determines whether a basic object has an ownership field"""
        for privilege in obj._fields.get('perms', {}):
if 'owner' in obj._fields['perms'][privilege]:
return True
return False
import pymongo
from hfos.database import objectmodels, dbhost, dbport, dbname
database_object = objectmodels[database_name]
log(dbhost, dbname)
# TODO: Fix this to make use of the dbhost
client = pymongo.MongoClient(dbhost, dbport)
db = client[dbname]
if not skip_user_check:
system_user = get_system_user()
if not system_user:
return
else:
# TODO: Evaluate what to do instead of using a hardcoded UUID
# This is usually only here for provisioning the system user
# One way to avoid this, is to create (instead of provision)
# this one upon system installation.
system_user = '0ba87daa-d315-462e-9f2e-6091d768fd36'
col_name = database_object.collection_name()
if clear is True:
log("Clearing collection for", col_name, lvl=warn)
db.drop_collection(col_name)
counter = 0
for no, item in enumerate(items):
new_object = None
item_uuid = item['uuid']
log("Validating object (%i/%i):" % (no + 1, len(items)), item_uuid, lvl=debug)
if database_object.count({'uuid': item_uuid}) > 0:
log('Object already present', lvl=warn)
if overwrite is False:
log("Not updating item", item, lvl=warn)
else:
log("Overwriting item: ", item_uuid, lvl=warn)
new_object = database_object.find_one({'uuid': item_uuid})
new_object._fields.update(item)
else:
new_object = database_object(item)
if new_object is not None:
try:
if needs_owner(new_object):
if not hasattr(new_object, 'owner'):
log('Adding system owner to object.', lvl=verbose)
new_object.owner = system_user
except Exception as e:
log('Error during ownership test:', e, type(e),
exc=True, lvl=error)
try:
new_object.validate()
new_object.save()
counter += 1
except ValidationError as e:
raise ValidationError(
"Could not provision object: " + str(item_uuid), e)
log("Provisioned %i out of %i items successfully." % (counter, len(items)))
|
Provisions a list of items according to their schema
:param items: A list of provisionable items.
:param database_name: Name of the warmongo database model to use
:param overwrite: Causes existing items to be overwritten
:param clear: Clears the collection first (Danger!)
:param skip_user_check: Skips checking if a system user is existing already (for user provisioning)
:return:
|
entailment
|
def DefaultExtension(schema_obj, form_obj, schemata=None):
"""Create a default field"""
if schemata is None:
schemata = ['systemconfig', 'profile', 'client']
DefaultExtends = {
'schema': {
"properties/modules": [
schema_obj
]
},
'form': {
'modules': {
'items/': form_obj
}
}
}
output = {}
for schema in schemata:
output[schema] = DefaultExtends
return output
|
Create a default field
|
entailment
|
def copytree(root_src_dir, root_dst_dir, hardlink=True):
"""Copies a whole directory tree"""
for src_dir, dirs, files in os.walk(root_src_dir):
dst_dir = src_dir.replace(root_src_dir, root_dst_dir, 1)
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
for file_ in files:
src_file = os.path.join(src_dir, file_)
dst_file = os.path.join(dst_dir, file_)
try:
if os.path.exists(dst_file):
if hardlink:
hfoslog('Removing frontend link:', dst_file,
emitter='BUILDER', lvl=verbose)
os.remove(dst_file)
else:
hfoslog('Overwriting frontend file:', dst_file,
emitter='BUILDER', lvl=verbose)
hfoslog('Hardlinking ', src_file, dst_dir, emitter='BUILDER',
lvl=verbose)
if hardlink:
os.link(src_file, dst_file)
else:
copy(src_file, dst_dir)
except PermissionError as e:
hfoslog(
" No permission to remove/create target %s for "
"frontend:" % ('link' if hardlink else 'copy'),
dst_dir, e, emitter='BUILDER', lvl=error)
except Exception as e:
hfoslog("Error during", 'link' if hardlink else 'copy',
"creation:", type(e), e, emitter='BUILDER',
lvl=error)
hfoslog('Done linking', root_dst_dir, emitter='BUILDER',
lvl=verbose)
|
Copies a whole directory tree
|
entailment
|
def install_frontend(instance='default', forcereload=False, forcerebuild=False,
forcecopy=True, install=True, development=False, build_type='dist'):
"""Builds and installs the frontend"""
hfoslog("Updating frontend components", emitter='BUILDER')
components = {}
loadable_components = {}
# TODO: Fix this up, it is probably not a sane way to get at the real root
if development:
frontendroot = os.path.abspath(os.path.dirname(os.path.realpath(
__file__)) + "../../../frontend")
else:
frontendroot = '/opt/hfos/frontend'
frontendtarget = os.path.join('/var/lib/hfos', instance, 'frontend')
if install:
cmdline = ["npm", "install"]
hfoslog("Running", cmdline, lvl=verbose,
emitter='BUILDER')
npminstall = Popen(cmdline, cwd=frontendroot)
out, err = npminstall.communicate()
npminstall.wait()
hfoslog("Frontend dependency installing done: ", out,
err, lvl=debug, emitter='BUILDER')
if True: # try:
from pkg_resources import iter_entry_points
entry_point_tuple = (
iter_entry_points(group='hfos.base', name=None),
iter_entry_points(group='hfos.sails', name=None),
iter_entry_points(group='hfos.components', name=None)
)
for iterator in entry_point_tuple:
for entry_point in iterator:
try:
name = entry_point.name
location = entry_point.dist.location
loaded = entry_point.load()
hfoslog("Entry point: ", entry_point,
name,
entry_point.resolve().__module__, lvl=debug,
emitter='BUILDER')
component_name = entry_point.resolve().__module__.split('.')[1]
hfoslog("Loaded: ", loaded, lvl=verbose, emitter='BUILDER')
comp = {
'location': location,
'version': str(entry_point.dist.parsed_version),
'description': loaded.__doc__
}
frontend = os.path.join(location, 'frontend')
hfoslog("Checking component frontend parts: ",
frontend, lvl=verbose, emitter='BUILDER')
if os.path.isdir(
frontend) and frontend != frontendroot:
comp['frontend'] = frontend
else:
hfoslog("Component without frontend "
"directory:", comp, lvl=debug,
emitter='BUILDER')
components[component_name] = comp
loadable_components[component_name] = loaded
hfoslog("Loaded component:", comp, lvl=verbose,
emitter='BUILDER')
except Exception as e:
hfoslog("Could not inspect entrypoint: ", e,
type(e), entry_point, iterator, lvl=error,
exc=True, emitter='BUILDER')
# except Exception as e:
# hfoslog("Error: ", e, type(e), lvl=error, exc=True, emitter='BUILDER')
# return
hfoslog('Components after lookup:', sorted(list(components.keys())), emitter='BUILDER')
def _update_frontends(install=True):
hfoslog("Checking unique frontend locations: ",
loadable_components, lvl=debug, emitter='BUILDER')
importlines = []
modules = []
for name, component in components.items():
if 'frontend' in component:
origin = component['frontend']
target = os.path.join(frontendroot, 'src', 'components',
name)
target = os.path.normpath(target)
if install:
reqfile = os.path.join(origin, 'requirements.txt')
if os.path.exists(reqfile):
# TODO: Speed this up by collecting deps first then doing one single install call
hfoslog("Installing package dependencies", lvl=debug,
emitter='BUILDER')
with open(reqfile, 'r') as f:
cmdline = ["npm", "install"]
for line in f.readlines():
cmdline.append(line.replace("\n", ""))
hfoslog("Running", cmdline, lvl=verbose,
emitter='BUILDER')
npminstall = Popen(cmdline, cwd=frontendroot)
out, err = npminstall.communicate()
npminstall.wait()
hfoslog("Frontend installing done: ", out,
err, lvl=debug, emitter='BUILDER')
# if target in ('/', '/boot', '/usr', '/home', '/root',
# '/var'):
# hfoslog("Unsafe frontend deletion target path, "
# "NOT proceeding! ", target, lvl=critical,
# emitter='BUILDER')
hfoslog("Copying:", origin, target, lvl=debug,
emitter='BUILDER')
copytree(origin, target)
for modulefilename in glob(target + '/*.module.js'):
modulename = os.path.basename(modulefilename).split(
".module.js")[0]
line = u"import {s} from './components/{p}/{" \
u"s}.module';\nmodules.push({s});\n".format(
s=modulename, p=name)
if modulename not in modules:
importlines += line
modules.append(modulename)
else:
hfoslog("Module without frontend:", name, component,
lvl=debug, emitter='BUILDER')
with open(os.path.join(frontendroot, 'src', 'main.tpl.js'),
"r") as f:
main = "".join(f.readlines())
parts = main.split("/* COMPONENT SECTION */")
if len(parts) != 3:
hfoslog("Frontend loader seems damaged! Please check!",
lvl=critical, emitter='BUILDER')
return
try:
with open(os.path.join(frontendroot, 'src', 'main.js'),
"w") as f:
f.write(parts[0])
f.write("/* COMPONENT SECTION:BEGIN */\n")
for line in importlines:
f.write(line)
f.write("/* COMPONENT SECTION:END */\n")
f.write(parts[2])
except Exception as e:
hfoslog("Error during frontend package info writing. Check "
"permissions! ", e, lvl=error, emitter='BUILDER')
def _rebuild_frontend():
hfoslog("Starting frontend build.", lvl=warn, emitter='BUILDER')
npmbuild = Popen(["npm", "run", build_type], cwd=frontendroot)
out, err = npmbuild.communicate()
try:
npmbuild.wait()
except Exception as e:
hfoslog("Error during frontend build", e, type(e),
exc=True, lvl=error, emitter='BUILDER')
return
hfoslog("Frontend build done: ", out, err, lvl=debug, emitter='BUILDER')
copytree(os.path.join(frontendroot, build_type),
frontendtarget, hardlink=False)
copytree(os.path.join(frontendroot, 'assets'),
os.path.join(frontendtarget, 'assets'),
hardlink=False)
hfoslog("Frontend deployed", emitter='BUILDER')
hfoslog("Checking component frontend bits in ", frontendroot,
lvl=verbose, emitter='BUILDER')
_update_frontends(install=install)
if forcerebuild:
_rebuild_frontend()
hfoslog("Done: Install Frontend", emitter='BUILDER')
|
Builds and installs the frontend
|
entailment
|
def config(ctx):
"""[GROUP] Configuration management operations"""
from hfos import database
database.initialize(ctx.obj['dbhost'], ctx.obj['dbname'])
from hfos.schemata.component import ComponentConfigSchemaTemplate
ctx.obj['col'] = model_factory(ComponentConfigSchemaTemplate)
|
[GROUP] Configuration management operations
|
entailment
|
def delete(ctx, componentname):
"""Delete an existing component configuration. This will trigger
the creation of its default configuration upon next restart."""
col = ctx.obj['col']
if col.count({'name': componentname}) > 1:
log('More than one component configuration of this name! Try '
'one of the uuids as argument. Get a list with "config '
'list"')
return
log('Deleting component configuration', componentname,
emitter='MANAGE')
configuration = col.find_one({'name': componentname})
if configuration is None:
configuration = col.find_one({'uuid': componentname})
if configuration is None:
log('Component configuration not found:', componentname,
emitter='MANAGE')
return
configuration.delete()
log('Done')
|
Delete an existing component configuration. This will trigger
the creation of its default configuration upon next restart.
|
entailment
|
def show(ctx, component):
"""Show the stored, active configuration of a component."""
col = ctx.obj['col']
if col.count({'name': component}) > 1:
log('More than one component configuration of this name! Try '
'one of the uuids as argument. Get a list with "config '
'list"')
return
if component is None:
configurations = col.find()
for configuration in configurations:
log("%-15s : %s" % (configuration.name,
configuration.uuid),
emitter='MANAGE')
else:
configuration = col.find_one({'name': component})
if configuration is None:
configuration = col.find_one({'uuid': component})
if configuration is None:
log('No component with that name or uuid found.')
return
print(json.dumps(configuration.serializablefields(), indent=4))
|
Show the stored, active configuration of a component.
|
entailment
|
def separate_string(string):
"""
>>> separate_string("test <2>")
(['test ', ''], ['2'])
"""
string_list = regex.split(r'<(?![!=])', regex.sub(r'>', '<', string))
return string_list[::2], string_list[1::2]
|
>>> separate_string("test <2>")
(['test ', ''], ['2'])
|
entailment
|
def overlapping(start1, end1, start2, end2):
"""
>>> overlapping(0, 5, 6, 7)
False
>>> overlapping(1, 2, 0, 4)
True
>>> overlapping(5,6,0,5)
False
"""
return not ((start1 <= start2 and start1 <= end2 and end1 <= end2 and end1 <= start2) or
(start1 >= start2 and start1 >= end2 and end1 >= end2 and end1 >= start2))
|
>>> overlapping(0, 5, 6, 7)
False
>>> overlapping(1, 2, 0, 4)
True
>>> overlapping(5,6,0,5)
False
|
entailment
|
def remove_lower_overlapping(current, higher):
"""
>>> remove_lower_overlapping([], [('a', 0, 5)])
[('a', 0, 5)]
>>> remove_lower_overlapping([('z', 0, 4)], [('a', 0, 5)])
[('a', 0, 5)]
>>> remove_lower_overlapping([('z', 5, 6)], [('a', 0, 5)])
[('z', 5, 6), ('a', 0, 5)]
"""
for (match, h_start, h_end) in higher:
overlaps = list(overlapping_at(h_start, h_end, current))
for overlap in overlaps:
del current[overlap]
if len(overlaps) > 0:
# Keeps order in place
current.insert(overlaps[0], (match, h_start, h_end))
else:
current.append((match, h_start, h_end))
return current
|
>>> remove_lower_overlapping([], [('a', 0, 5)])
[('a', 0, 5)]
>>> remove_lower_overlapping([('z', 0, 4)], [('a', 0, 5)])
[('a', 0, 5)]
>>> remove_lower_overlapping([('z', 5, 6)], [('a', 0, 5)])
[('z', 5, 6), ('a', 0, 5)]
|
entailment
|
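The overlapping_at() helper used by remove_lower_overlapping() is not in this excerpt; a plausible definition (an assumption) would yield the indices of entries in current that overlap the given span, reusing overlapping() from above:

def overlapping_at(start, end, current):
    # Yield indices of (match, c_start, c_end) entries overlapping [start, end].
    for index, (_, c_start, c_end) in enumerate(current):
        if overlapping(start, end, c_start, c_end):
            yield index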
def debugrequest(self, event):
"""Handler for client-side debug requests"""
try:
self.log("Event: ", event.__dict__, lvl=critical)
if event.data == "storejson":
self.log("Storing received object to /tmp", lvl=critical)
fp = open('/tmp/hfosdebugger_' + str(
event.user.useruuid) + "_" + str(uuid4()), "w")
json.dump(event.data, fp, indent=True)
fp.close()
if event.data == "memdebug":
self.log("Memory hogs:", lvl=critical)
objgraph.show_most_common_types(limit=20)
if event.data == "growth":
self.log("Memory growth since last call:", lvl=critical)
objgraph.show_growth()
if event.data == "graph":
self._drawgraph()
if event.data == "exception":
class TestException(BaseException):
"""Generic exception to test exception monitoring"""
pass
raise TestException
if event.data == "heap":
self.log("Heap log:", self.heapy.heap(), lvl=critical)
if event.data == "buildfrontend":
self.log("Sending frontend build command")
self.fireEvent(frontendbuildrequest(force=True), "setup")
if event.data == "logtail":
self.fireEvent(logtailrequest(event.user, None, None,
event.client), "logger")
if event.data == "trigger_anchorwatch":
from hfos.anchor.anchorwatcher import cli_trigger_anchorwatch
self.fireEvent(cli_trigger_anchorwatch())
except Exception as e:
self.log("Exception during debug handling:", e, type(e),
lvl=critical)
|
Handler for client-side debug requests
|
entailment
|
def stdin_read(self, data):
"""read Event (on channel ``stdin``)
This is the event handler for ``read`` events specifically from the
``stdin`` channel. This is triggered each time stdin has data that
it has read.
"""
data = data.strip().decode("utf-8")
self.log("Incoming:", data, lvl=verbose)
if len(data) == 0:
self.log('Use /help to get a list of enabled cli hooks')
return
if data[0] == "/":
cmd = data[1:]
args = []
if ' ' in cmd:
cmd, args = cmd.split(' ', maxsplit=1)
args = args.split(' ')
if cmd in self.hooks:
self.log('Firing hooked event:', cmd, args, lvl=debug)
self.fireEvent(self.hooks[cmd](*args))
# TODO: Move these out, so we get a simple logic here
elif cmd == 'frontend':
self.log("Sending %s frontend rebuild event" %
("(forced)" if 'force' in args else ''))
self.fireEvent(
frontendbuildrequest(force='force' in args,
install='install' in args),
"setup")
elif cmd == 'backend':
self.log("Sending backend reload event")
self.fireEvent(componentupdaterequest(force=False), "setup")
else:
self.log('Unknown Command:', cmd, '. Use /help to get a list of enabled '
'cli hooks')
|
read Event (on channel ``stdin``)
This is the event handler for ``read`` events specifically from the
``stdin`` channel. This is triggered each time stdin has data that
it has read.
|
entailment
|
def register_event(self, event):
"""Registers a new command line interface event hook as command"""
self.log('Registering event hook:', event.cmd, event.thing,
pretty=True, lvl=verbose)
self.hooks[event.cmd] = event.thing
|
Registers a new command line interface event hook as command
|
entailment
|
def populate_user_events():
"""Generate a list of all registered authorized and anonymous events"""
global AuthorizedEvents
global AnonymousEvents
def inheritors(klass):
"""Find inheritors of a specified object class"""
subclasses = {}
subclasses_set = set()
work = [klass]
while work:
parent = work.pop()
for child in parent.__subclasses__():
if child not in subclasses_set:
# pprint(child.__dict__)
name = child.__module__ + "." + child.__name__
if name.startswith('hfos'):
subclasses_set.add(child)
event = {
'event': child,
'name': name,
'doc': child.__doc__,
'args': []
}
if child.__module__ in subclasses:
subclasses[child.__module__][
child.__name__] = event
else:
subclasses[child.__module__] = {
child.__name__: event
}
work.append(child)
return subclasses
# TODO: Change event system again, to catch authorized (i.e. "user") as
# well as normal events, so they can be processed by Automat
# NormalEvents = inheritors(Event)
AuthorizedEvents = inheritors(authorizedevent)
AnonymousEvents = inheritors(anonymousevent)
|
Generate a list of all registered authorized and anonymous events
|
entailment
|
def db(ctx):
"""[GROUP] Database management operations"""
from hfos import database
database.initialize(ctx.obj['dbhost'], ctx.obj['dbname'])
ctx.obj['db'] = database
|
[GROUP] Database management operations
|
entailment
|
def clear(ctx, schema):
"""Clears an entire database collection irrevocably. Use with caution!"""
response = _ask('Are you sure you want to delete the collection "%s"' % (
schema), default='N', data_type='bool')
if response is True:
host, port = ctx.obj['dbhost'].split(':')
client = pymongo.MongoClient(host=host, port=int(port))
database = client[ctx.obj['dbname']]
log("Clearing collection for", schema, lvl=warn,
emitter='MANAGE')
result = database.drop_collection(schema)
if not result['ok']:
log("Could not drop collection:", lvl=error)
log(result, pretty=True, lvl=error)
else:
log("Done")
|
Clears an entire database collection irrevocably. Use with caution!
|
entailment
|
def provision_system_config(items, database_name, overwrite=False, clear=False, skip_user_check=False):
"""Provision a basic system configuration"""
from hfos.provisions.base import provisionList
from hfos.database import objectmodels
default_system_config_count = objectmodels['systemconfig'].count({
'name': 'Default System Configuration'})
if default_system_config_count == 0 or (clear or overwrite):
provisionList([SystemConfiguration], 'systemconfig', overwrite, clear, skip_user_check)
hfoslog('Provisioning: System: Done.', emitter='PROVISIONS')
else:
hfoslog('Default system configuration already present.', lvl=warn,
emitter='PROVISIONS')
|
Provision a basic system configuration
|
entailment
|
def ICALImporter(ctx, filename, all, owner, calendar, create_calendar, clear_calendar, dry, execfilter):
"""Calendar Importer for iCal (ics) files
"""
log('iCal importer running')
objectmodels = ctx.obj['db'].objectmodels
if objectmodels['user'].count({'name': owner}) > 0:
owner_object = objectmodels['user'].find_one({'name': owner})
elif objectmodels['user'].count({'uuid': owner}) > 0:
owner_object = objectmodels['user'].find_one({'uuid': owner})
else:
log('User unknown. Specify either uuid or name.', lvl=warn)
return
log('Found user')
if objectmodels['calendar'].count({'name': calendar}) > 0:
calendar = objectmodels['calendar'].find_one({'name': calendar})
    elif objectmodels['calendar'].count({'uuid': calendar}) > 0:
calendar = objectmodels['calendar'].find_one({'uuid': calendar})
elif create_calendar:
calendar = objectmodels['calendar']({
'uuid': std_uuid(),
'name': calendar
})
else:
log('Calendar unknown and no --create-calendar specified. Specify either uuid or name of an existing calendar.',
lvl=warn)
return
log('Found calendar')
if clear_calendar is True:
log('Clearing calendar events')
for item in objectmodels['event'].find({'calendar': calendar.uuid}):
item.delete()
with open(filename, 'rb') as file_object:
caldata = Calendar.from_ical(file_object.read())
keys = {
'class': 'str',
'created': 'dt',
'description': 'str',
'dtstart': 'dt',
'dtend': 'dt',
'timestamp': 'dt',
'modified': 'dt',
'location': 'str',
'status': 'str',
'summary': 'str',
'uid': 'str'
}
mapping = {
'description': 'summary',
'summary': 'name'
}
imports = []
def ical_import_filter(original, logfacilty):
log('Passthrough filter')
return original
if execfilter is not None:
import os
textFilePath = os.path.abspath(os.path.join(os.path.curdir, execfilter))
textFileFolder = os.path.dirname(textFilePath)
from importlib.machinery import SourceFileLoader
filter_module = SourceFileLoader("importfilter", textFilePath).load_module()
ical_import_filter = filter_module.ical_import_filter
for event in caldata.walk():
if event.name == 'VEVENT':
log(event, lvl=verbose, pretty=True)
initializer = {
'uuid': std_uuid(),
'calendar': calendar.uuid,
}
for item in keys:
thing = event.get(item, None)
if thing is None:
thing = 'NO-' + item
else:
if keys[item] == 'str':
thing = str(thing)
else:
thing = parser.parse(str(thing.dt))
thing = thing.isoformat()
if item in mapping:
item_assignment = mapping[item]
else:
item_assignment = item
initializer[item_assignment] = thing
new_event = objectmodels['event'](initializer)
new_event = ical_import_filter(new_event, log)
imports.append(new_event)
log(new_event, lvl=debug)
for ev in imports:
log(ev.summary)
if not dry:
log('Bulk creating events')
objectmodels['event'].bulk_create(imports)
calendar.save()
else:
log('Dry run - nothing stored.', lvl=warn)
|
Calendar Importer for iCal (ics) files
|
entailment
|
def make_migrations(schema=None):
"""Create migration data for a specified schema"""
entrypoints = {}
old = {}
def apply_migrations(migrations, new_model):
"""Apply migration data to compile an up to date model"""
def get_path(raw_path):
"""Get local path of schema definition"""
print("RAW PATH:", raw_path, type(raw_path))
path = []
for item in raw_path.split("["):
print(item)
item = item.rstrip("]")
item = item.replace('"', '')
item = item.replace("'", '')
try:
item = int(item)
except ValueError:
pass
path.append(item)
path.remove('root')
print("PATH:", path)
return path
def apply_entry(changetype, change, result):
"""Upgrade with a single migration"""
def apply_removes(removes, result):
"""Delete removed fields"""
for remove in removes:
path = get_path(remove)
amount = dpath.util.delete(result, path)
assert amount == 1
return result
def apply_additions(additions, result):
"""Add newly added fields"""
for addition in additions:
path = get_path(addition)
entry = additions[addition]
hfoslog('Adding:', entry, 'at', path)
dpath.util.new(result, path, entry)
return result
if changetype == 'type_changes':
hfoslog('Creating new object')
result = change['root']['new_value']
return result
if changetype == 'dictionary_item_added':
hfoslog('Adding items')
result = apply_additions(change, result)
elif changetype == 'dictionary_item_removed':
hfoslog('Removing items')
result = apply_removes(change, result)
elif changetype == 'values_changed':
hfoslog("Changing items' types")
for item in change:
path = get_path(item)
hfoslog('Changing', path, 'from',
change[item]['old_value'], ' to',
change[item]['new_value'])
assert dpath.util.get(result, path) == change[item][
'old_value']
amount = dpath.util.set(result, path, change[item][
'new_value'])
assert amount == 1
return result
def get_renames(migrations):
"""Check migrations for renamed fields"""
hfoslog('Checking for rename operations:')
pprint(migrations)
for entry in migrations:
added = entry.get('dictionary_item_added', None)
removed = entry.get('dictionary_item_removed', None)
renames = []
if added and removed:
for addition in added:
path = get_path(addition)
for removal in removed:
removed_path = get_path(removal)
if path[:-1] == removed_path[:-1]:
hfoslog('Possible rename detected:', removal, '->',
addition)
renames.append((removed_path, path))
return renames
result = {}
for no, migration in enumerate(migrations):
hfoslog('Migrating', no)
hfoslog('Migration:', migration, lvl=debug)
renamed = get_renames(migrations)
for entry in migration:
result = apply_entry(entry, migration[entry], result)
pprint(result)
return result
def write_migration(schema, counter, path, previous, current):
"""Write out complete migration data"""
filename = "%s_%04i.json" % (schema, counter)
migration = DeepDiff(previous, current, verbose_level=2).json
if migration == "{}":
hfoslog('Nothing changed - no new migration data.', lvl=warn)
return
print('Writing migration: ', os.path.join(path, filename))
pprint(migration)
with open(os.path.join(path, filename), 'w') as f:
f.write(migration)
for schema_entrypoint in iter_entry_points(group='hfos.schemata',
name=None):
try:
hfoslog("Schemata found: ", schema_entrypoint.name, lvl=debug,
emitter='DB')
if schema is not None and schema_entrypoint.name != schema:
continue
entrypoints[schema_entrypoint.name] = schema_entrypoint
pprint(schema_entrypoint.dist.location)
schema_top = schema_entrypoint.dist.location
schema_migrations = schema_entrypoint.module_name.replace(
'schemata', 'migrations').replace('.', '/')
path = os.path.join(schema_top, schema_migrations)
new_model = schema_entrypoint.load()['schema']
migrations = []
try:
for file in sorted(os.listdir(path)):
if not file.endswith('.json'):
continue
fullpath = os.path.join(path, file)
hfoslog('Importing migration', fullpath)
with open(fullpath, 'r') as f:
migration = DeepDiff.from_json(f.read())
migrations.append(migration)
hfoslog('Successfully imported')
if len(migrations) == 0:
raise ImportError
pprint(migrations)
model = apply_migrations(migrations, new_model)
write_migration(schema, len(migrations) + 1, path, model,
new_model)
except ImportError as e:
hfoslog('No previous migrations for', schema, e,
type(e), exc=True)
if len(migrations) == 0:
write_migration(schema, 1, path, None, new_model)
except (ImportError, DistributionNotFound) as e:
hfoslog("Problematic schema: ", e, type(e),
schema_entrypoint.name, exc=True, lvl=warn,
emitter='SCHEMATA')
hfoslog("Found schemata: ", sorted(entrypoints.keys()), lvl=debug,
emitter='SCHEMATA')
pprint(entrypoints)
def make_single_migration(old, new):
pass
|
Create migration data for a specified schema
|
entailment
|
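A worked example of what the nested get_path() helper above produces, and how dpath.util applies such a path (the dpath calls mirror those used in apply_entry; the document dictionary is made up):

import dpath.util

# get_path("root['properties']['name']") as defined above yields:
path = ['properties', 'name']

doc = {'properties': {'name': 'old'}}
dpath.util.set(doc, path, 'new')                    # change an existing value
dpath.util.new(doc, ['properties', 'added'], 42)    # add a new value
print(doc)  # {'properties': {'name': 'new', 'added': 42}}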
def userlogin(self, event):
"""Provides the newly authenticated user with a backlog and general
channel status information"""
try:
user_uuid = event.useruuid
user = objectmodels['user'].find_one({'uuid': user_uuid})
if user_uuid not in self.lastlogs:
self.log('Setting up lastlog for a new user.', lvl=debug)
lastlog = objectmodels['chatlastlog']({
'owner': user_uuid,
'uuid': std_uuid(),
'channels': {}
})
lastlog.save()
self.lastlogs[user_uuid] = lastlog
self.users[user_uuid] = user
self.user_attention[user_uuid] = None
self._send_status(user_uuid, event.clientuuid)
except Exception as e:
self.log('Error during chat setup of user:', e, type(e), exc=True)
|
Provides the newly authenticated user with a backlog and general
channel status information
|
entailment
|
def join(self, event):
"""Chat event handler for incoming events
:param event: say-event with incoming chat message
"""
try:
channel_uuid = event.data
user_uuid = event.user.uuid
if channel_uuid in self.chat_channels:
self.log('User joins a known channel', lvl=debug)
if user_uuid in self.chat_channels[channel_uuid].users:
self.log('User already joined', lvl=warn)
else:
self.chat_channels[channel_uuid].users.append(user_uuid)
self.chat_channels[channel_uuid].save()
packet = {
'component': 'hfos.chat.host',
'action': 'join',
'data': channel_uuid
}
self.fireEvent(send(event.client.uuid, packet))
else:
self.log('Request to join unavailable channel', lvl=warn)
except Exception as e:
self.log('Join error:', e, type(e), exc=True, lvl=error)
|
Chat event handler for incoming events
:param event: say-event with incoming chat message
|
entailment
|
def say(self, event):
"""Chat event handler for incoming events
:param event: say-event with incoming chat message
"""
try:
userid = event.user.uuid
recipient = self._get_recipient(event)
content = self._get_content(event)
message = objectmodels['chatmessage']({
'timestamp': time(),
'recipient': recipient,
'sender': userid,
'content': content,
'uuid': std_uuid()
})
message.save()
chat_packet = {
'component': 'hfos.chat.host',
'action': 'say',
'data': message.serializablefields()
}
if recipient in self.chat_channels:
for useruuid in self.users:
if useruuid in self.chat_channels[recipient].users:
self.log('User in channel', lvl=debug)
self.update_lastlog(useruuid, recipient)
self.log('Sending message', lvl=debug)
self.fireEvent(send(useruuid, chat_packet,
sendtype='user'))
except Exception as e:
self.log("Error: '%s' %s" % (e, type(e)), exc=True, lvl=error)
|
Chat event handler for incoming events
:param event: say-event with incoming chat message
|
entailment
|
def install_docs(instance, clear_target):
"""Builds and installs the complete HFOS documentation."""
_check_root()
def make_docs():
"""Trigger a Sphinx make command to build the documentation."""
log("Generating HTML documentation")
try:
build = Popen(
[
'make',
'html'
],
cwd='docs/'
)
build.wait()
except Exception as e:
log("Problem during documentation building: ", e, type(e),
exc=True, lvl=error)
return False
return True
make_docs()
# If these need changes, make sure they are watertight and don't remove
# wanted stuff!
target = os.path.join('/var/lib/hfos', instance, 'frontend/docs')
source = 'docs/build/html'
log("Updating documentation directory:", target)
if not os.path.exists(os.path.join(os.path.curdir, source)):
log(
"Documentation not existing yet. Run python setup.py "
"build_sphinx first.", lvl=error)
return
if os.path.exists(target):
log("Path already exists: " + target)
if clear_target:
log("Cleaning up " + target, lvl=warn)
shutil.rmtree(target)
log("Copying docs to " + target)
copy_tree(source, target)
log("Done: Install Docs")
|
Builds and installs the complete HFOS documentation.
|
entailment
|
def var(ctx, clear_target, clear_all):
"""Install variable data to /var/[lib,cache]/hfos"""
install_var(str(ctx.obj['instance']), clear_target, clear_all)
|
Install variable data to /var/[lib,cache]/hfos
|
entailment
|
def install_var(instance, clear_target, clear_all):
"""Install required folders in /var"""
_check_root()
log("Checking frontend library and cache directories",
emitter='MANAGE')
uid = pwd.getpwnam("hfos").pw_uid
gid = grp.getgrnam("hfos").gr_gid
join = os.path.join
# If these need changes, make sure they are watertight and don't remove
# wanted stuff!
target_paths = (
'/var/www/challenges', # For LetsEncrypt acme certificate challenges
join('/var/lib/hfos', instance),
join('/var/local/hfos', instance),
join('/var/local/hfos', instance, 'backup'),
join('/var/cache/hfos', instance),
join('/var/cache/hfos', instance, 'tilecache'),
join('/var/cache/hfos', instance, 'rastertiles'),
join('/var/cache/hfos', instance, 'rastercache')
)
logfile = "/var/log/hfos-" + instance + ".log"
for item in target_paths:
if os.path.exists(item):
log("Path already exists: " + item)
if clear_all or (clear_target and 'cache' in item):
log("Cleaning up: " + item, lvl=warn)
shutil.rmtree(item)
if not os.path.exists(item):
log("Creating path: " + item)
os.mkdir(item)
os.chown(item, uid, gid)
# Touch logfile to make sure it exists
open(logfile, "a").close()
os.chown(logfile, uid, gid)
log("Done: Install Var")
|
Install required folders in /var
|
entailment
|
def provisions(ctx, provision, clear_existing, overwrite, list_provisions):
"""Install default provisioning data"""
install_provisions(ctx, provision, clear_existing, overwrite, list_provisions)
|
Install default provisioning data
|
entailment
|
def install_provisions(ctx, provision, clear_provisions=False, overwrite=False, list_provisions=False):
"""Install default provisioning data"""
log("Installing HFOS default provisions")
# from hfos.logger import verbosity, events
# verbosity['console'] = verbosity['global'] = events
from hfos import database
database.initialize(ctx.obj['dbhost'], ctx.obj['dbname'])
from hfos.provisions import build_provision_store
provision_store = build_provision_store()
def sort_dependencies(items):
"""Topologically sort the dependency tree"""
g = networkx.DiGraph()
log('Sorting dependencies')
for key, item in items:
log('key: ', key, 'item:', item, pretty=True, lvl=debug)
dependencies = item.get('dependencies', [])
if isinstance(dependencies, str):
dependencies = [dependencies]
if key not in g:
g.add_node(key)
for link in dependencies:
g.add_edge(key, link)
if not networkx.is_directed_acyclic_graph(g):
            log('Cycles in provisioning dependency graph detected!', lvl=error)
log('Involved provisions:', list(networkx.simple_cycles(g)), lvl=error)
topology = list(networkx.algorithms.topological_sort(g))
topology.reverse()
log(topology, pretty=True)
return topology
if list_provisions:
sort_dependencies(provision_store.items())
exit()
def provision_item(item):
"""Provision a single provisioning element"""
method = item.get('method', provisionList)
model = item.get('model')
data = item.get('data')
method(data, model, overwrite=overwrite, clear=clear_provisions)
if provision is not None:
if provision in provision_store:
log("Provisioning ", provision, pretty=True)
provision_item(provision_store[provision])
else:
log("Unknown provision: ", provision, "\nValid provisions are",
list(provision_store.keys()),
lvl=error,
emitter='MANAGE')
else:
for name in sort_dependencies(provision_store.items()):
log("Provisioning", name, pretty=True)
provision_item(provision_store[name])
log("Done: Install Provisions")
|
Install default provisioning data
|
entailment
|
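A minimal illustration of the dependency ordering used in sort_dependencies() above, with a made-up provision store:

import networkx

store = {
    'user': {'dependencies': []},
    'systemconfig': {'dependencies': ['user']},
    'calendar': {'dependencies': ['user', 'systemconfig']},
}

g = networkx.DiGraph()
for key, item in store.items():
    g.add_node(key)
    for dep in item['dependencies']:
        g.add_edge(key, dep)

order = list(networkx.algorithms.topological_sort(g))
order.reverse()
print(order)  # ['user', 'systemconfig', 'calendar']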
def install_modules(wip):
"""Install the plugin modules"""
def install_module(hfos_module):
"""Install a single module via setuptools"""
try:
setup = Popen(
[
sys.executable,
'setup.py',
'develop'
],
cwd='modules/' + hfos_module + "/"
)
setup.wait()
except Exception as e:
log("Problem during module installation: ", hfos_module, e,
type(e), exc=True, lvl=error)
return False
return True
# TODO: Sort module dependencies via topological sort or let pip do this in future.
# # To get the module dependencies:
# packages = {}
# for provision_entrypoint in iter_entry_points(group='hfos.provisions',
# name=None):
# log("Found packages: ", provision_entrypoint.dist.project_name, lvl=warn)
#
# _package_name = provision_entrypoint.dist.project_name
# _package = pkg_resources.working_set.by_key[_package_name]
#
# print([str(r) for r in _package.requires()]) # retrieve deps from setup.py
modules_production = [
# TODO: Poor man's dependency management, as long as the modules are
# installed from local sources and they're not available on pypi,
# which would handle real dependency management for us:
'navdata',
# Now all the rest:
'alert',
'automat',
'busrepeater',
'calendar',
'countables',
'dash',
# 'dev',
'enrol',
'mail',
'maps',
'nmea',
'nodestate',
'project',
'webguides',
'wiki'
]
modules_wip = [
'calc',
'camera',
'chat',
'comms',
'contacts',
'crew',
'equipment',
'filemanager',
'garden',
'heroic',
'ldap',
'library',
'logbook',
'protocols',
'polls',
'mesh',
'robot',
'switchboard',
'shareables',
]
installables = modules_production
if wip:
installables.extend(modules_wip)
success = []
failed = []
for installable in installables:
log('Installing module ', installable)
if install_module(installable):
success.append(installable)
else:
failed.append(installable)
log('Installed modules: ', success)
if len(failed) > 0:
log('Failed modules: ', failed)
log('Done: Install Modules')
|
Install the plugin modules
|
entailment
|
def service(ctx):
"""Install systemd service configuration"""
install_service(ctx.obj['instance'], ctx.obj['dbhost'], ctx.obj['dbname'], ctx.obj['port'])
|
Install systemd service configuration
|
entailment
|
def install_service(instance, dbhost, dbname, port):
"""Install systemd service configuration"""
_check_root()
log("Installing systemd service")
launcher = os.path.realpath(__file__).replace('manage', 'launcher')
executable = sys.executable + " " + launcher
executable += " --instance " + instance
executable += " --dbname " + dbname + " --dbhost " + dbhost
executable += " --port " + port
executable += " --dolog --logfile /var/log/hfos-" + instance + ".log"
executable += " --logfileverbosity 30 -q"
definitions = {
'instance': instance,
'executable': executable
}
service_name = 'hfos-' + instance + '.service'
write_template_file(os.path.join('dev/templates', service_template),
os.path.join('/etc/systemd/system/', service_name),
definitions)
Popen([
'systemctl',
'enable',
service_name
])
log('Launching service')
Popen([
'systemctl',
'start',
service_name
])
log("Done: Install Service")
|
Install systemd service configuration
|
entailment
|
def nginx(ctx, hostname):
"""Install nginx configuration"""
    install_nginx(ctx.obj['instance'], ctx.obj['dbhost'], ctx.obj['dbname'], ctx.obj['port'], hostname)
|
Install nginx configuration
|
entailment
|
def install_nginx(instance, dbhost, dbname, port, hostname=None):
"""Install nginx configuration"""
_check_root()
log("Installing nginx configuration")
if hostname is None:
try:
configuration = _get_system_configuration(dbhost, dbname)
hostname = configuration.hostname
except Exception as e:
log('Exception:', e, type(e), exc=True, lvl=error)
log("""Could not determine public fully qualified hostname!
Check systemconfig (see db view and db modify commands) or specify
manually with --hostname host.domain.tld
Using 'localhost' for now""", lvl=warn)
hostname = 'localhost'
definitions = {
'instance': instance,
'server_public_name': hostname,
'ssl_certificate': cert_file,
'ssl_key': key_file,
'host_url': 'http://127.0.0.1:%i/' % port
}
if distribution == 'DEBIAN':
configuration_file = '/etc/nginx/sites-available/hfos.%s.conf' % instance
configuration_link = '/etc/nginx/sites-enabled/hfos.%s.conf' % instance
elif distribution == 'ARCH':
configuration_file = '/etc/nginx/nginx.conf'
configuration_link = None
else:
log('Unsure how to proceed, you may need to specify your '
'distribution', lvl=error)
return
log('Writing nginx HFOS site definition')
write_template_file(os.path.join('dev/templates', nginx_configuration),
configuration_file,
definitions)
if configuration_link is not None:
log('Enabling nginx HFOS site (symlink)')
if not os.path.exists(configuration_link):
os.symlink(configuration_file, configuration_link)
log('Restarting nginx service')
Popen([
'systemctl',
'restart',
'nginx.service'
])
log("Done: Install nginx configuration")
|
Install nginx configuration
|
entailment
|
def install_cert(selfsigned):
"""Install a local SSL certificate"""
_check_root()
if selfsigned:
log('Generating self signed (insecure) certificate/key '
'combination')
try:
os.mkdir('/etc/ssl/certs/hfos')
except FileExistsError:
pass
except PermissionError:
log("Need root (e.g. via sudo) to generate ssl certificate")
sys.exit(1)
def create_self_signed_cert():
"""Create a simple self signed SSL certificate"""
# create a key pair
k = crypto.PKey()
k.generate_key(crypto.TYPE_RSA, 1024)
if os.path.exists(cert_file):
try:
certificate = open(cert_file, "rb").read()
old_cert = crypto.load_certificate(crypto.FILETYPE_PEM,
certificate)
serial = old_cert.get_serial_number() + 1
except (crypto.Error, OSError) as e:
log('Could not read old certificate to increment '
'serial:', type(e), e, exc=True, lvl=warn)
serial = 1
else:
serial = 1
# create a self-signed certificate
certificate = crypto.X509()
certificate.get_subject().C = "DE"
certificate.get_subject().ST = "Berlin"
certificate.get_subject().L = "Berlin"
# noinspection PyPep8
certificate.get_subject().O = "Hackerfleet"
certificate.get_subject().OU = "Hackerfleet"
certificate.get_subject().CN = gethostname()
certificate.set_serial_number(serial)
certificate.gmtime_adj_notBefore(0)
certificate.gmtime_adj_notAfter(10 * 365 * 24 * 60 * 60)
certificate.set_issuer(certificate.get_subject())
certificate.set_pubkey(k)
certificate.sign(k, b'sha512')
open(key_file, "wt").write(str(
crypto.dump_privatekey(crypto.FILETYPE_PEM, k),
encoding="ASCII"))
open(cert_file, "wt").write(str(
crypto.dump_certificate(crypto.FILETYPE_PEM, certificate),
encoding="ASCII"))
open(combined_file, "wt").write(str(
crypto.dump_certificate(crypto.FILETYPE_PEM, certificate),
encoding="ASCII") + str(
crypto.dump_privatekey(crypto.FILETYPE_PEM, k),
encoding="ASCII"))
create_self_signed_cert()
log('Done: Install Cert')
else:
# TODO
log('Not implemented yet. You can build your own certificate and '
'store it in /etc/ssl/certs/hfos/server-cert.pem - it should '
'be a certificate with key, as this is used server side and '
'there is no way to enter a separate key.', lvl=error)
|
Install a local SSL certificate
|
entailment
|
def frontend(ctx, dev, rebuild, no_install, build_type):
"""Build and install frontend"""
install_frontend(instance=ctx.obj['instance'],
forcerebuild=rebuild,
development=dev,
install=not no_install,
build_type=build_type)
|
Build and install frontend
|
entailment
|
def install_all(ctx, clear_all):
"""Default-Install everything installable
\b
This includes
* System user (hfos.hfos)
* Self signed certificate
* Variable data locations (/var/lib/hfos and /var/cache/hfos)
* All the official modules in this repository
* Default module provisioning data
* Documentation
* systemd service descriptor
It does NOT build and install the HTML5 frontend."""
_check_root()
instance = ctx.obj['instance']
dbhost = ctx.obj['dbhost']
dbname = ctx.obj['dbname']
port = ctx.obj['port']
install_system_user()
install_cert(selfsigned=True)
install_var(instance, clear_target=clear_all, clear_all=clear_all)
install_modules(wip=False)
    install_provisions(ctx, provision=None, clear_provisions=clear_all)
install_docs(instance, clear_target=clear_all)
install_service(instance, dbhost, dbname, port)
install_nginx(instance, dbhost, dbname, port)
log('Done')
|
Default-Install everything installable
\b
This includes
* System user (hfos.hfos)
* Self signed certificate
* Variable data locations (/var/lib/hfos and /var/cache/hfos)
* All the official modules in this repository
* Default module provisioning data
* Documentation
* systemd service descriptor
It does NOT build and install the HTML5 frontend.
|
entailment
|
def uninstall():
"""Uninstall data and resource locations"""
_check_root()
    response = _ask("This will delete all data of your HFOS installations! "
                    "Type YES to continue:", default="N", show_hint=False)
if response == 'YES':
shutil.rmtree('/var/lib/hfos')
shutil.rmtree('/var/cache/hfos')
|
Uninstall data and resource locations
|
entailment
|