def queue_action(self, queue, *args, **kwargs):
"""Function that specifies the interaction with a
:class:`.ResourceQueue` upon departure.
When departing from a :class:`.ResourceQueue` (or a
:class:`.QueueServer`), this method is called. If the agent
does not already have a resource then it decrements the number
of servers at :class:`.ResourceQueue` by one. Note that this
only applies to :class:`ResourceQueue's<.ResourceQueue>`.
Parameters
----------
queue : :class:`.QueueServer`
The instance of the queue that the ``ResourceAgent`` will
interact with.
"""
if isinstance(queue, ResourceQueue):
if self._has_resource:
self._has_resource = False
self._had_resource = True
else:
if queue.num_servers > 0:
queue.set_num_servers(queue.num_servers - 1)
self._has_resource = True
self._had_resource = False
|
def next_event(self):
"""Simulates the queue forward one event.
This method behaves identically to a :class:`.LossQueue` if the
arriving/departing agent is anything other than a
:class:`.ResourceAgent`. The differences are:
Arriving:
* If the :class:`.ResourceAgent` has a resource then it deletes
the agent upon arrival and adds one to ``num_servers``.
* If the :class:`.ResourceAgent` is arriving without a resource
then nothing special happens.
Departing:
* If the :class:`.ResourceAgent` does not have a resource, then
``num_servers`` decreases by one and the agent then *has a
resource*.
Use :meth:`~QueueServer.simulate` for simulating instead.
"""
if isinstance(self._arrivals[0], ResourceAgent):
if self._departures[0]._time < self._arrivals[0]._time:
return super(ResourceQueue, self).next_event()
elif self._arrivals[0]._time < infty:
if self._arrivals[0]._has_resource:
arrival = heappop(self._arrivals)
self._current_t = arrival._time
self._num_total -= 1
self.set_num_servers(self.num_servers + 1)
if self.collect_data:
t = arrival._time
if arrival.agent_id not in self.data:
self.data[arrival.agent_id] = [[t, t, t, len(self.queue), self.num_system]]
else:
self.data[arrival.agent_id].append([t, t, t, len(self.queue), self.num_system])
if self._arrivals[0]._time < self._departures[0]._time:
self._time = self._arrivals[0]._time
else:
self._time = self._departures[0]._time
elif self.num_system < self.num_servers:
super(ResourceQueue, self).next_event()
else:
self.num_blocked += 1
self._num_arrivals += 1
self._num_total -= 1
arrival = heappop(self._arrivals)
self._current_t = arrival._time
if self.collect_data:
if arrival.agent_id not in self.data:
self.data[arrival.agent_id] = [[arrival._time, 0, 0, len(self.queue), self.num_system]]
else:
self.data[arrival.agent_id].append([arrival._time, 0, 0, len(self.queue), self.num_system])
if self._arrivals[0]._time < self._departures[0]._time:
self._time = self._arrivals[0]._time
else:
self._time = self._departures[0]._time
else:
return super(ResourceQueue, self).next_event()
|
def size(self, s):
"""Returns the number of elements in the set that ``s`` belongs to.
Parameters
----------
s : object
An object
Returns
-------
out : int
The number of elements in the set that ``s`` belongs to.
"""
leader = self.find(s)
return self._size[leader]
|
def find(self, s):
"""Locates the leader of the set to which the element ``s`` belongs.
Parameters
----------
s : object
An object that the ``UnionFind`` contains.
Returns
-------
object
The leader of the set that contains ``s``.
"""
pSet = [s]
parent = self._leader[s]
while parent != self._leader[parent]:
pSet.append(parent)
parent = self._leader[parent]
if len(pSet) > 1:
for a in pSet:
self._leader[a] = parent
return parent
|
def union(self, a, b):
"""Merges the set that contains ``a`` with the set that contains ``b``.
Parameters
----------
a, b : objects
Two objects whose sets are to be merged.
"""
s1, s2 = self.find(a), self.find(b)
if s1 != s2:
r1, r2 = self._rank[s1], self._rank[s2]
if r2 > r1:
r1, r2 = r2, r1
s1, s2 = s2, s1
if r1 == r2:
self._rank[s1] += 1
self._leader[s2] = s1
self._size[s1] += self._size[s2]
self.nClusters -= 1
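# A minimal usage sketch for the UnionFind above (an assumption based on
# its use in minimal_random_graph below: the constructor takes an iterable
# of elements and starts each one in its own singleton set).
uf = UnionFind(range(5))
uf.union(0, 1)                     # merge {0} and {1}
uf.union(1, 2)                     # merge {0, 1} and {2}
assert uf.find(0) == uf.find(2)    # same leader after path compression
assert uf.size(0) == 3             # the merged set is {0, 1, 2}
assert uf.nClusters == 3           # {0, 1, 2}, {3} and {4} remain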
|
def generate_transition_matrix(g, seed=None):
"""Generates a random transition matrix for the graph ``g``.
Parameters
----------
g : :any:`networkx.DiGraph`, :class:`numpy.ndarray`, dict, etc.
Any object that :any:`DiGraph<networkx.DiGraph>` accepts.
seed : int (optional)
An integer used to initialize numpy's pseudo-random number
generator.
Returns
-------
mat : :class:`~numpy.ndarray`
Returns a transition matrix where ``mat[i, j]`` is the
probability of transitioning from vertex ``i`` to vertex ``j``.
If there is no edge connecting vertex ``i`` to vertex ``j``
then ``mat[i, j] = 0``.
"""
g = _test_graph(g)
if isinstance(seed, numbers.Integral):
np.random.seed(seed)
nV = g.number_of_nodes()
mat = np.zeros((nV, nV))
for v in g.nodes():
ind = [e[1] for e in sorted(g.out_edges(v))]
deg = len(ind)
if deg == 1:
mat[v, ind] = 1
elif deg > 1:
probs = np.ceil(np.random.rand(deg) * 100) / 100.
if np.isclose(np.sum(probs), 0):
probs[np.random.randint(deg)] = 1
mat[v, ind] = probs / np.sum(probs)
return mat
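# Usage sketch for generate_transition_matrix (hedged: assumes networkx is
# importable and that _test_graph accepts a DiGraph, as documented above;
# the toy graph is illustrative).
import networkx as nx

g = nx.DiGraph([(0, 1), (0, 2), (1, 2), (2, 0)])
mat = generate_transition_matrix(g, seed=42)
# every row is a probability distribution over the vertex's out-neighbors
assert all(abs(mat[v].sum() - 1.0) < 1e-9 for v in g.nodes())
assert mat[1, 0] == 0  # no edge from 1 to 0, so zero probability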
|
def generate_random_graph(num_vertices=250, prob_loop=0.5, **kwargs):
"""Creates a random graph where the edges have different types.
This method calls :func:`.minimal_random_graph`, and then adds
a loop to each vertex with ``prob_loop`` probability. It then
calls :func:`.set_types_random` on the resulting graph.
Parameters
----------
num_vertices : int (optional, default: 250)
The number of vertices in the graph.
prob_loop : float (optional, default: 0.5)
The probability that a loop gets added to a vertex.
**kwargs :
Any parameters to send to :func:`.minimal_random_graph` or
:func:`.set_types_random`.
Returns
-------
:class:`.QueueNetworkDiGraph`
A graph with the position of the vertex set as a property.
The position property is called ``pos``. Also, the ``edge_type``
edge property is set for each edge.
Examples
--------
The following generates a directed graph with 100 vertices where half
the edges are type 1, a quarter are type 2, and a quarter are type 3:
>>> import queueing_tool as qt
>>> pTypes = {1: 0.5, 2: 0.25, 3: 0.25}
>>> g = qt.generate_random_graph(100, proportions=pTypes, seed=17)
>>> non_loops = [e for e in g.edges() if e[0] != e[1]]
>>> p1 = np.sum([g.ep(e, 'edge_type') == 1 for e in non_loops])
>>> float(p1) / len(non_loops) # doctest: +ELLIPSIS
0.486...
>>> p2 = np.sum([g.ep(e, 'edge_type') == 2 for e in non_loops])
>>> float(p2) / len(non_loops) # doctest: +ELLIPSIS
0.249...
>>> p3 = np.sum([g.ep(e, 'edge_type') == 3 for e in non_loops])
>>> float(p3) / len(non_loops) # doctest: +ELLIPSIS
0.264...
To make an undirected graph with 25 vertices where there are 4
different edge types with random proportions:
>>> p = np.random.rand(4)
>>> p = p / sum(p)
>>> p = {k + 1: p[k] for k in range(4)}
>>> g = qt.generate_random_graph(num_vertices=25, is_directed=False, proportions=p)
Note that none of the edge types in the above example are 0. It is
recommended to use edge type indices starting at 1, since 0 is
typically used for terminal edges.
"""
g = minimal_random_graph(num_vertices, **kwargs)
for v in g.nodes():
e = (v, v)
if not g.is_edge(e):
if np.random.uniform() < prob_loop:
g.add_edge(*e)
g = set_types_random(g, **kwargs)
return g
|
def generate_pagerank_graph(num_vertices=250, **kwargs):
"""Creates a random graph where the vertex types are
selected using their pagerank.
Calls :func:`.minimal_random_graph` and then
:func:`.set_types_rank` where the ``rank`` keyword argument
is given by :func:`networkx.pagerank`.
Parameters
----------
num_vertices : int (optional, default: 250)
The number of vertices in the graph.
**kwargs :
Any parameters to send to :func:`.minimal_random_graph` or
:func:`.set_types_rank`.
Returns
-------
:class:`.QueueNetworkDiGraph`
A graph with a ``pos`` vertex property and the ``edge_type``
edge property.
Notes
-----
This function sets the edge types of a graph to be either 1, 2, or
3. It sets the vertices to type 2 by selecting the top
``pType2 * g.number_of_nodes()`` vertices given by the
:func:`~networkx.pagerank` of the graph. A loop is added
to all vertices identified this way (if one does not exist
already). It then randomly sets vertices close to the type 2
vertices as type 3, and adds loops to these vertices as well. These
loops then have edge types that correspond to the vertex's type.
The rest of the edges are set to type 1.
"""
g = minimal_random_graph(num_vertices, **kwargs)
r = np.zeros(num_vertices)
for k, pr in nx.pagerank(g).items():
r[k] = pr
g = set_types_rank(g, rank=r, **kwargs)
return g
|
def minimal_random_graph(num_vertices, seed=None, **kwargs):
"""Creates a connected graph with random vertex locations.
Parameters
----------
num_vertices : int
The number of vertices in the graph.
seed : int (optional)
An integer used to initialize numpy's pseudo-random number
generators.
**kwargs :
Unused.
Returns
-------
:class:`.QueueNetworkDiGraph`
A graph with a ``pos`` vertex property for each vertex's
position.
Notes
-----
This function first places ``num_vertices`` points in a square
uniformly at random. Then, for every vertex ``v``, all other vertices
within Euclidean distance ``r`` are connected by an edge, where ``r``
is the smallest number such that the resulting graph is connected.
"""
if isinstance(seed, numbers.Integral):
np.random.seed(seed)
points = np.random.random((num_vertices, 2)) * 10
edges = []
for k in range(num_vertices - 1):
for j in range(k + 1, num_vertices):
v = points[k] - points[j]
edges.append((k, j, v[0]**2 + v[1]**2))
mytype = [('n1', int), ('n2', int), ('distance', float)]
edges = np.array(edges, dtype=mytype)
edges = np.sort(edges, order='distance')
unionF = UnionFind([k for k in range(num_vertices)])
g = nx.Graph()
for n1, n2, dummy in edges:
unionF.union(n1, n2)
g.add_edge(n1, n2)
if unionF.nClusters == 1:
break
pos = {j: p for j, p in enumerate(points)}
g = QueueNetworkDiGraph(g.to_directed())
g.set_pos(pos)
return g
|
def set_types_random(g, proportions=None, loop_proportions=None, seed=None,
**kwargs):
"""Randomly sets ``edge_type`` (edge type) properties of the graph.
This function randomly assigns each edge a type. The probability of
an edge being a specific type is prescribed by the
``proportions`` and ``loop_proportions`` arguments.
Parameters
----------
g : :any:`networkx.DiGraph`, :class:`numpy.ndarray`, dict, etc.
Any object that :any:`DiGraph<networkx.DiGraph>` accepts.
proportions : dict (optional, default: ``{k: 0.25 for k in range(1, 4)}``)
A dictionary of edge types and proportions, where the keys are
the types and the values are the proportion of non-loop edges
that are expected to be of that type. The values must sum
to one.
loop_proportions : dict (optional, default: ``{k: 0.25 for k in range(4)}``)
A dictionary of edge types and proportions, where the keys are
the types and the values are the proportion of loop edges
that are expected to be of that type. The values must sum
to one.
seed : int (optional)
An integer used to initialize numpy's pseudo-random number
generator.
**kwargs :
Unused.
Returns
-------
:class:`.QueueNetworkDiGraph`
Returns a graph with an ``edge_type`` edge property.
Raises
------
TypeError
Raised when the parameter ``g`` is not of a type that can be
made into a :any:`networkx.DiGraph`.
ValueError
Raised when the ``proportions`` or ``loop_proportions`` values
do not sum to one.
Notes
-----
If the proportions are not explicitly specified in the arguments,
the graph defaults to four edge types (0, 1, 2, and 3). Non-loop
edges are assigned type 1, 2, or 3 with probability 1/3 each, and
loops are assigned type 0, 1, 2, or 3 with probability 1/4 each.
"""
g = _test_graph(g)
if isinstance(seed, numbers.Integral):
np.random.seed(seed)
if proportions is None:
proportions = {k: 1. / 3 for k in range(1, 4)}
if loop_proportions is None:
loop_proportions = {k: 1. / 4 for k in range(4)}
edges = [e for e in g.edges() if e[0] != e[1]]
loops = [e for e in g.edges() if e[0] == e[1]]
props = list(proportions.values())
lprops = list(loop_proportions.values())
if not np.isclose(sum(props), 1.0):
raise ValueError("proportions values must sum to one.")
if not np.isclose(sum(lprops), 1.0):
raise ValueError("loop_proportions values must sum to one.")
eTypes = {}
types = list(proportions.keys())
values = np.random.choice(types, size=len(edges), replace=True, p=props)
for k, e in enumerate(edges):
eTypes[e] = values[k]
types = list(loop_proportions.keys())
values = np.random.choice(types, size=len(loops), replace=True, p=lprops)
for k, e in enumerate(loops):
eTypes[e] = values[k]
g.new_edge_property('edge_type')
for e in g.edges():
g.set_ep(e, 'edge_type', eTypes[e])
return g
|
def set_types_rank(g, rank, pType2=0.1, pType3=0.1, seed=None, **kwargs):
"""Creates a stylized graph. Sets edge and types using `pagerank`_.
This function sets the edge types of a graph to be either 1, 2, or
3. It sets the vertices to type 2 by selecting the top
``pType2 * g.number_of_nodes()`` vertices given by the
:func:`~networkx.pagerank` of the graph. A loop is added
to all vertices identified this way (if one does not exist
already). It then randomly sets vertices close to the type 2
vertices as type 3, and adds loops to these vertices as well. These
loops then have edge types that correspond to the vertex's type. The
rest of the edges are set to type 1.
.. _pagerank: http://en.wikipedia.org/wiki/PageRank
Parameters
----------
g : :any:`networkx.DiGraph`, :class:`~numpy.ndarray`, dict, etc.
Any object that :any:`DiGraph<networkx.DiGraph>` accepts.
rank : :class:`numpy.ndarray`
An ordering of the vertices.
pType2 : float (optional, default: 0.1)
Specifies the proportion of vertices that will be of type 2.
pType3 : float (optional, default: 0.1)
Specifies the proportion of vertices that will be of type 3 and
that are near pType2 vertices.
seed : int (optional)
An integer used to initialize numpy's pseudo-random number
generator.
**kwargs :
Unused.
Returns
-------
:class:`.QueueNetworkDiGraph`
Returns a graph with an ``edge_type`` edge property.
Raises
------
TypeError
Raised when the parameter ``g`` is not of a type that can be
made into a :any:`DiGraph<networkx.DiGraph>`.
"""
g = _test_graph(g)
if isinstance(seed, numbers.Integral):
np.random.seed(seed)
tmp = np.sort(np.array(rank))
nDests = int(np.ceil(g.number_of_nodes() * pType2))
dests = np.where(rank >= tmp[-nDests])[0]
if 'pos' not in g.vertex_properties():
g.set_pos()
dest_pos = np.array([g.vp(v, 'pos') for v in dests])
nFCQ = int(pType3 * g.number_of_nodes())
min_g_dist = np.ones(nFCQ) * np.infty
ind_g_dist = np.ones(nFCQ, int)
# angles must be in radians for np.cos/np.sin below
r, theta = np.random.random(nFCQ) / 500., np.random.random(nFCQ) * 2 * np.pi
xy_pos = np.array([r * np.cos(theta), r * np.sin(theta)]).transpose()
g_pos = xy_pos + dest_pos[np.array(np.mod(np.arange(nFCQ), nDests), int)]
for v in g.nodes():
if v not in dests:
tmp = np.array([_calculate_distance(g.vp(v, 'pos'), g_pos[k, :]) for k in range(nFCQ)])
min_g_dist = np.min((tmp, min_g_dist), 0)
ind_g_dist[min_g_dist == tmp] = v
ind_g_dist = np.unique(ind_g_dist)
fcqs = set(ind_g_dist[:min(nFCQ, len(ind_g_dist))])
dests = set(dests)
g.new_vertex_property('loop_type')
for v in g.nodes():
if v in dests:
g.set_vp(v, 'loop_type', 3)
if not g.is_edge((v, v)):
g.add_edge(v, v)
elif v in fcqs:
g.set_vp(v, 'loop_type', 2)
if not g.is_edge((v, v)):
g.add_edge(v, v)
g.new_edge_property('edge_type')
for e in g.edges():
g.set_ep(e, 'edge_type', 1)
for v in g.nodes():
if g.vp(v, 'loop_type') in [2, 3]:
e = (v, v)
if g.vp(v, 'loop_type') == 2:
g.set_ep(e, 'edge_type', 2)
else:
g.set_ep(e, 'edge_type', 3)
return g
|
def strip_comment_marker(text):
""" Strip # markers at the front of a block of comment text.
"""
lines = []
for line in text.splitlines():
lines.append(line.lstrip('#'))
text = textwrap.dedent('\n'.join(lines))
return text
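# Quick example of strip_comment_marker (the input mimics a block of
# '#'-prefixed comment lines as collected by CommentBlocker below):
text = "# first line\n#     indented line"
print(strip_comment_marker(text))
# prints "first line" then "    indented line": the '#' markers are
# stripped and the common leading whitespace is removed by dedent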
|
def get_class_traits(klass):
""" Yield all of the documentation for trait definitions on a class object.
"""
# FIXME: gracefully handle errors here or in the caller?
source = inspect.getsource(klass)
cb = CommentBlocker()
cb.process_file(StringIO(source))
mod_ast = compiler.parse(source)
class_ast = mod_ast.node.nodes[0]
for node in class_ast.code.nodes:
# FIXME: handle other kinds of assignments?
if isinstance(node, compiler.ast.Assign):
name = node.nodes[0].name
rhs = unparse(node.expr).strip()
doc = strip_comment_marker(cb.search_for_comment(node.lineno, default=''))
yield name, rhs, doc
|
def add(self, string, start, end, line):
""" Add lines to the block.
"""
if string.strip():
# Only add if not entirely whitespace.
self.start_lineno = min(self.start_lineno, start[0])
self.end_lineno = max(self.end_lineno, end[0])
|
def process_file(self, file):
""" Process a file object.
"""
if sys.version_info[0] >= 3:
nxt = file.__next__
else:
nxt = file.next
for token in tokenize.generate_tokens(nxt):
self.process_token(*token)
self.make_index()
|
def process_token(self, kind, string, start, end, line):
""" Process a single token.
"""
if self.current_block.is_comment:
if kind == tokenize.COMMENT:
self.current_block.add(string, start, end, line)
else:
self.new_noncomment(start[0], end[0])
else:
if kind == tokenize.COMMENT:
self.new_comment(string, start, end, line)
else:
self.current_block.add(string, start, end, line)
|
def new_noncomment(self, start_lineno, end_lineno):
""" We are transitioning from a noncomment to a comment.
"""
block = NonComment(start_lineno, end_lineno)
self.blocks.append(block)
self.current_block = block
|
def new_comment(self, string, start, end, line):
""" Possibly add a new comment.
Only adds a new comment if this comment is the only thing on the line.
Otherwise, it extends the noncomment block.
"""
prefix = line[:start[1]]
if prefix.strip():
# Oops! Trailing comment, not a comment block.
self.current_block.add(string, start, end, line)
else:
# A comment block.
block = Comment(start[0], end[0], string)
self.blocks.append(block)
self.current_block = block
|
def make_index(self):
""" Make the index mapping lines of actual code to their associated
prefix comments.
"""
for prev, block in zip(self.blocks[:-1], self.blocks[1:]):
if not block.is_comment:
self.index[block.start_lineno] = prev
|
def search_for_comment(self, lineno, default=None):
""" Find the comment block just before the given line number.
Returns None (or the specified default) if there is no such block.
"""
if not self.index:
self.make_index()
block = self.index.get(lineno, None)
text = getattr(block, 'text', default)
return text
|
def _generate_contents(self, tar):
"""
Adds configuration files to tarfile instance.
:param tar: tarfile instance
:returns: None
"""
uci = self.render(files=False)
# create a list with all the packages (and remove empty entries)
packages = packages_pattern.split(uci)
if '' in packages:
packages.remove('')
# create an UCI file for each configuration package used
for package in packages:
lines = package.split('\n')
package_name = lines[0]
text_contents = '\n'.join(lines[2:])
self._add_file(tar=tar,
name='{0}{1}'.format(config_path, package_name),
contents=text_contents)
|
def _load(self, config):
"""
Loads config from string or dict
"""
if isinstance(config, six.string_types):
try:
config = json.loads(config)
except ValueError:
pass
if not isinstance(config, dict):
raise TypeError('config block must be an instance '
'of dict or a valid NetJSON string')
return config
|
def _merge_config(self, config, templates):
"""
Merges config with templates
"""
if not templates:
return config
# type check
if not isinstance(templates, list):
raise TypeError('templates argument must be an instance of list')
# merge templates with main configuration
result = {}
config_list = templates + [config]
for merging in config_list:
result = merge_config(result, self._load(merging), self.list_identifiers)
return result
|
def _render_files(self):
"""
Renders additional files specified in ``self.config['files']``
"""
output = ''
# render files
files = self.config.get('files', [])
# add delimiter
if files:
output += '\n{0}\n\n'.format(self.FILE_SECTION_DELIMITER)
for f in files:
mode = f.get('mode', DEFAULT_FILE_MODE)
# add file to output
file_output = '# path: {0}\n'\
'# mode: {1}\n\n'\
'{2}\n\n'.format(f['path'], mode, f['contents'])
output += file_output
return output
|
def render(self, files=True):
"""
Converts the configuration dictionary into the corresponding configuration format
:param files: whether to include "additional files" in the output or not;
defaults to ``True``
:returns: string with output
"""
self.validate()
# convert NetJSON config to intermediate data structure
if self.intermediate_data is None:
self.to_intermediate()
# support multiple renderers
renderers = getattr(self, 'renderers', None) or [self.renderer]
# convert intermediate data structure to native configuration
output = ''
for renderer_class in renderers:
renderer = renderer_class(self)
output += renderer.render()
# remove reference to renderer instance (not needed anymore)
del renderer
# are we required to include
# additional files?
if files:
# render additional files
files_output = self._render_files()
if files_output:
# max 2 new lines
output += files_output.replace('\n\n\n', '\n\n')
# return the configuration
return output
|
def json(self, validate=True, *args, **kwargs):
"""
returns a string formatted as **NetJSON DeviceConfiguration**;
performs validation before returning output;
``*args`` and ``**kwargs`` will be passed to ``json.dumps``;
:returns: string
"""
if validate:
self.validate()
# automatically adds NetJSON type
config = deepcopy(self.config)
config.update({'type': 'DeviceConfiguration'})
return json.dumps(config, *args, **kwargs)
|
def generate(self):
"""
Returns a ``BytesIO`` instance representing an in-memory tar.gz archive
containing the native router configuration.
:returns: in-memory tar.gz archive, instance of ``BytesIO``
"""
tar_bytes = BytesIO()
tar = tarfile.open(fileobj=tar_bytes, mode='w')
self._generate_contents(tar)
self._process_files(tar)
tar.close()
tar_bytes.seek(0) # set pointer to beginning of stream
# `mtime` parameter of gzip file must be 0, otherwise any checksum operation
# would return a different digest even when content is the same.
# to achieve this we must use the python `gzip` library because the `tarfile`
# library does not seem to offer the possibility to modify the gzip `mtime`.
gzip_bytes = BytesIO()
gz = gzip.GzipFile(fileobj=gzip_bytes, mode='wb', mtime=0)
gz.write(tar_bytes.getvalue())
gz.close()
gzip_bytes.seek(0) # set pointer to beginning of stream
return gzip_bytes
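# Standalone sketch of the reproducible-archive trick used above: pin both
# the tar members' mtime and the gzip header mtime to 0 so that identical
# contents always produce an identical digest (names here are illustrative).
import gzip
import hashlib
import tarfile
from io import BytesIO

def deterministic_targz(files):
    tar_bytes = BytesIO()
    with tarfile.open(fileobj=tar_bytes, mode='w') as tar:
        for name, contents in sorted(files.items()):
            data = contents.encode('utf8')
            info = tarfile.TarInfo(name=name)
            info.size = len(data)
            info.mtime = 0  # pinned member timestamp
            tar.addfile(tarinfo=info, fileobj=BytesIO(data))
    gzip_bytes = BytesIO()
    with gzip.GzipFile(fileobj=gzip_bytes, mode='wb', mtime=0) as gz:
        gz.write(tar_bytes.getvalue())
    return gzip_bytes.getvalue()

# the digest is stable across runs because no timestamps leak into the bytes
digest = hashlib.sha256(deterministic_targz({'etc/motd': 'hello\n'})).hexdigest()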
|
def write(self, name, path='./'):
"""
Like ``generate`` but writes to disk.
:param name: file name, the tar.gz extension will be added automatically
:param path: directory where the file will be written to, defaults to ``./``
:returns: None
"""
byte_object = self.generate()
file_name = '{0}.tar.gz'.format(name)
if not path.endswith('/'):
path += '/'
f = open('{0}{1}'.format(path, file_name), 'wb')
f.write(byte_object.getvalue())
f.close()
|
def _process_files(self, tar):
"""
Adds files specified in self.config['files'] to tarfile instance.
:param tar: tarfile instance
:returns: None
"""
# insert additional files
for file_item in self.config.get('files', []):
path = file_item['path']
# remove leading slashes from path
if path.startswith('/'):
path = path[1:]
self._add_file(tar=tar,
name=path,
contents=file_item['contents'],
mode=file_item.get('mode', DEFAULT_FILE_MODE))
|
def _add_file(self, tar, name, contents, mode=DEFAULT_FILE_MODE):
"""
Adds a single file in tarfile instance.
:param tar: tarfile instance
:param name: string representing filename or path
:param contents: string representing file contents
:param mode: string representing file mode, defaults to 644
:returns: None
"""
data = contents.encode('utf8')
byte_contents = BytesIO(data)
info = tarfile.TarInfo(name=name)
info.size = len(data)  # size in bytes, not characters
# mtime must be 0 or any checksum operation
# will return a different digest even when content is the same
info.mtime = 0
info.type = tarfile.REGTYPE
info.mode = int(mode, 8)  # octal string (e.g. '644') converted to an integer mode
tar.addfile(tarinfo=info, fileobj=byte_contents)
|
def to_intermediate(self):
"""
Converts the NetJSON configuration dictionary (self.config)
to the intermediate data structure (self.intermediate_data) that will
be then used by the renderer class to generate the router configuration
"""
self.validate()
self.intermediate_data = OrderedDict()
for converter_class in self.converters:
# skip unnecessary loop cycles
if not converter_class.should_run_forward(self.config):
continue
converter = converter_class(self)
value = converter.to_intermediate()
# maintain backward compatibility with backends
# that are currently in development by GSoC students
# TODO for >= 0.6.2: remove once all backends have upgraded
if value and isinstance(value, (tuple, list)): # pragma: nocover
value = OrderedDict(value)
if value:
self.intermediate_data = merge_config(self.intermediate_data,
value,
list_identifiers=['.name'])
|
def parse(self, native):
"""
Parses a native configuration and converts
it to a NetJSON configuration dictionary
"""
if not hasattr(self, 'parser') or not self.parser:
raise NotImplementedError('Parser class not specified')
parser = self.parser(native)
self.intermediate_data = parser.intermediate_data
del parser
self.to_netjson()
|
def to_netjson(self):
"""
Converts the intermediate data structure (self.intermediate_data)
to the NetJSON configuration dictionary (self.config)
"""
self.__backup_intermediate_data()
self.config = OrderedDict()
for converter_class in self.converters:
if not converter_class.should_run_backward(self.intermediate_data):
continue
converter = converter_class(self)
value = converter.to_netjson()
if value:
self.config = merge_config(self.config,
value,
list_identifiers=self.list_identifiers)
self.__restore_intermediate_data()
self.validate()
|
def merge_config(template, config, list_identifiers=None):
"""
Merges ``config`` on top of ``template``.
Conflicting keys are handled in the following way:
* simple values (e.g. ``str``, ``int``, ``float``) in ``config`` will
overwrite the ones in ``template``
* values of type ``list`` in both ``config`` and ``template`` will be
merged using the ``merge_list`` function
* values of type ``dict`` will be merged recursively
:param template: template ``dict``
:param config: config ``dict``
:param list_identifiers: ``list`` or ``None``
:returns: merged ``dict``
"""
result = template.copy()
for key, value in config.items():
if isinstance(value, dict):
node = result.get(key, OrderedDict())
result[key] = merge_config(node, value, list_identifiers)
elif isinstance(value, list) and isinstance(result.get(key), list):
result[key] = merge_list(result[key], value, list_identifiers)
else:
result[key] = value
return result
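# Usage sketch for merge_config: dict values merge recursively, scalar
# values from ``config`` win, and lists are delegated to merge_list below
# (the keys are illustrative).
template = {'general': {'hostname': 'template'}, 'dns_servers': ['10.0.0.1']}
config = {'general': {'timezone': 'UTC'}, 'dns_servers': ['10.0.0.2']}
merged = merge_config(template, config)
# merged == {'general': {'hostname': 'template', 'timezone': 'UTC'},
#            'dns_servers': ['10.0.0.1', '10.0.0.2']}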
|
def merge_list(list1, list2, identifiers=None):
"""
Merges ``list2`` on top of ``list1``.
If both lists contain dictionaries which have keys specified
in ``identifiers`` which have equal values, those dicts will
be merged (dicts in ``list2`` will override dicts in ``list1``).
The remaining elements are appended so that the resulting list
contains elements of both lists.
:param list1: ``list`` from template
:param list2: ``list`` from config
:param identifiers: ``list`` or ``None``
:returns: merged ``list``
"""
identifiers = identifiers or []
dict_map = {'list1': OrderedDict(), 'list2': OrderedDict()}
counter = 1
for list_ in [list1, list2]:
container = dict_map['list{0}'.format(counter)]
for el in list_:
# merge by internal python id by default
key = id(el)
# if el is a dict, merge by keys specified in ``identifiers``
if isinstance(el, dict):
for id_key in identifiers:
if id_key in el:
key = el[id_key]
break
container[key] = deepcopy(el)
counter += 1
merged = merge_config(dict_map['list1'], dict_map['list2'])
return list(merged.values())
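# Usage sketch for merge_list: dicts that share a value for one of the
# ``identifiers`` keys are merged, everything else is kept from both lists
# (values are illustrative).
list1 = [{'name': 'eth0', 'mtu': 1500}, 'keep-me']
list2 = [{'name': 'eth0', 'disabled': True}]
merged = merge_list(list1, list2, identifiers=['name'])
# merged == [{'name': 'eth0', 'mtu': 1500, 'disabled': True}, 'keep-me']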
|
def evaluate_vars(data, context=None):
"""
Evaluates variables in ``data``
:param data: data structure containing variables, may be
``str``, ``dict`` or ``list``
:param context: ``dict`` containing variables
:returns: modified data structure
"""
context = context or {}
if isinstance(data, (dict, list)):
if isinstance(data, dict):
loop_items = data.items()
elif isinstance(data, list):
loop_items = enumerate(data)
for key, value in loop_items:
data[key] = evaluate_vars(value, context)
elif isinstance(data, six.string_types):
vars_found = var_pattern.findall(data)
for var in vars_found:
var = var.strip()
# if found multiple variables, create a new regexp pattern for each
# variable, otherwise different variables would get the same value
# (see https://github.com/openwisp/netjsonconfig/issues/55)
if len(vars_found) > 1:
pattern = r'\{\{(\s*%s\s*)\}\}' % var
# in case of single variables, use the precompiled
# regexp pattern to save computation
else:
pattern = var_pattern
if var in context:
data = re.sub(pattern, context[var], data)
return data
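# Usage sketch for evaluate_vars (an assumption: the module-level
# ``var_pattern`` matches ``{{ name }}`` placeholders, consistent with the
# pattern built for the multi-variable branch above).
data = {'general': {'hostname': '{{ id }}-{{ role }}'}}
result = evaluate_vars(data, context={'id': 'node1', 'role': 'ap'})
# result == {'general': {'hostname': 'node1-ap'}}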
|
def get_copy(dict_, key, default=None):
"""
Looks for a key in a dictionary; if found, returns
a deep copy of its value, otherwise returns the default value
"""
value = dict_.get(key, default)
if value:
return deepcopy(value)
return value
|
def type_cast(self, item, schema=None):
"""
Loops over item and performs type casting
according to supplied schema fragment
"""
if schema is None:
schema = self._schema
properties = schema['properties']
for key, value in item.items():
if key not in properties:
continue
try:
json_type = properties[key]['type']
except KeyError:
json_type = None
if json_type == 'integer' and not isinstance(value, int):
value = int(value)
elif json_type == 'boolean' and not isinstance(value, bool):
value = value == '1'
item[key] = value
return item
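# Illustration of type_cast. Since the extracted function takes ``self``
# explicitly, a minimal stand-in object (hypothetical) is enough to run it:
class _Converter(object):
    _schema = {'properties': {'mtu': {'type': 'integer'},
                              'disabled': {'type': 'boolean'}}}

casted = type_cast(_Converter(), {'mtu': '1500', 'disabled': '1'})
# casted == {'mtu': 1500, 'disabled': True}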
|
def to_intermediate(self):
"""
Converts the NetJSON configuration dictionary (``self.config``)
to intermediate data structure (``self.intermediate_data``)
"""
result = OrderedDict()
# copy netjson dictionary
netjson = get_copy(self.netjson, self.netjson_key)
if isinstance(netjson, list):
# iterate over copied netjson data structure
for index, block in enumerate(netjson):
result = self.to_intermediate_loop(block, result, index + 1)
else:
result = self.to_intermediate_loop(netjson, result)
# return result, expects dict
return result
|
def to_netjson(self, remove_block=True):
"""
Converts the intermediate data structure (``self.intermediate_data``)
to a NetJSON configuration dictionary (``self.config``)
"""
result = OrderedDict()
# copy list
intermediate_data = list(self.intermediate_data[self.intermediate_key])
# iterate over copied intermediate data structure
for index, block in enumerate(intermediate_data):
if self.should_skip_block(block):
continue
# remove processed block from intermediate data
# this makes processing remaining blocks easier
# for some backends
if remove_block:
self.intermediate_data[self.intermediate_key].remove(block)
# specific converter operations are delegated
# to the ``to_netjson_loop`` method
result = self.to_netjson_loop(block, result, index + 1)
# return result, expects dict
return result
|
def _add_unique_file(self, item):
"""
adds a file in self.config['files'] only if not present already
"""
if item not in self.config['files']:
self.config['files'].append(item)
|
def _get_install_context(self):
"""
returns the template context for install.sh and uninstall.sh
"""
config = self.config
# layer2 VPN list
l2vpn = []
for vpn in self.config.get('openvpn', []):
if vpn.get('dev_type') != 'tap':
continue
tap = vpn.copy()
l2vpn.append(tap)
# bridge list
bridges = []
for interface in self.config.get('interfaces', []):
if interface['type'] != 'bridge':
continue
bridge = interface.copy()
if bridge.get('addresses'):
bridge['proto'] = interface['addresses'][0].get('proto')
bridge['ip'] = interface['addresses'][0].get('address')
bridges.append(bridge)
# crontabs present?
cron = False
for _file in config.get('files', []):
path = _file['path']
if path.startswith('/crontabs') or path.startswith('crontabs'):
cron = True
break
# return context
return dict(hostname=config['general']['hostname'], # hostname is required
l2vpn=l2vpn,
bridges=bridges,
radios=config.get('radios', []), # radios might be empty
cron=cron)
|
def _add_install(self, context):
"""
generates install.sh and adds it to included files
"""
contents = self._render_template('install.sh', context)
self.config.setdefault('files', []) # file list might be empty
# add install.sh to list of included files
self._add_unique_file({
"path": "/install.sh",
"contents": contents,
"mode": "755"
})
|
def _add_uninstall(self, context):
"""
generates uninstall.sh and adds it to included files
"""
contents = self._render_template('uninstall.sh', context)
self.config.setdefault('files', []) # file list might be empty
# add uninstall.sh to list of included files
self._add_unique_file({
"path": "/uninstall.sh",
"contents": contents,
"mode": "755"
})
|
def _add_tc_script(self):
"""
generates tc_script.sh and adds it to included files
"""
# fill context
context = dict(tc_options=self.config.get('tc_options', []))
contents = self._render_template('tc_script.sh', context)
self.config.setdefault('files', []) # file list might be empty
# add tc_script.sh to list of included files
self._add_unique_file({
"path": "/tc_script.sh",
"contents": contents,
"mode": "755"
})
|
def _generate_contents(self, tar):
"""
Adds configuration files to tarfile instance.
:param tar: tarfile instance
:returns: None
"""
uci = self.render(files=False)
# create a list with all the packages (and remove empty entries)
packages = re.split('package ', uci)
if '' in packages:
packages.remove('')
# create a file for each configuration package used
for package in packages:
lines = package.split('\n')
package_name = lines[0]
text_contents = '\n'.join(lines[2:])
text_contents = 'package {0}\n\n{1}'.format(package_name, text_contents)
self._add_file(tar=tar,
name='uci/{0}.conf'.format(package_name),
contents=text_contents)
# prepare template context for install and uninstall scripts
template_context = self._get_install_context()
# add install.sh to included files
self._add_install(template_context)
# add uninstall.sh to included files
self._add_uninstall(template_context)
# add vpn up and down scripts
self._add_openvpn_scripts()
# add tc_script
self._add_tc_script()
|
def render(self):
"""
Renders configuration by using the jinja2 templating engine
"""
# get jinja2 template
template_name = '{0}.jinja2'.format(self.get_name())
template = self.template_env.get_template(template_name)
# render template and cleanup
context = getattr(self.backend, 'intermediate_data', {})
output = template.render(data=context)
return self.cleanup(output)
|
def __intermediate_addresses(self, interface):
"""
converts NetJSON address to
UCI intermediate data structure
"""
address_list = self.get_copy(interface, 'addresses')
# do not ignore interfaces if they do not contain any address
if not address_list:
return [{'proto': 'none'}]
result = []
static = {}
dhcp = []
for address in address_list:
family = address.get('family')
# dhcp
if address['proto'] == 'dhcp':
address['proto'] = 'dhcp' if family == 'ipv4' else 'dhcpv6'
dhcp.append(self.__intermediate_address(address))
continue
if 'gateway' in address:
uci_key = 'gateway' if family == 'ipv4' else 'ip6gw'
interface[uci_key] = address['gateway']
# static
address_key = 'ipaddr' if family == 'ipv4' else 'ip6addr'
static.setdefault(address_key, [])
static[address_key].append('{address}/{mask}'.format(**address))
static.update(self.__intermediate_address(address))
if static:
# do not use CIDR notation when using a single ipv4
# see https://github.com/openwisp/netjsonconfig/issues/54
if len(static.get('ipaddr', [])) == 1:
network = ip_interface(six.text_type(static['ipaddr'][0]))
static['ipaddr'] = str(network.ip)
static['netmask'] = str(network.netmask)
# do not use lists when using a single ipv6 address
# (avoids changing the output of existing configurations)
if len(static.get('ip6addr', [])) == 1:
static['ip6addr'] = static['ip6addr'][0]
result.append(static)
if dhcp:
result += dhcp
return result
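# Example of the conversion performed above (illustrative values; assumes
# ``self._address_keys`` removes the NetJSON-only keys such as ``address``,
# ``mask`` and ``family``):
#
# NetJSON input:
#   [{'address': '192.168.1.1', 'mask': 24, 'proto': 'static',
#     'family': 'ipv4'}]
# intermediate output (single ipv4 address, so no CIDR notation):
#   [{'ipaddr': '192.168.1.1', 'netmask': '255.255.255.0',
#     'proto': 'static'}]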
|
def __intermediate_interface(self, interface, uci_name):
"""
converts NetJSON interface to
UCI intermediate data structure
"""
interface.update({
'.type': 'interface',
'.name': uci_name,
'ifname': interface.pop('name')
})
if 'network' in interface:
del interface['network']
if 'mac' in interface:
# mac address of wireless interface must
# be set in /etc/config/wireless, therefore
# we can skip this in /etc/config/network
if interface.get('type') != 'wireless':
interface['macaddr'] = interface['mac']
del interface['mac']
if 'autostart' in interface:
interface['auto'] = interface['autostart']
del interface['autostart']
if 'disabled' in interface:
interface['enabled'] = not interface['disabled']
del interface['disabled']
if 'wireless' in interface:
del interface['wireless']
if 'addresses' in interface:
del interface['addresses']
return interface
|
def __intermediate_address(self, address):
"""
deletes NetJSON address keys
"""
for key in self._address_keys:
if key in address:
del address[key]
return address
|
def __intermediate_bridge(self, interface, i):
"""
converts NetJSON bridge to
UCI intermediate data structure
"""
# ensure type "bridge" is only given to one logical interface
if interface['type'] == 'bridge' and i < 2:
bridge_members = ' '.join(interface.pop('bridge_members'))
# put bridge members in ifname attribute
if bridge_members:
interface['ifname'] = bridge_members
# if no members, this is an empty bridge
else:
interface['bridge_empty'] = True
del interface['ifname']
# bridge has already been defined
# but we need to add more references to it
elif interface['type'] == 'bridge' and i >= 2:
# openwrt adds "br-" prefix to bridge interfaces
# we need to take this into account when referring
# to these physical names
if 'br-' not in interface['ifname']:
interface['ifname'] = 'br-{ifname}'.format(**interface)
# do not repeat bridge attributes (they have already been processed)
for attr in ['type', 'bridge_members', 'stp', 'gateway']:
if attr in interface:
del interface[attr]
elif interface['type'] != 'bridge':
del interface['type']
return interface
|
def __intermediate_proto(self, interface, address):
"""
determines UCI interface "proto" option
"""
# proto defaults to static
address_proto = address.pop('proto', 'static')
if 'proto' not in interface:
return address_proto
else:
# allow override on interface level
return interface.pop('proto')
|
def __intermediate_dns_servers(self, uci, address):
"""
determines UCI interface "dns" option
"""
# allow override
if 'dns' in uci:
return uci['dns']
# ignore if using DHCP or if "proto" is none
if address['proto'] in ['dhcp', 'dhcpv6', 'none']:
return None
dns = self.netjson.get('dns_servers', None)
if dns:
return ' '.join(dns)
|
def __intermediate_dns_search(self, uci, address):
"""
determines UCI interface "dns_search" option
"""
# allow override
if 'dns_search' in uci:
return uci['dns_search']
# ignore if "proto" is none
if address['proto'] == 'none':
return None
dns_search = self.netjson.get('dns_search', None)
if dns_search:
return ' '.join(dns_search)
|
def _list_errors(e):
"""
Returns a list of violated schema fragments and related error messages
:param e: ``jsonschema.exceptions.ValidationError`` instance
"""
error_list = []
for value, error in zip(e.validator_value, e.context):
error_list.append((value, error.message))
if error.context:
error_list += _list_errors(error)
return error_list
|
def __intermediate_hwmode(self, radio):
"""
possible return values are: 11a, 11b, 11g
"""
protocol = radio['protocol']
if protocol in ['802.11a', '802.11b', '802.11g']:
# return 11a, 11b or 11g
return protocol[4:]
# determine hwmode depending on channel used
if radio['channel'] == 0:
# when using automatic channel selection, we need an
# additional parameter to determine the frequency band
return radio.get('hwmode')
elif radio['channel'] <= 13:
return '11g'
else:
return '11a'
|
def __intermediate_htmode(self, radio):
"""
only for mac80211 driver
"""
protocol = radio.pop('protocol')
channel_width = radio.pop('channel_width')
# allow overriding htmode
if 'htmode' in radio:
return radio['htmode']
if protocol == '802.11n':
return 'HT{0}'.format(channel_width)
elif protocol == '802.11ac':
return 'VHT{0}'.format(channel_width)
# disables n
return 'NONE'
|
def __netjson_protocol(self, radio):
"""
determines NetJSON protocol radio attribute
"""
htmode = radio.get('htmode')
hwmode = radio.get('hwmode', None)
if htmode and htmode.startswith('HT'):
return '802.11n'
elif htmode and htmode.startswith('VHT'):
return '802.11ac'
return '802.{0}'.format(hwmode)
|
def __netjson_channel_width(self, radio):
"""
determines NetJSON channel_width radio attribute
"""
htmode = radio.pop('htmode')
if htmode == 'NONE':
return 20
channel_width = htmode.replace('VHT', '').replace('HT', '')
# we need to override htmode
if '+' in channel_width or '-' in channel_width:
radio['htmode'] = htmode
channel_width = channel_width[0:-1]
return int(channel_width)
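# Examples of the htmode -> channel_width mapping above (illustrative):
#   'NONE'  -> 20    'HT20'  -> 20    'HT40' -> 40    'VHT80' -> 80
#   'HT40+' -> 40, and ``htmode`` is written back onto ``radio`` so the
#   '+'/'-' variant survives the round trip back to native configuration.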
|
def cleanup(self, output):
"""
Generates consistent OpenWRT/LEDE UCI output
"""
# correct indentation
output = output.replace(' ', '')\
.replace('\noption', '\n\toption')\
.replace('\nlist', '\n\tlist')
# convert True to 1 and False to 0
output = output.replace('True', '1')\
.replace('False', '0')
# max 2 consecutive \n delimiters
output = output.replace('\n\n\n', '\n\n')
# if output is present
# ensure it always ends with 1 new line
if output.endswith('\n\n'):
return output[0:-1]
return output
|
def _generate_contents(self, tar):
"""
Adds configuration files to tarfile instance.
:param tar: tarfile instance
:returns: None
"""
text = self.render(files=False)
# create a list with all the packages (and remove empty entries)
vpn_instances = vpn_pattern.split(text)
if '' in vpn_instances:
vpn_instances.remove('')
# create a file for each VPN
for vpn in vpn_instances:
lines = vpn.split('\n')
vpn_name = lines[0]
text_contents = '\n'.join(lines[2:])
# do not end with double new line
if text_contents.endswith('\n\n'):
text_contents = text_contents[0:-1]
self._add_file(tar=tar,
name='{0}{1}'.format(vpn_name, config_suffix),
contents=text_contents)
|
def auto_client(cls, host, server, ca_path=None, ca_contents=None,
cert_path=None, cert_contents=None, key_path=None,
key_contents=None):
"""
Returns a configuration dictionary representing an OpenVPN client configuration
that is compatible with the passed server configuration.
:param host: remote VPN server
:param server: dictionary representing a single OpenVPN server configuration
:param ca_path: optional string representing path to CA, will consequently add
a file in the resulting configuration dictionary
:param ca_contents: optional string representing contents of CA file
:param cert_path: optional string representing path to certificate, will consequently add
a file in the resulting configuration dictionary
:param cert_contents: optional string representing contents of cert file
:param key_path: optional string representing path to key, will consequently add
a file in the resulting configuration dictionary
:param key_contents: optional string representing contents of key file
:returns: dictionary representing a single OpenVPN client configuration
"""
# client defaults
client = {
"mode": "p2p",
"nobind": True,
"resolv_retry": "infinite",
"tls_client": True
}
# remote
port = server.get('port') or 1195
client['remote'] = [{'host': host, 'port': port}]
# proto
if server.get('proto') == 'tcp-server':
client['proto'] = 'tcp-client'
else:
client['proto'] = 'udp'
# determine if pull must be True
if 'server' in server or 'server_bridge' in server:
client['pull'] = True
# tls_client
if 'tls_server' not in server or not server['tls_server']:
client['tls_client'] = False
# ns_cert_type
ns_cert_type = {None: '',
'': '',
'client': 'server'}
client['ns_cert_type'] = ns_cert_type[server.get('ns_cert_type')]
# remote_cert_tls
remote_cert_tls = {None: '',
'': '',
'client': 'server'}
client['remote_cert_tls'] = remote_cert_tls[server.get('remote_cert_tls')]
copy_keys = ['name', 'dev_type', 'dev', 'comp_lzo', 'auth',
'cipher', 'ca', 'cert', 'key', 'pkcs12', 'mtu_disc', 'mtu_test',
'fragment', 'mssfix', 'keepalive', 'persist_tun', 'mute',
'persist_key', 'script_security', 'user', 'group', 'log',
'mute_replay_warnings', 'secret', 'reneg_sec', 'tls_timeout',
'tls_cipher', 'float', 'fast_io', 'verb']
for key in copy_keys:
if key in server:
client[key] = server[key]
files = cls._auto_client_files(client, ca_path, ca_contents,
cert_path, cert_contents,
key_path, key_contents)
return {
'openvpn': [client],
'files': files
}
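# Usage sketch (hedged): deriving a client from a minimal server block;
# the host, paths and server values are illustrative, and ``OpenVpn`` is
# assumed to be the backend class this classmethod is defined on.
server = {'proto': 'udp-server', 'port': 1194, 'dev': 'tun0',
          'dev_type': 'tun', 'server': '10.8.0.0 255.255.0.0',
          'tls_server': True}
client_config = OpenVpn.auto_client('vpn1.example.com', server,
                                    ca_path='/etc/openvpn/ca.pem',
                                    ca_contents='...')
# client_config['openvpn'][0]['remote'] == [{'host': 'vpn1.example.com',
#                                            'port': 1194}]
# client_config['files'] contains the CA file entry built by
# _auto_client_files below.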
|
def _auto_client_files(cls, client, ca_path=None, ca_contents=None, cert_path=None,
cert_contents=None, key_path=None, key_contents=None):
"""
returns a list of NetJSON extra files for automatically generated clients
produces side effects in ``client`` dictionary
"""
files = []
if ca_path and ca_contents:
client['ca'] = ca_path
files.append(dict(path=ca_path,
contents=ca_contents,
mode=DEFAULT_FILE_MODE))
if cert_path and cert_contents:
client['cert'] = cert_path
files.append(dict(path=cert_path,
contents=cert_contents,
mode=DEFAULT_FILE_MODE))
if key_path and key_contents:
client['key'] = key_path
files.append(dict(path=key_path,
contents=key_contents,
mode=DEFAULT_FILE_MODE,))
return files
|
def get_install_requires():
"""
parse requirements.txt, ignore links, exclude comments
"""
requirements = []
for line in open('requirements.txt').readlines():
# skip to next iteration if comment or empty line
if line.startswith('#') or line.strip() == '' or line.startswith('http') or line.startswith('git'):
continue
# add line to requirements
requirements.append(line.replace('\n', ''))
# add py2-ipaddress if python2
if sys.version_info.major < 3:
requirements.append('py2-ipaddress')
return requirements
|
def events(self, **kwargs):
"""Get all events for this report. Additional arguments may also be
specified that will be passed to the query function.
"""
return self.__api.events(query=EqualsOperator("report", self.hash_),
**kwargs)
|
def facts(self, **kwargs):
"""Get all facts of this node. Additional arguments may also be
specified that will be passed to the query function.
"""
return self.__api.facts(query=EqualsOperator("certname", self.name),
**kwargs)
|
def fact(self, name):
"""Get a single fact from this node."""
facts = self.facts(name=name)
return next(fact for fact in facts)
|
def resources(self, type_=None, title=None, **kwargs):
"""Get all resources of this node or all resources of the specified
type. Additional arguments may also be specified that will be passed
to the query function.
"""
if type_ is None:
resources = self.__api.resources(
query=EqualsOperator("certname", self.name),
**kwargs)
elif type_ is not None and title is None:
resources = self.__api.resources(
type_=type_,
query=EqualsOperator("certname", self.name),
**kwargs)
else:
resources = self.__api.resources(
type_=type_,
title=title,
query=EqualsOperator("certname", self.name),
**kwargs)
return resources
|
def resource(self, type_, title, **kwargs):
"""Get a resource matching the supplied type and title. Additional
arguments may also be specified that will be passed to the query
function.
"""
resources = self.__api.resources(
type_=type_,
title=title,
query=EqualsOperator("certname", self.name),
**kwargs)
return next(resource for resource in resources)
|
def reports(self, **kwargs):
"""Get all reports for this node. Additional arguments may also be
specified that will be passed to the query function.
"""
return self.__api.reports(
query=EqualsOperator("certname", self.name),
**kwargs)
|
def base_url(self):
"""A base_url that will be used to construct the final
URL we're going to query against.
:returns: A URL of the form: ``proto://host:port/url_path``.
:rtype: :obj:`string`
"""
return '{proto}://{host}:{port}{url_path}'.format(
proto=self.protocol,
host=self.host,
port=self.port,
url_path=self.url_path,
)
|
def _url(self, endpoint, path=None):
"""The complete URL we will end up querying. Depending on the
endpoint we pass in this will result in different URL's with
different prefixes.
:param endpoint: The PuppetDB API endpoint we want to query.
:type endpoint: :obj:`string`
:param path: An additional path if we don't wish to query the\
bare endpoint.
:type path: :obj:`string`
:returns: A URL constructed from :func:`base_url` with the\
appropriate API version/prefix and the rest of the path added\
to it.
:rtype: :obj:`string`
"""
log.debug('_url called with endpoint: {0} and path: {1}'.format(
endpoint, path))
try:
endpoint = ENDPOINTS[endpoint]
except KeyError:
# If we reach this we're trying to query an endpoint that doesn't
# exist. This shouldn't happen unless someone made a booboo.
raise APIError
url = '{base_url}/{endpoint}'.format(
base_url=self.base_url,
endpoint=endpoint,
)
if path is not None:
url = '{0}/{1}'.format(url, quote(path))
return url
|
def _query(self, endpoint, path=None, query=None,
order_by=None, limit=None, offset=None, include_total=False,
summarize_by=None, count_by=None, count_filter=None,
request_method='GET'):
"""This method actually querries PuppetDB. Provided an endpoint and an
optional path and/or query it will fire a request at PuppetDB. If
PuppetDB can be reached and answers within the timeout we'll decode
the response and give it back or raise for the HTTP Status Code
PuppetDB gave back.
:param endpoint: The PuppetDB API endpoint we want to query.
:type endpoint: :obj:`string`
:param path: An additional path if we don't wish to query the\
bare endpoint.
:type path: :obj:`string`
:param query: (optional) A query to further narrow down the resultset.
:type query: :obj:`string`
:param order_by: (optional) Set the order parameters for the resultset.
:type order_by: :obj:`string`
:param limit: (optional) Tell PuppetDB to limit its response to this\
number of objects.
:type limit: :obj:`int`
:param offset: (optional) Tell PuppetDB to start its response from\
the given offset. This is useful for implementing pagination\
but is not supported just yet.
:type offset: :obj:`string`
:param include_total: (optional) Include the total number of results
:type include_total: :obj:`bool`
:param summarize_by: (optional) Specify what type of object you'd like\
to see counts at the event-counts and aggregate-event-counts \
endpoints
:type summarize_by: :obj:`string`
:param count_by: (optional) Specify what type of object is counted
:type count_by: :obj:`string`
:param count_filter: (optional) Specify a filter for the results
:type count_filter: :obj:`string`
:raises: :class:`~pypuppetdb.errors.EmptyResponseError`
:returns: The decoded response from PuppetDB
:rtype: :obj:`dict` or :obj:`list`
"""
log.debug('_query called with endpoint: {0}, path: {1}, query: {2}, '
'limit: {3}, offset: {4}, summarize_by {5}, count_by {6}, '
'count_filter: {7}'.format(endpoint, path, query, limit,
offset, summarize_by, count_by,
count_filter))
url = self._url(endpoint, path=path)
payload = {}
if query is not None:
payload['query'] = query
if order_by is not None:
payload[PARAMETERS['order_by']] = order_by
if limit is not None:
payload['limit'] = limit
if include_total is True:
payload[PARAMETERS['include_total']] = \
json.dumps(include_total)
if offset is not None:
payload['offset'] = offset
if summarize_by is not None:
payload[PARAMETERS['summarize_by']] = summarize_by
if count_by is not None:
payload[PARAMETERS['count_by']] = count_by
if count_filter is not None:
payload[PARAMETERS['counts_filter']] = count_filter
if not payload:
payload = None
if not self.token:
auth = (self.username, self.password)
else:
auth = None
try:
if request_method.upper() == 'GET':
r = self._session.get(url, params=payload,
verify=self.ssl_verify,
cert=(self.ssl_cert, self.ssl_key),
timeout=self.timeout,
auth=auth)
elif request_method.upper() == 'POST':
r = self._session.post(url,
data=json.dumps(payload, default=str),
verify=self.ssl_verify,
cert=(self.ssl_cert, self.ssl_key),
timeout=self.timeout,
auth=auth)
else:
log.error("Only GET or POST supported, {0} unsupported".format(
request_method))
raise APIError
r.raise_for_status()
# get total number of results if requested with include-total
# just a quick hack - needs improvement
if 'X-Records' in r.headers:
self.last_total = r.headers['X-Records']
else:
self.last_total = None
json_body = r.json()
if json_body is not None:
return json_body
else:
del json_body
raise EmptyResponseError
except requests.exceptions.Timeout:
log.error("{0} {1}:{2} over {3}.".format(ERROR_STRINGS['timeout'],
self.host, self.port,
self.protocol.upper()))
raise
except requests.exceptions.ConnectionError:
log.error("{0} {1}:{2} over {3}.".format(ERROR_STRINGS['refused'],
self.host, self.port,
self.protocol.upper()))
raise
except requests.exceptions.HTTPError as err:
log.error("{0} {1}:{2} over {3}.".format(err.response.text,
self.host, self.port,
self.protocol.upper()))
raise
|
def nodes(self, unreported=2, with_status=False, **kwargs):
"""Query for nodes by either name or query. If both aren't
provided this will return a list of all nodes. This method
also fetches the nodes status and event counts of the latest
report from puppetdb.
:param with_status: (optional) include the node status in the\
returned nodes
:type with_status: :bool:
:param unreported: (optional) number of hours after which a node
gets marked as unreported
:type unreported: :obj:`None` or integer
:param \*\*kwargs: The rest of the keyword arguments are passed
to the _query function
:returns: A generator yielding Nodes.
:rtype: :class:`pypuppetdb.types.Node`
"""
nodes = self._query('nodes', **kwargs)
now = datetime.datetime.utcnow()
# If we happen to only get one node back it
# won't be inside a list so iterating over it
# goes boom. Therefore we wrap a list around it.
if isinstance(nodes, dict):
nodes = [nodes, ]
if with_status:
latest_events = self.event_counts(
query=EqualsOperator("latest_report?", True),
summarize_by='certname'
)
for node in nodes:
node['status_report'] = None
node['events'] = None
if with_status:
status = [s for s in latest_events
if s['subject']['title'] == node['certname']]
try:
node['status_report'] = node['latest_report_status']
if status:
node['events'] = status[0]
except KeyError:
if status:
node['events'] = status = status[0]
if status['successes'] > 0:
node['status_report'] = 'changed'
if status['noops'] > 0:
node['status_report'] = 'noop'
if status['failures'] > 0:
node['status_report'] = 'failed'
else:
node['status_report'] = 'unchanged'
# node report age
if node['report_timestamp'] is not None:
try:
last_report = json_to_datetime(
node['report_timestamp'])
last_report = last_report.replace(tzinfo=None)
unreported_border = now - timedelta(hours=unreported)
if last_report < unreported_border:
delta = (now - last_report)
node['unreported'] = True
node['unreported_time'] = '{0}d {1}h {2}m'.format(
delta.days,
int(delta.seconds / 3600),
int((delta.seconds % 3600) / 60)
)
except AttributeError:
node['unreported'] = True
if not node['report_timestamp']:
node['unreported'] = True
yield Node(self,
name=node['certname'],
deactivated=node['deactivated'],
expired=node['expired'],
report_timestamp=node['report_timestamp'],
catalog_timestamp=node['catalog_timestamp'],
facts_timestamp=node['facts_timestamp'],
status_report=node['status_report'],
noop=node.get('latest_report_noop'),
noop_pending=node.get('latest_report_noop_pending'),
events=node['events'],
unreported=node.get('unreported'),
unreported_time=node.get('unreported_time'),
report_environment=node['report_environment'],
catalog_environment=node['catalog_environment'],
facts_environment=node['facts_environment'],
latest_report_hash=node.get('latest_report_hash'),
cached_catalog_status=node.get('cached_catalog_status')
)
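# Usage sketch for nodes() (hedged: ``connect`` is pypuppetdb's documented
# entry point; the host is illustrative).
import pypuppetdb

db = pypuppetdb.connect(host='puppetdb.example.com')
for node in db.nodes(with_status=True, unreported=4):
    print(node)  # a Node prints as its certname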
|
def node(self, name):
"""Gets a single node from PuppetDB.
:param name: The name of the node to search for.
:type name: :obj:`string`
:return: An instance of Node
:rtype: :class:`pypuppetdb.types.Node`
"""
nodes = self.nodes(path=name)
return next(node for node in nodes)
|
def edges(self, **kwargs):
"""Get the known catalog edges, formed between two resources.
:param \*\*kwargs: The rest of the keyword arguments are passed
to the _query function.
:returns: A generator yielding Edges.
:rtype: :class:`pypuppetdb.types.Edge`
"""
edges = self._query('edges', **kwargs)
for edge in edges:
identifier_source = edge['source_type'] + \
'[' + edge['source_title'] + ']'
identifier_target = edge['target_type'] + \
'[' + edge['target_title'] + ']'
yield Edge(source=self.resources[identifier_source],
target=self.resources[identifier_target],
relationship=edge['relationship'],
node=edge['certname'])
|
def facts(self, name=None, value=None, **kwargs):
"""Query for facts limited by either name, value and/or query.
:param name: (Optional) Only return facts that match this name.
:type name: :obj:`string`
:param value: (Optional) Only return facts of `name` that\
match this value. Use of this parameter requires the `name`\
parameter be set.
:type value: :obj:`string`
:param \*\*kwargs: The rest of the keyword arguments are passed
to the _query function
:returns: A generator yielding Facts.
:rtype: :class:`pypuppetdb.types.Fact`
"""
if name is not None and value is not None:
path = '{0}/{1}'.format(name, value)
elif name is not None and value is None:
path = name
else:
path = None
facts = self._query('facts', path=path, **kwargs)
for fact in facts:
yield Fact(
node=fact['certname'],
name=fact['name'],
value=fact['value'],
environment=fact['environment']
)
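
# Sketch: every 'osfamily' fact across the infrastructure, assuming
# `db` comes from connect(); Fact attributes mirror the constructor.
for fact in db.facts('osfamily'):
    print('{0}: {1}'.format(fact.node, fact.value))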
|
def resources(self, type_=None, title=None, **kwargs):
"""Query for resources limited by either type and/or title or query.
This will yield a Resources object for every returned resource.
:param type_: (Optional) The resource type. This can be any resource
type referenced in\
'https://docs.puppetlabs.com/references/latest/type.html'
:type type_: :obj:`string`
:param title: (Optional) The name of the resource as declared as the
'namevar' in the Puppet Manifests. This parameter requires the\
`type_` parameter be set.
:type title: :obj:`string`
:param \*\*kwargs: The rest of the keyword arguments are passed
to the _query function
:returns: A generator yielding Resources
:rtype: :class:`pypuppetdb.types.Resource`
"""
path = None
if type_ is not None:
type_ = self._normalize_resource_type(type_)
if title is not None:
path = '{0}/{1}'.format(type_, title)
            else:
                path = type_
resources = self._query('resources', path=path, **kwargs)
for resource in resources:
yield Resource(
node=resource['certname'],
name=resource['title'],
type_=resource['type'],
tags=resource['tags'],
exported=resource['exported'],
sourcefile=resource['file'],
sourceline=resource['line'],
parameters=resource['parameters'],
environment=resource['environment'],
)
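
# Sketch: all declared Service[ntp] resources; the 'ensure' parameter
# lookup is illustrative and depends on the manifests in use.
for resource in db.resources('Service', 'ntp'):
    print(resource.node, resource.parameters.get('ensure'))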
|
def catalog(self, node):
"""Get the available catalog for a given node.
:param node: (Required) The name of the PuppetDB node.
:type: :obj:`string`
:returns: An instance of Catalog
:rtype: :class:`pypuppetdb.types.Catalog`
"""
catalogs = self.catalogs(path=node)
        return next(catalogs)
|
def catalogs(self, **kwargs):
"""Get the catalog information from the infrastructure based on path
and/or query results. It is strongly recommended to include query
and/or paging parameters for this endpoint to prevent large result
sets or PuppetDB performance bottlenecks.
:param \*\*kwargs: The rest of the keyword arguments are passed
to the _query function.
:returns: A generator yielding Catalogs
:rtype: :class:`pypuppetdb.types.Catalog`
"""
catalogs = self._query('catalogs', **kwargs)
        if isinstance(catalogs, dict):
catalogs = [catalogs, ]
for catalog in catalogs:
yield Catalog(node=catalog['certname'],
edges=catalog['edges']['data'],
resources=catalog['resources']['data'],
version=catalog['version'],
transaction_uuid=catalog['transaction_uuid'],
environment=catalog['environment'],
code_id=catalog.get('code_id'),
catalog_uuid=catalog.get('catalog_uuid'))
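
# Sketch: page through catalogs as the docstring advises; `limit` and
# `offset` are assumed to be forwarded by _query as PuppetDB paging
# parameters.
for catalog in db.catalogs(limit=10, offset=0):
    print(catalog)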
|
def events(self, **kwargs):
"""A report is made up of events which can be queried either
individually or based on their associated report hash. It is strongly
recommended to include query and/or paging parameters for this
endpoint to prevent large result sets or PuppetDB performance
bottlenecks.
:param \*\*kwargs: The rest of the keyword arguments are passed
to the _query function
:returns: A generator yielding Events
:rtype: :class:`pypuppetdb.types.Event`
"""
events = self._query('events', **kwargs)
for event in events:
yield Event(
node=event['certname'],
status=event['status'],
timestamp=event['timestamp'],
hash_=event['report'],
title=event['resource_title'],
property_=event['property'],
message=event['message'],
new_value=event['new_value'],
old_value=event['old_value'],
type_=event['resource_type'],
class_=event['containing_class'],
execution_path=event['containment_path'],
source_file=event['file'],
line_number=event['line'],
)
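
# Sketch: all events belonging to a single report, keyed on its report
# hash ('<report-hash>' is a placeholder).
for event in db.events(query=EqualsOperator('report', '<report-hash>')):
    print(event)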
|
def aggregate_event_counts(self, summarize_by, query=None,
count_by=None, count_filter=None):
"""Get event counts from puppetdb aggregated into a single map.
:param summarize_by: (Required) The object type to be counted on.
Valid values are 'containing_class', 'resource'
and 'certname' or any comma-separated value
thereof.
:type summarize_by: :obj:`string`
:param query: (Optional) The PuppetDB query to filter the results.
This query is passed to the `events` endpoint.
:type query: :obj:`string`
:param count_by: (Optional) The object type that is counted when
building the counts of 'successes', 'failures',
                         'noops' and 'skips'. Supported values are 'certname'
                         and 'resource' (default).
:type count_by: :obj:`string`
:param count_filter: (Optional) A JSON query that is applied to the
event-counts output but before the results are
aggregated. Supported operators are `=`, `>`,
`<`, `>=`, and `<=`. Supported fields are
`failures`, `successes`, `noops`, and `skips`.
:type count_filter: :obj:`string`
:returns: A dictionary of name/value results.
:rtype: :obj:`dict`
"""
return self._query('aggregate-event-counts',
query=query, summarize_by=summarize_by,
count_by=count_by, count_filter=count_filter)
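
# Sketch: aggregate the latest-report event counts per certname; the
# result keys ('successes', 'failures', 'noops', 'skips', ...) follow
# PuppetDB's aggregate-event-counts endpoint.
counts = db.aggregate_event_counts(
    summarize_by='certname',
    query=EqualsOperator('latest_report?', True))
print(counts)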
|
def reports(self, **kwargs):
"""Get reports for our infrastructure. It is strongly recommended
to include query and/or paging parameters for this endpoint to
prevent large result sets and potential PuppetDB performance
bottlenecks.
:param \*\*kwargs: The rest of the keyword arguments are passed
to the _query function
        :returns: A generator yielding Reports
:rtype: :class:`pypuppetdb.types.Report`
"""
reports = self._query('reports', **kwargs)
for report in reports:
yield Report(
api=self,
node=report['certname'],
hash_=report['hash'],
start=report['start_time'],
end=report['end_time'],
received=report['receive_time'],
version=report['configuration_version'],
format_=report['report_format'],
agent_version=report['puppet_version'],
transaction=report['transaction_uuid'],
environment=report['environment'],
status=report['status'],
noop=report.get('noop'),
noop_pending=report.get('noop_pending'),
metrics=report['metrics']['data'],
logs=report['logs']['data'],
code_id=report.get('code_id'),
catalog_uuid=report.get('catalog_uuid'),
cached_catalog_status=report.get('cached_catalog_status')
)
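
# Sketch: recent reports for one node, capped with `limit` to keep the
# result set small as the docstring recommends.
for report in db.reports(
        query=EqualsOperator('certname', 'host.example.com'),
        limit=5):
    print(report)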
|
def inventory(self, **kwargs):
"""Get Node and Fact information with an alternative query syntax
for structured facts instead of using the facts, fact-contents and
factsets endpoints for many fact-related queries.
:param \*\*kwargs: The rest of the keyword arguments are passed
to the _query function.
:returns: A generator yielding Inventory
:rtype: :class:`pypuppetdb.types.Inventory`
"""
inventory = self._query('inventory', **kwargs)
for inv in inventory:
yield Inventory(
node=inv['certname'],
time=inv['timestamp'],
environment=inv['environment'],
facts=inv['facts'],
trusted=inv['trusted']
)
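
# Sketch: the inventory endpoint allows dotted paths into structured
# facts, e.g. matching every Debian-family node.
for inv in db.inventory(query=EqualsOperator('facts.osfamily', 'Debian')):
    print(inv.node)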
|
def versioncmp(v1, v2):
"""Compares two objects, x and y, and returns an integer according to the
outcome. The return value is negative if x < y, zero if x == y and
positive if x > y.
:param v1: The first object to compare.
:param v2: The second object to compare.
:returns: -1, 0 or 1.
:rtype: :obj:`int`
"""
def normalize(v):
"""Removes leading zeroes from right of a decimal point from v and
returns an array of values separated by '.'
:param v: The data to normalize.
:returns: An list representation separated by '.' with all leading
zeroes stripped.
:rtype: :obj:`list`
"""
return [int(x) for x in re.sub(r'(\.0+)*$', '', v).split(".")]
try:
return cmp(normalize(v1), normalize(v2))
except NameError:
return (normalize(v1) > normalize(v2)) - (
normalize(v1) < normalize(v2))
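
# Illustration of the intended ordering: components compare numerically
# and trailing zero segments are stripped by normalize.
assert versioncmp('1.2', '1.10') < 0    # 2 < 10 numerically
assert versioncmp('1.0.0', '1.0') == 0  # trailing '.0' segments ignored
assert versioncmp('2.0', '1.9') > 0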
|
def connect(host='localhost', port=8080, ssl_verify=False, ssl_key=None,
ssl_cert=None, timeout=10, protocol=None, url_path='/',
username=None, password=None, token=None):
"""Connect with PuppetDB. This will return an object allowing you
to query the API through its methods.
:param host: (Default: 'localhost;) Hostname or IP of PuppetDB.
:type host: :obj:`string`
:param port: (Default: '8080') Port on which to talk to PuppetDB.
:type port: :obj:`int`
:param ssl_verify: (optional) Verify PuppetDB server certificate.
:type ssl_verify: :obj:`bool` or :obj:`string` True, False or filesystem \
path to CA certificate.
:param ssl_key: (optional) Path to our client secret key.
:type ssl_key: :obj:`None` or :obj:`string` representing a filesystem\
path.
:param ssl_cert: (optional) Path to our client certificate.
:type ssl_cert: :obj:`None` or :obj:`string` representing a filesystem\
path.
:param timeout: (Default: 10) Number of seconds to wait for a response.
:type timeout: :obj:`int`
:param protocol: (optional) Explicitly specify the protocol to be used
(especially handy when using HTTPS with ssl_verify=False and
without certs)
:type protocol: :obj:`None` or :obj:`string`
:param url_path: (Default: '/') The URL path where PuppetDB is served
:type url_path: :obj:`None` or :obj:`string`
:param username: (optional) The username to use for HTTP basic
authentication
:type username: :obj:`None` or :obj:`string`
:param password: (optional) The password to use for HTTP basic
authentication
:type password: :obj:`None` or :obj:`string`
:param token: (optional) The x-auth token to use for X-Authentication
:type token: :obj:`None` or :obj:`string`
"""
return BaseAPI(host=host, port=port,
timeout=timeout, ssl_verify=ssl_verify, ssl_key=ssl_key,
ssl_cert=ssl_cert, protocol=protocol, url_path=url_path,
username=username, password=password, token=token)
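
# Sketch: HTTPS connection with client certificates (all paths are
# placeholders), followed by a quick sanity query.
db = connect(host='puppetdb.example.com', port=8081,
             ssl_verify='/etc/ssl/ca.pem',
             ssl_key='/etc/ssl/client.key',
             ssl_cert='/etc/ssl/client.pem')
for fact in db.facts('kernel'):
    print(fact)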
|
def collection_callback(result=None):
"""
:type result: opendnp3.CommandPointResult
"""
print("Header: {0} | Index: {1} | State: {2} | Status: {3}".format(
result.headerIndex,
result.index,
opendnp3.CommandPointStateToString(result.state),
opendnp3.CommandStatusToString(result.status)
))
|
def command_callback(result=None):
"""
:type result: opendnp3.ICommandTaskResult
"""
print("Received command result with summary: {}".format(opendnp3.TaskCompletionToString(result.summary)))
result.ForeachItem(collection_callback)
|
def main():
"""The Master has been started from the command line. Execute ad-hoc tests if desired."""
# app = MyMaster()
app = MyMaster(log_handler=MyLogger(),
listener=AppChannelListener(),
soe_handler=SOEHandler(),
master_application=MasterApplication())
_log.debug('Initialization complete. In command loop.')
# Ad-hoc tests can be performed at this point. See master_cmd.py for examples.
app.shutdown()
_log.debug('Exiting.')
exit()
|
def send_direct_operate_command(self, command, index, callback=asiodnp3.PrintingCommandCallback.Get(),
config=opendnp3.TaskConfig().Default()):
"""
Direct operate a single command
:param command: command to operate
:param index: index of the command
:param callback: callback that will be invoked upon completion or failure
:param config: optional configuration that controls normal callbacks and allows the user to be specified for SA
"""
self.master.DirectOperate(command, index, callback, config)
|
def send_direct_operate_command_set(self, command_set, callback=asiodnp3.PrintingCommandCallback.Get(),
config=opendnp3.TaskConfig().Default()):
"""
Direct operate a set of commands
:param command_set: set of command headers
:param callback: callback that will be invoked upon completion or failure
:param config: optional configuration that controls normal callbacks and allows the user to be specified for SA
"""
self.master.DirectOperate(command_set, callback, config)
|
def send_select_and_operate_command(self, command, index, callback=asiodnp3.PrintingCommandCallback.Get(),
config=opendnp3.TaskConfig().Default()):
"""
Select and operate a single command
:param command: command to operate
:param index: index of the command
:param callback: callback that will be invoked upon completion or failure
:param config: optional configuration that controls normal callbacks and allows the user to be specified for SA
"""
self.master.SelectAndOperate(command, index, callback, config)
|
def send_select_and_operate_command_set(self, command_set, callback=asiodnp3.PrintingCommandCallback.Get(),
config=opendnp3.TaskConfig().Default()):
"""
Select and operate a set of commands
:param command_set: set of command headers
:param callback: callback that will be invoked upon completion or failure
:param config: optional configuration that controls normal callbacks and allows the user to be specified for SA
"""
self.master.SelectAndOperate(command_set, callback, config)
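
# Sketch: latch a relay on at index 5 via direct operate, assuming
# `master_app` is an instance of the class these methods belong to;
# ControlRelayOutputBlock and ControlCode come from the pydnp3
# opendnp3 bindings.
crob = opendnp3.ControlRelayOutputBlock(opendnp3.ControlCode.LATCH_ON)
master_app.send_direct_operate_command(crob, 5, callback=command_callback)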
|
def Process(self, info, values):
"""
Process measurement data.
:param info: HeaderInfo
:param values: A collection of values received from the Outstation (various data types are possible).
"""
visitor_class_types = {
opendnp3.ICollectionIndexedBinary: VisitorIndexedBinary,
opendnp3.ICollectionIndexedDoubleBitBinary: VisitorIndexedDoubleBitBinary,
opendnp3.ICollectionIndexedCounter: VisitorIndexedCounter,
opendnp3.ICollectionIndexedFrozenCounter: VisitorIndexedFrozenCounter,
opendnp3.ICollectionIndexedAnalog: VisitorIndexedAnalog,
opendnp3.ICollectionIndexedBinaryOutputStatus: VisitorIndexedBinaryOutputStatus,
opendnp3.ICollectionIndexedAnalogOutputStatus: VisitorIndexedAnalogOutputStatus,
opendnp3.ICollectionIndexedTimeAndInterval: VisitorIndexedTimeAndInterval
}
visitor_class = visitor_class_types[type(values)]
visitor = visitor_class()
values.Foreach(visitor)
for index, value in visitor.index_and_value:
log_string = 'SOEHandler.Process {0}\theaderIndex={1}\tdata_type={2}\tindex={3}\tvalue={4}'
_log.debug(log_string.format(info.gv, info.headerIndex, type(values).__name__, index, value))
|
def main():
"""The Outstation has been started from the command line. Execute ad-hoc tests if desired."""
app = OutstationApplication()
_log.debug('Initialization complete. In command loop.')
# Ad-hoc tests can be inserted here if desired. See outstation_cmd.py for examples.
app.shutdown()
_log.debug('Exiting.')
exit()
|
def configure_stack():
"""Set up the OpenDNP3 configuration."""
stack_config = asiodnp3.OutstationStackConfig(opendnp3.DatabaseSizes.AllTypes(10))
stack_config.outstation.eventBufferConfig = opendnp3.EventBufferConfig().AllTypes(10)
stack_config.outstation.params.allowUnsolicited = True
stack_config.link.LocalAddr = 10
stack_config.link.RemoteAddr = 1
stack_config.link.KeepAliveTimeout = openpal.TimeDuration().Max()
return stack_config
|
def configure_database(db_config):
"""
Configure the Outstation's database of input point definitions.
Configure two Analog points (group/variation 30.1) at indexes 1 and 2.
Configure two Binary points (group/variation 1.2) at indexes 1 and 2.
"""
db_config.analog[1].clazz = opendnp3.PointClass.Class2
db_config.analog[1].svariation = opendnp3.StaticAnalogVariation.Group30Var1
db_config.analog[1].evariation = opendnp3.EventAnalogVariation.Group32Var7
db_config.analog[2].clazz = opendnp3.PointClass.Class2
db_config.analog[2].svariation = opendnp3.StaticAnalogVariation.Group30Var1
db_config.analog[2].evariation = opendnp3.EventAnalogVariation.Group32Var7
db_config.binary[1].clazz = opendnp3.PointClass.Class2
db_config.binary[1].svariation = opendnp3.StaticBinaryVariation.Group1Var2
db_config.binary[1].evariation = opendnp3.EventBinaryVariation.Group2Var2
db_config.binary[2].clazz = opendnp3.PointClass.Class2
db_config.binary[2].svariation = opendnp3.StaticBinaryVariation.Group1Var2
db_config.binary[2].evariation = opendnp3.EventBinaryVariation.Group2Var2
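
# Sketch of how the two helpers combine when building the outstation;
# `dbConfig` is the database-configuration member of the
# OutstationStackConfig created in configure_stack().
stack_config = configure_stack()
configure_database(stack_config.dbConfig)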
|
def GetApplicationIIN(self):
"""Return the application-controlled IIN field."""
application_iin = opendnp3.ApplicationIIN()
application_iin.configCorrupt = False
application_iin.deviceTrouble = False
application_iin.localControl = False
application_iin.needTime = False
# Just for testing purposes, convert it to an IINField and display the contents of the two bytes.
iin_field = application_iin.ToIIN()
_log.debug('OutstationApplication.GetApplicationIIN: IINField LSB={}, MSB={}'.format(iin_field.LSB,
iin_field.MSB))
return application_iin
|
def process_point_value(cls, command_type, command, index, op_type):
"""
A PointValue was received from the Master. Process its payload.
:param command_type: (string) Either 'Select' or 'Operate'.
:param command: A ControlRelayOutputBlock or else a wrapped data value (AnalogOutputInt16, etc.).
:param index: (integer) DNP3 index of the payload's data definition.
:param op_type: An OperateType, or None if command_type == 'Select'.
"""
_log.debug('Processing received point value for index {}: {}'.format(index, command))
|
def apply_update(self, value, index):
"""
Record an opendnp3 data value (Analog, Binary, etc.) in the outstation's database.
The data value gets sent to the Master as a side-effect.
:param value: An instance of Analog, Binary, or another opendnp3 data value.
:param index: (integer) Index of the data definition in the opendnp3 database.
"""
_log.debug('Recording {} measurement, index={}, value={}'.format(type(value).__name__, index, value.value))
builder = asiodnp3.UpdateBuilder()
builder.Update(value, index)
update = builder.Build()
OutstationApplication.get_outstation().Apply(update)
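
# Sketch: record (and thereby publish) a new Analog measurement at
# point index 1, assuming `app` is the running OutstationApplication.
app.apply_update(opendnp3.Analog(3.14), 1)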
|