code | docstring |
|---|---|
def insert(table='filter', chain=None, position=None, rule=None, family='ipv4'):
'''
Insert a rule into the specified table/chain, at the specified position.
This function accepts a rule in a standard iptables command format,
starting with the chain. Trying to force users to adapt to a new
method of creating rules would be irritating at best, and we
already have a parser that can handle it.
If the position specified is a negative number, then the insert will be
performed counting from the end of the list. For instance, a position
of -1 will insert the rule as the second to last rule. To insert a rule
in the last position, use the append function instead.
CLI Examples:
.. code-block:: bash
salt '*' iptables.insert filter INPUT position=3 \\
rule='-m state --state RELATED,ESTABLISHED -j ACCEPT'
IPv6:
salt '*' iptables.insert filter INPUT position=3 \\
rule='-m state --state RELATED,ESTABLISHED -j ACCEPT' \\
family=ipv6
'''
if not chain:
return 'Error: Chain needs to be specified'
if not position:
return 'Error: Position needs to be specified or use append (-A)'
if not rule:
return 'Error: Rule needs to be specified'
if position < 0:
rules = get_rules(family=family)
size = len(rules[table][chain]['rules'])
position = (size + position) + 1
if position == 0:
position = 1
wait = '--wait' if _has_option('--wait', family) else ''
returnCheck = check(table, chain, rule, family)
if isinstance(returnCheck, bool) and returnCheck:
return False
cmd = '{0} {1} -t {2} -I {3} {4} {5}'.format(
_iptables_cmd(family), wait, table, chain, position, rule)
out = __salt__['cmd.run'](cmd)
return out | Insert a rule into the specified table/chain, at the specified position.
This function accepts a rule in a standard iptables command format,
starting with the chain. Trying to force users to adapt to a new
method of creating rules would be irritating at best, and we
already have a parser that can handle it.
If the position specified is a negative number, then the insert will be
performed counting from the end of the list. For instance, a position
of -1 will insert the rule as the second to last rule. To insert a rule
in the last position, use the append function instead.
CLI Examples:
.. code-block:: bash
salt '*' iptables.insert filter INPUT position=3 \\
rule='-m state --state RELATED,ESTABLISHED -j ACCEPT'
IPv6:
salt '*' iptables.insert filter INPUT position=3 \\
rule='-m state --state RELATED,ESTABLISHED -j ACCEPT' \\
family=ipv6 |
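A quick standalone sketch of the negative-position arithmetic described above (hypothetical values, not part of the Salt module): with 5 existing rules, position=-1 becomes (5 + -1) + 1 = 5, so the new rule lands second to last.
size, position = 5, -1          # assume the chain currently holds 5 rules
position = (size + position) + 1
print(position)                 # 5 -> iptables -I <chain> 5 inserts before the last rule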
def dfs_iterative(graph, start, seen):
"""DFS, detect connected component, iterative implementation
:param graph: directed graph in listlist or listdict format
:param int start: vertex at which to start the graph exploration
:param boolean-table seen: will be set true for the connected component
containing node.
:complexity: `O(|V|+|E|)`
"""
seen[start] = True
to_visit = [start]
while to_visit:
node = to_visit.pop()
for neighbor in graph[node]:
if not seen[neighbor]:
seen[neighbor] = True
to_visit.append(neighbor) | DFS, detect connected component, iterative implementation
:param graph: directed graph in listlist or listdict format
:param int start: vertex at which to start the graph exploration
:param boolean-table seen: will be set true for the connected component
containing node.
:complexity: `O(|V|+|E|)` |
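A minimal usage sketch, assuming dfs_iterative above is in scope: mark the connected component reachable from vertex 0 of a small listlist graph.
graph = [[1, 2], [0], [0, 3], [2], []]  # vertex 4 is unreachable from 0
seen = [False] * len(graph)
dfs_iterative(graph, 0, seen)
print(seen)  # [True, True, True, True, False]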
def intinlist(lst):
"""test if int in list"""
for item in lst:
try:
item = int(item)
return True
except ValueError:
pass
return False | test if int in list |
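Usage sketch, assuming intinlist above is in scope:
print(intinlist(['a', 'b', '3']))  # True  -- '3' converts to int
print(intinlist(['a', 'b']))       # False -- nothing converts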
def set_target(self, target):
'''Set the target to use (one of buildozer.targets, such as "android")
'''
self.targetname = target
m = __import__('buildozer.targets.{0}'.format(target),
fromlist=['buildozer'])
self.target = m.get_target(self)
self.check_build_layout()
self.check_configuration_tokens() | Set the target to use (one of buildozer.targets, such as "android") |
def top_i_answer(self, i):
"""获取排名某一位的答案.
:param int i: 要获取的答案的排名
:return: 答案对象,能直接获取的属性参见answers方法
:rtype: Answer
"""
for j, a in enumerate(self.answers):
if j == i - 1:
return a | 获取排名某一位的答案.
:param int i: 要获取的答案的排名
:return: 答案对象,能直接获取的属性参见answers方法
:rtype: Answer |
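The rank is 1-based; a standalone sketch of the same lookup over a plain list (hypothetical data, not the real Answer objects):
answers = ['first', 'second', 'third']
i = 2
print(next(a for j, a in enumerate(answers) if j == i - 1))  # 'second'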
def parse_date(my_date):
"""Parse a date into canonical format of datetime.dateime.
:param my_date: Either datetime.datetime or string in
'%Y-%m-%dT%H:%M:%SZ' format.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
:return: A datetime.datetime.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Parse a date and make sure it has no time zone.
"""
if isinstance(my_date, datetime.datetime):
result = my_date
elif isinstance(my_date, str):
result = datetime.datetime.strptime(my_date, '%Y-%m-%dT%H:%M:%SZ')
else:
raise ValueError('Unexpected date format for "%s" of type "%s"' % (
str(my_date), type(my_date)))
assert result.tzinfo is None, 'Unexpected tzinfo for date %s' % (
result)
return result | Parse a date into canonical format of datetime.datetime.
:param my_date: Either datetime.datetime or string in
'%Y-%m-%dT%H:%M:%SZ' format.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
:return: A datetime.datetime.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Parse a date and make sure it has no time zone. |
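Usage sketch, assuming parse_date above is in scope:
import datetime
d = parse_date('2021-06-01T12:30:00Z')
print(d)              # 2021-06-01 12:30:00 (naive, no tzinfo)
print(parse_date(d))  # an existing naive datetime passes straight through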
def guess_type_tag(self, input_bytes, filename):
""" Try to guess the type_tag for this sample """
mime_to_type = {'application/jar': 'jar',
'application/java-archive': 'jar',
'application/octet-stream': 'data',
'application/pdf': 'pdf',
'application/vnd.ms-cab-compressed': 'cab',
'application/vnd.ms-fontobject': 'ms_font',
'application/vnd.tcpdump.pcap': 'pcap',
'application/x-dosexec': 'exe',
'application/x-empty': 'empty',
'application/x-shockwave-flash': 'swf',
'application/xml': 'xml',
'application/zip': 'zip',
'image/gif': 'gif',
'text/html': 'html',
'image/jpeg': 'jpg',
'image/png': 'png',
'image/x-icon': 'icon',
'text/plain': 'txt'
}
# See what filemagic can determine
with magic.Magic(flags=magic.MAGIC_MIME_TYPE) as mag:
mime_type = mag.id_buffer(input_bytes[:1024])
if mime_type in mime_to_type:
type_tag = mime_to_type[mime_type]
# If we get 'data' back look at the filename
if type_tag == 'data':
print('Info: File -- Trying to Determine Type from filename...')
ext = os.path.splitext(filename)[1][1:]
if ext in ['mem','vmem']:
type_tag = 'mem'
else:
print('Alert: Failed to Determine Type for %s' % filename)
exit(1) # Temp
return type_tag
else:
print('Alert: Sample Type could not be Determined')
return 'unknown' | Try to guess the type_tag for this sample |
def get_var(var):
'''
Get the value of a variable in make.conf
Return the value of the variable or None if the variable is not in
make.conf
CLI Example:
.. code-block:: bash
salt '*' makeconf.get_var 'LINGUAS'
'''
makeconf = _get_makeconf()
# Open makeconf
with salt.utils.files.fopen(makeconf) as fn_:
conf_file = salt.utils.data.decode(fn_.readlines())
for line in conf_file:
if line.startswith(var):
ret = line.split('=', 1)[1]
if '"' in ret:
ret = ret.split('"')[1]
elif '#' in ret:
ret = ret.split('#')[0]
ret = ret.strip()
return ret
return None | Get the value of a variable in make.conf
Return the value of the variable or None if the variable is not in
make.conf
CLI Example:
.. code-block:: bash
salt '*' makeconf.get_var 'LINGUAS' |
def error(self):
"""gets the error"""
if self._error is None:
try:
# __init is name-mangled by Python to _<ClassName>__init
init = getattr(self, "_" + self.__class__.__name__ + "__init", None)
if init is not None and callable(init):
init()
except Exception:
pass
return self._error | gets the error |
def percent_encode_non_ascii_headers(self, encoding='UTF-8'):
""" Encode any headers that are not plain ascii
as UTF-8 as per:
https://tools.ietf.org/html/rfc8187#section-3.2.3
https://tools.ietf.org/html/rfc5987#section-3.2.2
"""
def do_encode(m):
return "*={0}''".format(encoding) + quote(to_native_str(m.group(1)))
for index in range(len(self.headers) - 1, -1, -1):
curr_name, curr_value = self.headers[index]
try:
# test if header is ascii encodable, no action needed
curr_value.encode('ascii')
except UnicodeEncodeError:
new_value = self.ENCODE_HEADER_RX.sub(do_encode, curr_value)
if new_value == curr_value:
new_value = quote(curr_value)
self.headers[index] = (curr_name, new_value) | Encode any headers that are not plain ascii
as UTF-8 as per:
https://tools.ietf.org/html/rfc8187#section-3.2.3
https://tools.ietf.org/html/rfc5987#section-3.2.2 |
def getatom(self, atomends=None):
"""Parse an RFC 2822 atom.
Optional atomends specifies a different set of end token delimiters
(the default is to use self.atomends). This is used e.g. in
getphraselist() since phrase endings must not include the `.' (which
is legal in phrases)."""
atomlist = ['']
if atomends is None:
atomends = self.atomends
while self.pos < len(self.field):
if self.field[self.pos] in atomends:
break
else:
atomlist.append(self.field[self.pos])
self.pos += 1
return EMPTYSTRING.join(atomlist) | Parse an RFC 2822 atom.
Optional atomends specifies a different set of end token delimiters
(the default is to use self.atomends). This is used e.g. in
getphraselist() since phrase endings must not include the `.' (which
is legal in phrases). |
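A standalone sketch of the scanning loop (hypothetical field/atomends values; the real method reads them from the parser instance):
field, pos, atomends = 'user@example.com', 0, '@'
atom = []
while pos < len(field) and field[pos] not in atomends:
    atom.append(field[pos])
    pos += 1
print(''.join(atom))  # 'user' -- stops at the '@' delimiter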
def get_nowait_from_queue(queue):
""" Collect all immediately available items from a queue """
data = []
for _ in range(queue.qsize()):
try:
data.append(queue.get_nowait())
except q.Empty:
break
return data | Collect all immediately available items from a queue |
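Usage sketch, assuming the function above is in scope and that q is the standard queue module:
import queue as q  # matches the q.Empty alias used above
items = q.Queue()
for i in range(3):
    items.put(i)
print(get_nowait_from_queue(items))  # [0, 1, 2]
print(get_nowait_from_queue(items))  # [] -- queue already drained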
def encipher(self,message):
"""Encipher string using M209 cipher according to initialised key. Punctuation and whitespace
are removed from the input.
Example (continuing from the example above)::
ciphertext = m.encipher(plaintext)
:param string: The string to encipher.
:returns: The enciphered string.
"""
message = self.remove_punctuation(message)
effective_ch = [0,0,0,0,0,0,0] # these are the wheels which are effective currently, 1 for yes, 0 no
# -the zero at the beginning is extra, indicates lug was in pos 0
ret = ''
# from now we no longer need the wheel starts, we can just increment the actual key
for j in range(len(message)):
shift = 0
effective_ch[0] = 0
effective_ch[1] = self.wheel_1_settings[self.actual_key[0]]
effective_ch[2] = self.wheel_2_settings[self.actual_key[1]]
effective_ch[3] = self.wheel_3_settings[self.actual_key[2]]
effective_ch[4] = self.wheel_4_settings[self.actual_key[3]]
effective_ch[5] = self.wheel_5_settings[self.actual_key[4]]
effective_ch[6] = self.wheel_6_settings[self.actual_key[5]]
for i in range(0, 27): # implements the cylindrical drum with lugs on it
if effective_ch[self.lug_positions[i][0]] or effective_ch[self.lug_positions[i][1]]: shift += 1
# shift has been found, now actually encrypt letter
ret += self.subst(message[j], key='ZYXWVUTSRQPONMLKJIHGFEDCBA', offset=-shift) # encrypt letter
self.advance_key() # advance the key wheels
return ret | Encipher string using M209 cipher according to initialised key. Punctuation and whitespace
are removed from the input.
Example (continuing from the example above)::
ciphertext = m.encipher(plaintext)
:param string: The string to encipher.
:returns: The enciphered string. |
def circuit_to_latex_using_qcircuit(
circuit: circuits.Circuit,
qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT) -> str:
"""Returns a QCircuit-based latex diagram of the given circuit.
Args:
circuit: The circuit to represent in latex.
qubit_order: Determines the order of qubit wires in the diagram.
Returns:
Latex code for the diagram.
"""
diagram = circuit.to_text_diagram_drawer(
qubit_namer=qcircuit_qubit_namer,
qubit_order=qubit_order,
get_circuit_diagram_info=get_qcircuit_diagram_info)
return _render(diagram) | Returns a QCircuit-based latex diagram of the given circuit.
Args:
circuit: The circuit to represent in latex.
qubit_order: Determines the order of qubit wires in the diagram.
Returns:
Latex code for the diagram. |
def translate(self, closed_regex=True):
u"""
Returns a Python regular expression that can be used for matching.
:return:
"""
if closed_regex:
return self.regex
else:
return translate(self.pattern, closed_regex=False, **self.flags) | u"""
Returns a Python regular expression that can be used for matching.
:return: |
def correct_pairs(p, pf, tag):
"""
Take one pair of reads and correct to generate *.corr.fastq.
"""
from jcvi.assembly.preprocess import correct as cr
logging.debug("Work on {0} ({1})".format(pf, ','.join(p)))
itag = tag[0]
cm = ".".join((pf, itag))
targets = (cm + ".1.corr.fastq", cm + ".2.corr.fastq", \
pf + ".PE-0.corr.fastq")
if not need_update(p, targets):
logging.debug("Corrected reads found: {0}. Skipped.".format(targets))
return
slink(p, pf, tag)
cwd = os.getcwd()
os.chdir(pf)
cr(sorted(glob("*.fastq") + glob("*.fastq.gz")) + ["--nofragsdedup"])
sh("mv {0}.1.corr.fastq ../{1}".format(itag, targets[0]))
sh("mv {0}.2.corr.fastq ../{1}".format(itag, targets[1]))
sh("mv frag_reads_corr.corr.fastq ../{0}".format(targets[2]))
logging.debug("Correction finished: {0}".format(targets))
os.chdir(cwd) | Take one pair of reads and correct to generate *.corr.fastq. |
def show_version(a_device):
"""Execute show version command using Netmiko."""
remote_conn = ConnectHandler(**a_device)
print()
print("#" * 80)
print(remote_conn.send_command("show version"))
print("#" * 80)
print() | Execute show version command using Netmiko. |
def sign(s, passphrase, sig_format=SER_COMPACT, curve='secp160r1'):
""" Signs `s' with passphrase `passphrase' """
if isinstance(s, six.text_type):
raise ValueError("Encode `s` to a bytestring yourself to" +
" prevent problems with different default encodings")
curve = Curve.by_name(curve)
privkey = curve.passphrase_to_privkey(passphrase)
return privkey.sign(hashlib.sha512(s).digest(), sig_format) | Signs `s' with passphrase `passphrase' |
def add_template_events(self, columns, vectors):
""" Add a vector indexed """
# initialize with zeros - since vectors can be None, look for the
# longest one that isn't
new_events = None
for v in vectors:
if v is not None:
new_events = numpy.zeros(len(v), dtype=self.event_dtype)
break
# they shouldn't all be None
assert new_events is not None
new_events['template_id'] = self.template_index
for c, v in zip(columns, vectors):
if v is not None:
if isinstance(v, Array):
new_events[c] = v.numpy()
else:
new_events[c] = v
self.template_events = numpy.append(self.template_events, new_events) | Add vectors of values as new events indexed by the current template id |
def flat_map(self, flatmap_fn):
"""Applies a flatmap operator to the stream.
Attributes:
flatmap_fn (function): The user-defined logic of the flatmap
(e.g. split()).
"""
op = Operator(
_generate_uuid(),
OpType.FlatMap,
"FlatMap",
flatmap_fn,
num_instances=self.env.config.parallelism)
return self.__register(op) | Applies a flatmap operator to the stream.
Attributes:
flatmap_fn (function): The user-defined logic of the flatmap
(e.g. split()). |
def _reset_values(self, instance):
"""Reset all associated values and clean up dictionary items"""
self.value = None
self.reference.value = None
instance.__dict__.pop(self.field_name, None)
instance.__dict__.pop(self.reference.field_name, None)
self.reference.delete_cached_value(instance) | Reset all associated values and clean up dictionary items |
def to_networkx(self):
r"""Export the graph to NetworkX.
Edge weights are stored as an edge attribute,
under the name "weight".
Signals are stored as node attributes,
under their name in the :attr:`signals` dictionary.
`N`-dimensional signals are broken into `N` 1-dimensional signals.
They will eventually be joined back together on import.
Returns
-------
graph : :class:`networkx.Graph`
A NetworkX graph object.
See Also
--------
to_graphtool : export to graph-tool
save : save to a file
Examples
--------
>>> import networkx as nx
>>> from matplotlib import pyplot as plt
>>> graph = graphs.Path(4, directed=True)
>>> graph.set_signal(np.full(4, 2.3), 'signal')
>>> graph = graph.to_networkx()
>>> print(nx.info(graph))
Name: Path
Type: DiGraph
Number of nodes: 4
Number of edges: 3
Average in degree: 0.7500
Average out degree: 0.7500
>>> nx.is_directed(graph)
True
>>> graph.nodes()
NodeView((0, 1, 2, 3))
>>> graph.edges()
OutEdgeView([(0, 1), (1, 2), (2, 3)])
>>> graph.nodes()[2]
{'signal': 2.3}
>>> graph.edges()[(0, 1)]
{'weight': 1.0}
>>> # nx.draw(graph, with_labels=True)
Another common goal is to use NetworkX to compute some properties to be
be imported back in the PyGSP as signals.
>>> import networkx as nx
>>> from matplotlib import pyplot as plt
>>> graph = graphs.Sensor(100, seed=42)
>>> graph.set_signal(graph.coords, 'coords')
>>> graph = graph.to_networkx()
>>> betweenness = nx.betweenness_centrality(graph, weight='weight')
>>> nx.set_node_attributes(graph, betweenness, 'betweenness')
>>> graph = graphs.Graph.from_networkx(graph)
>>> graph.compute_fourier_basis()
>>> graph.set_coordinates(graph.signals['coords'])
>>> fig, axes = plt.subplots(1, 2)
>>> _ = graph.plot(graph.signals['betweenness'], ax=axes[0])
>>> _ = axes[1].plot(graph.e, graph.gft(graph.signals['betweenness']))
"""
nx = _import_networkx()
def convert(number):
# NetworkX accepts arbitrary python objects as attributes, but:
# * the GEXF writer does not accept any NumPy types (on signals),
# * the GraphML writer does not accept NumPy ints.
if issubclass(number.dtype.type, (np.integer, np.bool_)):
return int(number)
else:
return float(number)
def edges():
for source, target, weight in zip(*self.get_edge_list()):
yield int(source), int(target), {'weight': convert(weight)}
def nodes():
for vertex in range(self.n_vertices):
signals = {name: convert(signal[vertex])
for name, signal in self.signals.items()}
yield vertex, signals
self._break_signals()
graph = nx.DiGraph() if self.is_directed() else nx.Graph()
graph.add_nodes_from(nodes())
graph.add_edges_from(edges())
graph.name = self.__class__.__name__
return graph | r"""Export the graph to NetworkX.
Edge weights are stored as an edge attribute,
under the name "weight".
Signals are stored as node attributes,
under their name in the :attr:`signals` dictionary.
`N`-dimensional signals are broken into `N` 1-dimensional signals.
They will eventually be joined back together on import.
Returns
-------
graph : :class:`networkx.Graph`
A NetworkX graph object.
See Also
--------
to_graphtool : export to graph-tool
save : save to a file
Examples
--------
>>> import networkx as nx
>>> from matplotlib import pyplot as plt
>>> graph = graphs.Path(4, directed=True)
>>> graph.set_signal(np.full(4, 2.3), 'signal')
>>> graph = graph.to_networkx()
>>> print(nx.info(graph))
Name: Path
Type: DiGraph
Number of nodes: 4
Number of edges: 3
Average in degree: 0.7500
Average out degree: 0.7500
>>> nx.is_directed(graph)
True
>>> graph.nodes()
NodeView((0, 1, 2, 3))
>>> graph.edges()
OutEdgeView([(0, 1), (1, 2), (2, 3)])
>>> graph.nodes()[2]
{'signal': 2.3}
>>> graph.edges()[(0, 1)]
{'weight': 1.0}
>>> # nx.draw(graph, with_labels=True)
Another common goal is to use NetworkX to compute some properties to be
be imported back in the PyGSP as signals.
>>> import networkx as nx
>>> from matplotlib import pyplot as plt
>>> graph = graphs.Sensor(100, seed=42)
>>> graph.set_signal(graph.coords, 'coords')
>>> graph = graph.to_networkx()
>>> betweenness = nx.betweenness_centrality(graph, weight='weight')
>>> nx.set_node_attributes(graph, betweenness, 'betweenness')
>>> graph = graphs.Graph.from_networkx(graph)
>>> graph.compute_fourier_basis()
>>> graph.set_coordinates(graph.signals['coords'])
>>> fig, axes = plt.subplots(1, 2)
>>> _ = graph.plot(graph.signals['betweenness'], ax=axes[0])
>>> _ = axes[1].plot(graph.e, graph.gft(graph.signals['betweenness'])) |
def write(self, *messages):
"""
Push a message list to this context's input queue.
:param mixed value: message
"""
for message in messages:
if not isinstance(message, Token):
message = ensure_tuple(message, cls=self._input_type, length=self._input_length)
if self._input_length is None:
self._input_length = len(message)
self.input.put(message) | Push a message list to this context's input queue.
:param mixed value: message |
def get_chunk_meta(self, meta_file):
"""Get chunk meta table"""
chunks = self.envs["CHUNKS"]
if cij.nvme.get_meta(0, chunks * self.envs["CHUNK_META_SIZEOF"], meta_file):
raise RuntimeError("cij.liblight.get_chunk_meta: fail")
chunk_meta = cij.bin.Buffer(types=self.envs["CHUNK_META_STRUCT"], length=chunks)
chunk_meta.read(meta_file)
return chunk_meta | Get chunk meta table |
def deposit_links_factory(pid):
"""Factory for record links generation.
The dictionary is formed as:
.. code-block:: python
{
'files': '/url/to/files',
'publish': '/url/to/publish',
'edit': '/url/to/edit',
'discard': '/url/to/discard',
...
}
:param pid: The record PID object.
:returns: A dictionary that contains all the links.
"""
links = default_links_factory(pid)
def _url(name, **kwargs):
"""URL builder."""
endpoint = '.{0}_{1}'.format(
current_records_rest.default_endpoint_prefixes[pid.pid_type],
name,
)
return url_for(endpoint, pid_value=pid.pid_value, _external=True,
**kwargs)
links['files'] = _url('files')
ui_endpoint = current_app.config.get('DEPOSIT_UI_ENDPOINT')
if ui_endpoint is not None:
links['html'] = ui_endpoint.format(
host=request.host,
scheme=request.scheme,
pid_value=pid.pid_value,
)
deposit_cls = Deposit
if 'pid_value' in request.view_args:
deposit_cls = request.view_args['pid_value'].data[1].__class__
for action in extract_actions_from_class(deposit_cls):
links[action] = _url('actions', action=action)
return links | Factory for record links generation.
The dictionary is formed as:
.. code-block:: python
{
'files': '/url/to/files',
'publish': '/url/to/publish',
'edit': '/url/to/edit',
'discard': '/url/to/discard',
...
}
:param pid: The record PID object.
:returns: A dictionary that contains all the links. |
def instance(self, skip_exist_test=False):
"""
Returns the instance of the related object linked by the field.
"""
model = self.database._models[self.related_to]
meth = model.lazy_connect if skip_exist_test else model
return meth(self.proxy_get()) | Returns the instance of the related object linked by the field. |
def genesis_signing_lockset(genesis, privkey):
"""
in order to avoid a complicated bootstrapping, we define
the genesis_signing_lockset as a lockset with one vote by any validator.
"""
v = VoteBlock(0, 0, genesis.hash)
v.sign(privkey)
ls = LockSet(num_eligible_votes=1)
ls.add(v)
assert ls.has_quorum
return ls | in order to avoid a complicated bootstrapping, we define
the genesis_signing_lockset as a lockset with one vote by any validator. |
def reporter(self):
"""
Creates a report of the results
"""
# Create a set of all the gene names without alleles or accessions e.g. sul1_18_AY260546 becomes sul1
genedict = dict()
# Load the notes file to a dictionary
notefile = os.path.join(self.targetpath, 'notes.txt')
with open(notefile, 'r') as notes:
for line in notes:
# Ignore comment lines - they will break the parsing
if line.startswith('#'):
continue
# Split the line on colons e.g. stx1Aa: Shiga toxin 1, subunit A, variant a: has three variables after
# the split: gene(stx1Aa), description(Shiga toxin 1, subunit A, variant a), and _(\n)
try:
gene, description, _ = line.split(':')
# There are exceptions to the parsing. Some lines only have one :, while others have three. Allow for
# these possibilities.
except ValueError:
try:
gene, description = line.split(':')
except ValueError:
gene, description, _, _ = line.split(':')
# Set up the description dictionary
genedict[gene] = description.replace(', ', '_').strip()
# Find unique gene names with the highest percent identity
for sample in self.runmetadata.samples:
try:
if sample[self.analysistype].results:
# Initialise a dictionary to store the unique genes, and their percent identities
sample[self.analysistype].uniquegenes = dict()
for name, identity in sample[self.analysistype].results.items():
# Split the name of the gene from the string e.g. stx1:11:Z36899:11 yields stx1
if ':' in name:
sample[self.analysistype].delimiter = ':'
else:
sample[self.analysistype].delimiter = '_'
genename = name.split(sample[self.analysistype].delimiter)[0]
# Set the best observed percent identity for each unique gene
try:
# Pull the previous best identity from the dictionary
bestidentity = sample[self.analysistype].uniquegenes[genename]
# If the current identity is better than the old identity, save it
if float(identity) > float(bestidentity):
sample[self.analysistype].uniquegenes[genename] = float(identity)
# Initialise the dictionary if necessary
except KeyError:
sample[self.analysistype].uniquegenes[genename] = float(identity)
except AttributeError:
raise
# Create the path in which the reports are stored
make_path(self.reportpath)
# Initialise strings to store the results
data = 'Strain,Gene,Subtype/Allele,Description,Accession,PercentIdentity,FoldCoverage\n'
with open(os.path.join(self.reportpath, self.analysistype + '.csv'), 'w') as report:
for sample in self.runmetadata.samples:
try:
if sample[self.analysistype].results:
# If there are many results for a sample, don't write the sample name in each line of the report
for name, identity in sorted(sample[self.analysistype].results.items()):
# Check to see which delimiter is used to separate the gene name, allele, accession, and
# subtype information in the header
if len(name.split(sample[self.analysistype].delimiter)) == 4:
# Split the name on the delimiter: stx2A:63:AF500190:d; gene: stx2A, allele: 63,
# accession: AF500190, subtype: d
genename, allele, accession, subtype = name.split(sample[self.analysistype].delimiter)
elif len(name.split(sample[self.analysistype].delimiter)) == 3:
# Treat samples without a subtype e.g. icaC:intercellular adhesion protein C: differently.
# Extract the allele as the 'subtype', and the gene name, and accession as above
genename, subtype, accession = name.split(sample[self.analysistype].delimiter)
else:
genename = name
subtype = ''
accession = ''
# Retrieve the best identity for each gene
percentid = sample[self.analysistype].uniquegenes[genename]
# If the percent identity of the current gene matches the best percent identity, add it to
# the report - there can be multiple occurrences of genes e.g.
# sul1,1,AY224185,100.00,840 and sul1,2,CP002151,100.00,927 are both included because they
# have the same 100% percent identity
if float(identity) == percentid:
# Treat the initial vs subsequent results for each sample slightly differently - instead
# of including the sample name, use an empty cell instead
try:
description = genedict[genename]
except KeyError:
description = 'na'
# Populate the results
data += '{samplename},{gene},{subtype},{description},{accession},{identity},{depth}\n'\
.format(samplename=sample.name,
gene=genename,
subtype=subtype,
description=description,
accession=accession,
identity=identity,
depth=sample[self.analysistype].avgdepth[name])
else:
data += sample.name + '\n'
except (KeyError, AttributeError):
data += sample.name + '\n'
# Write the strings to the file
report.write(data) | Creates a report of the results |
def get_page(self, form):
'''Get the requested page'''
page_size = form.cleaned_data['iDisplayLength']
start_index = form.cleaned_data['iDisplayStart']
paginator = Paginator(self.object_list, page_size)
num_page = (start_index // page_size) + 1
return paginator.page(num_page) | Get the requested page |
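The page-number arithmetic relies on integer division (hence the // above); a standalone check with hypothetical values:
page_size, start_index = 25, 50
print((start_index // page_size) + 1)  # 3 -- rows 50..74 are page three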
def summary(args):
"""
%prog summary gffile
Print summary stats for features of different types.
"""
from jcvi.formats.base import SetFile
from jcvi.formats.bed import BedSummary
from jcvi.utils.table import tabulate
p = OptionParser(summary.__doc__)
p.add_option("--isoform", default=False, action="store_true",
help="Find longest isoform of each id")
p.add_option("--ids", help="Only include features from certain IDs")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
gff_file, = args
ids = opts.ids
if ids:
ids = SetFile(ids)
logging.debug("Total ids loaded: {0}".format(len(ids)))
if opts.isoform:
pids = set()
gff = Gff(gff_file)
for g in gff:
if g.type != "mRNA":
continue
if g.parent not in ids:
continue
if "longest" not in g.attributes:
pids = set(x + ".1" for x in ids)
break
if g.attributes["longest"][0] == "0":
continue
pids.add(g.id)
ids = pids
logging.debug("After checking longest: {0}".format(len(ids)))
# Collects aliases
gff = Gff(gff_file)
for g in gff:
if g.name in ids:
ids.add(g.id)
logging.debug("Total ids including aliases: {0}".format(len(ids)))
gff = Gff(gff_file)
beds = defaultdict(list)
for g in gff:
if ids and not (g.id in ids or g.name in ids or g.parent in ids):
continue
beds[g.type].append(g.bedline)
table = {}
for type, bb in sorted(beds.items()):
bs = BedSummary(bb)
table[(type, "Features")] = bs.nfeats
table[(type, "Unique bases")] = bs.unique_bases
table[(type, "Total bases")] = bs.total_bases
print(tabulate(table), file=sys.stdout) | %prog summary gffile
Print summary stats for features of different types. |
def build_views(self):
"""
Bake out specified buildable views.
"""
# Then loop through and run them all
for view_str in self.view_list:
logger.debug("Building %s" % view_str)
if self.verbosity > 1:
self.stdout.write("Building %s" % view_str)
view = get_callable(view_str)
self.get_view_instance(view).build_method() | Bake out specified buildable views. |
def uri_from_fields(prefix, *fields):
"""Construct a URI out of the fields, concatenating them after removing offensive characters.
When all the fields are empty, return empty"""
string = '_'.join(AlignmentHelper.alpha_numeric(f.strip().lower(), '') for f in fields)
if len(string) == len(fields) - 1:
return ''
return prefix + string | Construct a URI out of the fields, concatenating them after removing offensive characters.
When all the fields are empty, return empty |
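A standalone sketch of the all-empty detection (AlignmentHelper.alpha_numeric is assumed here to reduce each blank field to ''):
fields = ('', '  ', '')
string = '_'.join(f.strip().lower() for f in fields)
print(string)                          # '__' -- only the join separators remain
print(len(string) == len(fields) - 1)  # True, so the function returns ''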
def resize(self, nrows, front=False):
"""
Resize the table to the given size, removing or adding rows as
necessary. Note if expanding the table at the end, it is more
efficient to use the append function than resizing and then
writing.
New added rows are zerod, except for 'i1', 'u2' and 'u4' data types
which get -128,32768,2147483648 respectively
parameters
----------
nrows: int
new size of table
front: bool, optional
If True, add or remove rows from the front. Default
is False
"""
nrows_current = self.get_nrows()
if nrows == nrows_current:
return
if nrows < nrows_current:
rowdiff = nrows_current - nrows
if front:
# delete from the front
start = 0
stop = rowdiff
else:
# delete from the back
start = nrows
stop = nrows_current
self.delete_rows(slice(start, stop))
else:
rowdiff = nrows - nrows_current
if front:
# in this case zero is what we want, since the code inserts
firstrow = 0
else:
firstrow = nrows_current
self._FITS.insert_rows(self._ext+1, firstrow, rowdiff)
self._update_info() | Resize the table to the given size, removing or adding rows as
necessary. Note if expanding the table at the end, it is more
efficient to use the append function than resizing and then
writing.
New added rows are zerod, except for 'i1', 'u2' and 'u4' data types
which get -128,32768,2147483648 respectively
parameters
----------
nrows: int
new size of table
front: bool, optional
If True, add or remove rows from the front. Default
is False |
def close_stream(self):
""" Closes the stream. Performs cleanup. """
self.keep_listening = False
self.stream.stop_stream()
self.stream.close()
self.pa.terminate() | Closes the stream. Performs cleanup. |
def unban_chat_member(self, *args, **kwargs):
"""See :func:`unban_chat_member`"""
return unban_chat_member(*args, **self._merge_overrides(**kwargs)).run() | See :func:`unban_chat_member` |
def _MergeIdentical(self, a, b):
"""Tries to merge two values. The values are required to be identical.
Args:
a: The first value.
b: The second value.
Returns:
The trivially merged value.
Raises:
MergeError: The values were not identical.
"""
if a != b:
raise MergeError("values must be identical ('%s' vs '%s')" %
(transitfeed.EncodeUnicode(a),
transitfeed.EncodeUnicode(b)))
return b | Tries to merge two values. The values are required to be identical.
Args:
a: The first value.
b: The second value.
Returns:
The trivially merged value.
Raises:
MergeError: The values were not identical. |
def encode_abi(self, types: Iterable[TypeStr], args: Iterable[Any]) -> bytes:
"""
Encodes the python values in ``args`` as a sequence of binary values of
the ABI types in ``types`` via the head-tail mechanism.
:param types: An iterable of string representations of the ABI types
that will be used for encoding e.g. ``('uint256', 'bytes[]',
'(int,int)')``
:param args: An iterable of python values to be encoded.
:returns: The head-tail encoded binary representation of the python
values in ``args`` as values of the ABI types in ``types``.
"""
encoders = [
self._registry.get_encoder(type_str)
for type_str in types
]
encoder = TupleEncoder(encoders=encoders)
return encoder(args) | Encodes the python values in ``args`` as a sequence of binary values of
the ABI types in ``types`` via the head-tail mechanism.
:param types: An iterable of string representations of the ABI types
that will be used for encoding e.g. ``('uint256', 'bytes[]',
'(int,int)')``
:param args: An iterable of python values to be encoded.
:returns: The head-tail encoded binary representation of the python
values in ``args`` as values of the ABI types in ``types``. |
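Hedged usage sketch: older eth-abi releases exposed a module-level helper with this same head-tail behaviour (the import name is an assumption and was renamed in later versions):
from eth_abi import encode_abi  # assumption: pre-4.0 eth-abi API
packed = encode_abi(['uint256', 'bool'], [42, True])
print(packed.hex())  # two 32-byte words, head-tail encoded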
def convert(fname, saveAs=True, showToo=False):
"""
Convert weird TIF files into web-friendly versions.
Auto contrast is applied (saturating the lower and upper 0.01%).
make saveAs True to save as .TIF.png
make saveAs False and it won't save at all
make saveAs "someFile.jpg" to save it as a different path/format
"""
# load the image
#im = Image.open(fname) #PIL can't handle 12-bit TIFs well
im=ndimage.imread(fname) #scipy does better with it
im=np.array(im,dtype=float) # now it's a numpy array
# do all image enhancement here
cutoffLow=np.percentile(im,.01)
cutoffHigh=np.percentile(im,99.99)
im[np.where(im<cutoffLow)]=cutoffLow
im[np.where(im>cutoffHigh)]=cutoffHigh
# IMAGE FORMATTING
im-=np.min(im) #auto contrast
im/=np.max(im) #normalize
im*=255 #stretch contrast (8-bit)
im = Image.fromarray(im)
# IMAGE DRAWING
msg="%s\n"%os.path.basename(fname)
msg+="%s\n"%cm.epochToString(os.path.getmtime(fname))
d = ImageDraw.Draw(im)
fnt = ImageFont.truetype("arial.ttf", 20)
d.text((6,6),msg,font=fnt,fill=0)
d.text((4,4),msg,font=fnt,fill=255)
if showToo:
im.show()
if saveAs is False:
return
if saveAs is True:
saveAs=fname+".png"
im.convert('RGB').save(saveAs)
return saveAs | Convert weird TIF files into web-friendly versions.
Auto contrast is applied (saturating the lower and upper 0.01%).
make saveAs True to save as .TIF.png
make saveAs False and it won't save at all
make saveAs "someFile.jpg" to save it as a different path/format |
def create_connection(cls, address, timeout=None, source_address=None):
"""Create a SlipSocket connection.
This convenience method creates a connection to the specified address
using the :func:`socket.create_connection` function.
The socket that is returned from that call is automatically wrapped in
a :class:`SlipSocket` object.
.. note::
The :meth:`create_connection` method does not magically turn the
socket at the remote address into a SlipSocket.
For the connection to work properly,
the remote socket must already
have been configured to use the SLIP protocol.
"""
sock = socket.create_connection(address, timeout, source_address)
return cls(sock) | Create a SlipSocket connection.
This convenience method creates a connection to the specified address
using the :func:`socket.create_connection` function.
The socket that is returned from that call is automatically wrapped in
a :class:`SlipSocket` object.
.. note::
The :meth:`create_connection` method does not magically turn the
socket at the remote address into a SlipSocket.
For the connection to work properly,
the remote socket must already
have been configured to use the SLIP protocol. |
def cmd(name, options=''):
"""
Decorator for all commands.
Commands will receive (pymux, variables) as input.
Commands can raise CommandException.
"""
# Validate options.
if options:
try:
docopt.docopt('Usage:\n %s %s' % (name, options, ), [])
except SystemExit:
pass
def decorator(func):
def command_wrapper(pymux, arguments):
# Hack to make the 'bind-key' option work.
# (bind-key expects a variable number of arguments.)
if name == 'bind-key' and '--' not in arguments:
# Insert a double dash after the first non-option.
for i, p in enumerate(arguments):
if not p.startswith('-'):
arguments.insert(i + 1, '--')
break
# Parse options.
try:
# Python 2 workaround: pass bytes to docopt.
# From the following, only the bytes version returns the right
# output in Python 2:
# docopt.docopt('Usage:\n app <params>...', [b'a', b'b'])
# docopt.docopt('Usage:\n app <params>...', [u'a', u'b'])
# https://github.com/docopt/docopt/issues/30
# (Not sure how reliable this is...)
if six.PY2:
arguments = [a.encode('utf-8') for a in arguments]
received_options = docopt.docopt(
'Usage:\n %s %s' % (name, options),
arguments,
help=False) # Don't interpret the '-h' option as help.
# Make sure that all the received options from docopt are
# unicode objects. (Docopt returns 'str' for Python2.)
for k, v in received_options.items():
if isinstance(v, six.binary_type):
received_options[k] = v.decode('utf-8')
except SystemExit:
raise CommandException('Usage: %s %s' % (name, options))
# Call handler.
func(pymux, received_options)
# Invalidate all clients, not just the current CLI.
pymux.invalidate()
COMMANDS_TO_HANDLERS[name] = command_wrapper
COMMANDS_TO_HELP[name] = options
# Get list of option flags.
flags = re.findall(r'-[a-zA-Z0-9]\b', options)
COMMANDS_TO_OPTION_FLAGS[name] = flags
return func
return decorator | Decorator for all commands.
Commands will receive (pymux, variables) as input.
Commands can raise CommandException. |
def manangeRecurringPaymentsProfileStatus(self, params, fail_silently=False):
"""
Requires `profileid` and `action` params.
Action must be either "Cancel", "Suspend", or "Reactivate".
"""
defaults = {"method": "ManageRecurringPaymentsProfileStatus"}
required = ["profileid", "action"]
nvp_obj = self._fetch(params, required, defaults)
# TODO: This fail-silently check should use the error code, but it's not easy to access
flag_info_test_string = 'Invalid profile status for cancel action; profile should be active or suspended'
if nvp_obj.flag and not (fail_silently and nvp_obj.flag_info == flag_info_test_string):
raise PayPalFailure(nvp_obj.flag_info, nvp=nvp_obj)
return nvp_obj | Requires `profileid` and `action` params.
Action must be either "Cancel", "Suspend", or "Reactivate". |
def translate(self):
"""
Will look at the `Content-type` sent by the client, and maybe
deserialize the contents into the format they sent. This will
work for JSON, YAML, XML and Pickle. Since the data is not just
key-value (and maybe just a list), the data will be placed on
`request.data` instead, and the handler will have to read from
there.
It will also set `request.content_type` so the handler has an easy
way to tell what's going on. `request.content_type` will always be
None for form-encoded and/or multipart form data (what your browser sends.)
"""
ctype = self.content_type()
self.request.content_type = ctype
if not self.is_multipart() and ctype:
loadee = self.loader_for_type(ctype)
if loadee:
try:
self.request.data = loadee(self.request.raw_post_data)
# Reset both POST and PUT from request, as its
# misleading having their presence around.
self.request.POST = self.request.PUT = dict()
except (TypeError, ValueError):
# This also catches if loadee is None.
raise MimerDataException
else:
self.request.data = None
return self.request | Will look at the `Content-type` sent by the client, and maybe
deserialize the contents into the format they sent. This will
work for JSON, YAML, XML and Pickle. Since the data is not just
key-value (and maybe just a list), the data will be placed on
`request.data` instead, and the handler will have to read from
there.
It will also set `request.content_type` so the handler has an easy
way to tell what's going on. `request.content_type` will always be
None for form-encoded and/or multipart form data (what your browser sends.) |
def show(self, id, detailed=None):
"""
This API endpoint returns a single plugin, identified by its ID.
:type id: int
:param id: Plugin ID
:type detailed: bool
:param detailed:
:rtype: dict
:return: The JSON response of the API
::
{
"plugin": {
"id": "integer",
"name": "string",
"guid": "string",
"publisher": "string",
"details": {
"description": "integer",
"is_public": "string",
"created_at": "time",
"updated_at": "time",
"last_published_at": "time",
"has_unpublished_changes": "boolean",
"branding_image_url": "string",
"upgraded_at": "time",
"short_name": "string",
"publisher_about_url": "string",
"publisher_support_url": "string",
"download_url": "string",
"first_edited_at": "time",
"last_edited_at": "time",
"first_published_at": "time",
"published_version": "string"
},
"summary_metrics": [
{
"id": "integer",
"name": "string",
"metric": "string",
"value_function": "string",
"thresholds": {
"caution": "float",
"critical": "float"
},
"values": {
"raw": "float",
"formatted": "string"
}
}
]
}
}
"""
filters = [
'detailed={0}'.format(detailed) if detailed is not None else None,
]
return self._get(
url='{root}plugins/{id}.json'.format(
root=self.URL,
id=id
),
headers=self.headers,
params=self.build_param_string(filters) or None
) | This API endpoint returns a single plugin, identified by its ID.
:type id: int
:param id: Plugin ID
:type detailed: bool
:param detailed:
:rtype: dict
:return: The JSON response of the API
::
{
"plugin": {
"id": "integer",
"name": "string",
"guid": "string",
"publisher": "string",
"details": {
"description": "integer",
"is_public": "string",
"created_at": "time",
"updated_at": "time",
"last_published_at": "time",
"has_unpublished_changes": "boolean",
"branding_image_url": "string",
"upgraded_at": "time",
"short_name": "string",
"publisher_about_url": "string",
"publisher_support_url": "string",
"download_url": "string",
"first_edited_at": "time",
"last_edited_at": "time",
"first_published_at": "time",
"published_version": "string"
},
"summary_metrics": [
{
"id": "integer",
"name": "string",
"metric": "string",
"value_function": "string",
"thresholds": {
"caution": "float",
"critical": "float"
},
"values": {
"raw": "float",
"formatted": "string"
}
}
]
}
} |
def _start_new_episode(self):
"""
Bookkeeping to do at the start of each new episode.
"""
# flush any data left over from the previous episode if any interactions have happened
if self.has_interaction:
self._flush()
# timesteps in current episode
self.t = 0
self.has_interaction = False | Bookkeeping to do at the start of each new episode. |
async def main():
"""
Main code
"""
# Create Client from endpoint string in Duniter format
client = Client(BMAS_ENDPOINT)
# Get the node summary infos to test the connection
response = await client(bma.node.summary)
print(response)
# prompt hidden user entry
salt = getpass.getpass("Enter your passphrase (salt): ")
# prompt hidden user entry
password = getpass.getpass("Enter your password: ")
# create key from credentials
key = SigningKey.from_credentials(salt, password)
pubkey_from = key.pubkey
# prompt entry
pubkey_to = input("Enter certified pubkey: ")
# capture current block to get version and currency and blockstamp
current_block = await client(bma.blockchain.current)
# create our Identity document to sign the Certification document
identity = await get_identity_document(client, current_block, pubkey_to)
# send the Certification document to the node
certification = get_certification_document(current_block, identity, pubkey_from)
# sign document
certification.sign([key])
# Here we request for the path wot/certify
response = await client(bma.wot.certify, certification.signed_raw())
if response.status == 200:
print(await response.text())
else:
print("Error while publishing certification: {0}".format(await response.text()))
# Close client aiohttp session
await client.close() | Main code |
def _check_image(self, X):
"""
Checks the image size and its compatibility with classifier's receptive field.
At this moment it is required that image size = K * receptive_field. This will
be relaxed in future with the introduction of padding.
"""
if (len(X.shape) < 3) or (len(X.shape) > 4):
raise ValueError('Input has to have shape [n_samples, n_pixels_y, n_pixels_x] '
'or [n_samples, n_pixels_y, n_pixels_x, n_bands].')
self._samples = X.shape[0]
self._image_size = X.shape[1:3]
if (self._image_size[0] % self.receptive_field[0]) or (self._image_size[1] % self.receptive_field[1]):
raise ValueError('Image (%d,%d) and receptive fields (%d,%d) mismatch.\n'
'Resize your image to be divisible with receptive field.'
% (self._image_size[0], self._image_size[1], self.receptive_field[0],
self.receptive_field[1])) | Checks the image size and its compatibility with classifier's receptive field.
At this moment it is required that image size = K * receptive_field. This will
be relaxed in future with the introduction of padding. |
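A standalone sketch of the divisibility requirement the method enforces (hypothetical sizes):
image_size, receptive_field = (256, 512), (64, 64)
ok = (image_size[0] % receptive_field[0] == 0
      and image_size[1] % receptive_field[1] == 0)
print(ok)  # True -- both dimensions are multiples of 64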
def seeds(args):
"""
%prog seeds [pngfile|jpgfile]
Extract seed metrics from [pngfile|jpgfile]. Use --rows and --cols to crop image.
"""
p = OptionParser(seeds.__doc__)
p.set_outfile()
opts, args, iopts = add_seeds_options(p, args)
if len(args) != 1:
sys.exit(not p.print_help())
pngfile, = args
pf = opts.prefix or op.basename(pngfile).rsplit(".", 1)[0]
sigma, kernel = opts.sigma, opts.kernel
rows, cols = opts.rows, opts.cols
labelrows, labelcols = opts.labelrows, opts.labelcols
ff = opts.filter
calib = opts.calibrate
outdir = opts.outdir
if outdir != '.':
mkdir(outdir)
if calib:
calib = json.load(must_open(calib))
pixel_cm_ratio, tr = calib["PixelCMratio"], calib["RGBtransform"]
tr = np.array(tr)
resizefile, mainfile, labelfile, exif = \
convert_image(pngfile, pf, outdir=outdir,
rotate=opts.rotate,
rows=rows, cols=cols,
labelrows=labelrows, labelcols=labelcols)
oimg = load_image(resizefile)
img = load_image(mainfile)
fig, (ax1, ax2, ax3, ax4) = plt.subplots(ncols=4, nrows=1,
figsize=(iopts.w, iopts.h))
# Edge detection
img_gray = rgb2gray(img)
logging.debug("Running {0} edge detection ...".format(ff))
if ff == "canny":
edges = canny(img_gray, sigma=opts.sigma)
elif ff == "roberts":
edges = roberts(img_gray)
elif ff == "sobel":
edges = sobel(img_gray)
edges = clear_border(edges, buffer_size=opts.border)
selem = disk(kernel)
closed = closing(edges, selem) if kernel else edges
filled = binary_fill_holes(closed)
# Watershed algorithm
if opts.watershed:
distance = distance_transform_edt(filled)
local_maxi = peak_local_max(distance, threshold_rel=.05, indices=False)
coordinates = peak_local_max(distance, threshold_rel=.05)
markers, nmarkers = label(local_maxi, return_num=True)
logging.debug("Identified {0} watershed markers".format(nmarkers))
labels = watershed(closed, markers, mask=filled)
else:
labels = label(filled)
# Object size filtering
w, h = img_gray.shape
canvas_size = w * h
min_size = int(round(canvas_size * opts.minsize / 100))
max_size = int(round(canvas_size * opts.maxsize / 100))
logging.debug("Find objects with pixels between {0} ({1}%) and {2} ({3}%)"\
.format(min_size, opts.minsize, max_size, opts.maxsize))
# Plotting
ax1.set_title('Original picture')
ax1.imshow(oimg)
params = "{0}, $\sigma$={1}, $k$={2}".format(ff, sigma, kernel)
if opts.watershed:
params += ", watershed"
ax2.set_title('Edge detection\n({0})'.format(params))
closed = gray2rgb(closed)
ax2_img = labels
if opts.edges:
ax2_img = closed
elif opts.watershed:
ax2.plot(coordinates[:, 1], coordinates[:, 0], 'g.')
ax2.imshow(ax2_img, cmap=iopts.cmap)
ax3.set_title('Object detection')
ax3.imshow(img)
filename = op.basename(pngfile)
if labelfile:
accession = extract_label(labelfile)
else:
accession = pf
# Calculate region properties
rp = regionprops(labels)
rp = [x for x in rp if min_size <= x.area <= max_size]
nb_labels = len(rp)
logging.debug("A total of {0} objects identified.".format(nb_labels))
objects = []
for i, props in enumerate(rp):
i += 1
if i > opts.count:
break
y0, x0 = props.centroid
orientation = props.orientation
major, minor = props.major_axis_length, props.minor_axis_length
major_dx = cos(orientation) * major / 2
major_dy = sin(orientation) * major / 2
minor_dx = sin(orientation) * minor / 2
minor_dy = cos(orientation) * minor / 2
ax2.plot((x0 - major_dx, x0 + major_dx),
(y0 + major_dy, y0 - major_dy), 'r-')
ax2.plot((x0 - minor_dx, x0 + minor_dx),
(y0 - minor_dy, y0 + minor_dy), 'r-')
npixels = int(props.area)
# Sample the center of the blob for color
d = min(int(round(minor / 2 * .35)) + 1, 50)
x0d, y0d = int(round(x0)), int(round(y0))
square = img[(y0d - d):(y0d + d), (x0d - d):(x0d + d)]
pixels = []
for row in square:
pixels.extend(row)
logging.debug("Seed #{0}: {1} pixels ({2} sampled) - {3:.2f}%".\
format(i, npixels, len(pixels), 100. * npixels / canvas_size))
rgb = pixel_stats(pixels)
objects.append(Seed(filename, accession, i, rgb, props, exif))
minr, minc, maxr, maxc = props.bbox
rect = Rectangle((minc, minr), maxc - minc, maxr - minr,
fill=False, ec='w', lw=1)
ax3.add_patch(rect)
mc, mr = (minc + maxc) / 2, (minr + maxr) / 2
ax3.text(mc, mr, "{0}".format(i), color='w',
ha="center", va="center", size=6)
for ax in (ax2, ax3):
ax.set_xlim(0, h)
ax.set_ylim(w, 0)
# Output identified seed stats
ax4.text(.1, .92, "File: {0}".format(latex(filename)), color='g')
ax4.text(.1, .86, "Label: {0}".format(latex(accession)), color='m')
yy = .8
fw = must_open(opts.outfile, "w")
if not opts.noheader:
print(Seed.header(calibrate=calib), file=fw)
for o in objects:
if calib:
o.calibrate(pixel_cm_ratio, tr)
print(o, file=fw)
i = o.seedno
if i > 7:
continue
ax4.text(.01, yy, str(i), va="center", bbox=dict(fc='none', ec='k'))
ax4.text(.1, yy, o.pixeltag, va="center")
yy -= .04
ax4.add_patch(Rectangle((.1, yy - .025), .12, .05, lw=0,
fc=rgb_to_hex(o.rgb)))
ax4.text(.27, yy, o.hashtag, va="center")
yy -= .06
ax4.text(.1 , yy, "(A total of {0} objects displayed)".format(nb_labels),
color="darkslategrey")
normalize_axes(ax4)
for ax in (ax1, ax2, ax3):
xticklabels = [int(x) for x in ax.get_xticks()]
yticklabels = [int(x) for x in ax.get_yticks()]
ax.set_xticklabels(xticklabels, family='Helvetica', size=8)
ax.set_yticklabels(yticklabels, family='Helvetica', size=8)
image_name = op.join(outdir, pf + "." + iopts.format)
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
return objects | %prog seeds [pngfile|jpgfile]
Extract seed metrics from [pngfile|jpgfile]. Use --rows and --cols to crop image. |
def source(self, format='xml', accessible=False):
"""
Args:
format (str): only 'xml' and 'json' source types are supported
accessible (bool): when set to true, format is always 'json'
"""
if accessible:
return self.http.get('/wda/accessibleSource').value
return self.http.get('source?format='+format).value | Args:
format (str): only 'xml' and 'json' source types are supported
accessible (bool): when set to true, format is always 'json' |
def append(self, *args):
"""
add arguments to the set
"""
self.args.append(args)
if self.started:
self.started = False
return self.length() | add arguments to the set |
def pad_z(pts, value=0.0):
"""Adds a Z component from `pts` if it is missing.
The value defaults to `value` (0.0)"""
pts = np.asarray(pts)
if pts.shape[-1] < 3:
if len(pts.shape) < 2:
return np.asarray((pts[0], pts[1], value), dtype=pts.dtype)
pad_col = np.full(len(pts), value, dtype=pts.dtype)
pts = np.asarray((pts.T[0], pts.T[1], pad_col)).T
return pts | Adds a Z component to `pts` if it is missing.
The value defaults to `value` (0.0) |
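Usage sketch, assuming pad_z above is in scope:
import numpy as np
print(pad_z([1.0, 2.0]))                     # [1. 2. 0.]
print(pad_z([[1.0, 2.0], [3.0, 4.0]], 5.0))  # [[1. 2. 5.] [3. 4. 5.]]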
def distance(self, other):
"""
Distance between the center of this region and another.
Parameters
----------
other : one region, or array-like
Either another region, or the center of another region.
"""
from numpy.linalg import norm
if isinstance(other, one):
other = other.center
return norm(self.center - asarray(other), ord=2) | Distance between the center of this region and another.
Parameters
----------
other : one region, or array-like
Either another region, or the center of another region. |
def footprints_from_place(place, footprint_type='building', retain_invalid=False):
"""
Get footprints within the boundaries of some place.
The query must be geocodable and OSM must have polygon boundaries for the
geocode result. If OSM does not have a polygon for this place, you can
instead get its footprints using the footprints_from_address function, which
geocodes the place name to a point and gets the footprints within some distance
of that point.
Parameters
----------
place : string
the query to geocode to get geojson boundary polygon
footprint_type : string
type of footprint to be downloaded. OSM tag key e.g. 'building', 'landuse', 'place', etc.
retain_invalid : bool
if False discard any footprints with an invalid geometry
Returns
-------
GeoDataFrame
"""
city = gdf_from_place(place)
polygon = city['geometry'].iloc[0]
return create_footprints_gdf(polygon, retain_invalid=retain_invalid,
footprint_type=footprint_type) | Get footprints within the boundaries of some place.
The query must be geocodable and OSM must have polygon boundaries for the
geocode result. If OSM does not have a polygon for this place, you can
instead get its footprints using the footprints_from_address function, which
geocodes the place name to a point and gets the footprints within some distance
of that point.
Parameters
----------
place : string
the query to geocode to get geojson boundary polygon
footprint_type : string
type of footprint to be downloaded. OSM tag key e.g. 'building', 'landuse', 'place', etc.
retain_invalid : bool
if False discard any footprints with an invalid geometry
Returns
-------
GeoDataFrame |
def _bp_static_url(blueprint):
""" builds the absolute url path for a blueprint's static folder """
u = six.u('%s%s' % (blueprint.url_prefix or '', blueprint.static_url_path or ''))
return u | builds the absolute url path for a blueprint's static folder |
def extraction_data_statistics(path):
""" Generates data statistics for the given data extraction setup stored
in Xcessiv notebook.
This is in rqtasks.py but not as a job yet. Temporarily call this directly
while I'm figuring out Javascript lel.
Args:
path (str, unicode): Path to xcessiv notebook
"""
with functions.DBContextManager(path) as session:
extraction = session.query(models.Extraction).first()
X, y = extraction.return_main_dataset()
functions.verify_dataset(X, y)
if extraction.test_dataset['method'] == 'split_from_main':
X, X_test, y, y_test = train_test_split(
X,
y,
test_size=extraction.test_dataset['split_ratio'],
random_state=extraction.test_dataset['split_seed'],
stratify=y
)
elif extraction.test_dataset['method'] == 'source':
if 'source' not in extraction.test_dataset or not extraction.test_dataset['source']:
raise exceptions.UserError('Source is empty')
extraction_code = extraction.test_dataset["source"]
extraction_function = functions.\
import_object_from_string_code(extraction_code, "extract_test_dataset")
X_test, y_test = extraction_function()
else:
X_test, y_test = None, None
# test base learner cross-validation
extraction_code = extraction.meta_feature_generation['source']
return_splits_iterable = functions.import_object_from_string_code(
extraction_code,
'return_splits_iterable'
)
number_of_splits = 0
test_indices = []
try:
for train_idx, test_idx in return_splits_iterable(X, y):
number_of_splits += 1
test_indices.append(test_idx)
except Exception as e:
raise exceptions.UserError('User code exception', exception_message=str(e))
# preparation before testing stacked ensemble cross-validation
test_indices = np.concatenate(test_indices)
X, y = X[test_indices], y[test_indices]
# test stacked ensemble cross-validation
extraction_code = extraction.stacked_ensemble_cv['source']
return_splits_iterable = functions.import_object_from_string_code(
extraction_code,
'return_splits_iterable'
)
number_of_splits_stacked_cv = 0
try:
for train_idx, test_idx in return_splits_iterable(X, y):
number_of_splits_stacked_cv += 1
except Exception as e:
raise exceptions.UserError('User code exception', exception_message=str(e))
data_stats = dict()
data_stats['train_data_stats'] = functions.verify_dataset(X, y)
if X_test is not None:
data_stats['test_data_stats'] = functions.verify_dataset(X_test, y_test)
else:
data_stats['test_data_stats'] = None
data_stats['holdout_data_stats'] = {'number_of_splits': number_of_splits}
data_stats['stacked_ensemble_cv_stats'] = {'number_of_splits': number_of_splits_stacked_cv}
extraction.data_statistics = data_stats
session.add(extraction)
session.commit() | Generates data statistics for the given data extraction setup stored
in Xcessiv notebook.
This is in rqtasks.py but not as a job yet. Temporarily call this directly
while I'm figuring out Javascript lel.
Args:
path (str, unicode): Path to xcessiv notebook |
def _sending_task(self, backend):
"""
Used internally to safely increment `backend`s task count. Returns the
overall count of tasks for `backend`.
"""
with self.backend_mutex:
self.backends[backend] += 1
self.task_counter[backend] += 1
this_task = self.task_counter[backend]
return this_task | Used internally to safely increment `backend`'s task count. Returns the
overall count of tasks for `backend`. |
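A sketch of the state this method assumes on its owner; the attribute names come from the method body, but the class itself is hypothetical:

import threading
from collections import Counter

class Dispatcher(object):
    def __init__(self, backend_names):
        self.backend_mutex = threading.Lock()
        self.backends = Counter({name: 0 for name in backend_names})      # in-flight tasks per backend
        self.task_counter = Counter({name: 0 for name in backend_names})  # total tasks ever sent per backend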
def get_objs_from_record(self, record, key):
"""Returns a mapping of UID -> object
"""
uids = self.get_uids_from_record(record, key)
objs = map(self.get_object_by_uid, uids)
return dict(zip(uids, objs)) | Returns a mapping of UID -> object |
def _checkpoint(self, stage):
"""
Decide whether to stop processing of a pipeline. This is the hook:
a pipeline can report various "checkpoints" as sort of status markers
that designate the logical processing phase that's just been completed.
The initiation of a pipeline can preordain one of those as a "stopping
point" that when reached, should stop the pipeline's execution.
:param pypiper.Stage | str stage: Pipeline processing stage/phase just completed.
:return bool: Whether a checkpoint was created (i.e., whether it didn't
already exist)
:raise ValueError: If the stage is specified as an absolute filepath,
and that path indicates a location that's not immediately within
the main output folder, raise a ValueError.
"""
# For null stage, short-circuit and indicate no file write.
# This handles case in which we're timestamping prospectively and
# previously weren't in a stage.
if stage is None:
return False
try:
is_checkpoint = stage.checkpoint
except AttributeError:
# Maybe we have a raw function, not a stage.
if hasattr(stage, "__call__"):
stage = stage.__name__
else:
# Maybe we have a stage name not a Stage.
# In that case, we can proceed as-is, with downstream
# processing handling Stage vs. stage name disambiguation.
# Here, though, warn about inputs that appear filename/path-like.
# We can't rely on raw text being a filepath or filename,
# because that would ruin the ability to pass stage name rather
# than actual stage. We can issue a warning message based on the
# improbability of a stage name containing the '.' that would
# be expected to characterize the extension of a file name/path.
base, ext = os.path.splitext(stage)
if ext and "." not in base:
print("WARNING: '{}' looks like it may be the name or path of "
"a file; for such a checkpoint, use touch_checkpoint.".
format(stage))
else:
if not is_checkpoint:
print("Not a checkpoint: {}".format(stage))
return False
stage = stage.name
print("Checkpointing: '{}'".format(stage))
if os.path.isabs(stage):
check_fpath = stage
else:
check_fpath = checkpoint_filepath(stage, pm=self)
return self._touch_checkpoint(check_fpath) | Decide whether to stop processing of a pipeline. This is the hook:
a pipeline can report various "checkpoints" as sort of status markers
that designate the logical processing phase that's just been completed.
The initiation of a pipeline can preordain one of those as a "stopping
point" that when reached, should stop the pipeline's execution.
:param pypiper.Stage | str stage: Pipeline processing stage/phase just completed.
:return bool: Whether a checkpoint was created (i.e., whether it didn't
already exist)
:raise ValueError: If the stage is specified as an absolute filepath,
and that path indicates a location that's not immediately within
the main output folder, raise a ValueError. |
def readline(self, size=None):
"""Reads a single line of text.
The function reads one entire line from the file-like object. A trailing
end-of-line indicator (newline by default) is kept in the string (but may
be absent when a file ends with an incomplete line). An empty string is
returned only when end-of-file is encountered immediately.
Args:
size (Optional[int]): maximum byte size to read. If present and
non-negative, it is a maximum byte count (including the trailing
end-of-line) and an incomplete line may be returned.
Returns:
str: line of text.
Raises:
UnicodeDecodeError: if a line cannot be decoded.
ValueError: if the size is smaller than zero or exceeds the maximum
(as defined by _MAXIMUM_READ_BUFFER_SIZE).
"""
if size is not None and size < 0:
raise ValueError('Invalid size value smaller than zero.')
if size is not None and size > self._MAXIMUM_READ_BUFFER_SIZE:
raise ValueError('Invalid size value exceeds maximum.')
if not self._lines:
if self._lines_buffer_offset >= self._file_object_size:
return ''
read_size = size
if not read_size:
read_size = self._MAXIMUM_READ_BUFFER_SIZE
if self._lines_buffer_offset + read_size > self._file_object_size:
read_size = self._file_object_size - self._lines_buffer_offset
self._file_object.seek(self._lines_buffer_offset, os.SEEK_SET)
read_buffer = self._file_object.read(read_size)
self._lines_buffer_offset += len(read_buffer)
self._lines = read_buffer.split(self._end_of_line)
if self._lines_buffer:
self._lines[0] = b''.join([self._lines_buffer, self._lines[0]])
self._lines_buffer = b''
# Move a partial line from the lines list to the lines buffer.
if read_buffer[self._end_of_line_length:] != self._end_of_line:
self._lines_buffer = self._lines.pop()
for index, line in enumerate(self._lines):
self._lines[index] = b''.join([line, self._end_of_line])
if (self._lines_buffer and
self._lines_buffer_offset >= self._file_object_size):
self._lines.append(self._lines_buffer)
self._lines_buffer = b''
if not self._lines:
line = self._lines_buffer
self._lines_buffer = b''
elif not size or size >= len(self._lines[0]):
line = self._lines.pop(0)
else:
line = self._lines[0]
self._lines[0] = line[size:]
line = line[:size]
last_offset = self._current_offset
self._current_offset += len(line)
decoded_line = line.decode(self._encoding)
# Remove a byte-order mark at the start of the file.
if last_offset == 0 and decoded_line[0] == '\ufeff':
decoded_line = decoded_line[1:]
return decoded_line | Reads a single line of text.
The function reads one entire line from the file-like object. A trailing
end-of-line indicator (newline by default) is kept in the string (but may
be absent when a file ends with an incomplete line). An empty string is
returned only when end-of-file is encountered immediately.
Args:
size (Optional[int]): maximum byte size to read. If present and
non-negative, it is a maximum byte count (including the trailing
end-of-line) and an incomplete line may be returned.
Returns:
str: line of text.
Raises:
UnicodeDecodeError: if a line cannot be decoded.
ValueError: if the size is smaller than zero or exceeds the maximum
(as defined by _MAXIMUM_READ_BUFFER_SIZE). |
def default_privileges_revoke(name,
object_name,
object_type,
defprivileges=None,
prepend='public',
maintenance_db=None,
user=None,
host=None,
port=None,
password=None,
runas=None):
'''
.. versionadded:: 2019.0.0
Revoke default privileges on a postgres object
CLI Example:
.. code-block:: bash
salt '*' postgres.default_privileges_revoke user_name table_name table \\
SELECT,UPDATE maintenance_db=db_name
name
Name of the role whose default privileges should be revoked
object_name
Name of the object on which the revoke is to be performed
object_type
The object type, which can be one of the following:
- table
- sequence
- schema
- group
- function
defprivileges
Comma separated list of privileges to revoke, from the list below:
- INSERT
- CREATE
- TRUNCATE
- TRIGGER
- SELECT
- USAGE
- UPDATE
- EXECUTE
- REFERENCES
- DELETE
- ALL
maintenance_db
The database to connect to
user
database username if different from config or default
password
user password if any password for a specified user
host
Database host if different from config or default
port
Database port if different from config or default
runas
System user all operations should be performed on behalf of
'''
object_type, defprivileges, _defprivs = _mod_defpriv_opts(object_type, defprivileges)
_validate_default_privileges(object_type, _defprivs, defprivileges)
if not has_default_privileges(name, object_name, object_type, defprivileges,
prepend=prepend, maintenance_db=maintenance_db, user=user,
host=host, port=port, password=password, runas=runas):
log.info('The object: %s of type: %s does not'
' have default privileges: %s set', object_name, object_type, defprivileges)
return False
_grants = ','.join(_defprivs)
if object_type in ['table', 'sequence']:
on_part = '{0}.{1}'.format(prepend, object_name)
else:
on_part = object_name
if object_type == 'group':
query = 'ALTER DEFAULT PRIVILEGES REVOKE {0} FROM {1}'.format(object_name, name)
else:
query = 'ALTER DEFAULT PRIVILEGES IN SCHEMA {2} REVOKE {0} ON {1}S FROM {3}'.format(
_grants, object_type.upper(), prepend, name)
ret = _psql_prepare_and_run(['-c', query],
user=user,
host=host,
port=port,
maintenance_db=maintenance_db,
password=password,
runas=runas)
return ret['retcode'] == 0 | .. versionadded:: 2019.0.0
Revoke default privileges on a postgres object
CLI Example:
.. code-block:: bash
salt '*' postgres.default_privileges_revoke user_name table_name table \\
SELECT,UPDATE maintenance_db=db_name
name
Name of the role whose default privileges should be revoked
object_name
Name of the object on which the revoke is to be performed
object_type
The object type, which can be one of the following:
- table
- sequence
- schema
- group
- function
defprivileges
Comma separated list of privileges to revoke, from the list below:
- INSERT
- CREATE
- TRUNCATE
- TRIGGER
- SELECT
- USAGE
- UPDATE
- EXECUTE
- REFERENCES
- DELETE
- ALL
maintenance_db
The database to connect to
user
database username if different from config or default
password
user password if any password for a specified user
host
Database host if different from config or default
port
Database port if different from config or default
runas
System user all operations should be performed on behalf of |
def path_to(self, p):
"""Returns the absolute path to a given relative path."""
if os.path.isabs(p):
return p
return os.sep.join([self._original_dir, p]) | Returns the absolute path to a given relative path. |
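For illustration, assuming `self._original_dir == '/home/user/project'` on a POSIX system (both the path and the `project` object are hypothetical):

project.path_to('Pipfile')     # -> '/home/user/project/Pipfile'
project.path_to('/etc/hosts')  # -> '/etc/hosts' (absolute paths pass through unchanged)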
def record_drop_duplicate_fields(record):
"""
Return a record where all the duplicate fields have been removed.
Fields are considered identical considering also the order of their
subfields.
"""
out = {}
position = 0
tags = sorted(record.keys())
for tag in tags:
fields = record[tag]
out[tag] = []
current_fields = set()
for full_field in fields:
field = (tuple(full_field[0]),) + full_field[1:4]
if field not in current_fields:
current_fields.add(field)
position += 1
out[tag].append(full_field[:4] + (position,))
return out | Return a record where all the duplicate fields have been removed.
Fields are considered identical considering also the order of their
subfields. |
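A worked example with a hypothetical MARC-style record, where each field is `(subfields, ind1, ind2, value, position)`:

record = {
    '100': [
        ([('a', 'Doe, J.')], ' ', ' ', '', 1),
        ([('a', 'Doe, J.')], ' ', ' ', '', 2),  # identical subfields, indicators and value -> dropped
    ],
}
record_drop_duplicate_fields(record)
# -> {'100': [([('a', 'Doe, J.')], ' ', ' ', '', 1)]}  (positions are renumbered)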
def plot_colormap_components(cmap):
"""Plot the components of a given colormap."""
from ._helpers import set_ax_labels # recursive import protection
plt.figure(figsize=[8, 4])
gs = grd.GridSpec(3, 1, height_ratios=[1, 10, 1], hspace=0.05)
# colorbar
ax = plt.subplot(gs[0])
gradient = np.linspace(0, 1, 256)
gradient = np.vstack((gradient, gradient))
ax.imshow(gradient, aspect="auto", cmap=cmap, vmin=0., vmax=1.)
ax.set_title(cmap.name, fontsize=20)
ax.set_axis_off()
# components
ax = plt.subplot(gs[1])
x = np.arange(cmap.N)
colors = cmap(x)
r = colors[:, 0]
g = colors[:, 1]
b = colors[:, 2]
RGB_weight = [0.299, 0.587, 0.114]
k = np.sqrt(np.dot(colors[:, :3] ** 2, RGB_weight))
r.clip(0, 1, out=r)
g.clip(0, 1, out=g)
b.clip(0, 1, out=b)
xi = np.linspace(0, 1, x.size)
plt.plot(xi, r, "r", linewidth=5, alpha=0.6)
plt.plot(xi, g, "g", linewidth=5, alpha=0.6)
plt.plot(xi, b, "b", linewidth=5, alpha=0.6)
plt.plot(xi, k, "k", linewidth=5, alpha=0.6)
ax.set_xlim(0, 1)
ax.set_ylim(-0.1, 1.1)
set_ax_labels(ax=ax, xlabel=None, xticks=False, ylabel="intensity")
# grayified colorbar
cmap = grayify_cmap(cmap)
ax = plt.subplot(gs[2])
gradient = np.linspace(0, 1, 256)
gradient = np.vstack((gradient, gradient))
ax.imshow(gradient, aspect="auto", cmap=cmap, vmin=0., vmax=1.)
ax.set_axis_off() | Plot the components of a given colormap. |
def _search(mapping, filename):
"""Search a Loader data structure for a filename."""
result = mapping.get(filename)
if result is not None:
return result
name, ext = os.path.splitext(filename)
result = mapping.get(ext)
if result is not None:
for pattern, result2 in result:
if fnmatch(filename, pattern):
return result2
return None | Search a Loader data structure for a filename. |
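A sketch of the mapping `_search` expects (the loader names are hypothetical): exact filenames map directly to a result, while extensions map to `(pattern, result)` pairs tried in order with `fnmatch`:

mapping = {
    'settings.yaml': 'yaml_loader',
    '.txt': [('data_*', 'data_loader'), ('*', 'text_loader')],
}
_search(mapping, 'settings.yaml')  # -> 'yaml_loader' (exact filename hit)
_search(mapping, 'data_01.txt')    # -> 'data_loader' (first matching pattern wins)
_search(mapping, 'readme.rst')     # -> None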
def abbreviate_list(items, max_items=10, item_max_len=40, joiner=", ", indicator="..."):
"""
Abbreviate a list, truncating each element and adding an indicator at the end if the
whole list was truncated. Set item_max_len to None or 0 to leave items untruncated.
"""
if not items:
return items
else:
shortened = [abbreviate_str("%s" % item, max_len=item_max_len) for item in items[0:max_items]]
if len(items) > max_items:
shortened.append(indicator)
return joiner.join(shortened) | Abbreviate a list, truncating each element and adding an indicator at the end if the
whole list was truncated. Set item_max_len to None or 0 to leave items untruncated. |
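Usage sketch (the outputs assume `abbreviate_str` leaves items shorter than `max_len` untouched):

abbreviate_list(['alpha', 'beta', 'gamma', 'delta'], max_items=2)
# -> 'alpha, beta, ...'
abbreviate_list([], max_items=2)
# -> []  (falsy input is returned unchanged)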
def sudoers(self, enable):
"""
This method is used to enable/disable bash sudo commands running
through the guestshell virtual service. By default sudo access
is prevented due to the setting in the 'sudoers' file. Therefore
the setting must be disabled in the file to enable sudo commands.
This method assumes that the "bash-shell" feature is enabled.
@@@ TO-DO: have a mech to check &| control bash-shell feature support
:param enable:
True - enables sudo commands
False - disables sudo commands
:return:
returns the response of the sed command needed to make the
file change
"""
f_sudoers = "/isan/vdc_1/virtual-instance/guestshell+/rootfs/etc/sudoers"
if enable is True:
sed_cmd = r" 's/\(^Defaults *requiretty\)/#\1/g' "
elif enable is False:
sed_cmd = r" 's/^#\(Defaults *requiretty\)/\1/g' "
else:
raise RuntimeError('enable must be True or False')
self.guestshell("run bash sudo sed -i" + sed_cmd + f_sudoers) | This method is used to enable/disable bash sudo commands running
through the guestshell virtual service. By default sudo access
is prevented due to the setting in the 'sudoers' file. Therefore
the setting must be disabled in the file to enable sudo commands.
This method assumes that the "bash-shell" feature is enabled.
@@@ TO-DO: have a mech to check &| control bash-shell feature support
:param enable:
True - enables sudo commands
False - disables sudo commands
:return:
returns the response of the sed command needed to make the
file change |
async def list_pools() -> None:
"""
Lists names of created pool ledgers
:return: Error code
"""
logger = logging.getLogger(__name__)
logger.debug("list_pools: >>> ")
if not hasattr(list_pools, "cb"):
logger.debug("list_pools: Creating callback")
list_pools.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))
res = await do_call('indy_list_pools',
list_pools.cb)
res = json.loads(res.decode())
logger.debug("list_pools: <<< res: %r", res)
return res | Lists names of created pool ledgers
:return: Error code |
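A minimal driver sketch using standard asyncio (the returned pool list depends entirely on the local indy setup):

import asyncio

async def main():
    pools = await list_pools()  # e.g. [{'pool': 'sandbox'}], shape per the indy SDK
    print(pools)

asyncio.get_event_loop().run_until_complete(main())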
def rewrite(self, source_bucket, source_object, destination_bucket,
destination_object=None):
"""
Has the same functionality as copy, except that it will work on files
over 5 TB, as well as when copying between locations and/or storage
classes.
destination_object can be omitted, in which case source_object is used.
:param source_bucket: The bucket of the object to copy from.
:type source_bucket: str
:param source_object: The object to copy.
:type source_object: str
:param destination_bucket: The destination bucket the object is copied to.
:type destination_bucket: str
:param destination_object: The (renamed) path of the object if given.
Can be omitted; then the same name is used.
:type destination_object: str
"""
destination_object = destination_object or source_object
if (source_bucket == destination_bucket and
source_object == destination_object):
raise ValueError(
'Either source/destination bucket or source/destination object '
'must be different, not both the same: bucket=%s, object=%s' %
(source_bucket, source_object))
if not source_bucket or not source_object:
raise ValueError('source_bucket and source_object cannot be empty.')
client = self.get_conn()
source_bucket = client.get_bucket(bucket_name=source_bucket)
source_object = source_bucket.blob(blob_name=source_object)
destination_bucket = client.get_bucket(bucket_name=destination_bucket)
token, bytes_rewritten, total_bytes = destination_bucket.blob(
blob_name=destination_object).rewrite(
source=source_object
)
self.log.info('Total Bytes: %s | Bytes Written: %s',
total_bytes, bytes_rewritten)
while token is not None:
token, bytes_rewritten, total_bytes = destination_bucket.blob(
blob_name=destination_object).rewrite(
source=source_object, token=token
)
self.log.info('Total Bytes: %s | Bytes Written: %s',
total_bytes, bytes_rewritten)
self.log.info('Object %s in bucket %s copied to object %s in bucket %s',
source_object.name, source_bucket.name,
destination_object, destination_bucket.name) | Has the same functionality as copy, except that it will work on files
over 5 TB, as well as when copying between locations and/or storage
classes.
destination_object can be omitted, in which case source_object is used.
:param source_bucket: The bucket of the object to copy from.
:type source_bucket: str
:param source_object: The object to copy.
:type source_object: str
:param destination_bucket: The destination bucket the object is copied to.
:type destination_bucket: str
:param destination_object: The (renamed) path of the object if given.
Can be omitted; then the same name is used.
:type destination_object: str |
def get_html(self):
"""Method to convert the repository list to a search results page."""
here = path.abspath(path.dirname(__file__))
env = Environment(loader=FileSystemLoader(path.join(here, "res/")))
suggest = env.get_template("suggest.htm.j2")
return suggest.render(
logo=path.join(here, "res/logo.png"),
user_login=self.user,
repos=self.repos,
) | Method to convert the repository list to a search results page. |
def compile_file(source, globals_=None):
"""Compile by saving to file and importing that.
Compiling the AST/source code this way ensures that the source code is
readable by e.g. `pdb` or `inspect`.
Args:
source: The code to compile, either as a string or as an AST.
globals_: A dictionary of variables that should be available as globals in
the compiled module. They will be monkey patched after importing the
module.
Returns:
A module object containing the compiled source code.
"""
if isinstance(source, gast.AST):
source = quoting.to_source(source)
# Write source to temporary file
tempdir = tempfile.mkdtemp()
uuid = str(uuid4().hex[:4])
tmpname = os.path.join(tempdir, 'tangent_%s.py' % uuid)
with open(tmpname, 'w') as f:
f.write(source)
# Load the temporary file as a module
module_name = 'tangent_%s' % uuid
if six.PY3:
spec = util.spec_from_file_location(module_name, tmpname)
m = util.module_from_spec(spec)
spec.loader.exec_module(m)
else:
m = imp.load_source(module_name, tmpname)
# Update the modules namespace
if globals_:
m.__dict__.update(globals_)
return m | Compile by saving to file and importing that.
Compiling the AST/source code this way ensures that the source code is
readable by e.g. `pdb` or `inspect`.
Args:
source: The code to compile, either as a string or as an AST.
globals_: A dictionary of variables that should be available as globals in
the compiled module. They will be monkey patched after importing the
module.
Returns:
A module object containing the compiled source code. |
def check(text):
"""Suggest the preferred forms."""
err = "spelling.athletes"
msg = "Misspelling of athlete's name. '{}' is the preferred form."
misspellings = [
["Dwyane Wade", ["Dwayne Wade"]],
["Miikka Kiprusoff", ["Mikka Kiprusoff"]],
["Mark Buehrle", ["Mark Buerhle"]],
["Skylar Diggins", ["Skyler Diggins"]],
["Agnieszka Radwanska", ["Agnieska Radwanska"]],
["J.J. Redick", ["J.J. Reddick"]],
["Manny Pacquiao", ["Manny Packquaio"]],
["Antawn Jamison", ["Antwan Jamison"]],
["Cal Ripken", ["Cal Ripkin"]],
["Jhonny Peralta", ["Johnny Peralta"]],
["Monta Ellis", ["Monte Ellis"]],
["Alex Rodriguez", ["Alex Rodriquez"]],
["Mark Teixeira", ["Mark Texeira"]],
["Brett Favre", ["Brett Farve"]],
["Torii Hunter", ["Tori Hunter"]],
["Stephen Curry", ["Stephon Curry"]],
["Mike Krzyzewski", ["Mike Kryzewski"]],
]
return preferred_forms_check(text, misspellings, err, msg) | Suggest the preferred forms. |
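Usage sketch; the exact error-tuple layout is produced by `preferred_forms_check`, assumed here to be proselint's standard helper:

errors = check("I watched Dwayne Wade score 30 points.")
# non-empty: 'Dwayne Wade' is flagged, with 'Dwyane Wade' as the preferred form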
def get_dict_registry_services(registry, template_files, warn_missing_files=True):
"""
Return a dict mapping service name to a dict containing the service's
type ('fixtures', 'platform_services', 'application_services', 'internal_services'),
the template file's absolute path, and a list of environments to which the
service is intended to deploy.
Service names that appear more than once emit a warning, and the later records are ignored.
Services which have no template file will not appear in the returned dict.
If `warn_missing_files` is True, a warning is emitted for each such service.
"""
with open(registry) as fr:
parsed_registry = json.load(fr)
services = {}
for type, type_services in parsed_registry.iteritems():
for name, service in type_services.iteritems():
if name in services:
logger.warning("Template name appears twice, ignoring later items: `%s`", name)
continue
template_file = get_matching_service_template_file(name, template_files)
if not template_file:
if warn_missing_files:
logger.warning("No template file for `%s` (%s) `%s`", type, service['type'], name)
continue
services[name] = {
'type': type,
'template_file': template_file,
'environments': service['environments']
}
return services | Return a dict mapping service name to a dict containing the service's
type ('fixtures', 'platform_services', 'application_services', 'internal_services'),
the template file's absolute path, and a list of environments to which the
service is intended to deploy.
Service names that appear more than once emit a warning, and the later records are ignored.
Services which have no template file will not appear in the returned dict.
If `warn_missing_files` is True, a warning is emitted for each such service. |
def _unpack_truisms(self, c):
"""
Given a constraint, _unpack_truisms() returns a set of constraints that must be True for
this constraint to be True.
"""
try:
op = getattr(self, '_unpack_truisms_'+c.op)
except AttributeError:
return set()
return op(c) | Given a constraint, _unpack_truisms() returns a set of constraints that must be True for
this constraint to be True. |
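The `getattr` dispatch implies per-op handlers named `_unpack_truisms_<op>`; a hypothetical handler for a conjunction, on the same class, might look like:

def _unpack_truisms_And(self, c):
    # Every operand of an And must itself be True for the And to be True.
    truisms = set(c.args)
    for arg in c.args:
        truisms |= self._unpack_truisms(arg)
    return truisms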
def get_instance(self, payload):
"""
Build an instance of UserChannelInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.chat.v2.service.user.user_channel.UserChannelInstance
:rtype: twilio.rest.chat.v2.service.user.user_channel.UserChannelInstance
"""
return UserChannelInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
user_sid=self._solution['user_sid'],
) | Build an instance of UserChannelInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.chat.v2.service.user.user_channel.UserChannelInstance
:rtype: twilio.rest.chat.v2.service.user.user_channel.UserChannelInstance |
def editpermissions_anonymous_user_view(self, request, forum_id=None):
""" Allows to edit anonymous user permissions for the considered forum.
The view displays a form to define which permissions are granted for the anonymous user for
the considered forum.
"""
forum = get_object_or_404(Forum, pk=forum_id) if forum_id else None
# Set up the context
context = self.get_forum_perms_base_context(request, forum)
context['forum'] = forum
context['title'] = '{} - {}'.format(_('Forum permissions'), _('Anonymous user'))
context['form'] = self._get_permissions_form(
request, UserForumPermission, {'forum': forum, 'anonymous_user': True},
)
return render(request, self.editpermissions_anonymous_user_view_template_name, context) | Allows editing of anonymous user permissions for the considered forum.
The view displays a form to define which permissions are granted for the anonymous user for
the considered forum. |
def _get_gather_offset(self, size):
"""Calculate the offset for gather result from this process
Parameters
----------
size : int
The total number of process.
Returns
-------
tuple_size : tuple_int
Number of elements to send from each process
(one integer for each process)
tuple_offset : tuple_int
Number of elements away from the first element
in the array at which to begin the new, segmented
array for a process
(one integer for each process)
subject_map : dictionary
Mapping between global subject id to local id
"""
gather_size = np.zeros(size).astype(int)
gather_offset = np.zeros(size).astype(int)
num_local_subjs = np.zeros(size).astype(int)
subject_map = {}
for idx, s in enumerate(np.arange(self.n_subj)):
cur_rank = idx % size
gather_size[cur_rank] += self.prior_size
subject_map[idx] = (cur_rank, num_local_subjs[cur_rank])
num_local_subjs[cur_rank] += 1
for idx in np.arange(size - 1) + 1:
gather_offset[idx] = gather_offset[idx - 1] + gather_size[idx - 1]
tuple_size = tuple(gather_size)
tuple_offset = tuple(gather_offset)
return tuple_size, tuple_offset, subject_map | Calculate the offset for gather result from this process
Parameters
----------
size : int
The total number of process.
Returns
-------
tuple_size : tuple_int
Number of elements to send from each process
(one integer for each process)
tuple_offset : tuple_int
Number of elements away from the first element
in the array at which to begin the new, segmented
array for a process
(one integer for each process)
subject_map : dictionary
Mapping between global subject id to local id |
def member_del(self, cluster_id, member_id):
"""remove member from cluster cluster"""
cluster = self._storage[cluster_id]
result = cluster.member_remove(member_id)
self._storage[cluster_id] = cluster
return result | Remove a member from the cluster. |
def step_a_file_named_filename_with(context, filename):
"""Creates a textual file with the content provided as docstring."""
step_a_file_named_filename_and_encoding_with(context, filename, "UTF-8")
# -- SPECIAL CASE: For usage with behave steps.
if filename.endswith(".feature"):
command_util.ensure_context_attribute_exists(context, "features", [])
context.features.append(filename) | Creates a textual file with the content provided as docstring. |
def get_feedback_from_submission(self, submission, only_feedback=False, show_everything=False, translation=gettext.NullTranslations()):
"""
Get the feedback of a submission. If only_feedback is True, returns only the feedback fields ("text" and "problems") as a dictionary.
Else, returns the full submission.
If show_everything is True, feedback normally hidden is shown.
"""
if only_feedback:
# Preserve "response_type" too: it is read below when parsing text and problems.
submission = {"text": submission.get("text", None), "problems": dict(submission.get("problems", {})),
              "response_type": submission.get("response_type", None)}
if "text" in submission:
submission["text"] = ParsableText(submission["text"], submission["response_type"], show_everything, translation).parse()
if "problems" in submission:
for problem in submission["problems"]:
if isinstance(submission["problems"][problem], str): # fallback for old-style submissions
submission["problems"][problem] = (submission.get('result', 'crash'), ParsableText(submission["problems"][problem],
submission["response_type"],
show_everything, translation).parse())
else: # new-style submission
submission["problems"][problem] = (submission["problems"][problem][0], ParsableText(submission["problems"][problem][1],
submission["response_type"],
show_everything, translation).parse())
return submission | Get the feedback of a submission. If only_feedback is True, returns only the feedback fields ("text" and "problems") as a dictionary.
Else, returns the full submission.
If show_everything is True, feedback normally hidden is shown. |
def rename_bika_setup():
"""
Rename Bika Setup to just Setup to avoid naming confusion for new users
"""
logger.info("Renaming Bika Setup...")
bika_setup = api.get_bika_setup()
bika_setup.setTitle("Setup")
bika_setup.reindexObject()
setup = api.get_portal().portal_setup
setup.runImportStepFromProfile('profile-bika.lims:default', 'controlpanel') | Rename Bika Setup to just Setup to avoid naming confusion for new users |
def post(self, document):
"""Send to API a document or a list of document.
:param document: a document or a list of document.
:type document: dict or list
:return: Message with location of job
:rtype: dict
:raises ValidationError: if API returns status 400
:raises Unauthorized: if API returns status 401
:raises Forbidden: if API returns status 403
:raises NotFound: if API returns status 404
:raises ApiError: if API returns other status
"""
if type(document) is dict:
document = [document]
return self.make_request(method='POST', uri='updates/', data=document) | Send a document or a list of documents to the API.
:param document: a document or a list of documents.
:type document: dict or list
:return: Message with location of job
:rtype: dict
:raises ValidationError: if API returns status 400
:raises Unauthorized: if API returns status 401
:raises Forbidden: if API returns status 403
:raises NotFound: if API returns status 404
:raises ApiError: if API returns other status |
def insertLink(page, lnk, mark = True):
""" Insert a new link for the current page. """
CheckParent(page)
annot = getLinkText(page, lnk)
if annot == "":
raise ValueError("link kind not supported")
page._addAnnot_FromString([annot])
return | Insert a new link for the current page. |
def main(self,argv=None):
"""Run as a command-line script."""
parser = optparse.OptionParser(usage=USAGE % self.__class__.__name__)
newopt = parser.add_option
newopt('-i','--interact',action='store_true',default=False,
help='Interact with the program after the script is run.')
opts,args = parser.parse_args(argv)
if len(args) != 1:
print >> sys.stderr,"You must supply exactly one file to run."
sys.exit(1)
self.run_file(args[0],opts.interact) | Run as a command-line script. |
def valid_kdf(self, kdf):
"""Determine whether a KDFSuite can be used with this EncryptionSuite.
:param kdf: KDFSuite to evaluate
:type kdf: aws_encryption_sdk.identifiers.KDFSuite
:rtype: bool
"""
if kdf.input_length is None:
return True
if self.data_key_length > kdf.input_length(self):
raise InvalidAlgorithmError(
"Invalid Algorithm definition: data_key_len must not be greater than kdf_input_len"
)
return True | Determine whether a KDFSuite can be used with this EncryptionSuite.
:param kdf: KDFSuite to evaluate
:type kdf: aws_encryption_sdk.identifiers.KDFSuite
:rtype: bool |
async def inspect(self, *, node_id: str) -> Mapping[str, Any]:
"""
Inspect a node
Args:
node_id: The ID or name of the node
"""
response = await self.docker._query_json(
"nodes/{node_id}".format(node_id=node_id), method="GET"
)
return response | Inspect a node
Args:
node_id: The ID or name of the node |
def system_config_dir():
r"""Return the system-wide config dir (full path).
- Linux, SunOS: /etc/glances
- *BSD, macOS: /usr/local/etc/glances
- Windows: %APPDATA%\glances
"""
if LINUX or SUNOS:
path = '/etc'
elif BSD or MACOS:
path = '/usr/local/etc'
else:
path = os.environ.get('APPDATA')
if path is None:
path = ''
else:
path = os.path.join(path, 'glances')
return path | r"""Return the system-wide config dir (full path).
- Linux, SunOS: /etc/glances
- *BSD, macOS: /usr/local/etc/glances
- Windows: %APPDATA%\glances |
def get_properties(properties, identifier, namespace='cid', searchtype=None, as_dataframe=False, **kwargs):
"""Retrieve the specified properties from PubChem.
:param identifier: The compound, substance or assay identifier to use as a search query.
:param namespace: (optional) The identifier type.
:param searchtype: (optional) The advanced search type, one of substructure, superstructure or similarity.
:param as_dataframe: (optional) Automatically extract the properties into a pandas :class:`~pandas.DataFrame`.
"""
if isinstance(properties, text_types):
properties = properties.split(',')
properties = ','.join([PROPERTY_MAP.get(p, p) for p in properties])
properties = 'property/%s' % properties
results = get_json(identifier, namespace, 'compound', properties, searchtype=searchtype, **kwargs)
results = results['PropertyTable']['Properties'] if results else []
if as_dataframe:
import pandas as pd
return pd.DataFrame.from_records(results, index='CID')
return results | Retrieve the specified properties from PubChem.
:param identifier: The compound, substance or assay identifier to use as a search query.
:param namespace: (optional) The identifier type.
:param searchtype: (optional) The advanced search type, one of substructure, superstructure or similarity.
:param as_dataframe: (optional) Automatically extract the properties into a pandas :class:`~pandas.DataFrame`. |
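Usage sketch mirroring the signature (the returned values are illustrative of PubChem's live data, not guaranteed):

props = get_properties('IsomericSMILES,XLogP', 'aspirin', namespace='name')
# -> e.g. [{'CID': 2244, 'IsomericSMILES': 'CC(=O)OC1=CC=CC=C1C(=O)O', 'XLogP': 1.2}]
df = get_properties(['MolecularWeight'], 2244, as_dataframe=True)  # pandas DataFrame indexed by CID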
def buildErrorResponse(self, request, error=None):
"""
Builds an error response.
@param request: The AMF request
@type request: L{Request<pyamf.remoting.Request>}
@return: The AMF response
@rtype: L{Response<pyamf.remoting.Response>}
"""
if error is not None:
cls, e, tb = error
else:
cls, e, tb = sys.exc_info()
return remoting.Response(build_fault(cls, e, tb, self.gateway.debug),
status=remoting.STATUS_ERROR) | Builds an error response.
@param request: The AMF request
@type request: L{Request<pyamf.remoting.Request>}
@return: The AMF response
@rtype: L{Response<pyamf.remoting.Response>} |
def max_interval_intersec(S):
"""determine a value that is contained in a largest number of given intervals
:param S: list of half open intervals
:complexity: O(n log n), where n = len(S)
"""
B = ([(left, +1) for left, right in S] +
[(right, -1) for left, right in S])
B.sort()
c = 0
best = (c, None)
for x, d in B:
c += d
if best[0] < c:
best = (c, x)
return best | determine a value contained in the largest number of the given intervals
:param S: list of half open intervals
:complexity: O(n log n), where n = len(S) |
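A quick worked example:

S = [(1, 4), (2, 6), (5, 7)]
max_interval_intersec(S)  # -> (2, 2): the point 2 lies inside two of the intervals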
def add_sun_flare(img, flare_center_x, flare_center_y, src_radius, src_color, circles):
"""Add sun flare.
From https://github.com/UjjwalSaxena/Automold--Road-Augmentation-Library
Args:
img (np.array):
flare_center_x (float):
flare_center_y (float):
src_radius:
src_color (int, int, int):
circles (list):
Returns:
"""
non_rgb_warning(img)
input_dtype = img.dtype
needs_float = False
if input_dtype == np.float32:
img = from_float(img, dtype=np.dtype('uint8'))
needs_float = True
elif input_dtype not in (np.uint8, np.float32):
raise ValueError('Unexpected dtype {} for RandomSunFlare augmentation'.format(input_dtype))
overlay = img.copy()
output = img.copy()
for (alpha, (x, y), rad3, (r_color, g_color, b_color)) in circles:
cv2.circle(overlay, (x, y), rad3, (r_color, g_color, b_color), -1)
cv2.addWeighted(overlay, alpha, output, 1 - alpha, 0, output)
point = (int(flare_center_x), int(flare_center_y))
overlay = output.copy()
num_times = src_radius // 10
alpha = np.linspace(0.0, 1, num=num_times)
rad = np.linspace(1, src_radius, num=num_times)
for i in range(num_times):
cv2.circle(overlay, point, int(rad[i]), src_color, -1)
alp = alpha[num_times - i - 1] * alpha[num_times - i - 1] * alpha[num_times - i - 1]
cv2.addWeighted(overlay, alp, output, 1 - alp, 0, output)
image_rgb = output
if needs_float:
image_rgb = to_float(image_rgb, max_value=255)
return image_rgb | Add sun flare.
From https://github.com/UjjwalSaxena/Automold--Road-Augmentation-Library
Args:
img (np.array):
flare_center_x (float):
flare_center_y (float):
src_radius:
src_color (int, int, int):
circles (list):
Returns: |
def post(self, url, entity):
"""
To make a POST request to Falkonry API server
:param url: string
:param entity: Instantiated class object
"""
try:
if entity is None or entity == "":
jsonData = ""
else:
jsonData = entity.to_json()
except Exception as e:
jsonData = jsonpickle.pickler.encode(entity)
response = requests.post(
self.host + url,
jsonData,
headers={
"Content-Type": "application/json",
'Authorization': 'Bearer ' + self.token,
'x-falkonry-source':self.sourceHeader
},
verify=False
)
if response.status_code == 201:
try:
return json.loads(response._content.decode('utf-8'))
except Exception as e:
return json.loads(response.content)
elif response.status_code == 409:
try:
return json.loads(response._content.decode('utf-8'))
except Exception as e:
return json.loads(response.content)
elif response.status_code == 401:
raise Exception(json.dumps({'message':'Unauthorized Access'}))
else:
raise Exception(response.content) | To make a POST request to Falkonry API server
:param url: string
:param entity: Instantiated class object |
def getSpecialPrice(self, product, store_view=None, identifierType=None):
"""
Get product special price data
:param product: ID or SKU of product
:param store_view: ID or Code of Store view
:param identifierType: Defines whether the product or SKU value is
passed in the "product" parameter.
:return: Dictionary
"""
return self.call(
'catalog_product.getSpecialPrice', [
product, store_view, identifierType
]
) | Get product special price data
:param product: ID or SKU of product
:param store_view: ID or Code of Store view
:param identifierType: Defines whether the product or SKU value is
passed in the "product" parameter.
:return: Dictionary |
def parse_scwrl_out(scwrl_std_out, scwrl_pdb):
"""Parses SCWRL output and returns PDB and SCWRL score.
Parameters
----------
scwrl_std_out : str
Std out from SCWRL.
scwrl_pdb : str
String of packed SCWRL PDB.
Returns
-------
fixed_scwrl_str : str
String of packed SCWRL PDB, with correct PDB format.
score : float
SCWRL Score
"""
score = re.findall(
r'Total minimal energy of the graph = ([-0-9.]+)', scwrl_std_out)[0]
# Add temperature factors to SCWRL out
split_scwrl = scwrl_pdb.splitlines()
fixed_scwrl = []
for line in split_scwrl:
if len(line) < 80:
line += ' ' * (80 - len(line))
if re.search(r'H?E?T?ATO?M\s+\d+.+', line):
front = line[:61]
temp_factor = ' 0.00'
back = line[66:]
fixed_scwrl.append(''.join([front, temp_factor, back]))
else:
fixed_scwrl.append(line)
fixed_scwrl_str = '\n'.join(fixed_scwrl) + '\n'
return fixed_scwrl_str, float(score) | Parses SCWRL output and returns PDB and SCWRL score.
Parameters
----------
scwrl_std_out : str
Std out from SCWRL.
scwrl_pdb : str
String of packed SCWRL PDB.
Returns
-------
fixed_scwrl_str : str
String of packed SCWRL PDB, with correct PDB format.
score : float
SCWRL Score |
def move(self, d_xyz, inplace=False):
"""
Translate the whole Space in x, y and z coordinates.
:param d_xyz: displacement in x, y(, and z).
:type d_xyz: tuple (len=2 or 3)
:param inplace: If True, the moved ``pyny.Space`` is copied and
added to the current ``pyny.Space``. If False, it returns
the new ``pyny.Space``.
:type inplace: bool
:returns: None, ``pyny.Space``
"""
state = Polygon.verify
Polygon.verify = False
if len(d_xyz) == 2: d_xyz = (d_xyz[0], d_xyz[1], 0)
xyz = np.array(d_xyz)
# Add (dx, dy, dz) to all the coordinates
map_ = self.get_map()[1] + xyz
space = self.map2pyny(map_)
Polygon.verify = state
if inplace:
self.add_spaces(space)
return None
else:
return space | Translate the whole Space in x, y and z coordinates.
:param d_xyz: displacement in x, y(, and z).
:type d_xyz: tuple (len=2 or 3)
:param inplace: If True, the moved ``pyny.Space`` is copied and
added to the current ``pyny.Space``. If False, it returns
the new ``pyny.Space``.
:type inplace: bool
:returns: None, ``pyny.Space`` |
def enter(clsQname):
"""
Delegate a rule to another class which instantiates a Klein app
This also memoizes the resource instance on the handler function itself
"""
def wrapper(routeHandler):
@functools.wraps(routeHandler)
def inner(self, request, *a, **kw):
if getattr(inner, '_subKlein', None) is None:
cls = namedAny(clsQname)
inner._subKlein = cls().app.resource()
return routeHandler(self, request, inner._subKlein, *a, **kw)
inner._subKleinQname = clsQname
return inner
return wrapper | Delegate a rule to another class which instantiates a Klein app
This also memoizes the resource instance on the handler function itself |
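A hypothetical usage sketch with Klein (the class and the dotted path below are assumptions):

from klein import Klein

class Root(object):
    app = Klein()

    @app.route('/api/', branch=True)
    @enter('myproject.api.APIRoot')  # dotted path resolved lazily on the first request
    def api(self, request, subKlein):
        return subKlein  # hand the subtree to the memoized sub-resource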
def delete_record(self, record):
"""
Permanently removes record from table.
"""
try:
self.session.delete(record)
self.session.commit()
except Exception as e:
self.session.rollback()
raise ProgrammingError(e)
finally:
self.session.close() | Permanently removes record from table. |
def get_linked(self):
"""Get a list of currently linked devices from the hub"""
linked_devices = {}
self.logger.info("\nget_linked")
#todo instead of sleep, create loop to keep checking buffer
self.direct_command_hub('0269')
sleep(1)
self.get_buffer_status()
msgs = self.buffer_status.get('msgs', [])
for entry in msgs:
im_code = entry.get('im_code', '')
#self.logger.info("get_linked entry {}".format(pprint.pformat(entry)))
if im_code == '57':
device_id = entry.get('id_high', '') + entry.get('id_mid', '') \
+ entry.get('id_low', '')
group = entry.get('group', '')
if device_id not in linked_devices:
dev_info = self.id_request(device_id)
dev_cat = dev_info.get('id_high', '')
dev_sub_cat = dev_info.get('id_mid', '')
dev_cat_record = self.get_device_category(dev_cat)
if dev_cat_record and 'name' in dev_cat_record:
dev_cat_name = dev_cat_record['name']
dev_cat_type = dev_cat_record['type']
else:
dev_cat_name = 'unknown'
dev_cat_type = 'unknown'
linked_dev_model = self.get_device_model(dev_cat, dev_sub_cat)
if 'name' in linked_dev_model:
dev_model_name = linked_dev_model['name']
else:
dev_model_name = 'unknown'
if 'sku' in linked_dev_model:
dev_sku = linked_dev_model['sku']
else:
dev_sku = 'unknown'
self.logger.info("get_linked: Got first device: %s group %s "
"cat type %s cat name %s dev model name %s",
device_id, group, dev_cat_type,
dev_cat_name, dev_model_name)
linked_devices[device_id] = {
'cat_name': dev_cat_name,
'cat_type': dev_cat_type,
'model_name' : dev_model_name,
'cat': dev_cat,
'sub_cat': dev_sub_cat,
'sku': dev_sku,
'group': []
}
linked_devices[device_id]['group'].append(group)
while self.buffer_status['success']:
self.direct_command_hub('026A')
sleep(1)
self.get_buffer_status()
msgs = self.buffer_status.get('msgs', [])
for entry in msgs:
im_code = entry.get('im_code', '')
if im_code == '57':
device_id = entry.get('id_high', '') + entry.get('id_mid', '') \
+ entry.get('id_low', '')
group = entry.get('group', '')
if device_id not in linked_devices:
dev_info = self.id_request(device_id)
dev_cat = dev_info.get('id_high', '')
dev_sub_cat = dev_info.get('id_mid', '')
dev_cat_record = self.get_device_category(dev_cat)
if dev_cat_record and 'name' in dev_cat_record:
dev_cat_name = dev_cat_record['name']
dev_cat_type = dev_cat_record['type']
else:
dev_cat_name = 'unknown'
dev_cat_type = 'unknown'
linked_dev_model = self.get_device_model(dev_cat, dev_sub_cat)
if 'name' in linked_dev_model:
dev_model_name = linked_dev_model['name']
else:
dev_model_name = 'unknown'
if 'sku' in linked_dev_model:
dev_sku = linked_dev_model['sku']
else:
dev_sku = 'unknown'
self.logger.info("get_linked: Got device: %s group %s "
+ "cat type %s cat name %s dev model name %s",
device_id, group, dev_cat_type,
dev_cat_name, dev_model_name)
linked_devices[device_id] = {
'cat_name': dev_cat_name,
'cat_type': dev_cat_type,
'model_name' : dev_model_name,
'cat': dev_cat,
'sub_cat': dev_sub_cat,
'sku': dev_sku,
'group': []
}
linked_devices[device_id]['group'].append(group)
self.logger.info("get_linked: Final device list: %s", pprint.pformat(linked_devices))
return linked_devices | Get a list of currently linked devices from the hub |
def reveal(input_image_file):
"""Find a message in an image.
"""
from base64 import b64decode
from zlib import decompress
img = tools.open_image(input_image_file)
try:
if img.format in ["JPEG", "TIFF"]:
if "exif" in img.info:
exif_dict = piexif.load(img.info.get("exif", b""))
description_key = piexif.ImageIFD.ImageDescription
encoded_message = exif_dict["0th"][description_key]
else:
encoded_message = b""
else:
raise ValueError("Given file is neither JPEG nor TIFF.")
finally:
img.close()
return b64decode(decompress(encoded_message)) | Find a message in an image. |
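Since `reveal` applies `decompress` then `b64decode`, a matching hypothetical `hide` must store `compress(b64encode(message))` in the EXIF ImageDescription; a sketch under that assumption:

from base64 import b64encode
from zlib import compress
import piexif
from PIL import Image

def hide(input_image_file, message, output_path):
    # message is a bytes payload; the stored order is the exact inverse of reveal().
    payload = compress(b64encode(message))
    exif_dict = {"0th": {piexif.ImageIFD.ImageDescription: payload},
                 "Exif": {}, "GPS": {}, "1st": {}, "thumbnail": None}
    img = Image.open(input_image_file)
    img.save(output_path, exif=piexif.dump(exif_dict))
    img.close()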
def check_child_friendly(self, name):
"""
Check if a module is a container and so can have children
"""
name = name.split()[0]
if name in self.container_modules:
return
root = os.path.dirname(os.path.realpath(__file__))
module_path = os.path.join(root, "modules")
try:
info = imp.find_module(name, [module_path])
except ImportError:
return
if not info:
return
(file, pathname, description) = info
try:
py_mod = imp.load_module(name, file, pathname, description)
except Exception:
# We cannot load the module! We could error out here but then the
# user gets informed that the problem is with their config. This
# is not correct. Better to say that all is well and then the
# config can get parsed and py3status loads. The error about the
# failing module load is better handled at that point, and will be.
return
try:
container = py_mod.Py3status.Meta.container
except AttributeError:
container = False
# delete the module
del py_mod
if container:
self.container_modules.append(name)
else:
self.error("Module `{}` cannot contain others".format(name)) | Check if a module is a container and so can have children |
def get_plot(self, units='THz', ymin=None, ymax=None, width=None,
height=None, dpi=None, plt=None, fonts=None, dos=None,
dos_aspect=3, color=None, style=None, no_base_style=False):
"""Get a :obj:`matplotlib.pyplot` object of the phonon band structure.
Args:
units (:obj:`str`, optional): Units of phonon frequency. Accepted
(case-insensitive) values are Thz, cm-1, eV, meV.
ymin (:obj:`float`, optional): The minimum energy on the y-axis.
ymax (:obj:`float`, optional): The maximum energy on the y-axis.
width (:obj:`float`, optional): The width of the plot.
height (:obj:`float`, optional): The height of the plot.
dpi (:obj:`int`, optional): The dots-per-inch (pixel density) for
the image.
fonts (:obj:`list`, optional): Fonts to use in the plot. Can be a
a single font, specified as a :obj:`str`, or several fonts,
specified as a :obj:`list` of :obj:`str`.
plt (:obj:`matplotlib.pyplot`, optional): A
:obj:`matplotlib.pyplot` object to use for plotting.
dos (:obj:`np.ndarray`): 2D Numpy array of total DOS data
dos_aspect (float): Width division for vertical DOS
color (:obj:`str` or :obj:`tuple`, optional): Line/fill colour in
any matplotlib-accepted format
style (:obj:`list`, :obj:`str`, or :obj:`dict`): Any matplotlib
style specifications, to be composed on top of Sumo base
style.
no_base_style (:obj:`bool`, optional): Prevent use of sumo base
style. This can make alternative styles behave more
predictably.
Returns:
:obj:`matplotlib.pyplot`: The phonon band structure plot.
"""
if color is None:
color = 'C0' # Default to first colour in matplotlib series
if dos is not None:
plt = pretty_subplot(1, 2, width=width, height=height,
sharex=False, sharey=True, dpi=dpi, plt=plt,
gridspec_kw={'width_ratios': [dos_aspect, 1],
'wspace': 0})
ax = plt.gcf().axes[0]
else:
plt = pretty_plot(width, height, dpi=dpi, plt=plt)
ax = plt.gca()
data = self.bs_plot_data()
dists = data['distances']
freqs = data['frequency']
# nd is branch index, nb is band index, nk is kpoint index
for nd, nb in itertools.product(range(len(data['distances'])),
range(self._nb_bands)):
f = freqs[nd][nb]
# plot band data
ax.plot(dists[nd], f, ls='-', c=color, zorder=1)
self._maketicks(ax, units=units)
self._makeplot(ax, plt.gcf(), data, width=width, height=height,
ymin=ymin, ymax=ymax, dos=dos, color=color)
plt.tight_layout()
plt.subplots_adjust(wspace=0)
return plt | Get a :obj:`matplotlib.pyplot` object of the phonon band structure.
Args:
units (:obj:`str`, optional): Units of phonon frequency. Accepted
(case-insensitive) values are Thz, cm-1, eV, meV.
ymin (:obj:`float`, optional): The minimum energy on the y-axis.
ymax (:obj:`float`, optional): The maximum energy on the y-axis.
width (:obj:`float`, optional): The width of the plot.
height (:obj:`float`, optional): The height of the plot.
dpi (:obj:`int`, optional): The dots-per-inch (pixel density) for
the image.
fonts (:obj:`list`, optional): Fonts to use in the plot. Can be a
a single font, specified as a :obj:`str`, or several fonts,
specified as a :obj:`list` of :obj:`str`.
plt (:obj:`matplotlib.pyplot`, optional): A
:obj:`matplotlib.pyplot` object to use for plotting.
dos (:obj:`np.ndarray`): 2D Numpy array of total DOS data
dos_aspect (float): Width division for vertical DOS
color (:obj:`str` or :obj:`tuple`, optional): Line/fill colour in
any matplotlib-accepted format
style (:obj:`list`, :obj:`str`, or :obj:`dict`): Any matplotlib
style specifications, to be composed on top of Sumo base
style.
no_base_style (:obj:`bool`, optional): Prevent use of sumo base
style. This can make alternative styles behave more
predictably.
Returns:
:obj:`matplotlib.pyplot`: The phonon band structure plot. |
def _initialize_cfg(self):
"""
Re-create the DiGraph
"""
self.kb.functions = FunctionManager(self.kb)
self._jobs_to_analyze_per_function = defaultdict(set)
self._completed_functions = set() | Re-create the DiGraph |