Dataset schema (⌀ marks columns that also contain null values). Each row below is shown as its metadata, followed by the row's `content` (the source file) and its line statistics. In every row shown here the repo path, name, head hexsha, and licenses fields are identical across the stars/issues/forks column groups, so they are listed once per row.

| column | type | length / range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | 1 to 10 items |
| max_stars_count | int64 | 1 to 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24 ⌀ |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | 1 to 10 items |
| max_issues_count | int64 | 1 to 67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24 ⌀ |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | 1 to 10 items |
| max_forks_count | int64 | 1 to 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24 ⌀ |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
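The last three columns are simple statistics over each row's `content`. The exact definitions used when the dataset was built are not stated here, so the following is only a minimal sketch of how such values could be computed for one row (byte size, average and maximum line length, and the fraction of alphanumeric characters):

```python
def content_stats(content: str) -> dict:
    """Per-row statistics in the spirit of the schema above (definitions assumed)."""
    lines = content.splitlines() or [""]
    line_lengths = [len(line) for line in lines]
    alphanumeric = sum(ch.isalnum() for ch in content)
    return {
        "size": len(content.encode("utf-8")),                      # bytes
        "avg_line_length": sum(line_lengths) / len(line_lengths),
        "max_line_length": max(line_lengths),
        "alphanum_fraction": alphanumeric / len(content) if content else 0.0,
    }

# Example on a tiny two-line snippet
print(content_stats("import os\nprint(os.getcwd())\n"))
```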
hexsha: d2f4274cd9880a5d238d3fa14f2e327f418ff990 | size: 4,221 | ext: py | lang: Python
repo: dupontz/libcloud @ 419c69441ea10e7bbf37319e5e8d02e82e7e6b40 | path: contrib/generate_provider_logos_collage_image.py | licenses: ["Apache-2.0"]
max_stars_count: 4 (2017-11-14T17:24:12.000Z to 2020-10-30T01:46:02.000Z)
max_issues_count: 11 (2017-01-29T08:59:21.000Z to 2018-07-02T09:17:47.000Z)
max_forks_count: 4 (2016-04-04T08:01:48.000Z to 2018-06-06T08:04:36.000Z)
content:
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
# Script which generates a collage of provider logos from multiple provider
# logo files.
#
# It works in two steps:
#
# 1. Resize all the provider logo files (reduce the dimensions)
# 2. Assemble a final image from the resized images
import os
import sys
import argparse
import subprocess
import random
from os.path import join as pjoin
DIMENSIONS = '150x150' # Dimensions of the resized image (<width>x<height>)
GEOMETRY = '+4+4'  # Spacing between the tiled images (+<horizontal>+<vertical>)
TO_CREATE_DIRS = ['resized/', 'final/']
def setup(output_path):
"""
Create missing directories.
"""
for directory in TO_CREATE_DIRS:
final_path = pjoin(output_path, directory)
if not os.path.exists(final_path):
os.makedirs(final_path)
def get_logo_files(input_path):
logo_files = os.listdir(input_path)
logo_files = [name for name in logo_files if
'resized' not in name and name.endswith('png')]
logo_files = [pjoin(input_path, name) for name in logo_files]
return logo_files
def resize_images(logo_files, output_path):
resized_images = []
for logo_file in logo_files:
name, ext = os.path.splitext(os.path.basename(logo_file))
new_name = '%s%s' % (name, ext)
out_name = pjoin(output_path, 'resized/', new_name)
        print('Resizing image: %(name)s' % {'name': logo_file})
values = {'name': logo_file, 'out_name': out_name,
'dimensions': DIMENSIONS}
cmd = 'convert %(name)s -resize %(dimensions)s %(out_name)s'
cmd = cmd % values
subprocess.call(cmd, shell=True)
resized_images.append(out_name)
return resized_images
def assemble_final_image(resized_images, output_path):
final_name = pjoin(output_path, 'final/logos.png')
random.shuffle(resized_images)
values = {'images': ' '.join(resized_images), 'geometry': GEOMETRY,
'out_name': final_name}
cmd = 'montage %(images)s -geometry %(geometry)s %(out_name)s'
cmd = cmd % values
    print('Generating final image: %(name)s' % {'name': final_name})
subprocess.call(cmd, shell=True)
def main(input_path, output_path):
if not os.path.exists(input_path):
print('Path doesn\'t exist: %s' % (input_path))
sys.exit(2)
if not os.path.exists(output_path):
print('Path doesn\'t exist: %s' % (output_path))
sys.exit(2)
logo_files = get_logo_files(input_path=input_path)
setup(output_path=output_path)
resized_images = resize_images(logo_files=logo_files,
output_path=output_path)
assemble_final_image(resized_images=resized_images,
output_path=output_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Assemble provider logos '
' in a single image')
parser.add_argument('--input-path', action='store',
help='Path to directory which contains provider '
'logo files')
parser.add_argument('--output-path', action='store',
help='Path where the new files will be written')
args = parser.parse_args()
input_path = os.path.abspath(args.input_path)
output_path = os.path.abspath(args.output_path)
main(input_path=input_path, output_path=output_path)
avg_line_length: 33.23622 | max_line_length: 76 | alphanum_fraction: 0.666667
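The script above shells out to ImageMagick in two passes. Below is a condensed, stand-alone sketch of the same flow that passes argument lists instead of `shell=True`; the `convert` and `montage` binaries are assumed to be installed, and the paths are illustrative rather than libcloud's own:

```python
import subprocess
from pathlib import Path

def build_collage(logo_paths, output_dir, dimensions="150x150", geometry="+4+4"):
    out = Path(output_dir)
    (out / "resized").mkdir(parents=True, exist_ok=True)
    resized = []
    for logo in map(Path, logo_paths):
        target = out / "resized" / logo.name
        # Step 1: shrink each logo so it fits within 150x150 pixels
        subprocess.check_call(["convert", str(logo), "-resize", dimensions, str(target)])
        resized.append(str(target))
    final = out / "logos.png"
    # Step 2: tile the resized logos into a single collage with 4px spacing
    subprocess.check_call(["montage", *resized, "-geometry", geometry, str(final)])
    return final
```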
hexsha: 24a03be85394d3bab2fd7e5ad30c44c741306433 | size: 8,674 | ext: py | lang: Python
repo: jmmcd/networkx @ 207ff7d1e9bfaff013ac77c8d6bb79619892c994 | path: networkx/algorithms/bipartite/centrality.py | licenses: ["BSD-3-Clause"]
max_stars_count: 1 (2020-08-08T21:52:34.000Z to 2020-08-08T21:52:34.000Z)
max_issues_count: 2 (2019-11-13T03:48:53.000Z to 2021-02-15T16:52:09.000Z)
max_forks_count: null
content:
# -*- coding: utf-8 -*-
# Copyright (C) 2011 by
# Jordi Torrents <jtorrents@milnou.net>
# Aric Hagberg <hagberg@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
__author__ = """\n""".join(['Jordi Torrents <jtorrents@milnou.net>',
'Aric Hagberg (hagberg@lanl.gov)'])
__all__ = ['degree_centrality',
'betweenness_centrality',
'closeness_centrality']
def degree_centrality(G, nodes):
r"""Compute the degree centrality for nodes in a bipartite network.
The degree centrality for a node `v` is the fraction of nodes
connected to it.
Parameters
----------
G : graph
A bipartite network
nodes : list or container
Container with all nodes in one bipartite node set.
Returns
-------
centrality : dictionary
Dictionary keyed by node with bipartite degree centrality as the value.
See Also
--------
betweenness_centrality,
closeness_centrality,
sets,
is_bipartite
Notes
-----
The nodes input parameter must contain all nodes in one bipartite node set,
but the dictionary returned contains all nodes from both bipartite node
sets. See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
for further details on how bipartite graphs are handled in NetworkX.
For unipartite networks, the degree centrality values are
normalized by dividing by the maximum possible degree (which is
`n-1` where `n` is the number of nodes in G).
In the bipartite case, the maximum possible degree of a node in a
bipartite node set is the number of nodes in the opposite node set
[1]_. The degree centrality for a node `v` in the bipartite
sets `U` with `n` nodes and `V` with `m` nodes is
.. math::
d_{v} = \frac{deg(v)}{m}, \mbox{for} v \in U ,
d_{v} = \frac{deg(v)}{n}, \mbox{for} v \in V ,
where `deg(v)` is the degree of node `v`.
References
----------
.. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation
Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook
of Social Network Analysis. Sage Publications.
http://www.steveborgatti.com/research/publications/bhaffiliations.pdf
"""
top = set(nodes)
bottom = set(G) - top
s = 1.0 / len(bottom)
centrality = dict((n, d * s) for n, d in G.degree(top))
s = 1.0 / len(top)
centrality.update(dict((n, d * s) for n, d in G.degree(bottom)))
return centrality
def betweenness_centrality(G, nodes):
r"""Compute betweenness centrality for nodes in a bipartite network.
Betweenness centrality of a node `v` is the sum of the
fraction of all-pairs shortest paths that pass through `v`.
Values of betweenness are normalized by the maximum possible
value which for bipartite graphs is limited by the relative size
of the two node sets [1]_.
Let `n` be the number of nodes in the node set `U` and
`m` be the number of nodes in the node set `V`, then
nodes in `U` are normalized by dividing by
.. math::
\frac{1}{2} [m^2 (s + 1)^2 + m (s + 1)(2t - s - 1) - t (2s - t + 3)] ,
where
.. math::
s = (n - 1) \div m , t = (n - 1) \mod m ,
and nodes in `V` are normalized by dividing by
.. math::
\frac{1}{2} [n^2 (p + 1)^2 + n (p + 1)(2r - p - 1) - r (2p - r + 3)] ,
where,
.. math::
p = (m - 1) \div n , r = (m - 1) \mod n .
Parameters
----------
G : graph
A bipartite graph
nodes : list or container
Container with all nodes in one bipartite node set.
Returns
-------
betweenness : dictionary
Dictionary keyed by node with bipartite betweenness centrality
as the value.
See Also
--------
degree_centrality,
closeness_centrality,
sets,
is_bipartite
Notes
-----
The nodes input parameter must contain all nodes in one bipartite node set,
but the dictionary returned contains all nodes from both node sets.
See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
for further details on how bipartite graphs are handled in NetworkX.
References
----------
.. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation
Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook
of Social Network Analysis. Sage Publications.
http://www.steveborgatti.com/research/publications/bhaffiliations.pdf
"""
top = set(nodes)
bottom = set(G) - top
n = float(len(top))
m = float(len(bottom))
s = (n - 1) // m
t = (n - 1) % m
bet_max_top = (((m**2) * ((s + 1)**2)) +
(m * (s + 1) * (2 * t - s - 1)) -
(t * ((2 * s) - t + 3))) / 2.0
p = (m - 1) // n
r = (m - 1) % n
bet_max_bot = (((n**2) * ((p + 1)**2)) +
(n * (p + 1) * (2 * r - p - 1)) -
(r * ((2 * p) - r + 3))) / 2.0
betweenness = nx.betweenness_centrality(G, normalized=False,
weight=None)
for node in top:
betweenness[node] /= bet_max_top
for node in bottom:
betweenness[node] /= bet_max_bot
return betweenness
def closeness_centrality(G, nodes, normalized=True):
r"""Compute the closeness centrality for nodes in a bipartite network.
The closeness of a node is the distance to all other nodes in the
graph or in the case that the graph is not connected to all other nodes
in the connected component containing that node.
Parameters
----------
G : graph
A bipartite network
nodes : list or container
Container with all nodes in one bipartite node set.
normalized : bool, optional
If True (default) normalize by connected component size.
Returns
-------
closeness : dictionary
Dictionary keyed by node with bipartite closeness centrality
as the value.
See Also
--------
betweenness_centrality,
degree_centrality
sets,
is_bipartite
Notes
-----
The nodes input parameter must contain all nodes in one bipartite node set,
but the dictionary returned contains all nodes from both node sets.
See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
for further details on how bipartite graphs are handled in NetworkX.
Closeness centrality is normalized by the minimum distance possible.
In the bipartite case the minimum distance for a node in one bipartite
node set is 1 from all nodes in the other node set and 2 from all
other nodes in its own set [1]_. Thus the closeness centrality
for node `v` in the two bipartite sets `U` with
`n` nodes and `V` with `m` nodes is
.. math::
c_{v} = \frac{m + 2(n - 1)}{d}, \mbox{for} v \in U,
c_{v} = \frac{n + 2(m - 1)}{d}, \mbox{for} v \in V,
where `d` is the sum of the distances from `v` to all
other nodes.
Higher values of closeness indicate higher centrality.
As in the unipartite case, setting normalized=True causes the
values to normalized further to n-1 / size(G)-1 where n is the
number of nodes in the connected part of graph containing the
node. If the graph is not completely connected, this algorithm
computes the closeness centrality for each connected part
separately.
References
----------
.. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation
Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook
of Social Network Analysis. Sage Publications.
http://www.steveborgatti.com/research/publications/bhaffiliations.pdf
"""
closeness = {}
path_length = nx.single_source_shortest_path_length
top = set(nodes)
bottom = set(G) - top
n = float(len(top))
m = float(len(bottom))
for node in top:
sp = dict(path_length(G, node))
totsp = sum(sp.values())
if totsp > 0.0 and len(G) > 1:
closeness[node] = (m + 2 * (n - 1)) / totsp
if normalized:
s = (len(sp) - 1.0) / (len(G) - 1)
closeness[node] *= s
else:
            closeness[node] = 0.0
for node in bottom:
sp = dict(path_length(G, node))
totsp = sum(sp.values())
if totsp > 0.0 and len(G) > 1:
closeness[node] = (n + 2 * (m - 1)) / totsp
if normalized:
s = (len(sp) - 1.0) / (len(G) - 1)
closeness[node] *= s
else:
            closeness[node] = 0.0
return closeness
avg_line_length: 31.541818 | max_line_length: 79 | alphanum_fraction: 0.603412
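A small illustrative use of the three bipartite centrality functions above; the graph and node names are made up, and `top` must contain every node of one bipartite set:

```python
import networkx as nx
from networkx.algorithms import bipartite

G = nx.Graph()
G.add_edges_from([("u1", "v1"), ("u1", "v2"), ("u2", "v1")])
top = {"u1", "u2"}  # one bipartite node set; the other is inferred as set(G) - top

print(bipartite.degree_centrality(G, top))       # u1: 2/2 = 1.0, u2: 0.5, v1: 1.0, v2: 0.5
print(bipartite.betweenness_centrality(G, top))
print(bipartite.closeness_centrality(G, top))
```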
hexsha: 8e9e00a0402a219ea5bf9fbce14b13de43e7e768 | size: 976 | ext: py | lang: Python
repo: se77enn/LeetCode-Solution @ d29ef5358cae592b63952c3d293897a176fb75e1 | path: Python/complete-binary-tree-inserter.py | licenses: ["MIT"]
max_stars_count: 1 (2020-10-27T03:22:31.000Z to 2020-10-27T03:22:31.000Z)
max_issues_count: null
max_forks_count: 1 (2021-03-22T18:58:23.000Z to 2021-03-22T18:58:23.000Z)
content:
# Time: ctor: O(n)
# insert: O(1)
# get_root: O(1)
# Space: O(n)
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class CBTInserter(object):
def __init__(self, root):
"""
:type root: TreeNode
"""
self.__tree = [root]
for i in self.__tree:
if i.left:
self.__tree.append(i.left)
if i.right:
self.__tree.append(i.right)
def insert(self, v):
"""
:type v: int
:rtype: int
"""
n = len(self.__tree)
self.__tree.append(TreeNode(v))
if n % 2:
self.__tree[(n-1)//2].left = self.__tree[-1]
else:
self.__tree[(n-1)//2].right = self.__tree[-1]
return self.__tree[(n-1)//2].val
def get_root(self):
"""
:rtype: TreeNode
"""
return self.__tree[0]
avg_line_length: 22.181818 | max_line_length: 57 | alphanum_fraction: 0.460041
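Illustrative use of the classes above (reusing `TreeNode` and `CBTInserter` as defined in the file): the list kept by `CBTInserter` stores the tree in level order, so the parent of the node at index `n` is at index `(n - 1) // 2`, and each `insert` returns the value of the new node's parent.

```python
root = TreeNode(1)
inserter = CBTInserter(root)
print(inserter.insert(2))           # 1 -> 2 becomes the left child of the root
print(inserter.insert(3))           # 1 -> 3 becomes the right child of the root
print(inserter.insert(4))           # 2 -> 4 becomes the left child of node 2
print(inserter.get_root() is root)  # True
```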
hexsha: 4b9004d7eaacfa82658e2c6a4e99b5102c8277af | size: 263 | ext: py | lang: Python
repo: microsoft/fl-simulation @ d177d329c82559c7efe82deae8dea8f9baa49495 | path: src/fl_simulation/utils/types.py | licenses: ["MIT"]
max_stars_count: 5 (2021-12-14T02:21:53.000Z to 2021-12-26T07:45:13.000Z)
max_issues_count: 1 (2022-01-04T04:51:20.000Z to 2022-01-04T04:51:20.000Z)
max_forks_count: null
content:
"""Useful type decalrations."""
from typing import List, NewType
import torch
ModelDiff = NewType("ModelDiff", List[torch.Tensor])
"""The model weights update."""
ControlVarDiff = NewType("ControlVarDiff", List[torch.Tensor])
"""The control variate update."""
avg_line_length: 23.909091 | max_line_length: 62 | alphanum_fraction: 0.737643
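A minimal sketch of producing a `ModelDiff` as declared above, i.e. the element-wise difference between two parameter lists. The tensors are made up, and the import path assumes the package layout shown in the row metadata:

```python
import torch

from fl_simulation.utils.types import ModelDiff

old_params = [torch.zeros(2, 2), torch.zeros(3)]
new_params = [torch.ones(2, 2), torch.full((3,), 0.5)]

# NewType is a no-op at runtime; it only labels the list for type checkers.
update: ModelDiff = ModelDiff([new - old for new, old in zip(new_params, old_params)])
```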
hexsha: 08e284adb9f26dfe8e7df19e776f7be234b47af5 | size: 41,837 | ext: py | lang: Python
repo: ifraixedes/saltstack-salt @ b54becb8b43cc9b7c00b2c0bc637ac534dc62896 | path: salt/states/cmd.py | licenses: ["Apache-2.0"]
max_stars_count: null
max_issues_count: null
max_forks_count: null
content:
"""
Execution of arbitrary commands
===============================
The cmd state module manages the enforcement of executed commands, this
state can tell a command to run under certain circumstances.
A simple example to execute a command:
.. code-block:: yaml
# Store the current date in a file
'date > /tmp/salt-run':
cmd.run
Only run if another execution failed, in this case truncate syslog if there is
no disk space:
.. code-block:: yaml
'> /var/log/messages/':
cmd.run:
- unless: echo 'foo' > /tmp/.test && rm -f /tmp/.test
Only run if the file specified by ``creates`` does not exist, in this case
touch /tmp/foo if it does not exist:
.. code-block:: yaml
touch /tmp/foo:
cmd.run:
- creates: /tmp/foo
``creates`` also accepts a list of files, in which case this state will
run if **any** of the files do not exist:
.. code-block:: yaml
"echo 'foo' | tee /tmp/bar > /tmp/baz":
cmd.run:
- creates:
- /tmp/bar
- /tmp/baz
.. note::
The ``creates`` option was added to the cmd state in version 2014.7.0,
and made a global requisite in 3001.
Sometimes when running a command that starts up a daemon, the init script
doesn't return properly which causes Salt to wait indefinitely for a response.
In situations like this try the following:
.. code-block:: yaml
run_installer:
cmd.run:
- name: /tmp/installer.bin > /dev/null 2>&1
Salt determines whether the ``cmd`` state is successfully enforced based on the exit
code returned by the command. If the command returns a zero exit code, then salt
determines that the state was successfully enforced. If the script returns a non-zero
exit code, then salt determines that it failed to successfully enforce the state.
If a command returns a non-zero exit code but you wish to treat this as a success,
then you must place the command in a script and explicitly set the exit code of
the script to zero.
Please note that the success or failure of the state is not affected by whether a state
change occurred nor the stateful argument.
When executing a command or script, the state (i.e., changed or not)
of the command is unknown to Salt's state system. Therefore, by default, the
``cmd`` state assumes that any command execution results in a changed state.
This means that if a ``cmd`` state is watched by another state then the
state that's watching will always be executed due to the `changed` state in
the ``cmd`` state.
.. _stateful-argument:
Using the "Stateful" Argument
-----------------------------
Many state functions in this module now also accept a ``stateful`` argument.
If ``stateful`` is specified to be true then it is assumed that the command
or script will determine its own state and communicate it back by following
a simple protocol described below:
1. :strong:`If there's nothing in the stdout of the command, then assume no
changes.` Otherwise, the stdout must be either in JSON or its `last`
non-empty line must be a string of key=value pairs delimited by spaces (no
spaces on either side of ``=``).
2. :strong:`If it's JSON then it must be a JSON object (e.g., {}).` If it's
key=value pairs then quoting may be used to include spaces. (Python's shlex
module is used to parse the key=value string)
Two special keys or attributes are recognized in the output::
changed: bool (i.e., 'yes', 'no', 'true', 'false', case-insensitive)
comment: str (i.e., any string)
So, only if ``changed`` is ``True`` then assume the command execution has
changed the state, and any other key values or attributes in the output will
be set as part of the changes.
3. :strong:`If there's a comment then it will be used as the comment of the
state.`
Here's an example of how one might write a shell script for use with a
stateful command:
.. code-block:: bash
#!/bin/bash
#
echo "Working hard..."
# writing the state line
echo # an empty line here so the next line will be the last.
echo "changed=yes comment='something has changed' whatever=123"
And an example SLS file using this module:
.. code-block:: yaml
Run myscript:
cmd.run:
- name: /path/to/myscript
- cwd: /
- stateful: True
Run only if myscript changed something:
cmd.run:
- name: echo hello
- cwd: /
- onchanges:
- cmd: Run myscript
Note that if the second ``cmd.run`` state also specifies ``stateful: True`` it can
then be watched by some other states as well.
4. :strong:`The stateful argument can optionally include a test_name parameter.`
This is used to specify a command to run in test mode. This command should
return stateful data for changes that would be made by the command in the
name parameter.
.. versionadded:: 2015.2.0
.. code-block:: yaml
Run myscript:
cmd.run:
- name: /path/to/myscript
- cwd: /
- stateful:
- test_name: /path/to/myscript test
Run masterscript:
cmd.script:
- name: masterscript
- source: salt://path/to/masterscript
- cwd: /
- stateful:
- test_name: masterscript test
Should I use :mod:`cmd.run <salt.states.cmd.run>` or :mod:`cmd.wait <salt.states.cmd.wait>`?
--------------------------------------------------------------------------------------------
.. note::
Use :mod:`cmd.run <salt.states.cmd.run>` together with :ref:`onchanges <requisites-onchanges>`
instead of :mod:`cmd.wait <salt.states.cmd.wait>`.
These two states are often confused. The important thing to remember about them
is that :mod:`cmd.run <salt.states.cmd.run>` states are run each time the SLS
file that contains them is applied. If it is more desirable to have a command
that only runs after some other state changes, then :mod:`cmd.wait
<salt.states.cmd.wait>` does just that. :mod:`cmd.wait <salt.states.cmd.wait>`
is designed to :ref:`watch <requisites-watch>` other states, and is
executed when the state it is watching changes. Example:
.. code-block:: yaml
/usr/local/bin/postinstall.sh:
cmd.wait:
- watch:
- pkg: mycustompkg
file.managed:
- source: salt://utils/scripts/postinstall.sh
mycustompkg:
pkg.installed:
- require:
- file: /usr/local/bin/postinstall.sh
``cmd.wait`` itself does not do anything; all functionality is inside its ``mod_watch``
function, which is called by ``watch`` on changes.
The preferred format is using the :ref:`onchanges Requisite <requisites-onchanges>`, which
works on ``cmd.run`` as well as on any other state. The example would then look as follows:
.. code-block:: yaml
/usr/local/bin/postinstall.sh:
cmd.run:
- onchanges:
- pkg: mycustompkg
file.managed:
- source: salt://utils/scripts/postinstall.sh
mycustompkg:
pkg.installed:
- require:
- file: /usr/local/bin/postinstall.sh
How do I create an environment from a pillar map?
-------------------------------------------------
The map that comes from a pillar can be directly consumed by the env option!
To use it, one may pass it like this. Example:
.. code-block:: yaml
printenv:
cmd.run:
- env: {{ salt['pillar.get']('example:key', {}) }}
"""
import copy
import logging
import os
import salt.utils.args
import salt.utils.functools
import salt.utils.json
import salt.utils.platform
from salt.exceptions import CommandExecutionError, SaltRenderError
log = logging.getLogger(__name__)
def _reinterpreted_state(state):
"""
Re-interpret the state returned by salt.state.run using our protocol.
"""
ret = state["changes"]
state["changes"] = {}
state["comment"] = ""
out = ret.get("stdout")
if not out:
if ret.get("stderr"):
state["comment"] = ret["stderr"]
return state
is_json = False
try:
data = salt.utils.json.loads(out)
if not isinstance(data, dict):
return _failout(
state, "script JSON output must be a JSON object (e.g., {})!"
)
is_json = True
except ValueError:
idx = out.rstrip().rfind("\n")
if idx != -1:
out = out[idx + 1 :]
data = {}
try:
for item in salt.utils.args.shlex_split(out):
key, val = item.split("=")
data[key] = val
except ValueError:
state = _failout(
state,
"Failed parsing script output! "
"Stdout must be JSON or a line of name=value pairs.",
)
state["changes"].update(ret)
return state
changed = _is_true(data.get("changed", "no"))
if "comment" in data:
state["comment"] = data["comment"]
del data["comment"]
if changed:
for key in ret:
data.setdefault(key, ret[key])
# if stdout is the state output in JSON, don't show it.
# otherwise it contains the one line name=value pairs, strip it.
data["stdout"] = "" if is_json else data.get("stdout", "")[:idx]
state["changes"] = data
# FIXME: if it's not changed but there's stdout and/or stderr then those
# won't be shown as the function output. (though, they will be shown
# inside INFO logs).
return state
def _failout(state, msg):
state["comment"] = msg
state["result"] = False
return state
def _is_true(val):
if val and str(val).lower() in ("true", "yes", "1"):
return True
elif str(val).lower() in ("false", "no", "0"):
return False
raise ValueError("Failed parsing boolean value: {}".format(val))
def wait(
name,
cwd=None,
root=None,
runas=None,
shell=None,
env=(),
stateful=False,
umask=None,
output_loglevel="debug",
hide_output=False,
use_vt=False,
success_retcodes=None,
success_stdout=None,
success_stderr=None,
**kwargs
):
"""
Run the given command only if the watch statement calls it.
.. note::
Use :mod:`cmd.run <salt.states.cmd.run>` together with :mod:`onchanges </ref/states/requisites#onchanges>`
instead of :mod:`cmd.wait <salt.states.cmd.wait>`.
name
The command to execute, remember that the command will execute with the
path and permissions of the salt-minion.
cwd
The current working directory to execute the command in, defaults to
/root
root
Path to the root of the jail to use. If this parameter is set, the command
will run inside a chroot
runas
The user name to run the command as
shell
The shell to use for execution, defaults to /bin/sh
env
A list of environment variables to be set prior to execution.
Example:
.. code-block:: yaml
script-foo:
cmd.wait:
- env:
- BATCH: 'yes'
.. warning::
The above illustrates a common PyYAML pitfall, that **yes**,
**no**, **on**, **off**, **true**, and **false** are all loaded as
boolean ``True`` and ``False`` values, and must be enclosed in
quotes to be used as strings. More info on this (and other) PyYAML
idiosyncrasies can be found :ref:`here <yaml-idiosyncrasies>`.
Variables as values are not evaluated. So $PATH in the following
example is a literal '$PATH':
.. code-block:: yaml
script-bar:
cmd.wait:
- env: "PATH=/some/path:$PATH"
One can still use the existing $PATH by using a bit of Jinja:
.. code-block:: jinja
{% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}
mycommand:
cmd.run:
- name: ls -l /
- env:
- PATH: {{ [current_path, '/my/special/bin']|join(':') }}
.. note::
When using environment variables on Windows, case-sensitivity
matters: Windows uses `Path`, as opposed to the `PATH` used on other
systems.
umask
The umask (in octal) to use when running the command.
stateful
The command being executed is expected to return data about executing
a state. For more information, see the :ref:`stateful-argument` section.
creates
Only run if the file specified by ``creates`` does not exist. If you
specify a list of files then this state will only run if **any** of
the files do not exist.
.. versionadded:: 2014.7.0
output_loglevel : debug
Control the loglevel at which the output from the command is logged to
the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
hide_output : False
Suppress stdout and stderr in the state's results.
.. note::
This is separate from ``output_loglevel``, which only handles how
Salt logs to the minion log.
.. versionadded:: 2018.3.0
use_vt
Use VT utils (saltstack) to stream the command output more
interactively to the console and the logs.
This is experimental.
success_retcodes: This parameter will allow a list of
non-zero return codes that should be considered a success. If the
return code returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 2019.2.0
success_stdout: This parameter will allow a list of
strings that when found in standard out should be considered a success.
If stdout returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 3004
success_stderr: This parameter will allow a list of
strings that when found in standard error should be considered a success.
If stderr returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 3004
"""
# Ignoring our arguments is intentional.
return {"name": name, "changes": {}, "result": True, "comment": ""}
# Alias "cmd.watch" to "cmd.wait", as this is a common misconfiguration
watch = salt.utils.functools.alias_function(wait, "watch")
def wait_script(
name,
source=None,
template=None,
cwd=None,
runas=None,
shell=None,
env=None,
stateful=False,
umask=None,
use_vt=False,
output_loglevel="debug",
hide_output=False,
success_retcodes=None,
success_stdout=None,
success_stderr=None,
**kwargs
):
"""
Download a script from a remote source and execute it only if a watch
statement calls it.
source
The source script being downloaded to the minion, this source script is
hosted on the salt master server. If the file is located on the master
in the directory named spam, and is called eggs, the source string is
salt://spam/eggs
template
If this setting is applied then the named templating engine will be
used to render the downloaded file, currently jinja, mako, and wempy
are supported
name
The command to execute, remember that the command will execute with the
path and permissions of the salt-minion.
cwd
The current working directory to execute the command in, defaults to
/root
runas
The user name to run the command as
shell
The shell to use for execution, defaults to the shell grain
env
A list of environment variables to be set prior to execution.
Example:
.. code-block:: yaml
salt://scripts/foo.sh:
cmd.wait_script:
- env:
- BATCH: 'yes'
.. warning::
The above illustrates a common PyYAML pitfall, that **yes**,
**no**, **on**, **off**, **true**, and **false** are all loaded as
boolean ``True`` and ``False`` values, and must be enclosed in
quotes to be used as strings. More info on this (and other) PyYAML
idiosyncrasies can be found :ref:`here <yaml-idiosyncrasies>`.
Variables as values are not evaluated. So $PATH in the following
example is a literal '$PATH':
.. code-block:: yaml
salt://scripts/bar.sh:
cmd.wait_script:
- env: "PATH=/some/path:$PATH"
One can still use the existing $PATH by using a bit of Jinja:
.. code-block:: jinja
{% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}
mycommand:
cmd.run:
- name: ls -l /
- env:
- PATH: {{ [current_path, '/my/special/bin']|join(':') }}
.. note::
When using environment variables on Windows, case-sensitivity
matters: Windows uses `Path`, as opposed to the `PATH` used on other
systems.
umask
The umask (in octal) to use when running the command.
stateful
The command being executed is expected to return data about executing
a state. For more information, see the :ref:`stateful-argument` section.
use_vt
Use VT utils (saltstack) to stream the command output more
interactively to the console and the logs.
This is experimental.
output_loglevel : debug
Control the loglevel at which the output from the command is logged to
the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
hide_output : False
Suppress stdout and stderr in the state's results.
.. note::
This is separate from ``output_loglevel``, which only handles how
Salt logs to the minion log.
.. versionadded:: 2018.3.0
success_retcodes: This parameter will allow a list of
non-zero return codes that should be considered a success. If the
return code returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 2019.2.0
success_stdout: This parameter will allow a list of
strings that when found in standard out should be considered a success.
If stdout returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 3004
success_stderr: This parameter will allow a list of
strings that when found in standard error should be considered a success.
If stderr returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 3004
"""
# Ignoring our arguments is intentional.
return {"name": name, "changes": {}, "result": True, "comment": ""}
def run(
name,
cwd=None,
root=None,
runas=None,
shell=None,
env=None,
prepend_path=None,
stateful=False,
umask=None,
output_loglevel="debug",
hide_output=False,
timeout=None,
ignore_timeout=False,
use_vt=False,
success_retcodes=None,
success_stdout=None,
success_stderr=None,
**kwargs
):
"""
Run a command if certain circumstances are met. Use ``cmd.wait`` if you
want to use the ``watch`` requisite.
name
The command to execute, remember that the command will execute with the
path and permissions of the salt-minion.
cwd
The current working directory to execute the command in, defaults to
/root
root
Path to the root of the jail to use. If this parameter is set, the command
will run inside a chroot
runas
The user name to run the command as
shell
The shell to use for execution, defaults to the shell grain
env
A list of environment variables to be set prior to execution.
Example:
.. code-block:: yaml
script-foo:
cmd.run:
- env:
- BATCH: 'yes'
.. warning::
The above illustrates a common PyYAML pitfall, that **yes**,
**no**, **on**, **off**, **true**, and **false** are all loaded as
boolean ``True`` and ``False`` values, and must be enclosed in
quotes to be used as strings. More info on this (and other) PyYAML
idiosyncrasies can be found :ref:`here <yaml-idiosyncrasies>`.
Variables as values are not evaluated. So $PATH in the following
example is a literal '$PATH':
.. code-block:: yaml
script-bar:
cmd.run:
- env: "PATH=/some/path:$PATH"
One can still use the existing $PATH by using a bit of Jinja:
.. code-block:: jinja
{% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}
mycommand:
cmd.run:
- name: ls -l /
- env:
- PATH: {{ [current_path, '/my/special/bin']|join(':') }}
.. note::
When using environment variables on Windows, case-sensitivity
matters: Windows uses `Path`, as opposed to the `PATH` used on other
systems.
prepend_path
$PATH segment to prepend (trailing ':' not necessary) to $PATH. This is
an easier alternative to the Jinja workaround.
.. versionadded:: 2018.3.0
stateful
The command being executed is expected to return data about executing
a state. For more information, see the :ref:`stateful-argument` section.
umask
The umask (in octal) to use when running the command.
output_loglevel : debug
Control the loglevel at which the output from the command is logged to
the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
hide_output : False
Suppress stdout and stderr in the state's results.
.. note::
This is separate from ``output_loglevel``, which only handles how
Salt logs to the minion log.
.. versionadded:: 2018.3.0
timeout
If the command has not terminated after timeout seconds, send the
subprocess sigterm, and if sigterm is ignored, follow up with sigkill
ignore_timeout
Ignore the timeout of commands, which is useful for running nohup
processes.
.. versionadded:: 2015.8.0
creates
Only run if the file specified by ``creates`` does not exist. If you
specify a list of files then this state will only run if **any** of
the files do not exist.
.. versionadded:: 2014.7.0
use_vt : False
Use VT utils (saltstack) to stream the command output more
interactively to the console and the logs.
This is experimental.
bg : False
If ``True``, run command in background and do not await or deliver its
results.
.. versionadded:: 2016.3.6
success_retcodes: This parameter will allow a list of
non-zero return codes that should be considered a success. If the
return code returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 2019.2.0
success_stdout: This parameter will allow a list of
strings that when found in standard out should be considered a success.
If stdout returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 3004
success_stderr: This parameter will allow a list of
strings that when found in standard error should be considered a success.
If stderr returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 3004
.. note::
cmd.run supports the usage of ``reload_modules``. This functionality
allows you to force Salt to reload all modules. You should only use
``reload_modules`` if your cmd.run does some sort of installation
(such as ``pip``), if you do not reload the modules future items in
your state which rely on the software being installed will fail.
.. code-block:: yaml
getpip:
cmd.run:
- name: /usr/bin/python /usr/local/sbin/get-pip.py
- unless: which pip
- require:
- pkg: python
- file: /usr/local/sbin/get-pip.py
- reload_modules: True
"""
### NOTE: The keyword arguments in **kwargs are passed directly to the
### ``cmd.run_all`` function and cannot be removed from the function
### definition, otherwise the use of unsupported arguments in a
### ``cmd.run`` state will result in a traceback.
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
test_name = None
if not isinstance(stateful, list):
stateful = stateful is True
elif isinstance(stateful, list) and "test_name" in stateful[0]:
test_name = stateful[0]["test_name"]
if __opts__["test"] and test_name:
name = test_name
# Need the check for None here, if env is not provided then it falls back
# to None and it is assumed that the environment is not being overridden.
if env is not None and not isinstance(env, (list, dict)):
ret["comment"] = "Invalidly-formatted 'env' parameter. See documentation."
return ret
cmd_kwargs = copy.deepcopy(kwargs)
cmd_kwargs.update(
{
"cwd": cwd,
"root": root,
"runas": runas,
"use_vt": use_vt,
"shell": shell or __grains__["shell"],
"env": env,
"prepend_path": prepend_path,
"umask": umask,
"output_loglevel": output_loglevel,
"hide_output": hide_output,
"success_retcodes": success_retcodes,
"success_stdout": success_stdout,
"success_stderr": success_stderr,
}
)
if __opts__["test"] and not test_name:
ret["result"] = None
ret["comment"] = 'Command "{}" would have been executed'.format(name)
return _reinterpreted_state(ret) if stateful else ret
if cwd and not os.path.isdir(cwd):
ret["comment"] = 'Desired working directory "{}" is not available'.format(cwd)
return ret
# Wow, we passed the test, run this sucker!
try:
run_cmd = "cmd.run_all" if not root else "cmd.run_chroot"
cmd_all = __salt__[run_cmd](
cmd=name, timeout=timeout, python_shell=True, **cmd_kwargs
)
except Exception as err: # pylint: disable=broad-except
ret["comment"] = str(err)
return ret
ret["changes"] = cmd_all
ret["result"] = not bool(cmd_all["retcode"])
ret["comment"] = 'Command "{}" run'.format(name)
# Ignore timeout errors if asked (for nohups) and treat cmd as a success
if ignore_timeout:
trigger = "Timed out after"
if ret["changes"].get("retcode") == 1 and trigger in ret["changes"].get(
"stdout"
):
ret["changes"]["retcode"] = 0
ret["result"] = True
if stateful:
ret = _reinterpreted_state(ret)
if __opts__["test"] and cmd_all["retcode"] == 0 and ret["changes"]:
ret["result"] = None
return ret
def script(
name,
source=None,
template=None,
cwd=None,
runas=None,
password=None,
shell=None,
env=None,
stateful=False,
umask=None,
timeout=None,
use_vt=False,
output_loglevel="debug",
hide_output=False,
defaults=None,
context=None,
success_retcodes=None,
success_stdout=None,
success_stderr=None,
**kwargs
):
"""
Download a script and execute it with specified arguments.
source
The location of the script to download. If the file is located on the
master in the directory named spam, and is called eggs, the source
string is salt://spam/eggs
template
If this setting is applied then the named templating engine will be
used to render the downloaded file. Currently jinja, mako, and wempy
are supported
name
Either "cmd arg1 arg2 arg3..." (cmd is not used) or a source
"salt://...".
cwd
The current working directory to execute the command in, defaults to
/root
runas
Specify an alternate user to run the command. The default
behavior is to run as the user under which Salt is running. If running
on a Windows minion you must also use the ``password`` argument, and
the target user account must be in the Administrators group.
.. note::
For Windows users, specifically Server users, it may be necessary
to specify your runas user using the User Logon Name instead of the
legacy logon name. Traditionally, logons would be in the following
format.
``Domain/user``
In the event this causes issues when executing scripts, use the UPN
format which looks like the following.
``user@domain.local``
More information <https://github.com/saltstack/salt/issues/55080>
password
.. versionadded:: 3000
Windows only. Required when specifying ``runas``. This
parameter will be ignored on non-Windows platforms.
shell
The shell to use for execution. The default is set in grains['shell']
env
A list of environment variables to be set prior to execution.
Example:
.. code-block:: yaml
salt://scripts/foo.sh:
cmd.script:
- env:
- BATCH: 'yes'
.. warning::
The above illustrates a common PyYAML pitfall, that **yes**,
**no**, **on**, **off**, **true**, and **false** are all loaded as
boolean ``True`` and ``False`` values, and must be enclosed in
quotes to be used as strings. More info on this (and other) PyYAML
idiosyncrasies can be found :ref:`here <yaml-idiosyncrasies>`.
Variables as values are not evaluated. So $PATH in the following
example is a literal '$PATH':
.. code-block:: yaml
salt://scripts/bar.sh:
cmd.script:
- env: "PATH=/some/path:$PATH"
One can still use the existing $PATH by using a bit of Jinja:
.. code-block:: jinja
{% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}
mycommand:
cmd.run:
- name: ls -l /
- env:
- PATH: {{ [current_path, '/my/special/bin']|join(':') }}
.. note::
When using environment variables on Windows, case-sensitivity
matters: Windows uses `Path`, as opposed to the `PATH` used on other
systems.
saltenv : ``base``
The Salt environment to use
umask
The umask (in octal) to use when running the command.
stateful
The command being executed is expected to return data about executing
a state. For more information, see the :ref:`stateful-argument` section.
timeout
If the command has not terminated after timeout seconds, send the
subprocess sigterm, and if sigterm is ignored, follow up with sigkill
args
String of command line args to pass to the script. Only used if no
args are specified as part of the `name` argument. To pass a string
containing spaces in YAML, you will need to doubly-quote it: "arg1
'arg two' arg3"
creates
Only run if the file specified by ``creates`` does not exist. If you
specify a list of files then this state will only run if **any** of
the files do not exist.
.. versionadded:: 2014.7.0
use_vt
Use VT utils (saltstack) to stream the command output more
interactively to the console and the logs.
This is experimental.
context
.. versionadded:: 2016.3.0
Overrides default context variables passed to the template.
defaults
.. versionadded:: 2016.3.0
Default context passed to the template.
output_loglevel : debug
Control the loglevel at which the output from the command is logged to
the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
hide_output : False
Suppress stdout and stderr in the state's results.
.. note::
This is separate from ``output_loglevel``, which only handles how
Salt logs to the minion log.
.. versionadded:: 2018.3.0
success_retcodes: This parameter will allow a list of
non-zero return codes that should be considered a success. If the
return code returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 2019.2.0
success_stdout: This parameter will allow a list of
strings that when found in standard out should be considered a success.
If stdout returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 3004
success_stderr: This parameter will allow a list of
strings that when found in standard error should be considered a success.
If stderr returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 3004
"""
test_name = None
if not isinstance(stateful, list):
stateful = stateful is True
elif isinstance(stateful, list) and "test_name" in stateful[0]:
test_name = stateful[0]["test_name"]
if __opts__["test"] and test_name:
name = test_name
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
# Need the check for None here, if env is not provided then it falls back
# to None and it is assumed that the environment is not being overridden.
if env is not None and not isinstance(env, (list, dict)):
ret["comment"] = "Invalidly-formatted 'env' parameter. See documentation."
return ret
if context and not isinstance(context, dict):
ret[
"comment"
] = "Invalidly-formatted 'context' parameter. Must be formed as a dict."
return ret
if defaults and not isinstance(defaults, dict):
ret[
"comment"
] = "Invalidly-formatted 'defaults' parameter. Must be formed as a dict."
return ret
if runas and salt.utils.platform.is_windows() and not password:
ret["comment"] = "Must supply a password if runas argument is used on Windows."
return ret
tmpctx = defaults if defaults else {}
if context:
tmpctx.update(context)
cmd_kwargs = copy.deepcopy(kwargs)
cmd_kwargs.update(
{
"runas": runas,
"password": password,
"shell": shell or __grains__["shell"],
"env": env,
"cwd": cwd,
"template": template,
"umask": umask,
"timeout": timeout,
"output_loglevel": output_loglevel,
"hide_output": hide_output,
"use_vt": use_vt,
"context": tmpctx,
"saltenv": __env__,
"success_retcodes": success_retcodes,
"success_stdout": success_stdout,
"success_stderr": success_stderr,
}
)
run_check_cmd_kwargs = {
"cwd": cwd,
"runas": runas,
"shell": shell or __grains__["shell"],
}
# Change the source to be the name arg if it is not specified
if source is None:
source = name
# If script args present split from name and define args
if not cmd_kwargs.get("args", None) and len(name.split()) > 1:
cmd_kwargs.update({"args": name.split(" ", 1)[1]})
if __opts__["test"] and not test_name:
ret["result"] = None
ret["comment"] = "Command '{}' would have been executed".format(name)
return _reinterpreted_state(ret) if stateful else ret
if cwd and not os.path.isdir(cwd):
ret["comment"] = 'Desired working directory "{}" is not available'.format(cwd)
return ret
# Wow, we passed the test, run this sucker!
try:
cmd_all = __salt__["cmd.script"](source, python_shell=True, **cmd_kwargs)
except (CommandExecutionError, SaltRenderError, OSError) as err:
ret["comment"] = str(err)
return ret
ret["changes"] = cmd_all
if kwargs.get("retcode", False):
ret["result"] = not bool(cmd_all)
else:
ret["result"] = not bool(cmd_all["retcode"])
if ret.get("changes", {}).get("cache_error"):
ret["comment"] = "Unable to cache script {} from saltenv '{}'".format(
source, __env__
)
else:
ret["comment"] = "Command '{}' run".format(name)
if stateful:
ret = _reinterpreted_state(ret)
if __opts__["test"] and cmd_all["retcode"] == 0 and ret["changes"]:
ret["result"] = None
return ret
def call(
name,
func,
args=(),
kws=None,
output_loglevel="debug",
hide_output=False,
use_vt=False,
**kwargs
):
"""
Invoke a pre-defined Python function with arguments specified in the state
declaration. This function is mainly used by the
:mod:`salt.renderers.pydsl` renderer.
In addition, the ``stateful`` argument has no effects here.
The return value of the invoked function will be interpreted as follows.
If it's a dictionary then it will be passed through to the state system,
which expects it to have the usual structure returned by any salt state
function.
Otherwise, the return value (denoted as ``result`` in the code below) is
expected to be a JSON serializable object, and this dictionary is returned:
.. code-block:: python
{
'name': name
'changes': {'retval': result},
'result': True if result is None else bool(result),
'comment': result if isinstance(result, str) else ''
}
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
cmd_kwargs = {
"cwd": kwargs.get("cwd"),
"runas": kwargs.get("user"),
"shell": kwargs.get("shell") or __grains__["shell"],
"env": kwargs.get("env"),
"use_vt": use_vt,
"output_loglevel": output_loglevel,
"hide_output": hide_output,
"umask": kwargs.get("umask"),
}
if not kws:
kws = {}
result = func(*args, **kws)
if isinstance(result, dict):
ret.update(result)
return ret
else:
# result must be JSON serializable else we get an error
ret["changes"] = {"retval": result}
ret["result"] = True if result is None else bool(result)
if isinstance(result, str):
ret["comment"] = result
return ret
def wait_call(
name,
func,
args=(),
kws=None,
stateful=False,
use_vt=False,
output_loglevel="debug",
hide_output=False,
**kwargs
):
# Ignoring our arguments is intentional.
return {"name": name, "changes": {}, "result": True, "comment": ""}
def mod_watch(name, **kwargs):
"""
Execute a cmd function based on a watch call
.. note::
This state exists to support special handling of the ``watch``
:ref:`requisite <requisites>`. It should not be called directly.
Parameters for this function should be set by the state being triggered.
"""
if kwargs["sfun"] in ("wait", "run", "watch"):
if kwargs.get("stateful"):
kwargs.pop("stateful")
return _reinterpreted_state(run(name, **kwargs))
return run(name, **kwargs)
elif kwargs["sfun"] == "wait_script" or kwargs["sfun"] == "script":
if kwargs.get("stateful"):
kwargs.pop("stateful")
return _reinterpreted_state(script(name, **kwargs))
return script(name, **kwargs)
elif kwargs["sfun"] == "wait_call" or kwargs["sfun"] == "call":
if kwargs.get("func"):
func = kwargs.pop("func")
return call(name, func, **kwargs)
else:
return {
"name": name,
"changes": {},
"comment": "cmd.{0[sfun]} needs a named parameter func".format(kwargs),
"result": False,
}
return {
"name": name,
"changes": {},
"comment": (
"cmd.{0[sfun]} does not work with the watch requisite, "
"please use cmd.wait or cmd.wait_script".format(kwargs)
),
"result": False,
}
avg_line_length: 31.863671 | max_line_length: 114 | alphanum_fraction: 0.609915
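The "stateful" protocol described in the module docstring above (stdout is either a JSON object or ends with a line of space-delimited key=value pairs) can be illustrated with a small stand-alone sketch. It mirrors the parsing done in `_reinterpreted_state` using only the standard library; it is not Salt's own implementation:

```python
import json
import shlex

def parse_stateful_stdout(out: str) -> dict:
    """Parse command stdout following the protocol described in the docstring above."""
    if not out.strip():
        return {"changed": False}
    try:
        data = json.loads(out)
    except ValueError:
        data = None
    if not isinstance(data, dict):
        # Fall back to the key=value form: only the last non-empty line counts.
        last_line = out.rstrip().splitlines()[-1]
        data = dict(item.split("=", 1) for item in shlex.split(last_line))
    data["changed"] = str(data.get("changed", "no")).lower() in ("true", "yes", "1")
    return data

print(parse_stateful_stdout("Working hard...\n\nchanged=yes comment='something has changed' whatever=123"))
# {'changed': True, 'comment': 'something has changed', 'whatever': '123'}
```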
hexsha: e46e0f5e89f9fe4c458a73c5902fba2d58bfc60b | size: 579 | ext: py | lang: Python
repo: jugmac00/workout-bo @ b334187c5b1c7821043b3f2dbfb5f5d74477ffc5 | path: src/tests/test_main.py | licenses: ["BSD-3-Clause"]
max_stars_count: 1 (2020-12-23T18:26:52.000Z to 2020-12-23T18:26:52.000Z)
max_issues_count: 13 (2020-05-01T19:56:49.000Z to 2021-05-17T07:21:38.000Z)
max_forks_count: 1 (2020-05-01T19:03:28.000Z to 2020-05-01T19:03:28.000Z)
content:
from workout_bot.main import extract_dailydare_url
from workout_bot.main import extract_wod_url
# Please note that the following tests are just smoke tests in order
# to start testing.
# Yes, they rely on a website which is not under my control.
# Yes, I know, this is not good.
# This won't be permanent.
def test_extract_dailydare_url():
URL = extract_dailydare_url()
assert URL.startswith("https://www.darebee.com/images/promo/dares/")
def test_extract_wod_url():
URL = extract_wod_url()
assert URL.startswith("https://www.darebee.com/images/workouts/")
avg_line_length: 30.473684 | max_line_length: 72 | alphanum_fraction: 0.756477
hexsha: da546e465c19ba2f4f61b28817638cd8915dd5b7 | size: 2,347 | ext: py | lang: Python
repo: SafEight/durkabot @ 358c66927e95005c03da3d47080f711666ff95c7 | path: src/extensions/booksearch/handlers/shamela_handler.py | licenses: ["MIT"]
max_stars_count: 3 (2018-07-09T21:01:17.000Z to 2021-10-17T18:53:47.000Z)
max_issues_count: 19 (2018-06-20T05:53:46.000Z to 2019-04-26T22:36:25.000Z)
max_forks_count: 2 (2019-02-24T20:16:43.000Z to 2020-09-28T02:09:14.000Z)
content:
from urllib.parse import quote_plus, urljoin
import aiohttp
from bs4 import BeautifulSoup, Tag
from extensions.booksearch.bookdata import BookData
class ShamelaHandler:
domain = 'http://shamela.ws/'
search_url = domain + 'index.php/search/word/{0}'
download_url = 'http://d.shamela.ws/epubs/{0}/{1}.epub'
def __init__(self, loop):
self.session = aiohttp.ClientSession(loop=loop)
async def _fetch(self, url):
async with self.session.get(url) as r:
return BeautifulSoup(await r.text(), 'html.parser')
async def search(self, query):
url = self.search_url.format(quote_plus(query))
soup = await self._fetch(url)
# shamela is nice :]
book_elements = soup.find_all('td', attrs={'class': "regular-book"})
bookdata_instances = [
self.process_result(result)
for result in book_elements
]
return bookdata_instances, url
def process_result(self, result: Tag) -> BookData:
title_element = result.find_next('a')
book_title = title_element.string
relative_link_on_site = title_element['href']
link_on_site = urljoin(self.domain, relative_link_on_site)
# extract book id to formulate direct download link
slash_index = relative_link_on_site.rfind('/')
book_id = relative_link_on_site[slash_index + 1:]
# we also need a "fragment" of sorts
# we drop 2 digits from the end of the id
# what we have left, which may be 3 or less digits,
# we left-pad it with 0 until it is 3 digits.
book_id_fragment = book_id[:-2].rjust(3, '0')
link = self.download_url.format(book_id_fragment, book_id)
author_span = result.find_next('span', attrs={'style': "float:left;"})
author_name = author_span.find_next('a').string
return BookData(book_title, author_name, link, link_on_site)
@staticmethod
def format_result(bookdata):
title = bookdata.title
subtext = (
f"لـ {bookdata.author_name}\n"
+ (f"[---]({bookdata.link}) :الرابط المباشر\n" if bookdata.link is not None else '')
+ (f"[---]({bookdata.site_link}) :الرابط على المكتبة الشاملة\n" if bookdata.site_link is not None else '')
)
return title, subtext
avg_line_length: 33.056338 | max_line_length: 122 | alphanum_fraction: 0.637409
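A worked example (with made-up book ids) of the download-link construction in `process_result` above: the last two digits of the id are dropped and the remainder is left-padded with zeros to three characters to form the directory part of the URL.

```python
DOWNLOAD_URL = 'http://d.shamela.ws/epubs/{0}/{1}.epub'  # same template as the class above

for book_id in ('12345', '9876', '95'):
    fragment = book_id[:-2].rjust(3, '0')
    print(DOWNLOAD_URL.format(fragment, book_id))
# http://d.shamela.ws/epubs/123/12345.epub
# http://d.shamela.ws/epubs/098/9876.epub
# http://d.shamela.ws/epubs/000/95.epub
```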
hexsha: b4bbb621ce131225ac7754e79057b99d2d84808a | size: 15,974 | ext: py | lang: Python
repo: wedataintelligence/vivaldi-source @ 22a46f2c969f6a0b7ca239a05575d1ea2738768c | path: chromium/build/android/pylib/instrumentation/test_runner.py | licenses: ["BSD-3-Clause"]
max_stars_count: 27 (2016-04-27T01:02:03.000Z to 2021-12-13T08:53:19.000Z)
max_issues_count: 2 (2017-03-09T09:00:50.000Z to 2017-09-21T15:48:20.000Z)
max_forks_count: 17 (2016-04-27T02:06:39.000Z to 2019-12-18T08:07:00.000Z)
content:
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Class for running instrumentation tests on a single device."""
import collections
import logging
import os
import re
import time
from devil.android import device_errors
from pylib import constants
from pylib import flag_changer
from pylib import valgrind_tools
from pylib.base import base_test_result
from pylib.base import base_test_runner
from pylib.constants import host_paths
from pylib.instrumentation import instrumentation_test_instance
from pylib.instrumentation import json_perf_parser
from pylib.instrumentation import test_result
from pylib.local.device import local_device_instrumentation_test_run
with host_paths.SysPath(host_paths.BUILD_COMMON_PATH):
import perf_tests_results_helper # pylint: disable=import-error
_PERF_TEST_ANNOTATION = 'PerfTest'
class TestRunner(base_test_runner.BaseTestRunner):
"""Responsible for running a series of tests connected to a single device."""
_DEVICE_COVERAGE_DIR = 'chrome/test/coverage'
_HOSTMACHINE_PERF_OUTPUT_FILE = '/tmp/chrome-profile'
_DEVICE_PERF_OUTPUT_SEARCH_PREFIX = (constants.DEVICE_PERF_OUTPUT_DIR +
'/chrome-profile*')
def __init__(self, test_options, device, shard_index, test_pkg,
additional_flags=None):
"""Create a new TestRunner.
Args:
test_options: An InstrumentationOptions object.
device: Attached android device.
shard_index: Shard index.
test_pkg: A TestPackage object.
additional_flags: A list of additional flags to add to the command line.
"""
super(TestRunner, self).__init__(device, test_options.tool)
self._lighttp_port = constants.LIGHTTPD_RANDOM_PORT_FIRST + shard_index
self._logcat_monitor = None
self.coverage_device_file = None
self.coverage_dir = test_options.coverage_dir
self.coverage_host_file = None
self.options = test_options
package_info_candidates = [a for a in constants.PACKAGE_INFO.itervalues()
if a.test_package == test_pkg.GetPackageName()]
assert len(package_info_candidates) < 2, (
'Multiple packages have the same test package')
self.package_info = (package_info_candidates[0] if package_info_candidates
else None)
self.test_pkg = test_pkg
# Use the correct command line file for the package under test.
if self.package_info and self.package_info.cmdline_file:
self.flags = flag_changer.FlagChanger(
self.device, self.package_info.cmdline_file)
if additional_flags:
self.flags.AddFlags(additional_flags)
else:
self.flags = None
#override
def InstallTestPackage(self):
self.test_pkg.Install(self.device)
def _GetInstrumentationArgs(self):
ret = {}
if self.options.wait_for_debugger:
ret['debug'] = 'true'
if self.coverage_dir:
ret['coverage'] = 'true'
ret['coverageFile'] = self.coverage_device_file
return ret
def _TakeScreenshot(self, test):
"""Takes a screenshot from the device."""
screenshot_name = os.path.join(constants.SCREENSHOTS_DIR, '%s.png' % test)
logging.info('Taking screenshot named %s', screenshot_name)
self.device.TakeScreenshot(screenshot_name)
def SetUp(self):
"""Sets up the test harness and device before all tests are run."""
super(TestRunner, self).SetUp()
if not self.device.HasRoot():
logging.warning('Unable to enable java asserts for %s; run `adb root`.',
str(self.device))
else:
if self.device.SetJavaAsserts(self.options.set_asserts):
self.device.RunShellCommand('stop')
self.device.RunShellCommand('start')
self.device.WaitUntilFullyBooted()
    # Use a different default port per shard index so that multiple processes
    # do not race to launch lighttpd on the same port at the same time.
self.LaunchTestHttpServer(
os.path.join(host_paths.DIR_SOURCE_ROOT), self._lighttp_port)
if self.flags:
flags_to_add = ['--disable-fre', '--enable-test-intents']
if self.options.strict_mode and self.options.strict_mode != 'off':
flags_to_add.append('--strict-mode=' + self.options.strict_mode)
if self.options.device_flags:
with open(self.options.device_flags) as device_flags_file:
stripped_flags = (l.strip() for l in device_flags_file)
flags_to_add.extend([flag for flag in stripped_flags if flag])
self.flags.AddFlags(flags_to_add)
def TearDown(self):
"""Cleans up the test harness and saves outstanding data from test run."""
if self.flags:
self.flags.Restore()
super(TestRunner, self).TearDown()
def TestSetup(self, test, flag_modifiers):
"""Sets up the test harness for running a particular test.
Args:
test: The name of the test that will be run.
"""
self.SetupPerfMonitoringIfNeeded(test)
self._SetupIndividualTestTimeoutScale(test)
self.tool.SetupEnvironment()
if self.flags:
self.flags.PushFlags(add=flag_modifiers.add, remove=flag_modifiers.remove)
# Make sure the forwarder is still running.
self._RestartHttpServerForwarderIfNecessary()
if self.coverage_dir:
coverage_basename = '%s.ec' % test
self.coverage_device_file = '%s/%s/%s' % (
self.device.GetExternalStoragePath(),
TestRunner._DEVICE_COVERAGE_DIR, coverage_basename)
self.coverage_host_file = os.path.join(
self.coverage_dir, coverage_basename)
def _IsFreTest(self, test):
"""Determines whether a test is a first run experience test.
Args:
test: The name of the test to be checked.
Returns:
Whether the feature being tested is FirstRunExperience.
"""
annotations = self.test_pkg.GetTestAnnotations(test)
feature = annotations.get('Feature', None)
return feature and 'FirstRunExperience' in feature['value']
def _IsPerfTest(self, test):
"""Determines whether a test is a performance test.
Args:
test: The name of the test to be checked.
Returns:
Whether the test is annotated as a performance test.
"""
return _PERF_TEST_ANNOTATION in self.test_pkg.GetTestAnnotations(test)
def _GetTestCmdlineParameters(self, test):
"""Determines whether the test is parameterized to be run with different
command-line flags.
Args:
test: The name of the test to be checked.
Returns:
The list of parameters.
"""
annotations = self.test_pkg.GetTestAnnotations(test)
params = instrumentation_test_instance.ParseCommandLineFlagParameters(
annotations)
if not params:
params = [collections.namedtuple('Dummy', ['add', 'remove'])([], [])]
return params
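  # Illustrative note: each returned parameter object exposes .add and
  # .remove flag lists. With no command-line-flag annotation the default is
  # a single entry equivalent to Dummy(add=[], remove=[]), so RunTest below
  # executes the test exactly once with unmodified flags.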
def SetupPerfMonitoringIfNeeded(self, test):
"""Sets up performance monitoring if the specified test requires it.
Args:
test: The name of the test to be run.
"""
if not self._IsPerfTest(test):
return
self.device.RunShellCommand(
['rm', TestRunner._DEVICE_PERF_OUTPUT_SEARCH_PREFIX])
self._logcat_monitor = self.device.GetLogcatMonitor()
self._logcat_monitor.Start()
def TestTeardown(self, test, results):
"""Cleans up the test harness after running a particular test.
Depending on the options of this TestRunner this might handle performance
tracking. This method will only be called if the test passed.
Args:
test: The name of the test that was just run.
results: results for this test.
"""
self.tool.CleanUpEnvironment()
if self.flags:
self.flags.Restore()
if not results:
return
if results.DidRunPass():
self.TearDownPerfMonitoring(test)
if self.coverage_dir:
self.device.PullFile(
self.coverage_device_file, self.coverage_host_file)
self.device.RunShellCommand(
'rm -f %s' % self.coverage_device_file)
elif self.package_info:
apk_under_test = self.test_pkg.GetApkUnderTest()
permissions = apk_under_test.GetPermissions() if apk_under_test else None
self.device.ClearApplicationState(
self.package_info.package, permissions=permissions)
def TearDownPerfMonitoring(self, test):
"""Cleans up performance monitoring if the specified test required it.
Args:
test: The name of the test that was just run.
Raises:
Exception: if there's anything wrong with the perf data.
"""
if not self._IsPerfTest(test):
return
raw_test_name = test.split('#')[1]
# Wait and grab annotation data so we can figure out which traces to parse
regex = self._logcat_monitor.WaitFor(
re.compile(r'\*\*PERFANNOTATION\(' + raw_test_name + r'\)\:(.*)'))
    # If the test is set to run only on a specific device type (e.g. tablet
    # only or phone only) and it is being run on the wrong device, the test
    # just quits and does not do anything. The Java test harness will still
    # print the appropriate annotation for us, but adds --NORUN-- so we know
    # to ignore the results.
    # The --NORUN-- tag is managed by ChromeTabbedActivityTestBase.java.
if regex.group(1) != '--NORUN--':
# Obtain the relevant perf data. The data is dumped to a
# JSON formatted file.
json_string = self.device.ReadFile(
'/data/data/com.google.android.apps.chrome/files/PerfTestData.txt',
as_root=True)
if not json_string:
raise Exception('Perf file is empty')
if self.options.save_perf_json:
json_local_file = '/tmp/chromium-android-perf-json-' + raw_test_name
with open(json_local_file, 'w') as f:
f.write(json_string)
logging.info('Saving Perf UI JSON from test ' +
test + ' to ' + json_local_file)
raw_perf_data = regex.group(1).split(';')
for raw_perf_set in raw_perf_data:
if raw_perf_set:
perf_set = raw_perf_set.split(',')
if len(perf_set) != 3:
raise Exception('Unexpected number of tokens in perf annotation '
'string: ' + raw_perf_set)
# Process the performance data
result = json_perf_parser.GetAverageRunInfoFromJSONString(json_string,
perf_set[0])
perf_tests_results_helper.PrintPerfResult(perf_set[1], perf_set[2],
[result['average']],
result['units'])
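  # Illustrative sketch (hypothetical names): a passing perf test emits a
  # logcat line such as
  #   **PERFANNOTATION(testFoo):foo_metric,FooMeasurement,FooTrace
  # The payload may contain several ';'-separated sets; each comma-separated
  # triple becomes a perf_set above, where perf_set[0] names the entry looked
  # up in the PerfTestData.txt JSON and perf_set[1] / perf_set[2] are passed
  # to PrintPerfResult together with the averaged value and its units.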
def _SetupIndividualTestTimeoutScale(self, test):
timeout_scale = self.options.timeout_scale or 1
timeout_scale *= self._GetIndividualTestTimeoutScale(test)
valgrind_tools.SetChromeTimeoutScale(self.device, timeout_scale)
def _GetIndividualTestTimeoutScale(self, test):
"""Returns the timeout scale for the given |test|."""
annotations = self.test_pkg.GetTestAnnotations(test)
timeout_scale = 1
if 'TimeoutScale' in annotations:
try:
timeout_scale = int(annotations['TimeoutScale']['value'])
except ValueError:
logging.warning('Non-integer value of TimeoutScale ignored. (%s)',
annotations['TimeoutScale']['value'])
if self.options.wait_for_debugger:
timeout_scale *= 100
return timeout_scale
# pylint: disable=too-many-return-statements
def _GetIndividualTestTimeoutSecs(self, test):
"""Returns the timeout in seconds for the given |test|."""
annotations = self.test_pkg.GetTestAnnotations(test)
if 'Manual' in annotations:
return 10 * 60 * 60
if 'IntegrationTest' in annotations:
return 30 * 60
if 'External' in annotations:
return 10 * 60
if 'EnormousTest' in annotations:
return 10 * 60
if 'LargeTest' in annotations or _PERF_TEST_ANNOTATION in annotations:
return 5 * 60
if 'MediumTest' in annotations:
return 3 * 60
if 'SmallTest' in annotations:
return 1 * 60
logging.warn("Test size not found in annotations for test '%s', using " +
"1 minute for timeout.", test)
return 1 * 60
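  # Worked example (hypothetical annotations): a test annotated with both
  # MediumTest and TimeoutScale=2 gets a base of 3 * 60 = 180 seconds from
  # _GetIndividualTestTimeoutSecs and a scale of 2 from
  # _GetIndividualTestTimeoutScale, so RunTest below passes roughly
  # 180 * 2 = 360 seconds (times any tool-specific scale) to am instrument.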
def _RunTest(self, test, timeout):
"""Runs a single instrumentation test.
Args:
test: Test class/method.
timeout: Timeout time in seconds.
Returns:
The raw output of am instrument as a list of lines.
"""
extras = self._GetInstrumentationArgs()
extras['class'] = test
return self.device.StartInstrumentation(
'%s/%s' % (self.test_pkg.GetPackageName(), self.options.test_runner),
raw=True, extras=extras, timeout=timeout, retries=0)
# pylint: disable=no-self-use
def _GenerateTestResult(self, test, instr_result_code, instr_result_bundle,
statuses, start_ms, duration_ms):
results = instrumentation_test_instance.GenerateTestResults(
instr_result_code, instr_result_bundle, statuses, start_ms, duration_ms)
for r in results:
if r.GetName() == test:
return r
logging.error('Could not find result for test: %s', test)
return test_result.InstrumentationTestResult(
test, base_test_result.ResultType.UNKNOWN, start_ms, duration_ms)
#override
def RunTest(self, test):
results = base_test_result.TestRunResults()
timeout = (self._GetIndividualTestTimeoutSecs(test) *
self._GetIndividualTestTimeoutScale(test) *
self.tool.GetTimeoutScale())
cmdline_parameters = self._GetTestCmdlineParameters(test)
for flag_modifiers in cmdline_parameters:
start_ms = 0
duration_ms = 0
try:
if self._IsFreTest(test):
flag_modifiers.remove.append('--disable-fre')
self.TestSetup(test, flag_modifiers)
try:
self.device.GoHome()
except device_errors.CommandTimeoutError:
logging.exception('Failed to focus the launcher.')
time_ms = lambda: int(time.time() * 1000)
start_ms = time_ms()
raw_output = self._RunTest(test, timeout)
duration_ms = time_ms() - start_ms
# Parse the test output
result_code, result_bundle, statuses = (
instrumentation_test_instance.ParseAmInstrumentRawOutput(
raw_output))
result = self._GenerateTestResult(
test, result_code, result_bundle, statuses, start_ms, duration_ms)
if local_device_instrumentation_test_run.DidPackageCrashOnDevice(
self.test_pkg.GetPackageName(), self.device):
result.SetType(base_test_result.ResultType.CRASH)
except device_errors.CommandTimeoutError as e:
result = test_result.InstrumentationTestResult(
test, base_test_result.ResultType.TIMEOUT, start_ms, duration_ms,
log=str(e) or 'No information')
if self.package_info:
self.device.ForceStop(self.package_info.package)
self.device.ForceStop(self.package_info.test_package)
except device_errors.DeviceUnreachableError as e:
result = test_result.InstrumentationTestResult(
test, base_test_result.ResultType.CRASH, start_ms, duration_ms,
log=str(e) or 'No information')
if len(cmdline_parameters) > 1:
# Specify commandline flag modifications used in the test run
result_name = result.GetName()
if flag_modifiers.add:
result_name = '%s with {%s}' % (
result_name, ' '.join(flag_modifiers.add))
if flag_modifiers.remove:
result_name = '%s without {%s}' % (
result_name, ' '.join(flag_modifiers.remove))
result.SetName(result_name)
results.AddResult(result)
self.TestTeardown(test, results)
return (results, None if results.DidRunPass() else test)
| 37.585882 | 80 | 0.681295 |
5d735f7c80fe4a6b032a569d07d650d72a489220 | 453 | py | Python | fiepipelib/storage/routines/ui/adjectives.py | leith-bartrich/fiepipe | 2f48054a349059ec5919ff9402a02c03b27b5915 | ["MIT"] | null | null | null | fiepipelib/storage/routines/ui/adjectives.py | leith-bartrich/fiepipe | 2f48054a349059ec5919ff9402a02c03b27b5915 | ["MIT"] | null | null | null | fiepipelib/storage/routines/ui/adjectives.py | leith-bartrich/fiepipe | 2f48054a349059ec5919ff9402a02c03b27b5915 | ["MIT"] | null | null | null |
import typing
from fiepipelib.storage.routines.localstorage import CommonAdjectivesDict
def get_common_adjectives_choices() -> typing.Dict[str, str]:
    adjectives = CommonAdjectivesDict()
    # typing.Dict[str:str] was invalid syntax and assigned a type rather than
    # a dictionary; build the choices in a plain dict instead.
    choices: typing.Dict[str, str] = {}
    for catName in adjectives.keys():
        cat = adjectives[catName]
        for adj in cat.keys():
            desc = cat[adj]  # description; currently not used in the mapping
            choice = catName + ":" + adj
            choices[choice] = adj
    return choices
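# Minimal usage sketch (assumes CommonAdjectivesDict() yields a nested
# mapping of {category_name: {adjective: description}}); the keys of the
# returned dict are "category:adjective" strings:
#
#     choices = get_common_adjectives_choices()
#     for choice, adjective in sorted(choices.items()):
#         print(choice, "->", adjective)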
| 26.647059 | 73 | 0.653422 |
5f877d1e0b95cba8590f418561c367eb9786f75e | 64,576 | py | Python | app/window.py | 0xPrateek/ci_edit | 6abc609a8f1b5f85f0ca1af11cebc77ece80fc99 | ["Apache-2.0"] | 5 | 2019-10-09T08:01:32.000Z | 2020-01-01T18:17:21.000Z | app/window.py | 0xPrateek/ci_edit | 6abc609a8f1b5f85f0ca1af11cebc77ece80fc99 | ["Apache-2.0"] | 1 | 2019-11-01T02:16:28.000Z | 2019-11-01T02:16:28.000Z | app/window.py | 0xPrateek/ci_edit | 6abc609a8f1b5f85f0ca1af11cebc77ece80fc99 | ["Apache-2.0"] | null | null | null |
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
try:
unicode
except NameError:
unicode = str
unichr = chr
import bisect
import os
import sys
import types
import curses
# app.bookmark, app.help, app.log and app.selectable are also referenced
# later in this file, so they are imported explicitly here.
import app.bookmark
import app.config
import app.controller
import app.cu_editor
import app.em_editor
import app.help
import app.log
import app.selectable
import app.string
import app.text_buffer
import app.vi_editor
# The terminal area that curses can draw to.
mainCursesWindow = None
class ViewWindow:
"""A view window is a base window that does not get focus or have
TextBuffer.
See class ActiveWindow for a window that can get focus. See class Window for
a window that can get focus and have a TextBuffer.
"""
def __init__(self, program, parent):
"""
Args:
parent is responsible for the order in which this window is updated,
relative to its siblings.
"""
if app.config.strict_debug:
assert issubclass(self.__class__, ViewWindow), self
assert issubclass(program.__class__, app.ci_program.CiProgram), self
if parent is not None:
assert issubclass(parent.__class__, ViewWindow), parent
self.program = program
self.parent = parent
self.isFocusable = False
self.top = 0
self.left = 0
self.rows = 1
self.cols = 1
self.scrollRow = 0
self.scrollCol = 0
self.showCursor = True
self.writeLineRow = 0
self.zOrder = []
def addStr(self, row, col, text, colorPair):
"""Overwrite text at row, column with text.
The caller is responsible for avoiding overdraw.
"""
if app.config.strict_debug:
app.log.check_le(row, self.rows)
app.log.check_le(col, self.cols)
self.program.backgroundFrame.addStr(self.top + row, self.left + col,
text.encode('utf-8'), colorPair)
def reattach(self):
self.setParent(self.parent)
def blank(self, colorPair):
"""Clear the window."""
for i in range(self.rows):
self.addStr(i, 0, ' ' * self.cols, colorPair)
def bringChildToFront(self, child):
"""Bring it to the top layer."""
try:
self.zOrder.remove(child)
except ValueError:
pass
self.zOrder.append(child)
def bringToFront(self):
"""Bring it to the top layer."""
self.parent.bringChildToFront(self)
def changeFocusTo(self, changeTo):
if app.config.strict_debug:
assert issubclass(self.__class__, ViewWindow), self
assert issubclass(changeTo.__class__, ViewWindow), changeTo
topWindow = self
while topWindow.parent:
topWindow = topWindow.parent
topWindow.changeFocusTo(changeTo)
def colorPref(self, colorType, delta=0):
return self.program.color.get(colorType, delta)
def contains(self, row, col):
"""Determine whether the position at row, col lay within this window."""
for i in self.zOrder:
if i.contains(row, col):
return i
return (self.top <= row < self.top + self.rows and
self.left <= col < self.left + self.cols and self)
def debugDraw(self):
programWindow = self
while programWindow.parent is not None:
programWindow = programWindow.parent
programWindow.debugDraw(self)
def deselect(self):
pass
def detach(self):
"""Hide the window by removing self from parents' children, but keep
same parent to be reattached later."""
try:
self.parent.zOrder.remove(self)
except ValueError:
pass
def layoutHorizontally(self, children, separation=0):
left = self.left
cols = self.cols
for view in children:
preferredCols = view.preferredSize(self.rows, max(0, cols))[1]
view.reshape(self.top, left, self.rows,
max(0, min(cols, preferredCols)))
delta = view.cols + separation
left += delta
cols -= delta
def layoutVertically(self, children, separation=0):
top = self.top
rows = self.rows
for view in children:
preferredRows = view.preferredSize(max(0, rows), self.cols)[0]
view.reshape(top, self.left, max(0, min(rows, preferredRows)),
self.cols)
delta = view.rows + separation
top += delta
rows -= delta
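    # Illustrative sketch (hypothetical sizes): given three children whose
    # preferred widths are 10, 20 and 10, layoutHorizontally() with
    # separation=1 reshapes them at column offsets left+0, left+11 and
    # left+32, shrinking a child when the remaining columns run out;
    # layoutVertically() does the same along rows.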
def mouseClick(self, paneRow, paneCol, shift, ctrl, alt):
pass
def mouseDoubleClick(self, paneRow, paneCol, shift, ctrl, alt):
pass
def mouseMoved(self, paneRow, paneCol, shift, ctrl, alt):
pass
def mouseRelease(self, paneRow, paneCol, shift, ctrl, alt):
pass
def mouseTripleClick(self, paneRow, paneCol, shift, ctrl, alt):
pass
def mouseWheelDown(self, shift, ctrl, alt):
pass
def mouseWheelUp(self, shift, ctrl, alt):
pass
def moveTo(self, top, left):
self.top = top
self.left = left
def moveBy(self, top, left):
self.top += top
self.left += left
def _childFocusableWindow(self, reverse=False):
windows = self.zOrder[:]
if reverse:
windows.reverse()
for i in windows:
if i.isFocusable:
return i
else:
r = i._childFocusableWindow(reverse)
if r is not None:
return r
def nextFocusableWindow(self, start, reverse=False):
"""Windows without |isFocusable| are skipped. Ignore (skip) |start| when
searching.
Args:
start (window): the child window to start from. If |start| is not
found, start from the first child window.
reverse (bool): if True, find the prior focusable window.
Returns:
A window that should be focused.
See also: showFullWindowHierarchy() which can help in debugging.
"""
windows = self.parent.zOrder[:]
if reverse:
windows.reverse()
try:
found = windows.index(start)
except ValueError:
found = -1
windows = windows[found + 1:]
for i in windows:
if i.isFocusable:
return i
else:
r = i._childFocusableWindow(reverse)
if r is not None:
return r
r = self.parent.nextFocusableWindow(self.parent, reverse)
if r is not None:
return r
return self._childFocusableWindow(reverse)
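    # Illustrative sketch (hypothetical layout): with siblings ordered
    # [A, B(focusable), C(focusable)], B.nextFocusableWindow(B) skips past B
    # in the parent's zOrder and returns C. With reverse=True the reversed
    # sibling list is searched instead; if no sibling (or sibling child)
    # qualifies, the search continues at the parent level before falling
    # back to B's own focusable children.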
def normalize(self):
self.parent.normalize()
def onPrefChanged(self, category, name):
self.parent.onPrefChanged(category, name)
def paint(self, row, col, count, colorPair):
"""Paint text a row, column with colorPair.
fyi, I thought this may be faster than using addStr to paint over the
text with a different colorPair. It looks like there isn't a significant
performance difference between chgat and addstr.
"""
mainCursesWindow.chgat(self.top + row, self.left + col, count,
colorPair)
def preferredSize(self, rowLimit, colLimit):
# Derived classes should override this.
return rowLimit, colLimit
def presentModal(self, changeTo, paneRow, paneCol):
self.parent.presentModal(changeTo, paneRow, paneCol)
def priorFocusableWindow(self, start):
return self.nextFocusableWindow(start, True)
def quitNow(self):
self.program.quitNow()
def render(self):
"""Redraw window."""
for child in self.zOrder:
child.render()
def showWindowHierarchy(self, indent=' '):
"""For debugging."""
focus = u'[f]' if self.isFocusable else u'[ ]'
extra = u''
if hasattr(self, 'label'):
extra += u' "' + self.label + u'"'
app.log.info("%s%s%s%s" % (indent, focus, self, extra))
for child in self.zOrder:
child.showWindowHierarchy(indent + u' ')
def showFullWindowHierarchy(self, indent=u' '):
"""For debugging."""
f = self
while f.parent is not None:
f = f.parent
assert f
f.showWindowHierarchy()
def doPreCommand(self):
pass
def longTimeSlice(self):
"""returns whether work is finished (no need to call again)."""
return True
def shortTimeSlice(self):
"""returns whether work is finished (no need to call again)."""
return True
def reshape(self, top, left, rows, cols):
self.moveTo(top, left)
self.resizeTo(rows, cols)
#app.log.debug(self, top, left, rows, cols)
def resizeBottomBy(self, rows):
self.rows += rows
def resizeBy(self, rows, cols):
self.rows += rows
self.cols += cols
def resizeTo(self, rows, cols):
#app.log.detail(rows, cols, self)
if app.config.strict_debug:
assert rows >= 0, rows
assert cols >= 0, cols
self.rows = rows
self.cols = cols
def resizeTopBy(self, rows):
self.top += rows
self.rows -= rows
def setParent(self, parent, layerIndex=sys.maxsize):
"""Setting the parent will cause the the window to refresh (i.e. if self
was hidden with detach() it will no longer be hidden)."""
if app.config.strict_debug:
assert issubclass(self.__class__, ViewWindow), self
assert issubclass(parent.__class__, ViewWindow), parent
if self.parent:
try:
self.parent.zOrder.remove(self)
except ValueError:
pass
self.parent = parent
if parent:
self.parent.zOrder.insert(layerIndex, self)
def writeLine(self, text, color):
"""Simple line writer for static windows."""
if app.config.strict_debug:
assert isinstance(text, unicode)
text = text[:self.cols]
text = text + u' ' * max(0, self.cols - len(text))
self.program.backgroundFrame.addStr(self.top + self.writeLineRow,
self.left, text.encode(u'utf-8'),
color)
self.writeLineRow += 1
def getProgram(self):
return self.program
class ActiveWindow(ViewWindow):
"""An ActiveWindow may have focus and a controller."""
def __init__(self, program, parent):
if app.config.strict_debug:
assert issubclass(self.__class__, ActiveWindow), self
assert issubclass(program.__class__,
app.ci_program.CiProgram), repr(program)
if parent is not None:
assert issubclass(parent.__class__, ViewWindow), parent
ViewWindow.__init__(self, program, parent)
self.controller = None
self.hasFocus = False
self.isFocusable = True
def focus(self):
"""
Note: to focus a view it must have a controller. Focusing a view without
a controller would make the program appear to freeze since nothing
would be responding to user input.
"""
self.hasFocus = True
self.controller.focus()
def setController(self, controller):
if app.config.strict_debug:
assert issubclass(self.__class__, Window), self
self.controller = controller(self)
def unfocus(self):
self.hasFocus = False
self.controller.unfocus()
class Window(ActiveWindow):
"""A Window holds a TextBuffer and a controller that operates on the
TextBuffer."""
def __init__(self, program, parent):
if app.config.strict_debug:
assert issubclass(self.__class__, Window), self
assert issubclass(program.__class__, app.ci_program.CiProgram), self
assert issubclass(parent.__class__, ViewWindow), parent
ActiveWindow.__init__(self, program, parent)
self.hasCaptiveCursor = self.program.prefs.editor['captiveCursor']
self.textBuffer = None
def mouseClick(self, paneRow, paneCol, shift, ctrl, alt):
if self.textBuffer:
self.textBuffer.mouseClick(paneRow, paneCol, shift, ctrl, alt)
def mouseDoubleClick(self, paneRow, paneCol, shift, ctrl, alt):
if self.textBuffer:
self.textBuffer.mouseDoubleClick(paneRow, paneCol, shift, ctrl, alt)
def mouseMoved(self, paneRow, paneCol, shift, ctrl, alt):
if self.textBuffer:
self.textBuffer.mouseMoved(paneRow, paneCol, shift, ctrl, alt)
def mouseRelease(self, paneRow, paneCol, shift, ctrl, alt):
if self.textBuffer:
self.textBuffer.mouseRelease(paneRow, paneCol, shift, ctrl, alt)
def mouseTripleClick(self, paneRow, paneCol, shift, ctrl, alt):
if self.textBuffer:
self.textBuffer.mouseTripleClick(paneRow, paneCol, shift, ctrl, alt)
def mouseWheelDown(self, shift, ctrl, alt):
if self.textBuffer:
self.textBuffer.mouseWheelDown(shift, ctrl, alt)
def mouseWheelUp(self, shift, ctrl, alt):
if self.textBuffer:
self.textBuffer.mouseWheelUp(shift, ctrl, alt)
def preferredSize(self, rowLimit, colLimit):
return min(rowLimit, self.textBuffer.parser.rowCount()), colLimit
def render(self):
if self.textBuffer:
self.textBuffer.draw(self)
ViewWindow.render(self)
def setController(self, controller):
ActiveWindow.setController(self, controller)
self.controller.setTextBuffer(self.textBuffer)
def setTextBuffer(self, textBuffer):
textBuffer.setView(self)
self.textBuffer = textBuffer
def doPreCommand(self):
if self.textBuffer is not None:
self.textBuffer.setMessage()
def longTimeSlice(self):
"""returns whether work is finished (no need to call again)."""
finished = True
tb = self.textBuffer
if tb is not None and tb.parser.resumeAtRow < tb.parser.rowCount():
tb.parseDocument()
# If a user event came in while parsing, the parsing will be paused
# (to be resumed after handling the event).
finished = tb.parser.resumeAtRow >= tb.parser.rowCount()
for child in self.zOrder:
finished = finished and child.longTimeSlice()
return finished
def shortTimeSlice(self):
"""returns whether work is finished (no need to call again)."""
tb = self.textBuffer
if tb is not None:
tb.parseScreenMaybe()
return tb.parser.resumeAtRow >= tb.parser.rowCount()
return True
class LabelWindow(ViewWindow):
"""A text label.
The label is inert, it will pass events to its parent.
"""
def __init__(self,
program,
parent,
label,
preferredWidth=None,
align=u'left'):
if app.config.strict_debug:
assert issubclass(program.__class__, app.ci_program.CiProgram), self
assert issubclass(parent.__class__, ViewWindow), parent
assert isinstance(label, unicode)
assert preferredWidth is None or isinstance(preferredWidth, int)
assert isinstance(align, unicode)
ViewWindow.__init__(self, program, parent)
self.label = label
self.preferredWidth = preferredWidth
self.align = -1 if align == u'left' else 1
self.color = self.program.color.get(u'keyword')
def preferredSize(self, rowLimit, colLimit):
if app.config.strict_debug:
assert self.parent
assert rowLimit >= 0
assert colLimit >= 0
preferredWidth = (self.preferredWidth if self.preferredWidth is not None
else len(self.label))
return (min(rowLimit, 1), min(colLimit, preferredWidth))
def render(self):
if self.rows <= 0:
return
line = self.label[:self.cols]
line = u"%*s" % (self.cols * self.align, line)
self.addStr(0, 0, line, self.color)
ViewWindow.render(self)
class LabeledLine(Window):
"""A single line with a label.
This is akin to a line prompt or gui modal dialog. It's used for things like
'find' and 'goto line'.
"""
def __init__(self, program, parent, label):
if app.config.strict_debug:
assert issubclass(self.__class__, LabeledLine), self
assert issubclass(program.__class__, app.ci_program.CiProgram), self
assert issubclass(parent.__class__, ViewWindow), parent
Window.__init__(self, program, parent)
self.host = parent
tb = app.text_buffer.TextBuffer(self.program)
tb.rootGrammar = self.program.prefs.grammars[u'none']
self.setTextBuffer(tb)
self.label = label
self.leftColumn = ViewWindow(self.program, self)
# TODO(dschuyler) Add self.rightColumn.
def focus(self):
self.bringToFront()
if not self.controller:
app.log.info(self, repr(self.label))
Window.focus(self)
def preferredSize(self, rowLimit, colLimit):
return min(rowLimit, 1), colLimit
def render(self):
#app.log.info('LabeledLine', self.label, self.rows, self.cols)
if self.rows <= 0:
return
self.leftColumn.addStr(0, 0, self.label,
self.program.color.get(u'keyword'))
Window.render(self)
def reshape(self, top, left, rows, cols):
labelWidth = len(self.label)
Window.reshape(self, top, left + labelWidth, rows,
max(0, cols - labelWidth))
self.leftColumn.reshape(top, left, rows, labelWidth)
def setLabel(self, label):
self.label = label
self.reshape(self.top, self.left, self.rows, self.cols)
class Menu(ViewWindow):
"""Work in progress on a context menu."""
def __init__(self, program, host):
if app.config.strict_debug:
assert issubclass(self.__class__, Menu), self
assert issubclass(host.__class__, ActiveWindow)
ViewWindow.__init__(self, program, host)
self.host = host
self.label = u''
self.lines = []
self.commands = []
def addItem(self, label, command):
self.lines.append(label)
self.commands.append(command)
def clear(self):
self.lines = []
self.commands = []
def moveSizeToFit(self, left, top):
self.clear()
self.addItem(u'some menu', None)
#self.addItem('sort', self.host.textBuffer.sortSelection)
self.addItem(u'cut', self.host.textBuffer.editCut)
self.addItem(u'paste', self.host.textBuffer.editPaste)
longest = 0
for i in self.lines:
if len(i) > longest:
longest = len(i)
self.reshape(left, top, len(self.lines), longest + 2)
def render(self):
color = self.program.color.get(u'context_menu')
self.writeLineRow = 0
for i in self.lines[:self.rows]:
self.writeLine(" " + i, color)
ViewWindow.render(self)
class LineNumbers(ViewWindow):
def __init__(self, program, host):
ViewWindow.__init__(self, program, host)
self.host = host
def drawLineNumbers(self):
if app.config.strict_debug:
assert isinstance(self.rows, int)
assert isinstance(self.host.scrollRow, int)
assert self.rows >= 1
assert self.host.textBuffer.parser.rowCount() >= 1
assert self.host.scrollRow >= 0
limit = min(self.rows,
self.host.textBuffer.parser.rowCount() - self.host.scrollRow)
cursorBookmarkColorIndex = None
visibleBookmarks = self.getVisibleBookmarks(self.host.scrollRow,
self.host.scrollRow + limit)
currentBookmarkIndex = 0
colorPrefs = self.program.color
for i in range(limit):
color = colorPrefs.get(u'line_number')
currentRow = self.host.scrollRow + i
if currentBookmarkIndex < len(visibleBookmarks):
currentBookmark = visibleBookmarks[currentBookmarkIndex]
else:
currentBookmark = None
# Use a different color if the row is associated with a bookmark.
if currentBookmark:
if (currentRow >= currentBookmark.begin and
currentRow <= currentBookmark.end):
color = colorPrefs.get(
currentBookmark.data.get(u'colorIndex'))
if self.host.textBuffer.penRow == currentRow:
cursorBookmarkColorIndex = currentBookmark.data.get(
u'colorIndex')
if currentRow + 1 > currentBookmark.end:
currentBookmarkIndex += 1
self.addStr(i, 0, u' %5d ' % (currentRow + 1), color)
# Draw indicators for text off of the left edge.
if self.host.scrollCol > 0:
color = colorPrefs.get(u'line_overflow')
for i in range(limit):
if self.host.textBuffer.parser.rowWidth(
self.host.scrollRow + i) > 0:
self.addStr(i, 6, u' ', color)
# Draw blank line number rows past the end of the document.
color = colorPrefs.get(u'outside_document')
for i in range(limit, self.rows):
self.addStr(i, 0, u' ', color)
# Highlight the line numbers for the current cursor line.
cursorAt = self.host.textBuffer.penRow - self.host.scrollRow
if 0 <= cursorAt < limit:
if cursorBookmarkColorIndex:
if self.program.prefs.startup[u'numColors'] == 8:
color = colorPrefs.get(cursorBookmarkColorIndex)
else:
color = colorPrefs.get(cursorBookmarkColorIndex % 32 + 128)
else:
color = colorPrefs.get(u'line_number_current')
self.addStr(cursorAt, 1, u'%5d' % (self.host.textBuffer.penRow + 1),
color)
def getVisibleBookmarks(self, beginRow, endRow):
"""
Args:
beginRow (int): the index of the line number that you want the list of
bookmarks to start from.
endRow (int): the index of the line number that you want the list of
bookmarks to end at (exclusive).
Returns:
A list containing the bookmarks that are displayed on the screen. If
there are no bookmarks, returns an empty list.
"""
bookmarkList = self.host.textBuffer.bookmarks
beginIndex = endIndex = 0
if len(bookmarkList):
needle = app.bookmark.Bookmark(beginRow, beginRow, {})
beginIndex = bisect.bisect_left(bookmarkList, needle)
if beginIndex > 0 and bookmarkList[beginIndex - 1].end >= beginRow:
beginIndex -= 1
needle.range = (endRow, endRow)
endIndex = bisect.bisect_left(bookmarkList, needle)
return bookmarkList[beginIndex:endIndex]
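    # Illustrative sketch (hypothetical bookmarks): with bookmarks covering
    # rows (2-4) and (10-12), getVisibleBookmarks(3, 9) returns only the
    # first one: bisect_left finds the insertion point for row 3, the
    # preceding bookmark is kept because its end (4) is >= beginRow, and the
    # second bookmark is excluded because it starts at or after endRow.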
def mouseClick(self, paneRow, paneCol, shift, ctrl, alt):
if ctrl:
app.log.info(u'click at', paneRow, paneCol)
return
self.host.changeFocusTo(self.host)
tb = self.host.textBuffer
if self.host.scrollRow + paneRow >= tb.parser.rowCount():
tb.selectionNone()
return
if shift:
if tb.selectionMode == app.selectable.kSelectionNone:
tb.selectionLine()
self.mouseRelease(paneRow, paneCol, shift, ctrl, alt)
else:
tb.cursorMoveAndMark(
self.host.scrollRow + paneRow - tb.penRow, 0,
self.host.scrollRow + paneRow - tb.markerRow, 0,
app.selectable.kSelectionNone - tb.selectionMode)
self.mouseRelease(paneRow, paneCol, shift, ctrl, alt)
def mouseDoubleClick(self, paneRow, paneCol, shift, ctrl, alt):
self.host.textBuffer.selectionAll()
def mouseMoved(self, paneRow, paneCol, shift, ctrl, alt):
app.log.info(paneRow, paneCol, shift)
self.host.textBuffer.mouseClick(paneRow, paneCol - self.cols, True,
ctrl, alt)
def mouseRelease(self, paneRow, paneCol, shift, ctrl, alt):
app.log.info(paneRow, paneCol, shift)
tb = self.host.textBuffer
tb.selectLineAt(self.host.scrollRow + paneRow)
def mouseTripleClick(self, paneRow, paneCol, shift, ctrl, alt):
pass
def mouseWheelDown(self, shift, ctrl, alt):
self.host.mouseWheelDown(shift, ctrl, alt)
def mouseWheelUp(self, shift, ctrl, alt):
self.host.mouseWheelUp(shift, ctrl, alt)
def render(self):
self.drawLineNumbers()
class LogWindow(ViewWindow):
def __init__(self, program, parent):
ViewWindow.__init__(self, program, parent)
self.lines = app.log.getLines()
self.renderCounter = 0
def render(self):
self.renderCounter += 1
app.log.meta(u" " * 10, self.renderCounter, u"- screen render -")
self.writeLineRow = 0
colorPrefs = self.program.color
colorA = colorPrefs.get(u'default')
colorB = colorPrefs.get(u'highlight')
for i in self.lines[-self.rows:]:
color = colorA
if len(i) and i[-1] == u'-':
color = colorB
self.writeLine(i, color)
ViewWindow.render(self)
class InteractiveFind(Window):
def __init__(self, program, host):
Window.__init__(self, program, host)
self.host = host
self.expanded = False
self.setController(app.cu_editor.InteractiveFind)
indent = u' '
self.findLine = LabeledLine(self.program, self, u'Find: ')
self.findLine.setController(app.cu_editor.InteractiveFindInput)
self.findLine.setParent(self)
self.replaceLine = LabeledLine(self.program, self, u'Replace: ')
self.replaceLine.setController(app.cu_editor.InteractiveReplaceInput)
self.replaceLine.setParent(self)
self.matchOptionsRow = RowWindow(self.program, self, 2)
self.matchOptionsRow.setParent(self)
# If findUseRegex is false, re.escape the search.
OptionsToggle(self.program, self.matchOptionsRow, u'regex', u'editor',
u'findUseRegex')
# If findWholeWord, wrap with \b.
OptionsToggle(self.program, self.matchOptionsRow, u'wholeWord',
u'editor', u'findWholeWord')
# If findIgnoreCase, pass ignore case flag to regex.
OptionsToggle(self.program, self.matchOptionsRow, u'ignoreCase',
u'editor', u'findIgnoreCase')
if 0:
# Use locale.
OptionsToggle(self.program, self.matchOptionsRow, u'locale',
u'editor', u'findLocale')
# Span lines.
OptionsToggle(self.program, self.matchOptionsRow, u'multiline',
u'editor', u'findMultiline')
# Dot matches anything (even \n).
OptionsToggle(self.program, self.matchOptionsRow, u'dotAll',
u'editor', u'findDotAll')
# Unicode match.
OptionsToggle(self.program, self.matchOptionsRow, u'unicode',
u'editor', u'findUnicode')
# Replace uppercase with upper and lowercase with lower.
OptionsToggle(self.program, self.matchOptionsRow, u'smartCaps',
u'editor', u'findReplaceSmartCaps')
if 0:
self.scopeOptions, self.scopeRow = self.addSelectOptionsRow(
indent + u'scope ',
[u'file', u'directory', u'openFiles', u'project'])
(self.changeCaseOptions,
self.changeCaseRow) = self.addSelectOptionsRow(
indent + u'changeCase',
[u'none', u'smart', u'upper', u'lower'])
(self.withinOptions,
self.withinOptionsRow) = self.addSelectOptionsRow(
indent + u'within ',
[
u'any',
u'code',
u'comment',
u'error',
u'markup',
u'misspelled', # Find in misspelled words.
u'quoted', # Find in strings.
])
(self.searchSelectionOption,
self.searchSelectionRow) = self.addSelectOptionsRow(
indent + u'selection ', [u'any', u'yes', u'no'])
(self.searchChangedOption,
self.searchChangedRow) = self.addSelectOptionsRow(
indent + u'changed ', [u'any', u'yes', u'no'])
self.pathsLine = LabeledLine(self.program, self, u'Paths: ')
self.pathsLine.setController(app.cu_editor.InteractiveFindInput)
self.pathsLine.setParent(self)
def reattach(self):
Window.reattach(self)
# TODO(dschuyler): consider removing expanded control.
# See https://github.com/google/ci_edit/issues/170
self.expanded = True
self.parent.layout()
def detach(self):
Window.detach(self)
self.parent.layout()
def addSelectOptionsRow(self, label, optionsList):
"""Such as a radio group."""
optionsRow = OptionsRow(self.program, self)
optionsRow.color = self.program.color.get(u'keyword')
optionsRow.addLabel(label)
optionsDict = {}
optionsRow.beginGroup()
for key in optionsList:
optionsDict[key] = False
optionsRow.addSelection(key, optionsDict)
optionsRow.endGroup()
optionsDict[optionsList[0]] = True
optionsRow.setParent(self)
return optionsDict, optionsRow
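    # Illustrative sketch (hypothetical option names):
    # addSelectOptionsRow(u'scope ', [u'file', u'directory']) returns
    # ({u'file': True, u'directory': False}, <OptionsRow>); clicking an
    # option in the rendered row flips the dict so exactly one key is True.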
def bringChildToFront(self, child):
# The find window doesn't reorder children.
pass
def focus(self):
self.reattach()
if app.config.strict_debug:
assert self.parent
assert self.findLine.parent
assert self.rows > 0, self.rows
assert self.findLine.rows > 0, self.findLine.rows
self.controller.focus()
self.changeFocusTo(self.findLine)
def preferredSize(self, rowLimit, colLimit):
if app.config.strict_debug:
assert self.parent
assert rowLimit >= 0
assert colLimit >= 0
if self.parent and self in self.parent.zOrder and self.expanded:
return (min(rowLimit, len(self.zOrder)), colLimit)
return (1, -1)
def expandFindWindow(self, expanded):
self.expanded = expanded
self.parent.layout()
def reshape(self, top, left, rows, cols):
Window.reshape(self, top, left, rows, cols)
self.layoutVertically(self.zOrder)
def unfocus(self):
self.detach()
Window.unfocus(self)
class MessageLine(ViewWindow):
"""The message line appears at the bottom of the screen."""
def __init__(self, program, host):
ViewWindow.__init__(self, program, host)
self.host = host
self.message = None
self.renderedMessage = None
def render(self):
colorPrefs = self.program.color
if self.message:
if self.message != self.renderedMessage:
self.writeLineRow = 0
self.writeLine(self.message, colorPrefs.get(u'message_line'))
else:
self.blank(colorPrefs.get(u'message_line'))
class StatusLine(ViewWindow):
"""The status line appears at the bottom of the screen.
It shows the current line and column the cursor is on.
"""
def __init__(self, program, host):
ViewWindow.__init__(self, program, host)
self.host = host
def render(self):
tb = self.host.textBuffer
colorPrefs = self.program.color
color = colorPrefs.get(u'status_line')
if self.host.showTips:
tipRows = app.help.docs[u'tips']
if len(tipRows) + 1 < self.rows:
for i in range(self.rows):
self.addStr(i, 0, u' ' * self.cols, color)
for i, k in enumerate(tipRows):
self.addStr(i + 1, 4, k, color)
self.addStr(1, 40, u"(Press F1 to show/hide tips)",
color | curses.A_REVERSE)
statusLine = u''
if tb.message:
statusLine = tb.message[0]
color = (tb.message[1] if tb.message[1] is not None else
colorPrefs.get(u'status_line'))
if 0:
if tb.isDirty():
statusLine += u' * '
else:
statusLine += u' . '
# Percentages.
rowPercentage = 0
colPercentage = 0
lineCount = tb.parser.rowCount()
if lineCount:
rowPercentage = self.host.textBuffer.penRow * 100 // lineCount
charCount = tb.parser.rowWidth(self.host.textBuffer.penRow)
if charCount and self.host.textBuffer.penCol != 0:
colPercentage = self.host.textBuffer.penCol * 100 // charCount
# Format.
rightSide = u''
if len(statusLine):
rightSide += u' |'
if self.program.prefs.startup.get('showLogWindow'):
rightSide += u' %s | %s |' % (tb.cursorGrammarName(),
tb.selectionModeName())
rightSide += u' %4d,%2d | %3d%%,%3d%%' % (
self.host.textBuffer.penRow + 1, self.host.textBuffer.penCol + 1,
rowPercentage, colPercentage)
statusLine += \
u' ' * (self.cols - len(statusLine) - len(rightSide)) + rightSide
self.addStr(self.rows - 1, 0, statusLine[:self.cols], color)
class TopInfo(ViewWindow):
def __init__(self, program, host):
ViewWindow.__init__(self, program, host)
self.host = host
self.borrowedRows = 0
self.lines = []
self.mode = 2
def onChange(self):
if self.mode == 0:
return
tb = self.host.textBuffer
lines = []
# TODO: Make dynamic topInfo work properly
if tb.parser.rowCount():
lineCursor = self.host.scrollRow
line = ""
# Check for extremely small window.
if tb.parser.rowCount() > lineCursor:
while len(line) == 0 and lineCursor > 0:
line = tb.parser.rowText(lineCursor)
lineCursor -= 1
if len(line):
indent = len(line) - len(line.lstrip(u' '))
lineCursor += 1
while lineCursor < tb.parser.rowCount():
line = tb.parser.rowText(lineCursor)
if not len(line):
continue
z = len(line) - len(line.lstrip(u' '))
if z > indent:
indent = z
lineCursor += 1
else:
break
while indent and lineCursor > 0:
line = tb.parser.rowText(lineCursor)
if len(line):
z = len(line) - len(line.lstrip(u' '))
if z < indent:
indent = z
lines.append(line)
lineCursor -= 1
pathLine = app.string.pathEncode(self.host.textBuffer.fullPath)
if 1:
if tb.isReadOnly:
pathLine += u' [RO]'
if 1:
if tb.isDirty():
pathLine += u' * '
else:
pathLine += u' . '
lines.append(pathLine[-self.cols:])
self.lines = lines
infoRows = len(self.lines)
if self.mode > 0:
infoRows = self.mode
if self.borrowedRows != infoRows:
self.host.topRows = infoRows
self.host.layout()
self.borrowedRows = infoRows
def render(self):
"""Render the context information at the top of the window."""
lines = self.lines[-self.mode:]
lines.reverse()
color = self.program.color.get('top_info')
for i, line in enumerate(lines):
self.addStr(i, 0,
(line + u' ' * (self.cols - len(line)))[:self.cols],
color)
for i in range(len(lines), self.rows):
self.addStr(i, 0, u' ' * self.cols, color)
def reshape(self, top, left, rows, cols):
self.borrowedRows = 0
ViewWindow.reshape(self, top, left, rows, cols)
class InputWindow(Window):
"""This is the main content window.
Often the largest pane displayed.
"""
def __init__(self, program, host):
if app.config.strict_debug:
assert host
Window.__init__(self, program, host)
self.host = host
self.showFooter = True
self.savedScrollPositions = {}
self.showLineNumbers = self.program.prefs.editor.get(
'showLineNumbers', True)
self.showMessageLine = True
self.showRightColumn = True
self.showTopInfo = True
self.statusLineCount = 0 if self.program.prefs.status.get(
'seenTips') else 8
self.topRows = 2 # Number of lines in default TopInfo status.
self.controller = app.controller.MainController(self)
self.controller.add(app.em_editor.EmacsEdit(self))
self.controller.add(app.vi_editor.ViEdit(self))
self.controller.add(app.cu_editor.CuaPlusEdit(self))
# What does the user appear to want: edit, quit, or something else?
self.userIntent = 'edit'
if 1:
self.confirmClose = LabeledLine(
self.program, self, "Save changes? (yes, no, or cancel): ")
self.confirmClose.setController(app.cu_editor.ConfirmClose)
if 1:
self.confirmOverwrite = LabeledLine(
self.program, self, "Overwrite exiting file? (yes or no): ")
self.confirmOverwrite.setController(app.cu_editor.ConfirmOverwrite)
self.contextMenu = Menu(self.program, self)
if 1: # wip on multi-line interactive find.
self.interactiveFind = InteractiveFind(self.program, self)
self.interactiveFind.setParent(self, 0)
else:
self.interactiveFind = LabeledLine(self.program, self, u'find: ')
self.interactiveFind.setController(app.cu_editor.InteractiveFind)
if 1:
self.interactiveGoto = LabeledLine(self.program, self, u'goto: ')
self.interactiveGoto.setController(app.cu_editor.InteractiveGoto)
if 1:
self.interactivePrediction = LabeledLine(self.program, self, u'p: ')
self.interactivePrediction.setController(
app.cu_editor.InteractivePrediction)
if 1:
self.interactivePrompt = LabeledLine(self.program, self, u"e: ")
self.interactivePrompt.setController(
app.cu_editor.InteractivePrompt)
if 1:
self.interactiveQuit = LabeledLine(
self.program, self, u"Save changes? (yes, no, or cancel): ")
self.interactiveQuit.setController(app.cu_editor.InteractiveQuit)
if 1:
self.topInfo = TopInfo(self.program, self)
self.topInfo.setParent(self, 0)
if not self.showTopInfo:
self.topInfo.detach()
if 1:
self.statusLine = StatusLine(self.program, self)
self.statusLine.setParent(self, 0)
if not self.showFooter:
self.statusLine.detach()
if 1:
self.lineNumberColumn = LineNumbers(self.program, self)
self.lineNumberColumn.setParent(self, 0)
if not self.showLineNumbers:
self.lineNumberColumn.detach()
if 1:
self.logoCorner = ViewWindow(self.program, self)
self.logoCorner.name = u'Logo'
self.logoCorner.setParent(self, 0)
if 1:
self.rightColumn = ViewWindow(self.program, self)
self.rightColumn.name = u'Right'
self.rightColumn.setParent(self, 0)
if not self.showRightColumn:
self.rightColumn.detach()
if 1:
self.popupWindow = PopupWindow(self.program, self)
if self.showMessageLine:
self.messageLine = MessageLine(self.program, self)
self.messageLine.setParent(self, 0)
self.showTips = self.program.prefs.status.get(u'showTips')
self.statusLineCount = 8 if self.showTips else 1
if 0:
def splitWindow(self):
"""Experimental."""
app.log.info()
other = InputWindow(self.prg, self)
other.setTextBuffer(self.textBuffer)
app.log.info()
self.prg.zOrder.append(other)
self.prg.layout()
app.log.info()
def layout(self):
"""Change self and sub-windows to fit within the given rectangle."""
top, left, rows, cols = self.outerShape
lineNumbersCols = 7
topRows = self.topRows
bottomRows = max(1, self.interactiveFind.preferredSize(rows, cols)[0])
# The top, left of the main window is the rows, cols of the logo corner.
self.logoCorner.reshape(top, left, 2, lineNumbersCols)
if self.showTopInfo and rows > topRows and cols > lineNumbersCols:
self.topInfo.reshape(top, left + lineNumbersCols, topRows,
cols - lineNumbersCols)
top += topRows
rows -= topRows
rows -= bottomRows
bottomFirstRow = top + rows
self.confirmClose.reshape(bottomFirstRow, left, bottomRows, cols)
self.confirmOverwrite.reshape(bottomFirstRow, left, bottomRows, cols)
self.interactivePrediction.reshape(bottomFirstRow, left, bottomRows,
cols)
self.interactivePrompt.reshape(bottomFirstRow, left, bottomRows, cols)
self.interactiveQuit.reshape(bottomFirstRow, left, bottomRows, cols)
if self.showMessageLine:
self.messageLine.reshape(bottomFirstRow, left, bottomRows, cols)
self.interactiveFind.reshape(bottomFirstRow, left, bottomRows, cols)
if 1:
self.interactiveGoto.reshape(bottomFirstRow, left, bottomRows, cols)
if self.showFooter and rows > 0:
self.statusLine.reshape(bottomFirstRow - self.statusLineCount, left,
self.statusLineCount, cols)
rows -= self.statusLineCount
if self.showLineNumbers and cols > lineNumbersCols:
self.lineNumberColumn.reshape(top, left, rows, lineNumbersCols)
cols -= lineNumbersCols
left += lineNumbersCols
if self.showRightColumn and cols > 0:
self.rightColumn.reshape(top, left + cols - 1, rows, 1)
cols -= 1
Window.reshape(self, top, left, rows, cols)
def drawLogoCorner(self):
"""."""
logo = self.logoCorner
if logo.rows <= 0 or logo.cols <= 0:
return
color = self.program.color.get('logo')
for i in range(logo.rows):
logo.addStr(i, 0, u' ' * logo.cols, color)
logo.addStr(0, 1, u'ci' [:self.cols], color)
logo.render()
def drawRightEdge(self):
"""Draw makers to indicate text extending past the right edge of the
window."""
maxRow, maxCol = self.rows, self.cols
limit = min(maxRow, self.textBuffer.parser.rowCount() - self.scrollRow)
colorPrefs = self.program.color
for i in range(limit):
color = colorPrefs.get('right_column')
if self.textBuffer.parser.rowWidth(i + self.scrollRow) - self.scrollCol > maxCol:
color = colorPrefs.get('line_overflow')
self.rightColumn.addStr(i, 0, u' ', color)
color = colorPrefs.get('outside_document')
for i in range(limit, maxRow):
self.rightColumn.addStr(i, 0, u' ', color)
def focus(self):
self.layout()
if self.showMessageLine:
self.messageLine.bringToFront()
Window.focus(self)
def nextFocusableWindow(self, start, reverse=False):
# Keep the tab focus in the child branch. (The child view will call
# this, tell the child there is nothing to tab to up here).
return None
def render(self):
self.topInfo.onChange()
self.drawLogoCorner()
self.drawRightEdge()
Window.render(self)
def reshape(self, top, left, rows, cols):
"""Change self and sub-windows to fit within the given rectangle."""
app.log.detail(top, left, rows, cols)
Window.reshape(self, top, left, rows, cols)
self.outerShape = (top, left, rows, cols)
self.layout()
def setTextBuffer(self, textBuffer):
if app.config.strict_debug:
assert issubclass(
textBuffer.__class__, app.text_buffer.TextBuffer), \
repr(textBuffer)
app.log.info('setTextBuffer')
if self.textBuffer is not None:
self.savedScrollPositions[self.textBuffer.fullPath] = (
self.scrollRow, self.scrollCol)
#self.normalize()
textBuffer.lineLimitIndicator = self.program.prefs.editor[
'lineLimitIndicator']
textBuffer.debugRedo = self.program.prefs.startup.get('debugRedo')
Window.setTextBuffer(self, textBuffer)
self.controller.setTextBuffer(textBuffer)
savedScroll = self.savedScrollPositions.get(self.textBuffer.fullPath)
if savedScroll is not None:
self.scrollRow, self.scrollCol = savedScroll
else:
historyScroll = self.textBuffer.fileHistory.get('scroll')
if historyScroll is not None:
self.scrollRow, self.scrollCol = historyScroll
else:
self.textBuffer.scrollToOptimalScrollPosition()
def startup(self):
bufferManager = self.program.bufferManager
for f in self.program.prefs.startup.get('cliFiles', []):
tb = bufferManager.loadTextBuffer(f['path'])
if tb is None:
# app.log.info('failed to load', repr(f["path"]))
continue
tb.parseDocument()
if f['row'] is not None:
if f['col'] is not None:
tb.selectText(f['row'], f['col'], 0,
app.selectable.kSelectionNone)
else:
tb.selectText(f['row'], 0, 0, app.selectable.kSelectionNone)
if self.program.prefs.startup.get('readStdin'):
bufferManager.readStdin()
bufferManager.buffers.reverse()
tb = bufferManager.topBuffer()
if not tb:
tb = bufferManager.newTextBuffer()
self.setTextBuffer(tb)
# Should parsing the document be a standard part of setTextBuffer? TBD.
self.textBuffer.parseDocument()
openToLine = self.program.prefs.startup.get('openToLine')
if openToLine is not None:
self.textBuffer.selectText(openToLine - 1, 0, 0,
app.selectable.kSelectionNone)
def toggleShowTips(self):
self.showTips = not self.showTips
self.statusLineCount = 8 if self.showTips else 1
self.layout()
self.program.prefs.save('status', 'showTips', self.showTips)
def unfocus(self):
if self.showMessageLine:
self.messageLine.detach()
Window.unfocus(self)
class OptionsSelectionWindow(ViewWindow):
"""Mutex window."""
def __init__(self, program, parent):
if app.config.strict_debug:
assert parent is not None
ViewWindow.__init__(self, program, parent)
self.color = self.program.color.get('top_info')
def reshape(self, top, left, rows, cols):
ViewWindow.reshape(self, top, left, rows, cols)
self.layoutHorizontally(self.zOrder)
def childSelected(self, selectedChild):
app.log.info(self.zOrder)
for child in self.zOrder:
if child is not selectedChild:
child.deselect()
def render(self):
self.blank(self.color)
ViewWindow.render(self)
class OptionsTrinaryStateWindow(Window):
def __init__(self, program, parent, label, prefCategory, prefName):
if app.config.strict_debug:
assert isinstance(label, unicode)
assert isinstance(prefCategory, unicode)
assert isinstance(prefName, unicode)
Window.__init__(self, program, parent)
# TODO(dschuyler): Creating a text buffer is rather heavy for a toggle
# control. This should get some optimization.
self.setTextBuffer(app.text_buffer.TextBuffer(self.program))
self.setController(app.cu_editor.ToggleController)
self.setParent(parent)
self.name = label
self.prefCategory = prefCategory
self.prefName = prefName
colorPrefs = self.program.color
self.color = colorPrefs.get('keyword')
self.focusColor = colorPrefs.get('selected')
self.textBuffer.view.showCursor = False
def focus(self):
Window.focus(self)
def setUp(self, toggleOn, toggleOff, toggleUndefined, width=None):
if app.config.strict_debug:
assert isinstance(toggleOn, unicode)
assert isinstance(toggleOff, unicode)
assert isinstance(toggleUndefined, unicode)
assert width is None or isinstance(width, int)
self.toggleOn = toggleOn
self.toggleOff = toggleOff
self.toggleUndefined = toggleUndefined
longest = max(len(toggleOn), len(toggleOff), len(toggleUndefined))
self.width = width if width is not None else longest
self.updateLabel()
def mouseClick(self, paneRow, paneCol, shift, ctrl, alt):
self.controller.toggleValue()
def onPrefChanged(self, category, name):
Window.onPrefChanged(self, category, name)
if category != self.prefCategory or name != self.prefName:
return
self.updateLabel()
def updateLabel(self):
pref = self.program.prefs.category(self.prefCategory)[self.prefName]
if pref is None:
label = self.toggleUndefined
else:
label = self.toggleOn if pref else self.toggleOff
self.label = u'%*s' % (self.width, label)
def preferredSize(self, rowLimit, colLimit):
return min(rowLimit, 1), min(colLimit, abs(self.width))
def render(self):
Window.render(self)
if self.rows <= 0:
return
self.writeLineRow = 0
color = self.focusColor if self.hasFocus else self.color
self.writeLine(self.label[:self.cols], color)
class OptionsToggle(OptionsTrinaryStateWindow):
def __init__(self,
program,
parent,
label,
prefCategory,
prefName,
width=None):
if app.config.strict_debug:
assert isinstance(label, unicode)
assert isinstance(prefCategory, unicode)
assert isinstance(prefName, unicode)
OptionsTrinaryStateWindow.__init__(self, program, parent, label,
prefCategory, prefName)
# I considered these unicode characters, but [x] looks clearer to me.
# toggleOn = unichr(0x2612) + ' ' + control['name']
# toggleOff = unichr(0x2610) + ' ' + control['name']
OptionsTrinaryStateWindow.setUp(self, u'[x]' + label, u'[ ]' + label,
u'[-]' + label, width)
class RowWindow(ViewWindow):
def __init__(self, program, host, separator):
if app.config.strict_debug:
assert host
ViewWindow.__init__(self, program, host)
self.color = self.program.color.get('keyword')
self.separator = separator
def preferredSize(self, rowLimit, colLimit):
return min(rowLimit, 1), colLimit
def render(self):
self.blank(self.color)
ViewWindow.render(self)
def reshape(self, top, left, rows, cols):
ViewWindow.reshape(self, top, left, rows, cols)
#app.log.info(top, left, rows, cols, self)
self.layoutHorizontally(self.zOrder, self.separator)
class OptionsRow(ViewWindow):
class ControlElement:
def __init__(self, elementType, name, reference, width=None, sep=" "):
self.type = elementType
self.name = name
self.reference = reference
self.width = width if width is not None else len(name)
self.sep = sep
def __init__(self, program, host):
if app.config.strict_debug:
assert host
ViewWindow.__init__(self, program, host)
self.host = host
self.color = self.program.color.get('top_info')
self.controlList = []
self.group = None
def addElement(self, draw, kind, name, reference, width, sep, extraWidth=0):
if app.config.strict_debug:
assert isinstance(name, unicode)
assert isinstance(sep, unicode)
assert width is None or isinstance(width, int)
assert isinstance(extraWidth, int)
if reference is not None:
assert isinstance(reference, dict)
assert name in reference
if self.group is not None:
self.group.append(len(self.controlList))
element = {
'dict': reference,
'draw': draw,
'name': name,
'sep': sep,
'type': kind,
'width': width if width is not None else len(name) + extraWidth
}
self.controlList.append(element)
return element
def addLabel(self, name, width=None, sep=u" "):
def draw(control):
return control[u'name']
return self.addElement(draw, u'label', name, None, width, sep)
def addSortHeader(self, name, reference, width=None, sep=u" |"):
def draw(control):
decoration = u'v' if control[u'dict'][control[u'name']] else u'^'
if control[u'dict'][control[u'name']] is None:
decoration = u'-'
if control[u'width'] < 0:
return u'%s %s' % (control[u'name'], decoration)
return u'%s %s' % (decoration, control[u'name'])
self.addElement(draw, u'sort', name, reference, width, sep, len(u' v'))
def addSelection(self, name, reference, width=None, sep=u" "):
if app.config.strict_debug:
assert isinstance(name, unicode)
if 1:
toggleOn = u'(*)' + name
toggleOff = u'( )' + name
def draw(control):
return toggleOn if control[u'dict'][control[u'name']] else toggleOff
width = max(width, min(len(toggleOn), len(toggleOff)))
self.addElement(draw, u'selection', name, reference, width, sep,
len(u'(*)'))
def removeThis_addToggle(self, name, reference, width=None, sep=u" "):
if app.config.strict_debug:
assert isinstance(name, unicode)
if 1:
toggleOn = u'[x]' + name
toggleOff = u'[ ]' + name
if 0:
toggleOn = unichr(0x2612) + ' ' + control['name']
toggleOff = unichr(0x2610) + ' ' + control['name']
if 0:
toggleOn = '[+' + control['name'] + ']'
toggleOff = '[-' + control['name'] + ']'
def draw(control):
return toggleOn if control['dict'][control['name']] else toggleOff
width = max(width, min(len(toggleOn), len(toggleOff)))
self.addElement(draw, u'toggle', name, reference, width, sep,
len('[-]'))
def beginGroup(self):
"""Like a radio group, or column sort headers."""
self.group = []
def endGroup(self):
"""Like a radio group, or column sort headers."""
pass
def mouseClick(self, paneRow, paneCol, shift, ctrl, alt):
#row = self.scrollRow + paneRow
col = self.scrollCol + paneCol
offset = 0
for index, control in enumerate(self.controlList):
width = abs(control['width'])
if offset <= col < offset + width:
if control['type'] == 'selection':
name = control['name']
for element in self.group:
elementName = self.controlList[element]['name']
self.controlList[element]['dict'][elementName] = False
control['dict'][name] = True
self.host.controller.optionChanged(name,
control['dict'][name])
break
if control['type'] == 'sort':
name = control['name']
newValue = not control['dict'][name]
if index in self.group:
for element in self.group:
elementName = self.controlList[element]['name']
self.controlList[element]['dict'][
elementName] = None
control['dict'][name] = newValue
self.host.controller.optionChanged(name,
control['dict'][name])
break
if control['type'] == 'toggle':
name = control['name']
control['dict'][name] = not control['dict'][name]
self.host.controller.optionChanged(name,
control['dict'][name])
break
offset += width + len(control['sep'])
def preferredSize(self, rowLimit, colLimit):
return min(rowLimit, 1), colLimit
def render(self):
if self.rows <= 0:
return
line = u''
for control in self.controlList:
label = control['draw'](control)
line += u'%*s%s' % (control['width'], label, control['sep'])
if len(line) >= self.cols:
break
self.writeLineRow = 0
self.writeLine(line[:self.cols], self.color)
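# Note (added for clarity, not part of the original file): OptionsRow renders a
# single row of controls. On mouseClick, a 'selection' control acts like a radio
# button within the current group, a 'sort' control flips the sort direction and
# clears the other headers in its group, and a 'toggle' control flips a boolean
# value; each change is reported through host.controller.optionChanged().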
class PopupWindow(Window):
def __init__(self, program, host):
if app.config.strict_debug:
assert host
Window.__init__(self, program, host)
self.host = host
self.controller = app.cu_editor.PopupController(self)
self.setTextBuffer(app.text_buffer.TextBuffer(self.program))
self.longestLineLength = 0
self.__message = []
self.showOptions = True
# This will be displayed and should contain the keys that respond to
# user input. This should be updated if you change the controller's
# command set.
self.options = []
def render(self):
"""Display a box of text in the center of the window."""
maxRows, maxCols = self.host.rows, self.host.cols
cols = min(self.longestLineLength + 6, maxCols)
rows = min(len(self.__message) + 4, maxRows)
self.resizeTo(rows, cols)
self.moveTo(maxRows // 2 - rows // 2, maxCols // 2 - cols // 2)
color = self.program.color.get('popup_window')
for row in range(rows):
if row == rows - 2 and self.showOptions:
message = '/'.join(self.options)
elif row == 0 or row >= rows - 3:
self.addStr(row, 0, ' ' * cols, color)
continue
else:
message = self.__message[row - 1]
lineLength = len(message)
spacing1 = (cols - lineLength) // 2
spacing2 = cols - lineLength - spacing1
self.addStr(row, 0, ' ' * spacing1 + message + ' ' * spacing2,
color)
def setMessage(self, message):
"""Sets the Popup window's message to the given message.
message (str): A string that you want to display.
Returns:
None.
"""
self.__message = message.split("\n")
self.longestLineLength = max([len(line) for line in self.__message])
def setOptionsToDisplay(self, options):
"""
This function is used to change the options that are displayed in the
popup window. They will be separated by a '/' character when displayed.
Args:
options (list): A list of possible keys which the user can press and
should be responded to by the controller.
"""
self.options = options
def setTextBuffer(self, textBuffer):
Window.setTextBuffer(self, textBuffer)
self.controller.setTextBuffer(textBuffer)
def unfocus(self):
self.detach()
Window.unfocus(self)
class PaletteWindow(Window):
"""A window with example foreground and background text colors."""
def __init__(self, prg, host):
Window.__init__(self, prg, host)
self.prg = prg
self.resizeTo(16, 16 * 5)
self.moveTo(8, 8)
self.controller = app.cu_editor.PaletteDialogController(self)
self.setTextBuffer(app.text_buffer.TextBuffer(self.program))
def render(self):
width = 16
rows = 16
colorPrefs = self.program.color
for i in range(width):
for k in range(rows):
self.addStr(k, i * 5, ' %3d ' % (i + k * width,),
colorPrefs.get(i + k * width))
def setTextBuffer(self, textBuffer):
Window.setTextBuffer(self, textBuffer)
self.controller.setTextBuffer(textBuffer)
def unfocus(self):
self.detach()
Window.unfocus(self)
class SortableHeaderWindow(OptionsTrinaryStateWindow):
def __init__(self,
program,
parent,
label,
prefCategory,
prefName,
width=None):
if app.config.strict_debug:
assert issubclass(program.__class__,
app.ci_program.CiProgram), program
assert isinstance(label, unicode)
assert isinstance(prefCategory, unicode)
assert isinstance(prefName, unicode)
OptionsTrinaryStateWindow.__init__(self, program, parent, label,
prefCategory, prefName)
self.color = self.program.color.get(u'top_info')
def draw(label, decoration, width):
if width < 0:
x = u'%s %s' % (label, decoration)
else:
x = u'%s %s' % (decoration, label)
return u'%*s' % (width, x)
OptionsTrinaryStateWindow.setUp(self, draw(label, u'v', width),
draw(label, u'^', width),
draw(label, u'-', width))
def deselect(self):
self.controller.clearValue()
def mouseClick(self, paneRow, paneCol, shift, ctrl, alt):
self.parent.childSelected(self)
self.controller.toggleValue()
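# Note (added for clarity, not part of the original file): OptionsTrinaryStateWindow
# and its subclasses above render one of three labels depending on the stored
# preference value: the "on" label for True, the "off" label for False, and the
# "undefined" label (e.g. '[-]' or '-') when the preference is None.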
| 37.155351
| 93
| 0.580463
|
138e9634ecd5186c93f3d38bc18b6552c5997187
| 2,381
|
py
|
Python
|
src/oci/data_safe/models/change_data_safe_private_endpoint_compartment_details.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 249
|
2017-09-11T22:06:05.000Z
|
2022-03-04T17:09:29.000Z
|
src/oci/data_safe/models/change_data_safe_private_endpoint_compartment_details.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 228
|
2017-09-11T23:07:26.000Z
|
2022-03-23T10:58:50.000Z
|
src/oci/data_safe/models/change_data_safe_private_endpoint_compartment_details.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 224
|
2017-09-27T07:32:43.000Z
|
2022-03-25T16:55:42.000Z
|
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ChangeDataSafePrivateEndpointCompartmentDetails(object):
"""
The details used to change the compartment of a Data Safe private endpoint.
"""
def __init__(self, **kwargs):
"""
Initializes a new ChangeDataSafePrivateEndpointCompartmentDetails object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param compartment_id:
The value to assign to the compartment_id property of this ChangeDataSafePrivateEndpointCompartmentDetails.
:type compartment_id: str
"""
self.swagger_types = {
'compartment_id': 'str'
}
self.attribute_map = {
'compartment_id': 'compartmentId'
}
self._compartment_id = None
@property
def compartment_id(self):
"""
Gets the compartment_id of this ChangeDataSafePrivateEndpointCompartmentDetails.
The OCID of the new compartment.
:return: The compartment_id of this ChangeDataSafePrivateEndpointCompartmentDetails.
:rtype: str
"""
return self._compartment_id
@compartment_id.setter
def compartment_id(self, compartment_id):
"""
Sets the compartment_id of this ChangeDataSafePrivateEndpointCompartmentDetails.
The OCID of the new compartment.
:param compartment_id: The compartment_id of this ChangeDataSafePrivateEndpointCompartmentDetails.
:type: str
"""
self._compartment_id = compartment_id
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
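# Hypothetical usage sketch (added for illustration, not part of the SDK file
# above): per the class docstring, keyword arguments populate the corresponding
# properties via the @init_model_state_from_kwargs decorator, and __repr__
# prints the flattened state. The OCID below is a placeholder value.
if __name__ == "__main__":
    details = ChangeDataSafePrivateEndpointCompartmentDetails(
        compartment_id="ocid1.compartment.oc1..exampleuniqueID")
    print(details)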
| 33.535211
| 245
| 0.699286
|
19b4799636fe09fb08236a9b05c19a4cad6f42a4
| 683
|
py
|
Python
|
src/com/python/gui/gui_Demo.py
|
Leeo1124/pythonDemo
|
72e2209c095301a3f1f61edfe03ea69c3c05be40
|
[
"Apache-2.0"
] | null | null | null |
src/com/python/gui/gui_Demo.py
|
Leeo1124/pythonDemo
|
72e2209c095301a3f1f61edfe03ea69c3c05be40
|
[
"Apache-2.0"
] | null | null | null |
src/com/python/gui/gui_Demo.py
|
Leeo1124/pythonDemo
|
72e2209c095301a3f1f61edfe03ea69c3c05be40
|
[
"Apache-2.0"
] | null | null | null |
'''
Created on August 10, 2016
@author: Administrator
'''
from tkinter import *
import tkinter.messagebox as messagebox
class Application(Frame):
def __init__(self, master=None):
Frame.__init__(self, master)
self.pack()
self.createWidgets()
def createWidgets(self):
self.nameInput = Entry(self)
self.nameInput.pack()
self.alertButton = Button(self, text='Hello', command=self.hello)
self.alertButton.pack()
def hello(self):
name = self.nameInput.get() or 'world'
messagebox.showinfo('Message', 'Hello, %s' % name)
app = Application()
# Set the window title:
app.master.title('Hello World')
# Start the main event loop:
app.mainloop()
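# Note (added for clarity, not part of the original demo): Application() is
# constructed without an explicit master, so tkinter creates the default Tk
# root window; that root is what app.master.title() configures and what
# app.mainloop() above keeps running until the window is closed.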
| 23.551724
| 73
| 0.653001
|
c471a28daf797ae1dd79299286cdad3e9be0d4f7
| 4,092
|
py
|
Python
|
networks/mlp_mdn.py
|
wx-b/ibc
|
2c9202e50cfee1abdcd955d3ac1b9d68b5d81e53
|
[
"Apache-2.0"
] | null | null | null |
networks/mlp_mdn.py
|
wx-b/ibc
|
2c9202e50cfee1abdcd955d3ac1b9d68b5d81e53
|
[
"Apache-2.0"
] | null | null | null |
networks/mlp_mdn.py
|
wx-b/ibc
|
2c9202e50cfee1abdcd955d3ac1b9d68b5d81e53
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2021 The Reach ML Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements a tf_agents compatible mlp-mse."""
import gin
from ibc.networks.layers import mlp_dropout
from ibc.networks.layers import resnet
import tensorflow as tf
import tensorflow_probability as tfp
from tf_agents.networks import network
tfd = tfp.distributions
@gin.configurable
class MLPMDN(network.Network):
"""MLP-MDN compatible with tfagents."""
def __init__(self,
obs_spec,
action_spec,
width=512,
depth=2,
rate=0.1,
act_denorm_layer=None,
num_components=1,
training_temperature=2.5,
test_temperature=2.5,
test_variance_exponent=1.,
name='MLPMDN',
layers='MLPDropout'):
super(MLPMDN, self).__init__(
input_tensor_spec=obs_spec, state_spec=(), name=name)
# For inference time, use to denormalize mdn action output.
self._act_denorm_layer = act_denorm_layer
# Define MLP.
hidden_sizes = [width for _ in range(depth)]
dense = tf.keras.layers.Dense
if layers == 'MLPDropout':
self._mlp = mlp_dropout.MLPDropoutLayer(
hidden_sizes, rate, kernel_initializer='normal',
bias_initializer='normal', dense=dense)
elif layers == 'ResNetOrig':
self._mlp = resnet.ResNetOrigLayer(
hidden_sizes, rate, kernel_initializer='normal',
bias_initializer='normal', dense=dense)
elif layers == 'ResNetPreActivation':
self._mlp = resnet.ResNetPreActivationLayer(
hidden_sizes, rate, kernel_initializer='normal',
bias_initializer='normal', dense=dense)
self.num_components = num_components
self.action_size = action_spec.shape[0]
self.mu = tf.keras.layers.Dense(
(self.action_size * num_components),
kernel_initializer='normal',
bias_initializer='normal')
self.logvar = tf.keras.layers.Dense(
(self.action_size * num_components),
kernel_initializer='normal',
bias_initializer='normal')
self.pi = tf.keras.layers.Dense(
num_components,
kernel_initializer='normal',
bias_initializer='normal')
self.training_temp = training_temperature
self.test_temp = test_temperature
self.test_variance_exponent = test_variance_exponent
def call(self, obs, training, step_type=(), network_state=()):
# Combine dict of observations to concatenated tensor. [B x T x obs_spec]
obs = tf.concat(tf.nest.flatten(obs), axis=-1)
# Flatten obs across time: [B x T * obs_spec]
batch_size = tf.shape(obs)[0]
x = tf.reshape(obs, [batch_size, -1])
# Forward mlp.
x = self._mlp(x, training=training)
# Project to params.
mu = self.mu(x)
var = tf.exp(self.logvar(x))
if not training:
var = var**self.test_variance_exponent
pi = self.pi(x)
temp = self.training_temp if training else self.test_temp
pi = pi / temp
# Reshape into MDN distribution.
batch_size = tf.shape(mu)[0]
param_shape = [batch_size, self.num_components, self.action_size]
mu = tf.reshape(mu, param_shape)
var = tf.reshape(var, param_shape)
if not training:
mu = self._act_denorm_layer(mu)
var = self._act_denorm_layer(var, mean_offset=False)
components_distribution = tfd.MultivariateNormalDiag(loc=mu, scale_diag=var)
x = tfd.MixtureSameFamily(
tfd.Categorical(logits=pi), components_distribution)
return x, network_state
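# Shape sketch (added for illustration, not part of the original file): the
# same mixture construction used in MLPMDN.call(), with stand-in tensors so
# the resulting shapes are visible. `scale` stands in for the `var` tensor
# above, which the code passes as scale_diag.
if __name__ == '__main__':
    batch, num_components, action_size = 2, 3, 4
    mu = tf.zeros([batch, num_components, action_size])
    scale = tf.ones([batch, num_components, action_size])
    pi = tf.zeros([batch, num_components])  # uniform mixture logits
    mdn = tfd.MixtureSameFamily(
        tfd.Categorical(logits=pi),
        tfd.MultivariateNormalDiag(loc=mu, scale_diag=scale))
    actions = mdn.sample()              # shape: [batch, action_size]
    log_probs = mdn.log_prob(actions)   # shape: [batch]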
| 34.1
| 80
| 0.676442
|
a3060e7327ec042ddd7de204fe267533c303a859
| 3,549
|
py
|
Python
|
mria_py/core/ua.py
|
ElcoK/MRIA
|
ce4fc1665004506c43320f968b1f1c435af5bc59
|
[
"MIT"
] | null | null | null |
mria_py/core/ua.py
|
ElcoK/MRIA
|
ce4fc1665004506c43320f968b1f1c435af5bc59
|
[
"MIT"
] | null | null | null |
mria_py/core/ua.py
|
ElcoK/MRIA
|
ce4fc1665004506c43320f968b1f1c435af5bc59
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
This script will produce an uncertainty analysis around the results. This
should improve the interpretation of the modelling outcomes
@author: Elco Koks
@date: Nov, 2017
"""
import pandas as pd
import itertools
from mria_py.core.create_table import Table
from mria_py.core.base_model import MRIA_IO as MRIA
class ua(object):
def __init__(self, data):
self.countries = data.countries
self.total_countries = len(data.countries)
self.sectors = data.sectors
self.data = data
def run(self,disruption = 1.1,disrupted_ctry= [],disrupted_sctr=[]):
Weights = [0,0.5,1,1.25,1.5,1.75,2,2.25,2.5]
output_UA = pd.DataFrame(columns=['DisWeight','RatWeight','Loss'])
for DisWeight,RatWeight in itertools.combinations(Weights,2):
'''Create model'''
MRIA_RUN = MRIA(self.data.name,self.data.countries,self.data.sectors)
'''Define sets and alias'''
# CREATE SETS
MRIA_RUN.create_sets()
# CREATE ALIAS
MRIA_RUN.create_alias()
''' Define tables and parameters'''
output = pd.DataFrame()
MRIA_RUN.baseline_data(self.data,disruption,disrupted_ctry,disrupted_sctr)
# MRIA_RUN.impact_data(self.data,disruption,disrupted_ctry,disrupted_sctr)
# output['x_in'] = pd.Series(MRIA_RUN.X.get_values())
#
# MRIA_RUN.run_impactmodel(DisWeight=DisWeight,RatWeight=RatWeight)
#
# output['x_out'] = pd.Series(MRIA_RUN.X.get_values())
# output['loss'] = output['x_in'] - output['x_out']
#
# print('A DisWeight of '+str(DisWeight)+' and a RatWeight of '+str(RatWeight)+' gives a loss of '+str(sum(output['loss']))+ ' dollar')
#
# output_UA = output_UA.append({'DisWeight':DisWeight,'RatWeight':RatWeight,'Loss':sum(output['loss'])}, ignore_index=True)
#
# del MRIA_RUN
return output_UA
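# Note (added for clarity, not part of the original file): with the impact-model
# calls above commented out, run() currently only builds the baseline data for
# each (DisWeight, RatWeight) pair and returns an empty output_UA DataFrame.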
if __name__ == '__main__':
''' Specify file path '''
filepath = '..\..\input_data\The_Vale.xlsx'
'''Specify which countries should be included in the subset'''
list_countries = ['Elms','Hazel','Montagu','Fogwell','Riverside','Oatlands']
'''Create data input'''
DATA = Table('TheVale',filepath,2010,list_countries)
DATA.prep_data()
'''Create model '''
MRIA_model = MRIA(DATA.name,list_countries,DATA.sectors)
MRIA_model.create_sets(FD_SET=['FinDem'])
MRIA_model.create_alias()
'''Run model and create some output'''
output = pd.DataFrame()
# MRIA_model.run_basemodel()
'''Specify disruption'''
disruption = 1.1
disrupted_ctry = ['Elms']
disrupted_sctr = ['Manu']
MRIA_model.baseline_data(DATA,disruption,disrupted_ctry,disrupted_sctr)
output['x_in'] = pd.Series(MRIA_model.X.get_values())
# MRIA_model.run_basemodel()
MRIA_model.impact_data(DATA,disruption,disrupted_ctry,disrupted_sctr)
MRIA_model.run_impactmodel()
output['x_out'] = pd.Series(MRIA_model.X.get_values())
output['loss'] = output['x_out'] - output['x_in']
MRIA_RUN = MRIA(ua(DATA).data.name,ua(DATA).data.countries,ua(DATA).data.sectors)
MRIA_RUN.create_sets(FD_SET=['FinDem'])
MRIA_RUN.create_alias()
ua(DATA)
MRIA_RUN.baseline_data(ua(DATA).data,disruption,disrupted_ctry,disrupted_sctr)
MRIA_RUN.run_basemodel()
| 30.86087
| 146
| 0.629191
|
4f2c109aaf0a1fafa6ef733f6d5261ee2ce558ec
| 3,705
|
py
|
Python
|
src/catkin_lint/util.py
|
eurogroep/catkin_lint
|
987d8e1378b4963e4d36032b9410e13c7bbab8f3
|
[
"BSD-3-Clause"
] | null | null | null |
src/catkin_lint/util.py
|
eurogroep/catkin_lint
|
987d8e1378b4963e4d36032b9410e13c7bbab8f3
|
[
"BSD-3-Clause"
] | null | null | null |
src/catkin_lint/util.py
|
eurogroep/catkin_lint
|
987d8e1378b4963e4d36032b9410e13c7bbab8f3
|
[
"BSD-3-Clause"
] | null | null | null |
# coding=utf-8
#
# catkin_lint
# Copyright (c) 2013-2020 Fraunhofer FKIE
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Fraunhofer organization nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import re
import tempfile
def word_split(s):
ws = re.compile(r"(\W|_)+|(?<=[^A-Z])(?=[A-Z])|(?<=\w)(?=[A-Z][a-z])")
mo = ws.search(s)
result = []
while mo:
result.append(s[:mo.start()].lower())
s = s[mo.end():]
mo = ws.search(s)
result.append(s.lower())
return result
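# Illustration (added, not part of the original file): word_split() lower-cases
# the pieces and splits on runs of non-word characters or underscores as well
# as on camelCase boundaries, e.g.
#   word_split("CatkinLintCheck_v2") -> ['catkin', 'lint', 'check', 'v2']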
try:
from itertools import zip_longest
except ImportError:
def zip_longest(*args):
return map(None, *args)
def write_atomic(filepath, data):
fd, filepath_tmp = tempfile.mkstemp(prefix=os.path.basename(filepath) + ".tmp.", dir=os.path.dirname(filepath))
with os.fdopen(fd, "wb") as f:
f.write(data)
f.close()
try:
os.rename(filepath_tmp, filepath)
except OSError:
try:
os.unlink(filepath)
except OSError:
pass
try:
os.rename(filepath_tmp, filepath)
except OSError:
os.unlink(filepath_tmp)
def is_sorted(lst, key=lambda x, y: x < y):
for i, el in enumerate(lst[1:]):
if key(el, lst[i]):
return False
return True
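# Illustration (added, not part of the original file): the default key treats
# equal neighbours as in order (non-strict), so is_sorted([1, 2, 2, 3]) is True
# and is_sorted([3, 1]) is False.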
def is_active_depend(d):
if hasattr(d, "evaluated_condition"):
return d.evaluated_condition
return True
def enumerate_package_files(rootdir, catkin_ignore=True, ignore_dot=True, ignore_unimportant=True):
for dirpath, dirnames, filenames in os.walk(rootdir, topdown=True):
if "CATKIN_IGNORE" in filenames and catkin_ignore:
del dirnames[:]
else:
if ignore_dot:
dirnames[:] = [d for d in dirnames if not d.startswith(".")]
if ignore_unimportant:
dirnames[:] = [d for d in dirnames if "test" not in d.lower() and "example" not in d.lower() and d != "build"]
filenames[:] = [f for f in filenames if "test" not in f.lower() and "example" not in f.lower()]
for filename in filenames:
yield dirpath, filename
# Python 3 compatibility without sacrificing the speed gain of iteritems in Python 2
try:
iteritems = dict.iteritems
except AttributeError:
iteritems = dict.items
| 35.285714
| 126
| 0.676653
|
889d26fdd4bc59a0685fe4bc241a51e9b9845189
| 51,473
|
py
|
Python
|
7.30.1.dev0/ietf/doc/views_review.py
|
kesara/ietf-datatracker
|
dca3ee2ee98bcb75a10687587cf631750be34c79
|
[
"Unlicense"
] | null | null | null |
7.30.1.dev0/ietf/doc/views_review.py
|
kesara/ietf-datatracker
|
dca3ee2ee98bcb75a10687587cf631750be34c79
|
[
"Unlicense"
] | null | null | null |
7.30.1.dev0/ietf/doc/views_review.py
|
kesara/ietf-datatracker
|
dca3ee2ee98bcb75a10687587cf631750be34c79
|
[
"Unlicense"
] | null | null | null |
# Copyright The IETF Trust 2016-2020, All Rights Reserved
# -*- coding: utf-8 -*-
import io
import itertools
import json
import os
import datetime
import requests
import email.utils
from django.utils.http import is_safe_url
from simple_history.utils import update_change_reason
import debug # pyflakes:ignore
from django.http import JsonResponse, Http404, HttpResponse, HttpResponseRedirect
from django.shortcuts import render, get_object_or_404, redirect
from django import forms
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.utils.html import mark_safe # type:ignore
from django.core.exceptions import ValidationError
from django.template.loader import render_to_string, TemplateDoesNotExist
from django.urls import reverse as urlreverse
from ietf.doc.models import (Document, NewRevisionDocEvent, State, DocAlias,
LastCallDocEvent, ReviewRequestDocEvent, ReviewAssignmentDocEvent, DocumentAuthor)
from ietf.name.models import (ReviewRequestStateName, ReviewAssignmentStateName, ReviewResultName,
ReviewTypeName)
from ietf.person.models import Person
from ietf.review.models import ReviewRequest, ReviewAssignment, ReviewWish
from ietf.group.models import Group
from ietf.ietfauth.utils import is_authorized_in_doc_stream, user_is_person, has_role
from ietf.message.models import Message
from ietf.message.utils import infer_message
from ietf.person.fields import PersonEmailChoiceField, SearchablePersonField
from ietf.review.policies import get_reviewer_queue_policy
from ietf.review.utils import (active_review_teams, assign_review_request_to_reviewer,
can_request_review_of_doc, can_manage_review_requests_for_team,
email_review_assignment_change, email_review_request_change,
close_review_request_states,
close_review_request)
from ietf.review import mailarch
from ietf.utils.fields import DatepickerDateField
from ietf.utils.text import strip_prefix, xslugify
from ietf.utils.textupload import get_cleaned_text_file_content
from ietf.utils.mail import send_mail_message
from ietf.mailtrigger.utils import gather_address_lists
from ietf.utils.fields import MultiEmailField
from ietf.utils.response import permission_denied
def clean_doc_revision(doc, rev):
if rev:
rev = rev.rjust(2, "0")
if not NewRevisionDocEvent.objects.filter(doc=doc, rev=rev).exists():
raise forms.ValidationError("Could not find revision \"{}\" of the document.".format(rev))
return rev
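# Note (added for clarity, not part of the original file): a requested revision
# such as "3" is zero-padded to "03" and must correspond to an existing
# NewRevisionDocEvent for the document, otherwise a ValidationError is raised.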
class RequestReviewForm(forms.ModelForm):
team = forms.ModelMultipleChoiceField(queryset=Group.objects.all(), widget=forms.CheckboxSelectMultiple)
deadline = DatepickerDateField(date_format="yyyy-mm-dd", picker_settings={ "autoclose": "1", "start-date": "+0d" })
class Meta:
model = ReviewRequest
fields = ('requested_by', 'type', 'deadline', 'requested_rev', 'comment')
def __init__(self, user, doc, *args, **kwargs):
super(RequestReviewForm, self).__init__(*args, **kwargs)
self.doc = doc
f = self.fields["team"]
f.queryset = active_review_teams()
f.initial = [group.pk for group in f.queryset if can_manage_review_requests_for_team(user, group, allow_personnel_outside_team=False)]
self.fields['type'].queryset = self.fields['type'].queryset.filter(used=True, reviewteamsettings__group__in=self.fields["team"].queryset).distinct()
self.fields['type'].widget = forms.RadioSelect(choices=[t for t in self.fields['type'].choices if t[0]])
self.fields["requested_rev"].label = "Document revision"
if has_role(user, "Secretariat"):
self.fields["requested_by"] = SearchablePersonField()
else:
self.fields["requested_by"].widget = forms.HiddenInput()
self.fields["requested_by"].initial = user.person.pk
def clean_deadline(self):
v = self.cleaned_data.get('deadline')
if v < datetime.date.today():
raise forms.ValidationError("Select today or a date in the future.")
return v
def clean_requested_rev(self):
return clean_doc_revision(self.doc, self.cleaned_data.get("requested_rev"))
def clean(self):
chosen_type = self.cleaned_data.get("type")
chosen_teams = self.cleaned_data.get("team")
if chosen_type and chosen_teams:
for t in chosen_teams:
if chosen_type not in t.reviewteamsettings.review_types.all():
self.add_error("type", "{} does not use the review type {}.".format(t.name, chosen_type.name))
return self.cleaned_data
@login_required
def request_review(request, name):
doc = get_object_or_404(Document, name=name)
if not can_request_review_of_doc(request.user, doc):
permission_denied(request, "You do not have permission to perform this action")
now = datetime.datetime.now()
lc_ends = None
e = doc.latest_event(LastCallDocEvent, type="sent_last_call")
if e and e.expires >= now:
lc_ends = e.expires
scheduled_for_telechat = doc.telechat_date()
if request.method == "POST":
form = RequestReviewForm(request.user, doc, request.POST)
if form.is_valid():
teams = form.cleaned_data["team"]
for team in teams:
review_req = form.save(commit=False)
review_req.id = None
review_req.doc = doc
review_req.state = ReviewRequestStateName.objects.get(slug="requested", used=True)
review_req.team = team
review_req.save()
descr = "Requested {} review by {}".format(review_req.type.name,
review_req.team.acronym.upper())
update_change_reason(review_req, descr)
ReviewRequestDocEvent.objects.create(
type="requested_review",
doc=doc,
rev=doc.rev,
by=request.user.person,
desc=descr,
time=review_req.time,
review_request=review_req,
state=None,
)
subject = "%s %s Review requested: %s" % (review_req.team.acronym, review_req.type.name, doc.name)
msg = subject
if review_req.comment:
msg += "\n\n"+review_req.comment
email_review_request_change(request, review_req, subject, msg, by=request.user.person, notify_secretary=True, notify_reviewer=False, notify_requested_by=True)
return redirect('ietf.doc.views_doc.document_main', name=doc.name)
else:
if lc_ends:
review_type = "lc"
deadline = lc_ends.date().isoformat()
elif scheduled_for_telechat:
review_type = "telechat"
deadline = doc.telechat_date()-datetime.timedelta(days=2)
else:
review_type = "early"
deadline = None
form = RequestReviewForm(request.user, doc,
initial={ "type": review_type,
"requested_by": request.user.person,
"deadline": deadline,
})
return render(request, 'doc/review/request_review.html', {
'doc': doc,
'form': form,
'lc_ends': lc_ends,
'lc_ends_days': (lc_ends - now).days if lc_ends else None,
'scheduled_for_telechat': scheduled_for_telechat,
'scheduled_for_telechat_days': (scheduled_for_telechat - now.date()).days if scheduled_for_telechat else None,
})
@login_required
def review_request_forced_login(request, name, request_id):
return redirect(urlreverse("ietf.doc.views_review.review_request", kwargs={ "name": name, "request_id": request_id }))
def review_request(request, name, request_id):
doc = get_object_or_404(Document, name=name)
review_req = get_object_or_404(ReviewRequest, pk=request_id)
if review_req.doc != doc:
raise Http404('The indicated ReviewRequest is not a request for the indicated document')
can_manage_request = can_manage_review_requests_for_team(request.user, review_req.team)
can_close_request = (review_req.state_id in ["requested", "assigned"]
and (is_authorized_in_doc_stream(request.user, doc)
or can_manage_request))
can_assign_reviewer = (review_req.state_id in ["requested", "assigned"]
and can_manage_request)
can_edit_comment = can_request_review_of_doc(request.user, doc)
can_edit_deadline = can_edit_comment
assignments = review_req.reviewassignment_set.all()
for assignment in assignments:
assignment.is_reviewer = user_is_person(request.user, assignment.reviewer.person)
assignment.can_accept_reviewer_assignment = (assignment.state_id == "assigned"
and (assignment.is_reviewer or can_manage_request))
assignment.can_reject_reviewer_assignment = (assignment.state_id in ["assigned", "accepted"]
and (assignment.is_reviewer or can_manage_request))
assignment.can_complete_review = (assignment.state_id in ["assigned", "accepted", "overtaken", "no-response", "part-completed", "completed"]
and (assignment.is_reviewer or can_manage_request))
    # This implementation means that if a reviewer accepts one assignment for a
    # review_request, they accept all of their assignments for that request.
    # This is problematic - it's probably a bug for the same person to have more
    # than one assignment for the same request. It is, however, unintuitive, and
    # acceptance should be refactored to work on assignments, not requests.
if request.method == "POST" and request.POST.get("action") == "accept":
for assignment in assignments:
if assignment.can_accept_reviewer_assignment:
assignment.state = ReviewAssignmentStateName.objects.get(slug="accepted")
assignment.save()
update_change_reason(assignment, 'Assignment for {} accepted'.format(assignment.reviewer.person))
return redirect(review_request, name=review_req.doc.name, request_id=review_req.pk)
wg_chairs = None
if review_req.doc.group:
wg_chairs = [role.person for role in review_req.doc.group.role_set.filter(name__slug='chair')]
history = list(review_req.history.all())
history += itertools.chain(*[list(r.history.all()) for r in review_req.reviewassignment_set.all()])
history.sort(key=lambda h: h.history_date, reverse=True)
return render(request, 'doc/review/review_request.html', {
'doc': doc,
'review_req': review_req,
'can_close_request': can_close_request,
'can_assign_reviewer': can_assign_reviewer,
'can_edit_comment': can_edit_comment,
'can_edit_deadline': can_edit_deadline,
'assignments': assignments,
'wg_chairs': wg_chairs,
'history': history,
})
class CloseReviewRequestForm(forms.Form):
close_reason = forms.ModelChoiceField(queryset=close_review_request_states(), widget=forms.RadioSelect, empty_label=None)
close_comment = forms.CharField(label='Comment (optional)', max_length=255, required=False)
def __init__(self, can_manage_request, *args, **kwargs):
super(CloseReviewRequestForm, self).__init__(*args, **kwargs)
if not can_manage_request:
self.fields["close_reason"].queryset = self.fields["close_reason"].queryset.filter(slug__in=["withdrawn"])
if len(self.fields["close_reason"].queryset) == 1:
self.fields["close_reason"].initial = self.fields["close_reason"].queryset.first().pk
self.fields["close_reason"].widget = forms.HiddenInput()
@login_required
def close_request(request, name, request_id):
doc = get_object_or_404(Document, name=name)
review_req = get_object_or_404(ReviewRequest, pk=request_id, state__in=["requested", "assigned"])
can_request = is_authorized_in_doc_stream(request.user, doc)
can_manage_request = can_manage_review_requests_for_team(request.user, review_req.team)
if not (can_request or can_manage_request):
permission_denied(request, "You do not have permission to perform this action")
if request.method == "POST":
form = CloseReviewRequestForm(can_manage_request, request.POST)
if form.is_valid():
close_review_request(request, review_req,form.cleaned_data["close_reason"],
form.cleaned_data["close_comment"])
return redirect(review_request, name=review_req.doc.name, request_id=review_req.pk)
else:
form = CloseReviewRequestForm(can_manage_request)
return render(request, 'doc/review/close_request.html', {
'doc': doc,
'review_req': review_req,
'assignments': review_req.reviewassignment_set.all(),
'form': form,
})
class AssignReviewerForm(forms.Form):
reviewer = PersonEmailChoiceField(label="Assign Additional Reviewer", empty_label="(None)")
add_skip = forms.BooleanField(label='Skip next time', required=False)
def __init__(self, review_req, *args, **kwargs):
super(AssignReviewerForm, self).__init__(*args, **kwargs)
get_reviewer_queue_policy(review_req.team).setup_reviewer_field(self.fields["reviewer"], review_req)
@login_required
def assign_reviewer(request, name, request_id):
doc = get_object_or_404(Document, name=name)
review_req = get_object_or_404(ReviewRequest, pk=request_id, state__in=["requested", "assigned"])
if not can_manage_review_requests_for_team(request.user, review_req.team):
permission_denied(request, "You do not have permission to perform this action")
if request.method == "POST" and request.POST.get("action") == "assign":
form = AssignReviewerForm(review_req, request.POST)
if form.is_valid():
reviewer = form.cleaned_data["reviewer"]
add_skip = form.cleaned_data["add_skip"]
assign_review_request_to_reviewer(request, review_req, reviewer, add_skip)
return redirect(review_request, name=review_req.doc.name, request_id=review_req.pk)
else:
form = AssignReviewerForm(review_req)
return render(request, 'doc/review/assign_reviewer.html', {
'doc': doc,
'review_req': review_req,
'assignments': review_req.reviewassignment_set.all(),
'form': form,
})
class RejectReviewerAssignmentForm(forms.Form):
message_to_secretary = forms.CharField(widget=forms.Textarea, required=False, help_text="Optional explanation of rejection, will be emailed to team secretary if filled in", strip=False)
@login_required
def reject_reviewer_assignment(request, name, assignment_id):
doc = get_object_or_404(Document, name=name)
review_assignment = get_object_or_404(ReviewAssignment, pk=assignment_id, state__in=["assigned", "accepted"])
review_request_past_deadline = review_assignment.review_request.deadline < datetime.date.today()
if not review_assignment.reviewer:
return redirect(review_request, name=review_assignment.review_request.doc.name, request_id=review_assignment.review_request.pk)
is_reviewer = user_is_person(request.user, review_assignment.reviewer.person)
can_manage_request = can_manage_review_requests_for_team(request.user, review_assignment.review_request.team)
if not (is_reviewer or can_manage_request):
permission_denied(request, "You do not have permission to perform this action")
if request.method == "POST" and request.POST.get("action") == "reject" and not review_request_past_deadline:
form = RejectReviewerAssignmentForm(request.POST)
if form.is_valid():
# reject the assignment
review_assignment.state = ReviewAssignmentStateName.objects.get(slug="rejected")
review_assignment.completed_on = datetime.datetime.now()
review_assignment.save()
descr = "Assignment of request for {} review by {} to {} was rejected".format(
review_assignment.review_request.type.name,
review_assignment.review_request.team.acronym.upper(),
review_assignment.reviewer.person
)
update_change_reason(review_assignment, descr)
ReviewAssignmentDocEvent.objects.create(
type="closed_review_assignment",
doc=review_assignment.review_request.doc,
rev=review_assignment.review_request.doc.rev,
by=request.user.person,
desc=descr,
review_assignment=review_assignment,
state=review_assignment.state,
)
policy = get_reviewer_queue_policy(review_assignment.review_request.team)
policy.return_reviewer_to_rotation_top(review_assignment.reviewer.person)
msg = render_to_string("review/reviewer_assignment_rejected.txt", {
"by": request.user.person,
"message_to_secretary": form.cleaned_data.get("message_to_secretary")
})
email_review_assignment_change(request, review_assignment, "Reviewer assignment rejected", msg, by=request.user.person, notify_secretary=True, notify_reviewer=True, notify_requested_by=False)
return redirect(review_request, name=review_assignment.review_request.doc.name, request_id=review_assignment.review_request.pk)
else:
form = RejectReviewerAssignmentForm()
return render(request, 'doc/review/reject_reviewer_assignment.html', {
'doc': doc,
'review_req': review_assignment.review_request,
'assignments': review_assignment.review_request.reviewassignment_set.all(),
'form': form,
'review_request_past_deadline': review_request_past_deadline,
})
@login_required
def withdraw_reviewer_assignment(request, name, assignment_id):
get_object_or_404(Document, name=name)
review_assignment = get_object_or_404(ReviewAssignment, pk=assignment_id, state__in=["assigned", "accepted"])
can_manage_request = can_manage_review_requests_for_team(request.user, review_assignment.review_request.team)
if not can_manage_request:
permission_denied(request, "You do not have permission to perform this action")
if request.method == "POST" and request.POST.get("action") == "withdraw":
review_assignment.state_id = 'withdrawn'
review_assignment.save()
descr = "Assignment of request for {} review by {} to {} was withdrawn".format(
review_assignment.review_request.type.name,
review_assignment.review_request.team.acronym.upper(),
review_assignment.reviewer.person, )
update_change_reason(review_assignment, descr)
ReviewAssignmentDocEvent.objects.create(
type="closed_review_assignment",
doc=review_assignment.review_request.doc,
rev=review_assignment.review_request.doc.rev,
by=request.user.person,
desc=descr,
review_assignment=review_assignment,
state=review_assignment.state,
)
policy = get_reviewer_queue_policy(review_assignment.review_request.team)
policy.return_reviewer_to_rotation_top(review_assignment.reviewer.person)
msg = "Review assignment withdrawn by %s"%request.user.person
email_review_assignment_change(request, review_assignment, "Reviewer assignment withdrawn", msg, by=request.user.person, notify_secretary=True, notify_reviewer=True, notify_requested_by=False)
return redirect(review_request, name=review_assignment.review_request.doc.name, request_id=review_assignment.review_request.pk)
return render(request, 'doc/review/withdraw_reviewer_assignment.html', {
'assignment': review_assignment,
})
@login_required
def mark_reviewer_assignment_no_response(request, name, assignment_id):
get_object_or_404(Document, name=name)
review_assignment = get_object_or_404(ReviewAssignment, pk=assignment_id, state__in=["assigned", "accepted"])
can_manage_request = can_manage_review_requests_for_team(request.user, review_assignment.review_request.team)
if not can_manage_request:
permission_denied(request, "You do not have permission to perform this action")
if request.method == "POST" and request.POST.get("action") == "noresponse":
review_assignment.state_id = 'no-response'
review_assignment.save()
descr = "Assignment of request for {} review by {} to {} was marked no-response".format(
review_assignment.review_request.type.name,
review_assignment.review_request.team.acronym.upper(),
review_assignment.reviewer.person)
update_change_reason(review_assignment, descr)
ReviewAssignmentDocEvent.objects.create(
type="closed_review_assignment",
doc=review_assignment.review_request.doc,
rev=review_assignment.review_request.doc.rev,
by=request.user.person,
desc=descr,
review_assignment=review_assignment,
state=review_assignment.state,
)
msg = "Review assignment marked 'No Response' by %s"%request.user.person
email_review_assignment_change(request, review_assignment, "Reviewer assignment marked no-response", msg, by=request.user.person, notify_secretary=True, notify_reviewer=True, notify_requested_by=False)
return redirect(review_request, name=review_assignment.review_request.doc.name, request_id=review_assignment.review_request.pk)
return render(request, 'doc/review/mark_reviewer_assignment_no_response.html', {
'assignment': review_assignment,
})
class SubmitUnsolicitedReviewTeamChoiceForm(forms.Form):
team = forms.ModelChoiceField(queryset=Group.objects.filter(reviewteamsettings__isnull=False), widget=forms.RadioSelect, empty_label=None)
def __init__(self, user, *args, **kwargs):
super(SubmitUnsolicitedReviewTeamChoiceForm, self).__init__(*args, **kwargs)
self.fields['team'].queryset = self.fields['team'].queryset.filter(role__person__user=user, role__name='secr')
@login_required()
def submit_unsolicited_review_choose_team(request, name):
"""
If a user is submitting an unsolicited review, and is allowed to do this for more
than one team, they are routed through this small view to pick a team.
    This is needed as the complete review form needs to be specific to a team.
This view only produces a redirect, so it's open for any user.
"""
doc = get_object_or_404(Document, name=name)
if request.method == "POST":
form = SubmitUnsolicitedReviewTeamChoiceForm(request.user, request.POST)
if form.is_valid():
return redirect("ietf.doc.views_review.complete_review",
name=doc.name, acronym=form.cleaned_data['team'].acronym)
else:
form = SubmitUnsolicitedReviewTeamChoiceForm(user=request.user)
return render(request, 'doc/review/submit_unsolicited_review.html', {
'doc': doc,
'form': form,
})
class CompleteReviewForm(forms.Form):
state = forms.ModelChoiceField(queryset=ReviewAssignmentStateName.objects.filter(slug__in=("completed", "part-completed")).order_by("-order"), widget=forms.RadioSelect, initial="completed")
reviewed_rev = forms.CharField(label="Reviewed revision", max_length=4)
result = forms.ModelChoiceField(queryset=ReviewResultName.objects.filter(used=True), widget=forms.RadioSelect, empty_label=None)
review_type = forms.ModelChoiceField(queryset=ReviewTypeName.objects.filter(used=True), widget=forms.RadioSelect, empty_label=None)
reviewer = forms.ModelChoiceField(queryset=Person.objects.all(), widget=forms.Select)
ACTIONS = [
("enter", "Enter review content (automatically posts to {mailing_list})"),
("upload", "Upload review content in text file (automatically posts to {mailing_list})"),
("link", "Link to review message already sent to {mailing_list}"),
]
review_submission = forms.ChoiceField(choices=ACTIONS, widget=forms.RadioSelect)
review_url = forms.URLField(label="Link to message", required=False)
review_file = forms.FileField(label="Text file to upload", required=False)
review_content = forms.CharField(widget=forms.Textarea, required=False, strip=False)
completion_date = DatepickerDateField(date_format="yyyy-mm-dd", picker_settings={ "autoclose": "1" }, initial=datetime.date.today, help_text="Date of announcement of the results of this review")
completion_time = forms.TimeField(widget=forms.HiddenInput, initial=datetime.time.min)
cc = MultiEmailField(required=False, help_text="Email addresses to send to in addition to the review team list")
email_ad = forms.BooleanField(label="Send extra email to the responsible AD suggesting early attention", required=False)
def __init__(self, assignment, doc, team, is_reviewer, *args, **kwargs):
self.assignment = assignment
self.doc = doc
super(CompleteReviewForm, self).__init__(*args, **kwargs)
known_revisions = NewRevisionDocEvent.objects.filter(doc=doc).order_by("time", "id").values_list("rev", "time", flat=False)
revising_review = assignment.state_id not in ["assigned", "accepted"] if assignment else False
if not is_reviewer:
new_field_order = ['review_submission', 'review_url', 'review_file', 'review_content']
new_field_order += [f for f in self.fields.keys() if f not in new_field_order]
self.order_fields(new_field_order)
if not revising_review:
self.fields["state"].choices = [
(slug, "{} - extra reviewer is to be assigned".format(label)) if slug == "part-completed" else (slug, label)
for slug, label in self.fields["state"].choices
]
if 'initial' in kwargs and assignment:
reviewed_rev_class = []
for r in known_revisions:
last_version = r[0]
if r[1] < assignment.review_request.time:
kwargs["initial"]["reviewed_rev"] = r[0]
reviewed_rev_class.append('reviewer-doc-past')
else:
reviewed_rev_class.append('reviewer-doc-ok')
            # After this, the revisions in the future are marked with green, but
            # we also want to mark the oldest one before the review was assigned,
            # so shift the list one step.
reviewed_rev_class.pop(0)
reviewed_rev_class.append('reviewer-doc-ok')
# If it is users own review, then default to latest version
if is_reviewer:
kwargs["initial"]["reviewed_rev"] = last_version
self.fields["reviewed_rev"].help_text = mark_safe(
" ".join("<a class=\"rev label label-default {0}\" title=\"{2:%Y-%m-%d}\">{1}</a>".format(reviewed_rev_class[i], *r)
for i, r in enumerate(known_revisions)))
else:
self.fields["reviewed_rev"].help_text = mark_safe(
" ".join("<a class=\"rev label label-default {0}\" title=\"{2:%Y-%m-%d}\">{1}</a>".format('', *r)
for i, r in enumerate(known_revisions)))
self.fields["result"].queryset = self.fields["result"].queryset.filter(reviewteamsettings_review_results_set__group=team)
def format_submission_choice(label):
if revising_review:
label = label.replace(" (automatically posts to {mailing_list})", "")
return label.format(mailing_list=team.list_email or "[error: team has no mailing list set]")
if assignment:
del self.fields["review_type"]
del self.fields["reviewer"]
else:
self.fields["review_type"].queryset = self.fields["review_type"].queryset.filter(
reviewteamsettings__group=team)
self.fields["reviewer"].queryset = self.fields["reviewer"].queryset.filter(role__name="reviewer", role__group=team)
self.fields["review_submission"].choices = [ (k, format_submission_choice(label)) for k, label in self.fields["review_submission"].choices]
if revising_review:
del self.fields["cc"]
elif is_reviewer:
del self.fields["completion_date"]
del self.fields["completion_time"]
def clean_reviewed_rev(self):
return clean_doc_revision(self.doc, self.cleaned_data.get("reviewed_rev"))
def clean_review_content(self):
return self.cleaned_data["review_content"].replace("\r", "")
def clean_review_file(self):
return get_cleaned_text_file_content(self.cleaned_data["review_file"])
def clean_review_url(self):
url = self.cleaned_data['review_url']
#scheme, netloc, path, parameters, query, fragment = urlparse(url)
if url:
r = requests.get(url)
if r.status_code != 200:
raise forms.ValidationError("Trying to retrieve the URL resulted in status code %s: %s. Please provide an URL that can be retrieved." % (r.status_code, r.reason))
return url
def clean(self):
if self.assignment and "@" in self.assignment.reviewer.person.ascii:
raise forms.ValidationError("Reviewer name must be filled in (the ASCII version is currently \"{}\" - since it contains an @ sign the name is probably still the original email address).".format(self.review_req.reviewer.person.ascii))
def require_field(f):
if not self.cleaned_data.get(f):
self.add_error(f, ValidationError("You must fill in this field."))
submission_method = self.cleaned_data.get("review_submission")
if submission_method == "enter":
require_field("review_content")
elif submission_method == "upload":
require_field("review_file")
elif submission_method == "link":
require_field("review_url")
@login_required
def complete_review(request, name, assignment_id=None, acronym=None):
doc = get_object_or_404(Document, name=name)
if assignment_id:
assignment = get_object_or_404(ReviewAssignment, pk=assignment_id)
revising_review = assignment.state_id not in ["assigned", "accepted"]
is_reviewer = user_is_person(request.user, assignment.reviewer.person)
can_manage_request = can_manage_review_requests_for_team(request.user, assignment.review_request.team)
if not (is_reviewer or can_manage_request):
permission_denied(request, "You do not have permission to perform this action")
team = assignment.review_request.team
team_acronym = assignment.review_request.team.acronym.lower()
request_type = assignment.review_request.type
reviewer = assignment.reviewer
mailtrigger_slug = 'review_completed_{}_{}'.format(team_acronym, request_type.slug)
# Description is only used if the mailtrigger does not exist yet.
mailtrigger_desc = 'Recipients when a {} {} review is completed'.format(team_acronym, request_type)
to, cc = gather_address_lists(
mailtrigger_slug,
create_from_slug_if_not_exists='review_completed',
desc_if_not_exists=mailtrigger_desc,
review_req=assignment.review_request
)
else:
team = get_object_or_404(Group, acronym=acronym)
if not can_manage_review_requests_for_team(request.user, team):
permission_denied(request, "You do not have permission to perform this action")
assignment = None
is_reviewer = False
revising_review = False
request_type = None
to, cc = [], []
if request.method == "POST":
form = CompleteReviewForm(assignment, doc, team, is_reviewer,
request.POST, request.FILES)
if form.is_valid():
review_submission = form.cleaned_data['review_submission']
if not assignment:
request_type = form.cleaned_data['review_type']
reviewer = form.cleaned_data['reviewer'].role_email('reviewer',group=team)
if assignment and assignment.review:
review = assignment.review
else:
# create review doc
name_components = [
"review",
strip_prefix(doc.name, "draft-"),
form.cleaned_data["reviewed_rev"],
team.acronym,
request_type.slug,
xslugify(reviewer.person.ascii_parts()[3]),
datetime.date.today().isoformat(),
]
review_name = "-".join(c for c in name_components if c).lower()
if not Document.objects.filter(name=review_name).exists():
review = Document.objects.create(name=review_name,type_id='review',group=team)
DocAlias.objects.create(name=review_name).docs.add(review)
else:
messages.warning(request, message='Attempt to save review failed: review document already exists. This most likely occurred because the review was submitted twice in quick succession. If you intended to submit a new review, rather than update an existing one, things are probably OK. Please verify that the shown review is what you expected.')
return redirect("ietf.doc.views_doc.document_main", name=review_name)
if not assignment:
# If this is an unsolicited review, create a new request and assignment.
# The assignment will be immediately closed after, sharing the usual
# processes for regular assigned reviews.
review_request = ReviewRequest.objects.create(
state_id='assigned',
type=form.cleaned_data['review_type'],
doc=doc,
team=team,
deadline=datetime.date.today(),
requested_by=Person.objects.get(user=request.user),
requested_rev=form.cleaned_data['reviewed_rev'],
)
assignment = ReviewAssignment.objects.create(
review_request=review_request,
state_id='assigned',
reviewer=form.cleaned_data['reviewer'].role_email('reviewer', group=team),
assigned_on=datetime.datetime.now(),
review = review,
)
review.rev = "00" if not review.rev else "{:02}".format(int(review.rev) + 1)
review.title = "{} Review of {}-{}".format(assignment.review_request.type.name, assignment.review_request.doc.name, form.cleaned_data["reviewed_rev"])
review.time = datetime.datetime.now()
if review_submission == "link":
review.external_url = form.cleaned_data['review_url']
e = NewRevisionDocEvent.objects.create(
type="new_revision",
doc=review,
by=request.user.person,
rev=review.rev,
desc='New revision available',
time=review.time,
)
review.set_state(State.objects.get(type="review", slug="active"))
review.save_with_history([e])
# save file on disk
if review_submission == "upload":
content = form.cleaned_data['review_file']
else:
content = form.cleaned_data['review_content']
filename = os.path.join(review.get_file_path(), '{}.txt'.format(review.name))
with io.open(filename, 'w', encoding='utf-8') as destination:
destination.write(content)
completion_datetime = datetime.datetime.now()
if "completion_date" in form.cleaned_data:
completion_datetime = datetime.datetime.combine(form.cleaned_data["completion_date"], form.cleaned_data.get("completion_time") or datetime.time.min)
# complete assignment
assignment.state = form.cleaned_data["state"]
assignment.reviewed_rev = form.cleaned_data["reviewed_rev"]
assignment.result = form.cleaned_data["result"]
assignment.review = review
assignment.completed_on = completion_datetime
assignment.save()
need_to_email_review = review_submission != "link" and assignment.review_request.team.list_email and not revising_review
submitted_on_different_date = completion_datetime.date() != datetime.date.today()
desc = "Request for {} review by {} {}: {}. Reviewer: {}.".format(
assignment.review_request.type.name,
assignment.review_request.team.acronym.upper(),
assignment.state.name,
assignment.result.name,
assignment.reviewer.person,
)
update_change_reason(assignment, desc)
if need_to_email_review:
desc += " " + "Sent review to list."
if revising_review:
desc += " Review has been revised by {}.".format(request.user.person)
elif submitted_on_different_date:
desc += " Submission of review completed at an earlier date."
close_event = ReviewAssignmentDocEvent(type="closed_review_assignment", review_assignment=assignment)
close_event.doc = assignment.review_request.doc
close_event.rev = assignment.review_request.doc.rev
close_event.by = request.user.person
close_event.desc = desc
close_event.state = assignment.state
close_event.time = datetime.datetime.now()
close_event.save()
# If the completion date is different, record when the initial review was made too.
if not revising_review and submitted_on_different_date:
desc = "Request for {} review by {} {}: {}. Reviewer: {}.".format(
assignment.review_request.type.name,
assignment.review_request.team.acronym.upper(),
assignment.state.name,
assignment.result.name,
assignment.reviewer.person,
)
initial_close_event = ReviewAssignmentDocEvent(type="closed_review_assignment",
review_assignment=assignment)
initial_close_event.doc = assignment.review_request.doc
initial_close_event.rev = assignment.review_request.doc.rev
initial_close_event.by = request.user.person
initial_close_event.desc = desc
initial_close_event.state = assignment.state
initial_close_event.time = completion_datetime
initial_close_event.save()
if assignment.state_id == "part-completed" and not revising_review:
existing_assignments = ReviewAssignment.objects.filter(review_request__doc=assignment.review_request.doc, review_request__team=assignment.review_request.team, state__in=("assigned", "accepted", "completed"))
subject = "Review of {}-{} completed partially".format(assignment.review_request.doc.name, assignment.reviewed_rev)
msg = render_to_string("review/partially_completed_review.txt", {
"existing_assignments": existing_assignments,
"by": request.user.person,
})
email_review_assignment_change(request, assignment, subject, msg, request.user.person, notify_secretary=True, notify_reviewer=False, notify_requested_by=False)
role = request.user.person.role_set.filter(group=assignment.review_request.team,name='reviewer').first()
if role and role.email.active:
author_email = role.email
frm = role.formatted_email()
else:
author_email = request.user.person.email()
frm = request.user.person.formatted_email()
author, created = DocumentAuthor.objects.get_or_create(document=review, email=author_email, person=request.user.person)
if need_to_email_review:
# email the review
subject = "{} {} {} of {}-{}".format(assignment.review_request.team.acronym.capitalize(),assignment.review_request.type.name.lower(),"partial review" if assignment.state_id == "part-completed" else "review", assignment.review_request.doc.name, assignment.reviewed_rev)
related_groups = [ assignment.review_request.team, ]
if assignment.review_request.doc.group:
related_groups.append(assignment.review_request.doc.group)
cc = form.cleaned_data["cc"]
msg = Message.objects.create(
by=request.user.person,
subject=subject,
frm=frm,
to=", ".join(to),
cc=", ".join(cc),
body = render_to_string("review/completed_review.txt", {
"assignment": assignment,
"content": content,
}),
)
msg.related_groups.add(*related_groups)
msg.related_docs.add(assignment.review_request.doc)
msg = send_mail_message(request, msg)
list_name = mailarch.list_name_from_email(assignment.review_request.team.list_email)
if list_name:
review.external_url = mailarch.construct_message_url(list_name, email.utils.unquote(msg["Message-ID"].strip()))
review.save_with_history([close_event])
if form.cleaned_data['email_ad'] or assignment.result in assignment.review_request.team.reviewteamsettings.notify_ad_when.all():
(to, cc) = gather_address_lists('review_notify_ad',review_req = assignment.review_request).as_strings()
msg_txt = render_to_string("review/notify_ad.txt", {
"to": to,
"cc": cc,
"assignment": assignment,
"settings": settings,
"explicit_request": form.cleaned_data['email_ad'],
})
msg = infer_message(msg_txt)
msg.by = request.user.person
msg.save()
send_mail_message(request, msg)
return redirect("ietf.doc.views_doc.document_main", name=assignment.review.name)
else:
initial={
"reviewed_rev": assignment.reviewed_rev if assignment else None,
"result": assignment.result_id if assignment else None,
"cc": ", ".join(cc),
}
try:
initial['review_content'] = render_to_string('/group/%s/review/content_templates/%s.txt' % (assignment.review_request.team.acronym,
request_type.slug), {'assignment':assignment, 'today':datetime.date.today()})
except (TemplateDoesNotExist, AttributeError):
pass
form = CompleteReviewForm(assignment, doc, team, is_reviewer, initial=initial)
mail_archive_query_urls = mailarch.construct_query_urls(doc, team)
return render(request, 'doc/review/complete_review.html', {
'doc': doc,
'team': team,
'assignment': assignment,
'form': form,
'mail_archive_query_urls': mail_archive_query_urls,
'revising_review': revising_review,
'review_to': to,
'review_cc': cc,
'is_reviewer': is_reviewer,
})
def search_mail_archive(request, name, acronym=None, assignment_id=None):
if assignment_id:
assignment = get_object_or_404(ReviewAssignment, pk=assignment_id)
team = assignment.review_request.team
else:
assignment = None
team = get_object_or_404(Group, acronym=acronym)
doc = get_object_or_404(Document, name=name)
is_reviewer = assignment and user_is_person(request.user, assignment.reviewer.person)
can_manage_request = can_manage_review_requests_for_team(request.user, team)
if not (is_reviewer or can_manage_request):
permission_denied(request, "You do not have permission to perform this action")
res = mailarch.construct_query_urls(doc, team, query=request.GET.get("query"))
if not res:
return JsonResponse({ "error": "Couldn't do lookup in mail archive - don't know where to look"})
MAX_RESULTS = 30
try:
res["messages"] = mailarch.retrieve_messages(res["query_data_url"])[:MAX_RESULTS]
for message in res["messages"]:
try:
revision_guess = message["subject"].split(name)[1].split('-')[1]
message["revision_guess"] = revision_guess if revision_guess.isnumeric() else None
except IndexError:
pass
except KeyError as e:
res["error"] = "No results found (%s)" % str(e)
except Exception as e:
res["error"] = "Retrieval from mail archive failed: %s" % str(e)
# raise # useful when debugging
return JsonResponse(res)
class EditReviewRequestCommentForm(forms.ModelForm):
comment = forms.CharField(widget=forms.Textarea, strip=False)
class Meta:
fields = ['comment',]
model = ReviewRequest
def edit_comment(request, name, request_id):
review_req = get_object_or_404(ReviewRequest, pk=request_id)
if not can_request_review_of_doc(request.user, review_req.doc):
permission_denied(request, "You do not have permission to perform this action")
if request.method == "POST":
form = EditReviewRequestCommentForm(request.POST, instance=review_req)
if form.is_valid():
form.save()
return redirect(review_request, name=review_req.doc.name, request_id=review_req.pk)
else:
form = EditReviewRequestCommentForm(instance=review_req)
return render(request, 'doc/review/edit_request_comment.html', {
'review_req': review_req,
'form' : form,
})
class EditReviewRequestDeadlineForm(forms.ModelForm):
deadline = DatepickerDateField(date_format="yyyy-mm-dd", picker_settings={ "autoclose": "1", "start-date": "+0d" })
class Meta:
fields = ['deadline',]
model = ReviewRequest
def clean_deadline(self):
v = self.cleaned_data.get('deadline')
if v < datetime.date.today():
raise forms.ValidationError("Select today or a date in the future.")
return v
def edit_deadline(request, name, request_id):
review_req = get_object_or_404(ReviewRequest, pk=request_id)
if not can_request_review_of_doc(request.user, review_req.doc):
permission_denied(request, "You do not have permission to perform this action")
old_deadline = review_req.deadline
if request.method == "POST":
form = EditReviewRequestDeadlineForm(request.POST, instance=review_req)
if form.is_valid():
if form.cleaned_data['deadline'] != old_deadline:
form.save()
subject = "Deadline changed: {} {} review of {}-{}".format(review_req.team.acronym.capitalize(),review_req.type.name.lower(), review_req.doc.name, review_req.requested_rev)
msg = render_to_string("review/deadline_changed.txt", {
"review_req": review_req,
"old_deadline": old_deadline,
"by": request.user.person,
})
email_review_request_change(request, review_req, subject, msg, request.user.person, notify_secretary=True, notify_reviewer=True, notify_requested_by=True)
return redirect(review_request, name=review_req.doc.name, request_id=review_req.pk)
else:
form = EditReviewRequestDeadlineForm(instance=review_req)
return render(request, 'doc/review/edit_request_deadline.html', {
'review_req': review_req,
'form' : form,
})
class ReviewWishAddForm(forms.Form):
team = forms.ModelChoiceField(queryset=Group.objects.filter(reviewteamsettings__isnull=False),
widget=forms.RadioSelect, empty_label=None, required=True)
def __init__(self, user, doc, *args, **kwargs):
super(ReviewWishAddForm, self).__init__(*args, **kwargs)
self.person = get_object_or_404(Person, user=user)
self.doc = doc
self.fields['team'].queryset = self.fields['team'].queryset.filter(role__person=self.person,
role__name='reviewer')
if len(self.fields['team'].queryset) == 1:
self.team = self.fields['team'].queryset.get()
del self.fields['team']
def save(self):
team = self.team if hasattr(self, 'team') else self.cleaned_data['team']
ReviewWish.objects.get_or_create(person=self.person, team=team, doc=self.doc)
@login_required
def review_wish_add(request, name):
doc = get_object_or_404(Document, docalias__name=name)
if request.method == "POST":
form = ReviewWishAddForm(request.user, doc, request.POST)
if form.is_valid():
form.save()
return _generate_ajax_or_redirect_response(request, doc)
else:
form = ReviewWishAddForm(request.user, doc)
return render(request, "doc/review/review_wish_add.html", {
"doc": doc,
"form": form,
})
@login_required
def review_wishes_remove(request, name):
doc = get_object_or_404(Document, docalias__name=name)
person = get_object_or_404(Person, user=request.user)
if request.method == "POST":
ReviewWish.objects.filter(person=person, doc=doc).delete()
return _generate_ajax_or_redirect_response(request, doc)
return render(request, "doc/review/review_wishes_remove.html", {
"name": doc.name,
})
def _generate_ajax_or_redirect_response(request, doc):
redirect_url = request.GET.get('next')
url_is_safe = is_safe_url(url=redirect_url, allowed_hosts=request.get_host(),
require_https=request.is_secure())
if request.is_ajax():
return HttpResponse(json.dumps({'success': True}), content_type='application/json')
elif url_is_safe:
return HttpResponseRedirect(redirect_url)
else:
return HttpResponseRedirect(doc.get_absolute_url())
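# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: how views like the
# ones above are typically mounted in a URLconf, to make the expected
# arguments (document name, review request id) explicit. The real datatracker
# URLconf lives elsewhere; the patterns and the django.urls.path() style below
# are assumptions, not the project's actual routing.
# ---------------------------------------------------------------------------
def _example_urlpatterns():  # pragma: no cover - documentation aid only
    from django.urls import path  # standard Django >= 2.0 routing API
    return [
        path('doc/<str:name>/review/<int:request_id>/editcomment/', edit_comment),
        path('doc/<str:name>/review/<int:request_id>/editdeadline/', edit_deadline),
        path('doc/<str:name>/reviewwish/add/', review_wish_add),
        path('doc/<str:name>/reviewwish/remove/', review_wishes_remove),
    ]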
| 47.971109
| 363
| 0.657354
|
6cd229c98dffcfd0409a025d1a0767768b3d190c
| 469
|
py
|
Python
|
repl.py
|
rcarmo/newsfeed-corpus
|
b1f649e78b3c115c8f66b298fd72cdc8605cff4c
|
[
"MIT"
] | 21
|
2016-11-22T12:15:37.000Z
|
2021-09-23T18:20:32.000Z
|
repl.py
|
rcarmo/newsfeed-corpus
|
b1f649e78b3c115c8f66b298fd72cdc8605cff4c
|
[
"MIT"
] | null | null | null |
repl.py
|
rcarmo/newsfeed-corpus
|
b1f649e78b3c115c8f66b298fd72cdc8605cff4c
|
[
"MIT"
] | 2
|
2019-12-19T20:37:01.000Z
|
2020-01-13T08:28:48.000Z
|
#!/usr/bin/env python3
""" REPL for database inspection """
from config import (CHECK_INTERVAL, DATABASE_NAME, FETCH_INTERVAL,
MAX_CONCURRENT_REQUESTS, MONGO_SERVER, log)
from datetime import datetime, timedelta
from pymongo import MongoClient
from code import interact
from bpython import embed
def main():
client = MongoClient(MONGO_SERVER)
db = client[DATABASE_NAME]
embed(locals_=locals())
if __name__ == '__main__':
main()
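# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original script: the kind of queries
# one might run once the bpython shell is up. Only standard pymongo calls are
# used (list_collection_names needs pymongo >= 3.6, count_documents >= 3.7);
# the idea that the corpus keeps its feeds/items in named collections is an
# assumption about this particular database.
# ---------------------------------------------------------------------------
def _example_queries(db):  # pragma: no cover - documentation aid only
    names = db.list_collection_names()
    return {name: db[name].count_documents({}) for name in names}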
| 23.45
| 66
| 0.724947
|
ed14ba5f20945812b39f270b9539bedaa300889a
| 1,669
|
py
|
Python
|
cms/plugins/text/cms_plugins.py
|
christianbertschy/django-cms-2.0
|
a98d035b47346b7f6924a807338be3af6566537f
|
[
"BSD-3-Clause"
] | 1
|
2015-09-24T00:36:34.000Z
|
2015-09-24T00:36:34.000Z
|
cms/plugins/text/cms_plugins.py
|
christianbertschy/django-cms-2.0
|
a98d035b47346b7f6924a807338be3af6566537f
|
[
"BSD-3-Clause"
] | null | null | null |
cms/plugins/text/cms_plugins.py
|
christianbertschy/django-cms-2.0
|
a98d035b47346b7f6924a807338be3af6566537f
|
[
"BSD-3-Clause"
] | null | null | null |
from cms.plugin_pool import plugin_pool
from cms.plugin_base import CMSPluginBase
from django.utils.translation import ugettext_lazy as _
from models import Text
from cms.plugins.text.forms import TextForm
from cms.plugins.text.widgets import WYMEditor
from cms.plugins.text.utils import plugin_tags_to_user_html
from django.forms.fields import CharField
class TextPlugin(CMSPluginBase):
model = Text
name = _("Text")
form = TextForm
render_template = "cms/plugins/text.html"
def get_editor_widget(self, request, plugins):
"""
Returns the Django form Widget to be used for
the text area
"""
return WYMEditor(installed_plugins=plugins)
def get_form_class(self, request, plugins):
"""
Returns a subclass of Form to be used by this plugin
"""
# We avoid mutating the Form declared above by subclassing
class TextPluginForm(self.form):
pass
widget = self.get_editor_widget(request, plugins)
TextPluginForm.declared_fields["body"] = CharField(widget=widget, required=False)
return TextPluginForm
def get_form(self, request, obj=None, **kwargs):
plugins = plugin_pool.get_text_enabled_plugins(self.placeholder)
form = self.get_form_class(request, plugins)
kwargs['form'] = form # override standard form
return super(TextPlugin, self).get_form(request, obj, **kwargs)
def render(self, context, instance, placeholder):
return {'body':plugin_tags_to_user_html(instance.body, context, placeholder),
'placeholder':placeholder}
plugin_pool.register_plugin(TextPlugin)
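# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: why get_form_class()
# subclasses self.form before injecting the "body" field. Django's form
# metaclass builds a fresh declared_fields mapping for each subclass, so the
# module-level TextForm is never mutated; the dependency-free analogue below
# imitates that with an explicit dict() copy. Names here are hypothetical.
# ---------------------------------------------------------------------------
def _example_subclass_then_patch():  # pragma: no cover - documentation aid only
    class BaseForm(object):
        declared_fields = {'title': 'CharField'}
    class PatchedForm(BaseForm):
        declared_fields = dict(BaseForm.declared_fields)
    PatchedForm.declared_fields['body'] = 'CharField(widget=WYMEditor)'
    assert 'body' not in BaseForm.declared_fields  # base form left untouched
    return PatchedForm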
| 35.510638
| 89
| 0.705812
|
1f893d06ce997af05443b3ec4b01c0daff3211dc
| 2,814
|
py
|
Python
|
lib/googlecloudsdk/compute/subcommands/url_maps/remove_path_matcher.py
|
IsaacHuang/google-cloud-sdk
|
52afa5d1a75dff08f4f5380c5cccc015bf796ca5
|
[
"Apache-2.0"
] | null | null | null |
lib/googlecloudsdk/compute/subcommands/url_maps/remove_path_matcher.py
|
IsaacHuang/google-cloud-sdk
|
52afa5d1a75dff08f4f5380c5cccc015bf796ca5
|
[
"Apache-2.0"
] | null | null | null |
lib/googlecloudsdk/compute/subcommands/url_maps/remove_path_matcher.py
|
IsaacHuang/google-cloud-sdk
|
52afa5d1a75dff08f4f5380c5cccc015bf796ca5
|
[
"Apache-2.0"
] | 2
|
2020-07-25T05:03:06.000Z
|
2020-11-04T04:55:57.000Z
|
# Copyright 2014 Google Inc. All Rights Reserved.
"""Command for removing a path matcher from a URL map."""
import copy
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.compute.lib import base_classes
class RemovePathMatcher(base_classes.ReadWriteCommand):
"""Remove a path matcher from a URL map."""
@staticmethod
def Args(parser):
parser.add_argument(
'--path-matcher-name',
required=True,
help='The name of the path matcher to remove.')
parser.add_argument(
'name',
help='The name of the URL map.')
@property
def service(self):
return self.compute.urlMaps
@property
def resource_type(self):
return 'urlMaps'
def CreateReference(self, args):
return self.CreateGlobalReference(args.name)
def GetGetRequest(self, args):
"""Returns the request for the existing URL map resource."""
return (self.service,
'Get',
self.messages.ComputeUrlMapsGetRequest(
urlMap=self.ref.Name(),
project=self.project))
def GetSetRequest(self, args, replacement, existing):
return (self.service,
'Update',
self.messages.ComputeUrlMapsUpdateRequest(
urlMap=self.ref.Name(),
urlMapResource=replacement,
project=self.project))
def Modify(self, args, existing):
"""Returns a modified URL map message."""
replacement = copy.deepcopy(existing)
# Removes the path matcher.
new_path_matchers = []
path_matcher_found = False
for path_matcher in existing.pathMatchers:
if path_matcher.name == args.path_matcher_name:
path_matcher_found = True
else:
new_path_matchers.append(path_matcher)
if not path_matcher_found:
raise exceptions.ToolException(
'No path matcher with the name [{0}] was found.'.format(
args.path_matcher_name))
replacement.pathMatchers = new_path_matchers
# Removes all host rules that refer to the path matcher.
new_host_rules = []
for host_rule in existing.hostRules:
if host_rule.pathMatcher != args.path_matcher_name:
new_host_rules.append(host_rule)
replacement.hostRules = new_host_rules
return replacement
RemovePathMatcher.detailed_help = {
'brief': 'Remove a path matcher from a URL map',
'DESCRIPTION': """\
*{command}* is used to remove a path matcher from a URL
map. When a path matcher is removed, all host rules that
refer to the path matcher are also removed.
""",
'EXAMPLES': """\
To remove the path matcher named ``MY-MATCHER'' from the URL map named
``MY-URL-MAP'', you can use this command:
          $ {command} MY-URL-MAP --path-matcher-name MY-MATCHER
""",
}
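# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original command: the same two-step
# filtering Modify() performs, expressed over plain dicts so it can be run
# without the Cloud SDK message classes. Key names mirror the URL map fields
# used above; the dict representation itself is an assumption for the sketch.
# ---------------------------------------------------------------------------
def _example_remove_path_matcher(url_map, matcher_name):  # pragma: no cover
    kept_matchers = [m for m in url_map['pathMatchers'] if m['name'] != matcher_name]
    if len(kept_matchers) == len(url_map['pathMatchers']):
        raise ValueError('No path matcher with the name [{0}] was found.'.format(matcher_name))
    # Host rules that pointed at the removed matcher must be dropped as well.
    kept_rules = [r for r in url_map['hostRules'] if r['pathMatcher'] != matcher_name]
    return dict(url_map, pathMatchers=kept_matchers, hostRules=kept_rules)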
| 29.93617
| 78
| 0.657072
|
2c95e55a51006180c686982b336fa6bb709a5ef5
| 2,875
|
py
|
Python
|
examples/test_healthcheck.py
|
zmdismai/tcf
|
3903e0a2f444c3aa14647a5147a0df76a49e4195
|
[
"Apache-2.0"
] | null | null | null |
examples/test_healthcheck.py
|
zmdismai/tcf
|
3903e0a2f444c3aa14647a5147a0df76a49e4195
|
[
"Apache-2.0"
] | null | null | null |
examples/test_healthcheck.py
|
zmdismai/tcf
|
3903e0a2f444c3aa14647a5147a0df76a49e4195
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/python2
#
# Copyright (c) 2017 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# We don't care for documenting all the interfaces, names should be
# self-descriptive:
#
# - pylint: disable = missing-docstring
import os
import tcfl.tc
import tcfl.tl
@tcfl.tc.tags(**tcfl.tl.zephyr_tags())
# Ask for a target that defines an zephyr_board field, which indicates
# it can run the Zephyr OS
@tcfl.tc.target("zephyr_board",
app_zephyr = os.path.join(tcfl.tl.ZEPHYR_BASE,
"samples", "hello_world"),
mode = "all")
@tcfl.tc.tags(ignore_example = True)
class _healtcheck_zephyr(tcfl.tc.tc_c):
def configure_50_target(self, target):
self.overriden_configure_50_target(target)
@staticmethod
def eval(target):
target.expect("Hello World! %s" % target.kws['zephyr_board'])
# Ignore QEMU Zephyr, as they cannot power on/off w/o an image -- let
# the Hello World test test those
@tcfl.tc.target("not type:'^qemu-zephyr'", mode = "all")
@tcfl.tc.tags(ignore_example = True)
class _healtcheck_power(tcfl.tc.tc_c):
@staticmethod
def eval_power(target):
if not getattr(target, "power"):
raise tcfl.tc.skip_e("No power control interface")
target.report_info("Powering off")
target.power.off()
target.report_pass("Powered off")
target.report_info("Querying power status")
power = target.power.get()
if power != 0:
raise tcfl.tc.failed_e("Power should be 0, reported %d" % power)
target.report_pass("Power is reported correctly as %d" % power)
target.report_info("Powering on")
target.power.on()
target.report_pass("Powered on")
target.report_info("Querying power status")
power = target.power.get()
if power == 0:
raise tcfl.tc.failed_e(
"Power should be not 0, reported %d" % power)
target.report_pass("Power is reported correctly as %d" % power)
@tcfl.tc.target(mode = "all")
@tcfl.tc.tags(ignore_example = True)
class _healtcheck_console(tcfl.tc.tc_c):
@staticmethod
def eval_console(target):
if not getattr(target, "console"):
raise tcfl.tc.skip_e("No console interface")
target.report_info("reading default console")
target.console.read()
target.report_pass("read default console")
target.report_info("listing consoles")
consoles = target.console.list()
target.report_pass("listed consoles: %s" % " ".join(consoles))
for console in consoles:
target.report_info("reading console '%s'" % console)
target.console.read(console_id = console)
target.report_pass("read console '%s'" % console)
# We don't test writing...don't want to mess anything up
| 33.430233
| 76
| 0.642783
|
39dfd9e3bfaf989ab98084737e7ac77c221b2e94
| 577
|
py
|
Python
|
apps/announcements/migrations/0001_initial.py
|
Ev1dentSnow/ArtemisAPI_django
|
ca7ef0ccc97114f2c5439b7b1bbc0e635facf020
|
[
"MIT"
] | null | null | null |
apps/announcements/migrations/0001_initial.py
|
Ev1dentSnow/ArtemisAPI_django
|
ca7ef0ccc97114f2c5439b7b1bbc0e635facf020
|
[
"MIT"
] | null | null | null |
apps/announcements/migrations/0001_initial.py
|
Ev1dentSnow/ArtemisAPI_django
|
ca7ef0ccc97114f2c5439b7b1bbc0e635facf020
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.5 on 2021-07-08 23:39
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Announcement',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False)),
('subject', models.TextField()),
('content', models.TextField()),
],
options={
'db_table': 'announcements',
},
),
]
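# ---------------------------------------------------------------------------
# Illustrative only, not part of the generated migration: with the app
# installed as "announcements", the SQL Django derives from the CreateModel
# operation above can be previewed and applied with the stock management
# commands (the surrounding project layout is assumed):
#
#     python manage.py sqlmigrate announcements 0001
#     python manage.py migrate announcements
# ---------------------------------------------------------------------------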
| 22.192308
| 79
| 0.516464
|
e5edca6d60e9de36c1adb95824acef18b3dcf554
| 2,553
|
py
|
Python
|
src/sqlfluff/core/rules/std/L045.py
|
swanderz/sqlfluff
|
668f23500ce5d06f4de67333a8957811c2df485f
|
[
"MIT"
] | null | null | null |
src/sqlfluff/core/rules/std/L045.py
|
swanderz/sqlfluff
|
668f23500ce5d06f4de67333a8957811c2df485f
|
[
"MIT"
] | null | null | null |
src/sqlfluff/core/rules/std/L045.py
|
swanderz/sqlfluff
|
668f23500ce5d06f4de67333a8957811c2df485f
|
[
"MIT"
] | null | null | null |
"""Implementation of Rule L045."""
from typing import Dict, List
from sqlfluff.core.dialects.base import Dialect
from sqlfluff.core.rules.base import BaseRule, LintResult
from sqlfluff.core.rules.analysis.select_crawler import SelectCrawler
from sqlfluff.core.rules.doc_decorators import document_fix_compatible
@document_fix_compatible
class Rule_L045(BaseRule):
"""Query defines a CTE (common-table expression) but does not use it.
| **Anti-pattern**
| Defining a CTE that is not used by the query is harmless, but it means
    | the code is unnecessary and could be removed.
.. code-block::
WITH cte1 AS (
SELECT a
FROM t
),
cte2 AS (
SELECT b
FROM u
)
SELECT *
FROM cte1
| **Best practice**
| Remove unused CTEs.
.. code-block::
WITH cte1 AS (
SELECT a
FROM t
)
SELECT *
FROM cte1
"""
@classmethod
def _visit_sources(
cls,
select_info_list: List[SelectCrawler],
dialect: Dialect,
queries: Dict[str, List[SelectCrawler]],
):
for select_info in select_info_list:
# Process nested SELECTs.
for source in SelectCrawler.crawl(
select_info.select_statement, queries, dialect
):
if isinstance(source, list):
cls._visit_sources(source, dialect, queries)
# Process the query's sources.
for alias_info in select_info.select_info.table_aliases:
# Does the query read from a CTE? If so, visit the CTE.
for target_segment in alias_info.from_expression_element.get_children(
"table_expression", "join_clause"
):
target = target_segment.raw
if target in queries:
select_info_target = queries.pop(target)
if isinstance(select_info_target, list):
cls._visit_sources(select_info_target, dialect, queries)
def _eval(self, segment, dialect, **kwargs):
if segment.is_type("statement"):
queries = SelectCrawler.gather(segment, dialect)
if None in queries:
# Begin analysis at the final, outer query (key=None).
self._visit_sources(queries.pop(None), dialect, queries)
if queries:
return LintResult(anchor=segment)
return None
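# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the rule module: feeding the docstring's
# anti-pattern through the linter should surface an L045 violation. This
# assumes the top-level convenience API sqlfluff.lint() is available in the
# installed version; if not, the CLI form `sqlfluff lint query.sql --rules
# L045` is the rough equivalent.
# ---------------------------------------------------------------------------
def _example_trigger_l045():  # pragma: no cover - documentation aid only
    import sqlfluff  # assumed importable alongside this module
    sql = ("WITH cte1 AS (SELECT a FROM t), cte2 AS (SELECT b FROM u) "
           "SELECT * FROM cte1")
    # cte2 is never referenced, so L045 should appear among the violations.
    return sqlfluff.lint(sql)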
| 31.134146
| 86
| 0.585586
|
e52214da164209c452fbddfdeee7c04b2b70b105
| 875
|
py
|
Python
|
checker.py
|
Wh1t3Fox/ar15-sales
|
b1680c8b4fb06aab5a52c338ac1747ac5ec61ca4
|
[
"MIT"
] | null | null | null |
checker.py
|
Wh1t3Fox/ar15-sales
|
b1680c8b4fb06aab5a52c338ac1747ac5ec61ca4
|
[
"MIT"
] | null | null | null |
checker.py
|
Wh1t3Fox/ar15-sales
|
b1680c8b4fb06aab5a52c338ac1747ac5ec61ca4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
'''
Pretty simplistic web scraper for AR parts
'''
from bs4 import BeautifulSoup as bs
import requests
import yaml
_BRAND_ALERT = False
# Parse config file
def get_config(filename):
global _BRAND_ALERT
with open(filename) as fr:
config = yaml.safe_load(fr)
if config['brand_alert']:
_BRAND_ALERT = True
return config
# Query webpage and return HTML
def get_html(url):
    content = requests.get(url).text
    # Lower-case the raw markup before parsing so later matching is
    # case-insensitive; BeautifulSoup objects themselves have no .lower().
    return bs(content.lower(), 'html.parser')
# Do any of our parts show up in the HTML?
def check_sale(html, parts):
    # Minimal check: report any configured part name found in the page text
    page_text = html.get_text()
    return [part for part in parts if part.lower() in page_text]
def main():
# Parse t3h config
config = get_config('config.yml')
print(config)
# Iterate through websites
for site in config['sites']:
html = get_html(site)
check_sale(html, config['parts'])
if __name__ == '__main__':
main()
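# ---------------------------------------------------------------------------
# Illustrative only, not part of the original script: a config.yml shape the
# code above would accept. The keys (brand_alert, sites, parts) mirror what
# get_config() and main() read; the URLs and part names are placeholders.
#
#     brand_alert: true
#     sites:
#       - https://example.com/ar15-parts
#     parts:
#       - bolt carrier group
#       - lower parts kit
# ---------------------------------------------------------------------------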
| 18.617021
| 45
| 0.657143
|
1ba780da4d696dc7898ede198c8ff16b004ee180
| 184,510
|
py
|
Python
|
tests/unit/constraints/test_tabular.py
|
HDI-Project/SDV
|
04cfd6557d3676fa487e49e1cbd56eecd69a9bc6
|
[
"MIT"
] | 39
|
2018-07-07T01:02:42.000Z
|
2019-12-17T13:53:47.000Z
|
tests/unit/constraints/test_tabular.py
|
HDI-Project/SDV
|
04cfd6557d3676fa487e49e1cbd56eecd69a9bc6
|
[
"MIT"
] | 75
|
2018-06-29T00:35:02.000Z
|
2019-12-23T16:59:55.000Z
|
tests/unit/constraints/test_tabular.py
|
HDI-Project/SDV
|
04cfd6557d3676fa487e49e1cbd56eecd69a9bc6
|
[
"MIT"
] | 31
|
2018-10-29T13:16:38.000Z
|
2020-01-02T13:10:42.000Z
|
"""Tests for the sdv.constraints.tabular module."""
import uuid
from datetime import datetime
from unittest.mock import Mock
import numpy as np
import pandas as pd
import pytest
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.constraints.tabular import (
Between, ColumnFormula, CustomConstraint, FixedCombinations, GreaterThan, Negative,
OneHotEncoding, Positive, Rounding, Unique)
def dummy_transform_table(table_data):
return table_data
def dummy_reverse_transform_table(table_data):
return table_data
def dummy_is_valid_table(table_data):
return [True] * len(table_data)
def dummy_transform_table_column(table_data, column):
return table_data
def dummy_reverse_transform_table_column(table_data, column):
return table_data
def dummy_is_valid_table_column(table_data, column):
return [True] * len(table_data[column])
def dummy_transform_column(column_data):
return column_data
def dummy_reverse_transform_column(column_data):
return column_data
def dummy_is_valid_column(column_data):
return [True] * len(column_data)
class TestCustomConstraint():
def test___init__(self):
"""Test the ``CustomConstraint.__init__`` method.
The ``transform``, ``reverse_transform`` and ``is_valid`` methods
should be replaced by the given ones, importing them if necessary.
Setup:
- Create dummy functions (created above this class).
Input:
- dummy transform and revert_transform + is_valid FQN
Output:
- Instance with all the methods replaced by the dummy versions.
"""
is_valid_fqn = __name__ + '.dummy_is_valid_table'
# Run
instance = CustomConstraint(
transform=dummy_transform_table,
reverse_transform=dummy_reverse_transform_table,
is_valid=is_valid_fqn
)
# Assert
assert instance._transform == dummy_transform_table
assert instance._reverse_transform == dummy_reverse_transform_table
assert instance._is_valid == dummy_is_valid_table
def test__run_transform_table(self):
"""Test the ``CustomConstraint._run`` method.
        The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy transform function with ``table_data`` argument.
Side Effects:
- Run transform function once with ``table_data`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_table,
return_value=table_data)
# Run
instance = CustomConstraint(transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args
dummy_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_table(self):
"""Test the ``CustomConstraint._run`` method.
        The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy reverse transform function with ``table_data`` argument.
Side Effects:
- Run reverse transform function once with ``table_data`` as input.
Output:
- applied identity transformation "table_data = reverse_transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_table,
return_value=table_data)
# Run
instance = CustomConstraint(reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args
dummy_reverse_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_table(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
        The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy is valid function with ``table_data`` argument.
Side Effects:
- Run is valid function once with ``table_data`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_table)
# Run
instance = CustomConstraint(is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args
dummy_is_valid_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
np.testing.assert_array_equal(is_valid, expected_out)
def test__run_transform_table_column(self):
"""Test the ``CustomConstraint._run`` method.
        The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy transform function with ``table_data`` and ``column`` arguments.
Side Effects:
- Run transform function once with ``table_data`` and ``column`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_table_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args
assert called[0][1] == 'a'
dummy_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_transform_missing_column(self):
"""Test the ``CustomConstraint._run`` method.
        The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" that is missing the constraint column.
Setup:
- Pass dummy transform function with ``table_data`` and ``column`` arguments.
Side Effects:
- MissingConstraintColumnError is thrown.
"""
# Setup
table_data = pd.DataFrame({'b': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_table_column,
return_value=table_data)
# Run and assert
instance = CustomConstraint(columns='a', transform=dummy_transform_mock)
with pytest.raises(MissingConstraintColumnError):
instance.transform(table_data)
def test__run_reverse_transform_table_column(self):
"""Test the ``CustomConstraint._run`` method.
        The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy reverse transform function with ``table_data`` and ``column`` arguments.
Side Effects:
- Run reverse transform function once with ``table_data`` and ``column`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_table_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args
assert called[0][1] == 'a'
dummy_reverse_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_table_column(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
        The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy is valid function with ``table_data`` and ``column`` argument.
Side Effects:
- Run is valid function once with ``table_data`` and ``column`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_table_column)
# Run
instance = CustomConstraint(columns='a', is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args
assert called[0][1] == 'a'
dummy_is_valid_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
np.testing.assert_array_equal(is_valid, expected_out)
def test__run_transform_column(self):
"""Test the ``CustomConstraint._run`` method.
        The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy transform function with ``column_data`` argument.
Side Effects:
- Run transform function twice, once with the attempt of
``table_data`` and ``column`` and second with ``column_data`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_column(self):
"""Test the ``CustomConstraint._run`` method.
        The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy reverse transform function with ``column_data`` argument.
Side Effects:
- Run reverse transform function twice, once with the attempt of
``table_data`` and ``column`` and second with ``column_data`` as input.
Output:
- Applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_column(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
        The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy is valid function with ``column_data`` argument.
Side Effects:
- Run is valid function twice, once with the attempt of
``table_data`` and ``column`` and second with ``column_data`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_column)
# Run
instance = CustomConstraint(columns='a', is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
np.testing.assert_array_equal(is_valid, expected_out)
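# Illustrative sketch, not a test: how the column-based hooks exercised above
# are wired together by a library user. The column name and the positivity
# rule are hypothetical; only the CustomConstraint keyword arguments shown in
# the tests are relied on.
def _example_custom_constraint():  # pragma: no cover - documentation aid only
    def positive_is_valid(column_data):
        return column_data > 0
    return CustomConstraint(columns='a', is_valid=positive_is_valid)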
class TestFixedCombinations():
def test___init__(self):
"""Test the ``FixedCombinations.__init__`` method.
        It is expected to create a new Constraint instance that receives the names
        of the columns that need to produce fixed combinations.
Side effects:
        - instance._columns == columns
"""
# Setup
columns = ['b', 'c']
# Run
instance = FixedCombinations(column_names=columns)
# Assert
assert instance._columns == columns
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``FixedCombinations.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Setup
columns = ['b', 'c']
# Run
instance = FixedCombinations(column_names=columns, handling_strategy='transform')
# Assert
assert instance.rebuild_columns == tuple(columns)
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``FixedCombinations.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Setup
columns = ['b', 'c']
# Run
instance = FixedCombinations(column_names=columns, handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test___init__with_one_column(self):
"""Test the ``FixedCombinations.__init__`` method with only one constraint column.
Expect a ``ValueError`` because FixedCombinations requires at least two
constraint columns.
Side effects:
- A ValueError is raised
"""
# Setup
columns = ['c']
# Run and assert
with pytest.raises(ValueError):
FixedCombinations(column_names=columns)
def test_fit(self):
"""Test the ``FixedCombinations.fit`` method.
The ``FixedCombinations.fit`` method is expected to:
- Call ``FixedCombinations._valid_separator``.
- Find a valid separator for the data and generate the joint column name.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
instance = FixedCombinations(column_names=columns)
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
instance.fit(table_data)
# Asserts
expected_combinations = pd.DataFrame({
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
assert instance._separator == '#'
assert instance._joint_column == 'b#c'
pd.testing.assert_frame_equal(instance._combinations, expected_combinations)
def test_is_valid_true(self):
"""Test the ``FixedCombinations.is_valid`` method.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = FixedCombinations(column_names=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_false(self):
"""Test the ``FixedCombinations.is_valid`` method.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = FixedCombinations(column_names=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['D', 'E', 'F'],
'c': ['g', 'h', 'i']
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_true(self):
"""Test the ``FixedCombinations.is_valid`` method with non string columns.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = FixedCombinations(column_names=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_false(self):
"""Test the ``FixedCombinations.is_valid`` method with non string columns.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = FixedCombinations(column_names=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [6, 7, 8],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``FixedCombinations.transform`` method.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns concatenated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = FixedCombinations(column_names=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c'].items()]
except ValueError:
assert False
def test_transform_non_string(self):
"""Test the ``FixedCombinations.transform`` method with non strings.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns as UUIDs.
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = FixedCombinations(column_names=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c#d'].items()]
except ValueError:
assert False
def test_transform_not_all_columns_provided(self):
"""Test the ``FixedCombinations.transform`` method.
If some of the columns needed for the transform are missing, and
``fit_columns_model`` is False, it will raise a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = FixedCombinations(column_names=columns, fit_columns_model=False)
instance.fit(table_data)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test_reverse_transform(self):
"""Test the ``FixedCombinations.reverse_transform`` method.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = FixedCombinations(column_names=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_non_string(self):
"""Test the ``FixedCombinations.reverse_transform`` method with a non string column.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = FixedCombinations(column_names=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
pd.testing.assert_frame_equal(expected_out, out)
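# Illustrative sketch, not a test: the fit/transform/reverse_transform round
# trip that TestFixedCombinations verifies, driven the way a library user
# would. The toy frame reuses the 'b'/'c' columns from the tests above.
def _example_fixed_combinations_round_trip():  # pragma: no cover
    data = pd.DataFrame({'b': ['d', 'e', 'f'], 'c': ['g', 'h', 'i']})
    constraint = FixedCombinations(column_names=['b', 'c'])
    constraint.fit(data)
    combined = constraint.transform(data)  # 'b' and 'c' collapse into one UUID column
    return constraint.reverse_transform(combined)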
class TestGreaterThan():
def test__validate_scalar(self):
"""Test the ``_validate_scalar`` method.
        This method validates the inputs and transforms them into
the correct format.
Input:
- scalar_column = 0
- column_names = 'b'
Output:
- column_names == ['b']
"""
# Setup
scalar_column = 0
column_names = 'b'
scalar = 'high'
# Run
out = GreaterThan._validate_scalar(scalar_column, column_names, scalar)
# Assert
        assert out == ['b']
def test__validate_scalar_list(self):
"""Test the ``_validate_scalar`` method.
        This method validates the inputs and transforms them into
the correct format.
Input:
- scalar_column = 0
- column_names = ['b']
Output:
- column_names == ['b']
"""
# Setup
scalar_column = 0
column_names = ['b']
scalar = 'low'
# Run
out = GreaterThan._validate_scalar(scalar_column, column_names, scalar)
# Assert
        assert out == ['b']
def test__validate_scalar_error(self):
"""Test the ``_validate_scalar`` method.
        This method raises an error when the scalar column is a list.
Input:
- scalar_column = 0
- column_names = 'b'
Side effect:
- Raise error since the scalar is a list
"""
# Setup
scalar_column = [0]
column_names = 'b'
scalar = 'high'
# Run / Assert
with pytest.raises(TypeError):
GreaterThan._validate_scalar(scalar_column, column_names, scalar)
def test__validate_inputs_high_is_scalar(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 3
- scalar = 'high'
Output:
- low == ['a']
- high == 3
- constraint_columns = ('a')
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=3, scalar='high', drop=None)
# Assert
        assert low == ['a']
        assert high == 3
        assert constraint_columns == ('a',)
def test__validate_inputs_low_is_scalar(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 3
- high = 'b'
- scalar = 'low'
- drop = None
Output:
- low == 3
- high == ['b']
- constraint_columns = ('b')
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low=3, high='b', scalar='low', drop=None)
# Assert
        assert low == 3
        assert high == ['b']
        assert constraint_columns == ('b',)
def test__validate_inputs_scalar_none(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 3 # where 3 is a column name
- scalar = None
- drop = None
Output:
- low == ['a']
- high == [3]
- constraint_columns = ('a', 3)
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=3, scalar=None, drop=None)
# Assert
        assert low == ['a']
        assert high == [3]
        assert constraint_columns == ('a', 3)
def test__validate_inputs_scalar_none_lists(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = ['a']
- high = ['b', 'c']
- scalar = None
- drop = None
Output:
- low == ['a']
- high == ['b', 'c']
- constraint_columns = ('a', 'b', 'c')
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low=['a'], high=['b', 'c'], scalar=None, drop=None)
# Assert
        assert low == ['a']
        assert high == ['b', 'c']
        assert constraint_columns == ('a', 'b', 'c')
def test__validate_inputs_scalar_none_two_lists(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = ['a', 0]
- high = ['b', 'c']
- scalar = None
- drop = None
Side effect:
- Raise error because both high and low are more than one column
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low=['a', 0], high=['b', 'c'], scalar=None, drop=None)
def test__validate_inputs_scalar_unknown(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 'b'
- scalar = 'unknown'
- drop = None
Side effect:
- Raise error because scalar is unknown
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low='a', high='b', scalar='unknown', drop=None)
def test__validate_inputs_drop_error_low(self):
"""Test the ``_validate_inputs`` method.
Make sure the method raises an error if ``drop``==``scalar``
when ``scalar`` is not ``None``.
Input:
- low = 2
- high = 'b'
- scalar = 'low'
- drop = 'low'
Side effect:
        - Raise error because ``drop`` is the same as ``scalar``
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low=2, high='b', scalar='low', drop='low')
def test__validate_inputs_drop_error_high(self):
"""Test the ``_validate_inputs`` method.
Make sure the method raises an error if ``drop``==``scalar``
when ``scalar`` is not ``None``.
Input:
- low = 'a'
- high = 3
- scalar = 'high'
- drop = 'high'
Side effect:
        - Raise error because ``drop`` is the same as ``scalar``
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low='a', high=3, scalar='high', drop='high')
def test__validate_inputs_drop_success(self):
"""Test the ``_validate_inputs`` method.
        Make sure the inputs are validated without error when ``drop``
        differs from ``scalar``.
        Input:
        - low = 'a'
        - high = 0
- scalar = 'high'
- drop = 'low'
Output:
- low = ['a']
- high = 0
- constraint_columns == ('a')
"""
# Run / Assert
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=0, scalar='high', drop='low')
assert low == ['a']
assert high == 0
assert constraint_columns == ('a',)
def test___init___(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
Side effects:
- instance._low == 'a'
- instance._high == 'b'
- instance._strict == False
"""
# Run
instance = GreaterThan(low='a', high='b')
# Asserts
assert instance._low == ['a']
assert instance._high == ['b']
assert instance._strict is False
assert instance._scalar is None
assert instance._drop is None
assert instance.constraint_columns == ('a', 'b')
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``GreaterThan.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Run
instance = GreaterThan(low='a', high='b', handling_strategy='transform')
# Assert
assert instance.rebuild_columns == ['b']
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``GreaterThan.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Run
instance = GreaterThan(low='a', high='b', handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test___init___high_is_scalar(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes. Make sure ``scalar``
is set to ``'high'``.
Input:
- low = 'a'
- high = 0
- strict = True
- drop = 'low'
- scalar = 'high'
Side effects:
- instance._low == 'a'
- instance._high == 0
- instance._strict == True
- instance._drop = 'low'
- instance._scalar == 'high'
"""
# Run
instance = GreaterThan(low='a', high=0, strict=True, drop='low', scalar='high')
# Asserts
assert instance._low == ['a']
assert instance._high == 0
assert instance._strict is True
assert instance._scalar == 'high'
assert instance._drop == 'low'
assert instance.constraint_columns == ('a',)
def test___init___low_is_scalar(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes. Make sure ``scalar``
        is set to ``'low'``.
Input:
- low = 0
- high = 'a'
- strict = True
- drop = 'high'
- scalar = 'low'
Side effects:
- instance._low == 0
- instance._high == 'a'
        - instance._strict == True
- instance._drop = 'high'
- instance._scalar == 'low'
"""
# Run
instance = GreaterThan(low=0, high='a', strict=True, drop='high', scalar='low')
# Asserts
assert instance._low == 0
assert instance._high == ['a']
assert instance._strict is True
assert instance._scalar == 'low'
assert instance._drop == 'high'
assert instance.constraint_columns == ('a',)
def test___init___strict_is_false(self):
"""Test the ``GreaterThan.__init__`` method.
Ensure that ``operator`` is set to ``np.greater_equal``
when ``strict`` is set to ``False``.
Input:
- low = 'a'
- high = 'b'
- strict = False
"""
# Run
instance = GreaterThan(low='a', high='b', strict=False)
# Assert
assert instance.operator == np.greater_equal
def test___init___strict_is_true(self):
"""Test the ``GreaterThan.__init__`` method.
Ensure that ``operator`` is set to ``np.greater``
when ``strict`` is set to ``True``.
Input:
- low = 'a'
- high = 'b'
- strict = True
"""
# Run
instance = GreaterThan(low='a', high='b', strict=True)
# Assert
assert instance.operator == np.greater
def test__init__get_columns_to_reconstruct_default(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
Side effects:
- self._columns_to_reconstruct == ['b']
"""
# Setup
instance = GreaterThan(low='a', high='b')
        assert instance._columns_to_reconstruct == ['b']
def test__init__get_columns_to_reconstruct_drop_high(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
- drop = 'high'
Side effects:
- self._columns_to_reconstruct == ['b']
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='high')
        assert instance._columns_to_reconstruct == ['b']
def test__init__get_columns_to_reconstruct_drop_low(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
- drop = 'low'
Side effects:
- self._columns_to_reconstruct == ['a']
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='low')
        assert instance._columns_to_reconstruct == ['a']
def test__init__get_columns_to_reconstruct_scalar_high(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 0
- scalar = 'high'
Side effects:
- self._columns_to_reconstruct == ['a']
"""
# Setup
instance = GreaterThan(low='a', high=0, scalar='high')
        assert instance._columns_to_reconstruct == ['a']
def test__get_value_column_list(self):
"""Test the ``GreaterThan._get_value`` method.
This method returns a scalar or a ndarray of values
depending on the type of the ``field``.
Input:
- Table with given data.
- field = 'low'
"""
# Setup
instance = GreaterThan(low='a', high='b')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
out = instance._get_value(table_data, 'low')
# Assert
expected = table_data[['a']].values
np.testing.assert_array_equal(out, expected)
def test__get_value_scalar(self):
"""Test the ``GreaterThan._get_value`` method.
This method returns a scalar or a ndarray of values
depending on the type of the ``field``.
Input:
- Table with given data.
- field = 'low'
- scalar = 'low'
"""
# Setup
instance = GreaterThan(low=3, high='b', scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
out = instance._get_value(table_data, 'low')
# Assert
expected = 3
assert out == expected
def test__get_diff_columns_name_low_is_scalar(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
        The returned names should be the given column names, each with a '#'
        token appended.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=0, high=['a', 'b#'], scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b#': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a#', 'b##']
assert out == expected
def test__get_diff_columns_name_high_is_scalar(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
        The returned names should be the given column names, each with a '#'
        token appended.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=0, scalar='high')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a#', 'b#']
assert out == expected
def test__get_diff_columns_name_scalar_is_none(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned name should be the ``high`` and ``low`` column names joined by a '#' token.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='b#', scalar=None)
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b#': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['b##a']
assert out == expected
def test__get_diff_columns_name_scalar_is_none_multi_column_low(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should be each ``low`` column name joined to the ``high`` column name by a '#' token.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=['a#', 'c'], high='b', scalar=None)
table_data = pd.DataFrame({
'a#': [1, 2, 4],
'b': [4, 5, 6],
'c#': [7, 8, 9]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a##b', 'c#b']
assert out == expected
def test__get_diff_columns_name_scalar_is_none_multi_column_high(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should be each ``high`` column name joined to the ``low`` value by a '#' token.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=0, high=['b', 'c'], scalar=None)
table_data = pd.DataFrame({
0: [1, 2, 4],
'b': [4, 5, 6],
'c#': [7, 8, 9]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['b#0', 'c#0']
assert out == expected
def test__check_columns_exist_success(self):
"""Test the ``GreaterThan._check_columns_exist`` method.
This method raises an error if the specified columns in
``low`` or ``high`` do not exist.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run / Assert
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
instance._check_columns_exist(table_data, 'low')
instance._check_columns_exist(table_data, 'high')
def test__check_columns_exist_error(self):
"""Test the ``GreaterThan._check_columns_exist`` method.
This method raises an error if the specified columns in
``low`` or ``high`` do not exist.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='c')
# Run / Assert
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
instance._check_columns_exist(table_data, 'low')
with pytest.raises(KeyError):
instance._check_columns_exist(table_data, 'high')
def test__fit_only_one_datetime_arg(self):
"""Test the ``Between._fit`` method by passing in only one arg as datetime.
If only one of the high / low args is a datetime type, expect a ValueError.
Input:
- low is an int column
- high is a datetime
Output:
- n/a
Side Effects:
- ValueError
"""
# Setup
instance = GreaterThan(low='a', high=pd.to_datetime('2021-01-01'), scalar='high')
# Run and assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(ValueError):
instance._fit(table_data)
def test__fit__low_is_not_found_and_scalar_is_none(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``low`` is set to a value not seen in ``table_data``.
Input:
- Table without ``low`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low=3, high='b')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__high_is_not_found_and_scalar_is_none(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``high`` is set to a value not seen in ``table_data``.
Input:
- Table without ``high`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low='a', high=3)
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__low_is_not_found_scalar_is_high(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``low`` is set to a value not seen in ``table_data``.
Input:
- Table without ``low`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low='c', high=3, scalar='high')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__high_is_not_found_scalar_is_low(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``high`` is set to a value not seen in ``table_data``.
Input:
- Table without ``high`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low=3, high='c', scalar='low')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__columns_to_reconstruct_drop_high(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to ``instance._high`` if ``instance._drop`` is ``'high'``.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
def test__fit__columns_to_reconstruct_drop_low(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to ``instance._low`` if ``instance._drop`` is ``'low'``.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['a']
def test__fit__columns_to_reconstruct_default(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `high` by default.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
def test__fit__columns_to_reconstruct_high_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `low` if ``instance._scalar`` is ``'high'``.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', scalar='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['a']
def test__fit__columns_to_reconstruct_low_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `high` if ``instance._scalar`` is ``'low'``.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b', scalar='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
def test__fit__diff_columns_one_column(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_diff_columns``
to the one column in ``instance.constraint_columns`` plus a
token if there is only one column in that set.
Input:
- Table with one column.
Side Effect:
- ``_diff_columns`` == ``['a#']``
"""
# Setup
instance = GreaterThan(low='a', high=3, scalar='high')
# Run
table_data = pd.DataFrame({'a': [1, 2, 3]})
instance._fit(table_data)
# Asserts
assert instance._diff_columns == ['a#']
def test__fit__diff_columns_multiple_columns(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_diff_columns``
to the two columns in ``instance.constraint_columns`` separated
by a token if both columns are in that set.
Input:
- Table with two columns.
Side Effect:
- ``_diff_columns`` == ``['b#a']``
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._diff_columns == ['b#a']
def test__fit_int(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_scalar`` is ``None``.
Input:
- Table that contains two constrained columns with the high one
being made of integers.
Side Effect:
- The _dtype attribute gets `int` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'i' for dtype in instance._dtype])
def test__fit_float(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_scalar`` is ``None``.
Input:
- Table that contains two constrained columns with the high one
being made of float values.
Side Effect:
- The _dtype attribute gets `float` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'f' for dtype in instance._dtype])
def test__fit_datetime(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_scalar`` is ``None``.
Input:
- Table that contains two constrained columns of datetimes.
Side Effect:
- The _dtype attribute gets `datetime` as the value.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01']),
'b': pd.to_datetime(['2020-01-02'])
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'M' for dtype in instance._dtype])
def test__fit_type__high_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``low`` column as the ``_dtype`` attribute
if ``_scalar`` is ``'high'``.
Input:
- Table that contains two constrained columns with the low one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low='a', high=3, scalar='high')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'f' for dtype in instance._dtype])
def test__fit_type__low_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_scalar`` is ``'low'``.
Input:
- Table that contains two constrained columns with the high one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low=3, high='b', scalar='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'f' for dtype in instance._dtype])
def test__fit_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute.
Input:
- Table that contains two constrained columns with different dtype.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=0, scalar='high')
dtype_int = pd.Series([1]).dtype
dtype_float = np.dtype('float')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4., 5., 6.]
})
instance._fit(table_data)
# Assert
expected_diff_columns = ['a#', 'b#']
expected_dtype = pd.Series([dtype_int, dtype_float], index=table_data.columns)
assert instance._diff_columns == expected_diff_columns
pd.testing.assert_series_equal(instance._dtype, expected_dtype)
def test__fit_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute.
Input:
- Table that contains two constrained columns with different dtype.
"""
# Setup
instance = GreaterThan(low=0, high=['a', 'b'], scalar='low')
dtype_int = pd.Series([1]).dtype
dtype_float = np.dtype('float')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4., 5., 6.]
})
instance._fit(table_data)
# Assert
expected_diff_columns = ['a#', 'b#']
expected_dtype = pd.Series([dtype_int, dtype_float], index=table_data.columns)
assert instance._diff_columns == expected_diff_columns
pd.testing.assert_series_equal(instance._dtype, expected_dtype)
def test_is_valid_strict_false(self):
"""Test the ``GreaterThan.is_valid`` method with strict False.
If strict is False, equal values should count as valid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- False should be returned for the strictly invalid row and True
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=False)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_strict_true(self):
"""Test the ``GreaterThan.is_valid`` method with strict True.
If strict is True, equal values should count as invalid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- True should be returned for the strictly valid row and False
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, False, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_low_is_scalar_high_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a scalar, and high is a column name, then
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low=3, high='b', strict=False, scalar='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, False, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_high_is_scalar_low_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a scalar, and low is a column name, then
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below high.
Output:
- True should be returned for the rows where the low
column is below high.
"""
# Setup
instance = GreaterThan(low='a', high=2, strict=False, scalar='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a scalar, and low is multi column, then
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below high.
Output:
- True should be returned for the rows where the low
column is below high.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=2, strict=False, scalar='high')
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [False, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a scalar, and high is multi column, then
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low=2, high=['a', 'b'], strict=False, scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [False, True, True]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_scalar_is_none_multi_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If scalar is none, and high is multi column, then
the values in that column should all be higher than
in the low column.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low='b', high=['a', 'c'], strict=False)
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = [False, True, True]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_high_is_datetime(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a datetime and low is a column,
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below `high`.
Output:
- True should be returned for the rows where the low
column is below `high`.
"""
# Setup
high_dt = pd.to_datetime('8/31/2021')
instance = GreaterThan(low='a', high=high_dt, strict=False, scalar='high')
table_data = pd.DataFrame({
'a': [datetime(2020, 5, 17), datetime(2020, 2, 1), datetime(2021, 9, 1)],
'b': [4, 2, 2],
})
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_low_is_datetime(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a datetime and high is a column,
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below `low`.
Output:
- True should be returned for the rows where the high
column is above `low`.
"""
# Setup
low_dt = pd.to_datetime('8/31/2021')
instance = GreaterThan(low=low_dt, high='a', strict=False, scalar='low')
table_data = pd.DataFrame({
'a': [datetime(2021, 9, 17), datetime(2021, 7, 1), datetime(2021, 9, 1)],
'b': [4, 2, 2],
})
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = [True, False, True]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_two_cols_with_nans(self):
"""Test the ``GreaterThan.is_valid`` method with nan values.
If there is a NaN row, expect that `is_valid` returns True.
Input:
- Table with a NaN row
Output:
- True should be returned for the NaN row.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, None, 3],
'b': [4, None, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_two_cols_with_one_nan(self):
"""Test the ``GreaterThan.is_valid`` method with nan values.
If there is a row in which we compare one NaN value with one
non-NaN value, expect that `is_valid` returns True.
Input:
- Table with a row that contains only one NaN value.
Output:
- True should be returned for the row with the NaN value.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, None, 3],
'b': [4, 5, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test__transform_int_drop_none(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type int.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_int_drop_high(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type int.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the high column.
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the high column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_int_drop_low(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type int.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the low column.
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the low column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_float_drop_none(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type float.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_datetime_drop_none(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type datetime.
If the columns are of type datetime, ``_transform`` is expected
to convert the timedelta distance into numeric before applying
the +1 and logarithm.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with values at a distance of exactly 1 second.
Output:
- Same table with a diff column of the logarithms
of the distance in nanoseconds + 1, which is np.log(1_000_000_001).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_columns = ['a#b']
instance._is_datetime = True
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_not_all_columns_provided(self):
"""Test the ``GreaterThan.transform`` method.
If some of the columns needed for the transform are missing, it will raise
a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, fit_columns_model=False)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test__transform_high_is_scalar(self):
"""Test the ``GreaterThan._transform`` method with high as scalar.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high scalar value and the low column and create a diff column
with the logarithm of the distance + 1.
Setup:
- ``_high`` is set to 5 and ``_scalar`` is ``'high'``.
Input:
- Table with one low column and two dummy columns.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which are np.log(5), np.log(4) and np.log(3).
"""
# Setup
instance = GreaterThan(low='a', high=5, strict=True, scalar='high')
instance._diff_columns = ['a#b']
instance.constraint_columns = ['a']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(5), np.log(4), np.log(3)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_low_is_scalar(self):
"""Test the ``GreaterThan._transform`` method with high as scalar.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high scalar value and the low column and create a diff column
with the logarithm of the distance + 1.
Setup:
- ``_high`` is set to 5 and ``_scalar`` is ``'low'``.
Input:
- Table with one low column and two dummy columns.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low=2, high='b', strict=True, scalar='low')
instance._diff_columns = ['a#b']
instance.constraint_columns = ['b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(3), np.log(4), np.log(5)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan._transform`` method.
The ``GreaterThan._transform`` method is expected to compute the logarithm
of given columns + 1.
Input:
- Table with given data.
Output:
- Same table with additional columns of the logarithms + 1.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=3, strict=True, scalar='high')
instance._diff_columns = ['a#', 'b#']
instance.constraint_columns = ['a', 'b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(3), np.log(2), np.log(1)],
'b#': [np.log(0), np.log(-1), np.log(-2)],
})
pd.testing.assert_frame_equal(out, expected)
def test__transform_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan._transform`` method.
The ``GreaterThan._transform`` method is expected to compute the logarithm
of given columns + 1.
Input:
- Table with given data.
Output:
- Same table with additional columns of the logarithms + 1.
"""
# Setup
instance = GreaterThan(low=3, high=['a', 'b'], strict=True, scalar='low')
instance._diff_columns = ['a#', 'b#']
instance.constraint_columns = ['a', 'b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(-1), np.log(0), np.log(1)],
'b#': [np.log(2), np.log(3), np.log(4)],
})
pd.testing.assert_frame_equal(out, expected)
def test__transform_scalar_is_none_multi_column(self):
"""Test the ``GreaterThan._transform`` method.
The ``GreaterThan._transform`` method is expected to compute the logarithm
of given columns + 1.
Input:
- Table with given data.
Output:
- Same table with additional columns of the logarithms + 1.
"""
# Setup
instance = GreaterThan(low=['a', 'c'], high='b', strict=True)
instance._diff_columns = ['a#', 'c#']
instance.constraint_columns = ['a', 'c']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(4)] * 3,
'c#': [np.log(-2)] * 3,
})
pd.testing.assert_frame_equal(out, expected)
def test_reverse_transform_int_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'b': [4, 5, 6],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_float_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype float.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to float values
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as float values
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = [np.dtype('float')]
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'b': [4.1, 5.2, 6.3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the high column replaced by the low one + one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = [np.dtype('<M8[ns]')]
instance._diff_columns = ['a#b']
instance._is_datetime = True
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01'])
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_int_drop_low(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the low column replaced by the high one - 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['a']
# Run
transformed = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a': [1, 2, 3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_low(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- subtract from the high column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the low column replaced by the high one - one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._dtype = [np.dtype('<M8[ns]')]
instance._diff_columns = ['a#b']
instance._is_datetime = True
instance._columns_to_reconstruct = ['a']
# Run
transformed = pd.DataFrame({
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00'])
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_int_drop_none(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low column is
higher than the high column.
Output:
- Same table with the high column replaced by the low one + 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 1, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_none(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column when the row is invalid
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``None``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
The table should have one invalid row where the low column is
higher than the high column.
Output:
- Same table with the high column replaced by the low one + one second
for all invalid rows, and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._dtype = [np.dtype('<M8[ns]')]
instance._diff_columns = ['a#b']
instance._is_datetime = True
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-01T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2]
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_low_is_scalar(self):
"""Test the ``GreaterThan.reverse_transform`` method with low as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_low`` is set to an int and ``_scalar`` is ``'low'``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low value is
higher than the high column.
Output:
- Same table with the high column replaced by the low value + 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=3, high='b', strict=True, scalar='low')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 1, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 6, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_high_is_scalar(self):
"""Test the ``GreaterThan.reverse_transform`` method with high as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_high`` is set to an int and ``_scalar`` is ``'high'``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low column is
higher than the high value.
Output:
- Same table with the low column replaced by the high one - 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high=3, strict=True, scalar='high')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['a']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 0],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan.reverse_transform`` method with high as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_high`` is set to an int and ``_scalar`` is ``'high'``.
- ``_low`` is set to multiple columns.
Input:
- Table with diff columns that contain the constants np.log(4) and np.log(5).
The table should have one invalid row where the low column is
higher than the high value.
Output:
- Same table with the low column replaced by the high one - 3/-4 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=3, strict=True, scalar='high')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [0, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(4)] * 3,
'b#': [np.log(5)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 0, 0],
'b': [0, -1, -1],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan.reverse_transform`` method with low as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_low`` is set to an int and ``_scalar`` is ``'low'``.
- ``_high`` is set to multiple columns.
Input:
- Table with diff columns that contain the constants np.log(4) and np.log(5).
The table should have one invalid row where the low value is
higher than the high column.
Output:
- Same table with the high column replaced by the low value +3/+4 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=3, high=['a', 'b'], strict=True, scalar='low')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(4)] * 3,
'b#': [np.log(5)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [6, 6, 4],
'b': [7, 7, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_scalar_is_none_multi_column(self):
"""Test the ``GreaterThan.reverse_transform`` method with low as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Setup:
- ``_low`` = ['a', 'c'].
- ``_high`` = ['b'].
Input:
- Table with a diff column that contains the constant np.log(4)/np.log(-2).
The table should have one invalid row where the low value is
higher than the high column.
Output:
- Same table with the high column replaced by the low value +3/-4 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=['a', 'c'], high=['b'], strict=True)
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'c#']
instance._columns_to_reconstruct = ['a', 'c']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(1)] * 3,
'c#': [np.log(1)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_multi_column_positive(self):
"""Test the ``GreaterThan.reverse_transform`` method for positive constraint.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Input:
- Table with given data.
Output:
- Same table with replaced rows and dropped columns.
"""
# Setup
instance = GreaterThan(low=0, high=['a', 'b'], strict=True, scalar='low')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, -1],
'c': [7, 8, 9],
'a#': [np.log(2), np.log(3), np.log(4)],
'b#': [np.log(5), np.log(6), np.log(0)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 0],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_multi_column_negative(self):
"""Test the ``GreaterThan.reverse_transform`` method for negative constraint.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Input:
- Table with given data.
Output:
- Same table with replaced rows and dropped columns.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=0, strict=True, scalar='high')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [-1, -2, 1],
'b': [-4, -5, -1],
'c': [7, 8, 9],
'a#': [np.log(2), np.log(3), np.log(0)],
'b#': [np.log(5), np.log(6), np.log(2)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [-1, -2, 0],
'b': [-4, -5, -1],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
class TestPositive():
def test__init__(self):
"""
Test the ``Positive.__init__`` method.
The method is expected to set the ``_low`` instance variable
to 0, the ``_scalar`` variable to ``'low'``. The rest of the
parameters should be passed. Check that ``_drop`` is set to
``None`` when ``drop`` is ``False``.
Input:
- strict = True
- columns = 'a'
- drop = False
Side effects:
- instance._low == 0
- instance._high == ['a']
- instance._strict == True
- instance._scalar == 'low'
- instance._drop is None
"""
# Run
instance = Positive(columns='a', strict=True, drop=False)
# Asserts
assert instance._low == 0
assert instance._high == ['a']
assert instance._strict is True
assert instance._scalar == 'low'
assert instance._drop is None
def test__init__drop_true(self):
"""
Test the ``Positive.__init__`` method with drop is ``True``.
Check that ``_drop`` is set to 'high' when ``drop`` is ``True``.
Input:
- strict = True
- columns = 'a'
- drop = True
Side effects:
- instance._low == 0
- instance._high == ['a']
- instance._strict == True
- instance._scalar == 'low'
- instance._drop == 'high'
"""
# Run
instance = Positive(columns='a', strict=True, drop=True)
# Asserts
assert instance._low == 0
assert instance._high == ['a']
assert instance._strict is True
assert instance._scalar == 'low'
assert instance._drop == 'high'
class TestNegative():
def test__init__(self):
"""
Test the ``Negative.__init__`` method.
The method is expected to set the ``_high`` instance variable
to 0, the ``_scalar`` variable to ``'high'``. The rest of the
parameters should be passed. Check that ``_drop`` is set to
``None`` when ``drop`` is ``False``.
Input:
- strict = True
- columns = 'a'
- drop = False
Side effects:
- instance._low == ['a']
- instance._high == 0
- instance._strict == True
- instance._scalar == 'high'
- instance._drop is None
"""
# Run
instance = Negative(columns='a', strict=True, drop=False)
# Asserts
assert instance._low == ['a']
assert instance._high == 0
assert instance._strict is True
assert instance._scalar == 'high'
assert instance._drop is None
def test__init__drop_true(self):
"""
Test the ``Negative.__init__`` method with drop is ``True``.
Check that ``_drop`` is set to 'low' when ``drop`` is ``True``.
Input:
- strict = True
- columns = 'a'
- drop = True
Side effects:
- instance._low == ['a']
- instance._high == 0
- instance._strict == True
- instance._scalar == 'high'
- instance._drop == 'low'
"""
# Run
instance = Negative(columns='a', strict=True, drop=True)
# Asserts
assert instance._low == ['a']
assert instance._high == 0
assert instance._strict is True
assert instance._scalar == 'high'
assert instance._drop == 'low'
def new_column(data):
"""Formula to be used for the ``TestColumnFormula`` class."""
if data['a'] is None or data['b'] is None:
return None
return data['a'] + data['b']
class TestColumnFormula():
def test___init__(self):
"""Test the ``ColumnFormula.__init__`` method.
It is expected to create a new Constraint instance,
import the formula to use for the computation, and
set the specified constraint column.
Input:
- column = 'col'
- formula = new_column
"""
# Setup
column = 'col'
# Run
instance = ColumnFormula(column=column, formula=new_column)
# Assert
assert instance._column == column
assert instance._formula == new_column
assert instance.constraint_columns == ('col', )
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``ColumnFormula.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Setup
column = 'col'
# Run
instance = ColumnFormula(column=column, formula=new_column, handling_strategy='transform')
# Assert
assert instance.rebuild_columns == (column,)
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``ColumnFormula.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Setup
column = 'col'
# Run
instance = ColumnFormula(column=column, formula=new_column,
handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test_is_valid_valid(self):
"""Test the ``ColumnFormula.is_valid`` method for a valid data.
If the data fulfills the formula, result is a series of ``True`` values.
Input:
- Table data fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``True`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_valid(self):
"""Test the ``ColumnFormula.is_valid`` method for a non-valid data.
If the data does not fulfill the formula, result is a series of ``False`` values.
Input:
- Table data not fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``False`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [1, 2, 3]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([False, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_with_nans(self):
"""Test the ``ColumnFormula.is_valid`` method for with a formula that produces nans.
If the data fulfills the formula, result is a series of ``True`` values.
Input:
- Table data fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``True`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, None],
'c': [5, 7, None]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test__transform(self):
"""Test the ``ColumnFormula._transform`` method.
It is expected to drop the indicated column from the table.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data without the indicated column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_without_dropping_column(self):
"""Test the ``ColumnFormula._transform`` method without dropping the column.
If ``drop_column`` is ``False``, expect the constraint column not to be dropped.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with the indicated column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column, drop_column=False)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_missing_column(self):
"""Test the ``ColumnFormula._transform`` method when the constraint column is missing.
When ``_transform`` is called with data that does not contain the constraint column,
expect to return the data as-is.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data, unchanged (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'd': [5, 7, 9]
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'd': [5, 7, 9]
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform(self):
"""Test the ``ColumnFormula.reverse_transform`` method.
It is expected to compute the indicated column by applying the given formula.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [1, 1, 1]
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
pd.testing.assert_frame_equal(expected_out, out)
class TestRounding():
def test___init__(self):
"""Test the ``Rounding.__init__`` method.
It is expected to create a new Constraint instance
and set the rounding args.
Input:
- columns = ['b', 'c']
- digits = 2
"""
# Setup
columns = ['b', 'c']
digits = 2
# Run
instance = Rounding(columns=columns, digits=digits)
# Assert
assert instance._columns == columns
assert instance._digits == digits
def test___init__invalid_digits(self):
"""Test the ``Rounding.__init__`` method with an invalid argument.
Pass in an invalid ``digits`` argument, and expect a ValueError.
Input:
- columns = ['b', 'c']
- digits = 20
"""
# Setup
columns = ['b', 'c']
digits = 20
# Run
with pytest.raises(ValueError):
Rounding(columns=columns, digits=digits)
def test___init__invalid_tolerance(self):
"""Test the ``Rounding.__init__`` method with an invalid argument.
Pass in an invalid ``tolerance`` argument, and expect a ValueError.
Input:
- columns = ['b', 'c']
- digits = 2
- tolerance = 0.1
"""
# Setup
columns = ['b', 'c']
digits = 2
tolerance = 0.1
# Run
with pytest.raises(ValueError):
Rounding(columns=columns, digits=digits, tolerance=tolerance)
def test_is_valid_positive_digits(self):
"""Test the ``Rounding.is_valid`` method for a positive digits argument.
Input:
        - Table data where only some rows have the desired decimal places (pandas.DataFrame)
        Output:
        - Series that is ``True`` for rows within tolerance and ``False`` otherwise (pandas.Series)
"""
# Setup
columns = ['b', 'c']
digits = 2
tolerance = 1e-3
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.12, 5.51, None, 6.941, 1.129],
'c': [5.315, 7.12, 1.12, 9.131, 12.329],
'd': ['a', 'b', 'd', 'e', None],
'e': [123.31598, -1.12001, 1.12453, 8.12129, 1.32923]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([False, True, False, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_negative_digits(self):
"""Test the ``Rounding.is_valid`` method for a negative digits argument.
Input:
        - Table data where only some rows have the desired decimal places (pandas.DataFrame)
        Output:
        - Series that is ``True`` for rows within tolerance and ``False`` otherwise (pandas.Series)
"""
# Setup
columns = ['b']
digits = -2
tolerance = 1
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [401, 500, 6921, 799, None],
'c': [5.3134, 7.1212, 9.1209, 101.1234, None],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False, True, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_zero_digits(self):
"""Test the ``Rounding.is_valid`` method for a zero digits argument.
Input:
        - Table data where only some rows have the desired decimal places (pandas.DataFrame)
        Output:
        - Series that is ``True`` for rows within tolerance and ``False`` otherwise (pandas.Series)
"""
# Setup
columns = ['b', 'c']
digits = 0
tolerance = 1e-4
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, None, 3, 4],
'b': [4, 5.5, 1.2, 6.0001, 5.99999],
'c': [5, 7.12, 1.31, 9.00001, 4.9999],
'd': ['a', 'b', None, 'd', 'e'],
'e': [2.1254, 17.12123, 124.12, 123.0112, -9.129434]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_reverse_transform_positive_digits(self):
"""Test the ``Rounding.reverse_transform`` method with positive digits.
        Expect the columns to be rounded to the specified number of digits.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
digits = 3
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, None, 4],
'b': [4.12345, None, 5.100, 6.0001, 1.7999],
'c': [1.1, 1.234, 9.13459, 4.3248, 6.1312],
'd': ['a', 'b', 'd', 'e', None]
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, None, 4],
'b': [4.123, None, 5.100, 6.000, 1.800],
'c': [1.100, 1.234, 9.135, 4.325, 6.131],
'd': ['a', 'b', 'd', 'e', None]
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_negative_digits(self):
"""Test the ``Rounding.reverse_transform`` method with negative digits.
        Expect the columns to be rounded to the specified number of digits.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
columns = ['b']
digits = -3
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [41234.5, None, 5000, 6001, 5928],
'c': [1.1, 1.23423, 9.13459, 12.12125, 18.12152],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [41000.0, None, 5000.0, 6000.0, 6000.0],
'c': [1.1, 1.23423, 9.13459, 12.12125, 18.12152],
'd': ['a', 'b', 'd', 'e', 'f']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_zero_digits(self):
"""Test the ``Rounding.reverse_transform`` method with zero digits.
        Expect the columns to be rounded to the specified number of digits.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
digits = 0
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.12345, None, 5.0, 6.01, 7.9],
'c': [1.1, 1.0, 9.13459, None, 8.89],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.0, None, 5.0, 6.0, 8.0],
'c': [1.0, 1.0, 9.0, None, 9.0],
'd': ['a', 'b', 'd', 'e', 'f']
})
pd.testing.assert_frame_equal(expected_out, out)
def transform(data, low, high):
"""Transform to be used for the TestBetween class."""
data = (data - low) / (high - low) * 0.95 + 0.025
return np.log(data / (1.0 - data))
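# Worked example of the scaled logit above (illustrative note, not part of the
# original test module): for low=0.0 and high=1.0 the data is first squeezed
# into the open interval (0.025, 0.975) and then mapped through log(p / (1 - p)):
#   transform(0.5, 0.0, 1.0) ->  0.0
#   transform(0.0, 0.0, 1.0) ->  log(0.025 / 0.975) ~= -3.664
#   transform(1.0, 0.0, 1.0) ->  log(0.975 / 0.025) ~=  3.664
# The 0.95 / 0.025 rescaling keeps values exactly at the bounds finite instead
# of sending them to +/- infinity.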
class TestBetween():
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``Between.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Setup
column = 'col'
# Run
instance = Between(column=column, low=10, high=20, handling_strategy='transform')
# Assert
assert instance.rebuild_columns == (column,)
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``Between.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Setup
column = 'col'
# Run
instance = Between(column=column, low=10, high=20, handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test_fit_only_one_datetime_arg(self):
"""Test the ``Between.fit`` method by passing in only one arg as datetime.
If only one of the bound parameters is a datetime type, expect a ValueError.
Input:
- low is an int scalar
- high is a datetime
Output:
- n/a
Side Effects:
- ValueError
"""
# Setup
column = 'a'
low = 0.0
high = pd.to_datetime('2021-01-01')
instance = Between(column=column, low=low, high=high)
# Run and assert
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [4, 5, 6],
})
with pytest.raises(ValueError):
instance.fit(table_data)
def test_transform_scalar_scalar(self):
"""Test the ``Between.transform`` method by passing ``low`` and ``high`` as scalars.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [4, 5, 6],
})
instance.fit(table_data)
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'a#0.0#1.0': transform(table_data[column], low, high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_scalar_column(self):
"""Test the ``Between._transform`` method with ``low`` as scalar and ``high`` as a column.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0.5, 1, 6],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0.5, 1, 6],
'a#0.0#b': transform(table_data[column], low, table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_column_scalar(self):
"""Test the ``Between._transform`` method with ``low`` as a column and ``high`` as scalar.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0, -1, 0.5],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0, -1, 0.5],
'a#b#1.0': transform(table_data[column], table_data[low], high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_column_column(self):
"""Test the ``Between._transform`` method by passing ``low`` and ``high`` as columns.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0, -1, 0.5],
'c': [0.5, 1, 6]
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0, -1, 0.5],
'c': [0.5, 1, 6],
'a#b#c': transform(table_data[column], table_data[low], table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_datetime_datetime(self):
"""Test the ``Between._transform`` method by passing ``low`` and ``high`` as datetimes.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
- High and Low as datetimes
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = pd.to_datetime('1900-01-01')
high = pd.to_datetime('2021-01-01')
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
'b': [4, 5, 6],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'a#1900-01-01T00:00:00.000000000#2021-01-01T00:00:00.000000000': transform(
table_data[column], low, high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_datetime_column(self):
"""Test the ``Between._transform`` method with ``low`` as datetime and ``high`` as a column.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = pd.to_datetime('1900-01-01')
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
'b': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
'a#1900-01-01T00:00:00.000000000#b': transform(
table_data[column], low, table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_column_datetime(self):
"""Test the ``Between._transform`` method with ``low`` as a column and ``high`` as datetime.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = pd.to_datetime('2021-01-01')
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'a#b#2021-01-01T00:00:00.000000000': transform(
table_data[column], table_data[low], high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_column_column_datetime(self):
"""Test the ``Between._transform`` method with ``low`` and ``high`` as datetime columns.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
# Run
table_data = pd.DataFrame({
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'c': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
]
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'c': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
'a#b#c': transform(table_data[column], table_data[low], table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_scalar_scalar(self):
"""Test ``Between.reverse_transform`` with ``low`` and ``high`` as scalars.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
        - Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
table_data = pd.DataFrame({
'b': [4, 5, 6],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [4, 5, 6],
'a#0.0#1.0': transform(table_data[column], low, high)
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_scalar_column(self):
"""Test ``Between.reverse_transform`` with ``low`` as scalar and ``high`` as a column.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
        - Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
table_data = pd.DataFrame({
'b': [0.5, 1, 6],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [0.5, 1, 6],
'a#0.0#b': transform(table_data[column], low, table_data[high])
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_column_scalar(self):
"""Test ``Between.reverse_transform`` with ``low`` as a column and ``high`` as scalar.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
        - Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
table_data = pd.DataFrame({
'b': [0, -1, 0.5],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [0, -1, 0.5],
'a#b#1.0': transform(table_data[column], table_data[low], high)
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_column_column(self):
"""Test ``Between.reverse_transform`` with ``low`` and ``high`` as columns.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
        - Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
table_data = pd.DataFrame({
'b': [0, -1, 0.5],
'c': [0.5, 1, 6],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [0, -1, 0.5],
'c': [0.5, 1, 6],
'a#b#c': transform(table_data[column], table_data[low], table_data[high])
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_datetime_datetime(self):
"""Test ``Between.reverse_transform`` with ``low`` and ``high`` as datetime.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
- High and low as datetimes
Output:
        - Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = pd.to_datetime('1900-01-01')
high = pd.to_datetime('2021-01-01')
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
table_data = pd.DataFrame({
'b': [4, 5, 6],
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [4, 5, 6],
'a#1900-01-01T00:00:00.000000000#2021-01-01T00:00:00.000000000': transform(
table_data[column], low, high)
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_series_equal(expected_out['b'], out['b'])
pd.testing.assert_series_equal(expected_out['a'], out['a'].astype('datetime64[ms]'))
def test_reverse_transform_datetime_column(self):
"""Test ``Between.reverse_transform`` with ``low`` as datetime and ``high`` as a column.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
        - Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = pd.to_datetime('1900-01-01')
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
table_data = pd.DataFrame({
'b': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-02'),
pd.to_datetime('2020-08-03'),
]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
'a#1900-01-01T00:00:00.000000000#b': transform(
table_data[column], low, table_data[high])
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_column_datetime(self):
"""Test ``Between.reverse_transform`` with ``low`` as a column and ``high`` as datetime.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
        - Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = pd.to_datetime('2021-01-01')
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
table_data = pd.DataFrame({
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-03'),
pd.to_datetime('2020-08-04'),
],
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'a#b#2021-01-01T00:00:00.000000000': transform(
table_data[column], table_data[low], high)
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_series_equal(expected_out['b'], out['b'])
pd.testing.assert_series_equal(expected_out['a'], out['a'].astype('datetime64[ms]'))
def test_reverse_transform_column_column_datetime(self):
"""Test ``Between.reverse_transform`` with ``low`` and ``high`` as datetime columns.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
        - Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
table_data = pd.DataFrame({
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'c': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'c': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
'a#b#c': transform(table_data[column], table_data[low], table_data[high])
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_is_valid_strict_true(self):
"""Test the ``Between.is_valid`` method with strict True.
If strict is True, equal values should count as invalid.
Input:
- Table with a valid row, a strictly invalid row and an
invalid row. (pandas.DataFrame)
Output:
- True should be returned for the valid row and False
for the other two. (pandas.Series)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, strict=True, high_is_scalar=True,
low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 1, 3],
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False])
pd.testing.assert_series_equal(expected_out, out, check_names=False)
def test_is_valid_strict_false(self):
"""Test the ``Between.is_valid`` method with strict False.
If strict is False, equal values should count as valid.
Input:
- Table with a valid row, a strictly invalid row and an
invalid row. (pandas.DataFrame)
Output:
- True should be returned for the first two rows, and False
for the last one (pandas.Series)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, strict=False, high_is_scalar=True,
low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 1, 3],
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False])
pd.testing.assert_series_equal(expected_out, out, check_names=False)
def test_is_valid_scalar_column(self):
"""Test the ``Between.is_valid`` method with ``low`` as scalar and ``high`` as a column.
Is expected to return whether the constraint ``column`` is between the
``low`` and ``high`` values.
Input:
- Table data where the last value is greater than ``high``. (pandas.DataFrame)
Output:
- True should be returned for the two first rows, False
for the last one. (pandas.Series)
"""
# Setup
column = 'a'
low = 0.0
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0.5, 1, 0.6],
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_column_scalar(self):
"""Test the ``Between.is_valid`` method with ``low`` as a column and ``high`` as scalar.
Is expected to return whether the constraint ``column`` is between the
``low`` and ``high`` values.
Input:
- Table data where the second value is smaller than ``low`` and
last value is greater than ``high``. (pandas.DataFrame)
Output:
- True should be returned for the first row, False
for the last two. (pandas.Series)
"""
# Setup
column = 'a'
low = 'b'
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 1.9],
'b': [-0.5, 1, 0.6],
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_column_column(self):
"""Test the ``Between.is_valid`` method with ``low`` and ``high`` as columns.
Is expected to return whether the constraint ``column`` is between the
``low`` and ``high`` values.
Input:
- Table data where the last value is greater than ``high``. (pandas.DataFrame)
Output:
- True should be returned for the two first rows, False
for the last one. (pandas.Series)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0, -1, 0.5],
'c': [0.5, 1, 0.6]
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_low_high_nans(self):
"""Test the ``Between.is_valid`` method with nan values in low and high columns.
If one of `low` or `high` is NaN, expect it to be ignored in the comparison.
If both are NaN or the constraint column is NaN, return True.
Input:
- Table with a NaN row
Output:
- True should be returned for the NaN row.
"""
# Setup
instance = Between(column='a', low='b', high='c')
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9, 1.0],
'b': [0, None, None, 0.4],
'c': [0.5, None, 0.6, None]
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_column_nans(self):
"""Test the ``Between.is_valid`` method with nan values in constraint column.
        If the constraint column is NaN, expect that `is_valid` returns True.
Input:
- Table with a NaN row
Output:
- True should be returned for the NaN row.
"""
# Setup
instance = Between(column='a', low='b', high='c')
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, None],
'b': [0, 0.1, 0.5],
'c': [0.5, 1.5, 0.6]
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_high_scalar_low_nans(self):
"""Test the ``Between.is_valid`` method with ``high`` as scalar and ``low`` containing NaNs.
The NaNs in ``low`` should be ignored.
Input:
- Table with a NaN row
Output:
- The NaN values should be ignored when making comparisons.
"""
# Setup
column = 'a'
low = 'b'
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 1.9],
'b': [-0.5, None, None],
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_low_high_nans_datetime(self):
"""Test the ``Between.is_valid`` method with nan values in low and high datetime columns.
If one of `low` or `high` is NaN, expect it to be ignored in the comparison.
If both are NaN or the constraint column is NaN, return True.
Input:
        - Table with rows containing NaNs in the ``low`` and ``high`` columns.
Output:
- True should be returned for the NaN row.
"""
# Setup
instance = Between(column='a', low='b', high='c')
# Run
table_data = pd.DataFrame({
'a': [
pd.to_datetime('2020-09-13'),
pd.to_datetime('2020-08-12'),
pd.to_datetime('2020-08-13'),
pd.to_datetime('2020-08-14'),
],
'b': [
pd.to_datetime('2020-09-03'),
None,
None,
pd.to_datetime('2020-10-03'),
],
'c': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
None,
None,
]
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_column_nans_datetime(self):
"""Test the ``Between.is_valid`` method with nan values in the constraint column.
If there is a row containing NaNs, expect that `is_valid` returns True.
Input:
        - Table with a row containing NaN in the constraint column.
Output:
- True should be returned for the NaN row.
"""
# Setup
instance = Between(column='a', low='b', high='c')
# Run
table_data = pd.DataFrame({
'a': [
None,
pd.to_datetime('2020-08-12'),
pd.to_datetime('2020-08-13'),
],
'b': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-02'),
pd.to_datetime('2020-08-03'),
],
'c': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-01'),
]
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_high_datetime_low_nans(self):
"""Test the ``Between.is_valid`` method with ``high`` as datetime and ``low`` with NaNs.
The NaNs in ``low`` should be ignored.
Input:
- Table with a NaN row
Output:
- The NaN values should be ignored when making comparisons.
"""
# Setup
column = 'a'
low = 'b'
high = pd.to_datetime('2020-08-13')
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [
pd.to_datetime('2020-08-12'),
pd.to_datetime('2020-08-12'),
pd.to_datetime('2020-08-14'),
],
'b': [
pd.to_datetime('2020-06-03'),
None,
None,
],
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False])
pd.testing.assert_series_equal(expected_out, out)
class TestOneHotEncoding():
def test_reverse_transform(self):
"""Test the ``OneHotEncoding.reverse_transform`` method.
It is expected to, for each of the appropriate rows, set the column
with the largest value to one and set all other columns to zero.
Input:
- Table data with any numbers (pandas.DataFrame)
Output:
- Table data where the appropriate rows are one hot (pandas.DataFrame)
"""
# Setup
instance = OneHotEncoding(columns=['a', 'b'])
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.8],
'b': [0.8, 0.1, 0.9],
'c': [1, 2, 3]
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [0.0, 1.0, 0.0],
'b': [1.0, 0.0, 1.0],
'c': [1, 2, 3]
})
pd.testing.assert_frame_equal(expected_out, out)
def test_is_valid(self):
"""Test the ``OneHotEncoding.is_valid`` method.
``True`` when for the rows where the data is one hot, ``False`` otherwise.
Input:
        - Table data (pandas.DataFrame) containing one valid one-hot row, one row with a sum
        greater than 1, one row with a sum less than 1, one row with halves adding to one and
        one row with nans.
Output:
- Series of ``True`` and ``False`` values (pandas.Series)
"""
# Setup
instance = OneHotEncoding(columns=['a', 'b', 'c'])
# Run
table_data = pd.DataFrame({
'a': [1.0, 1.0, 0.0, 0.5, 1.0],
'b': [0.0, 1.0, 0.0, 0.5, 0.0],
'c': [0.0, 2.0, 0.0, 0.0, np.nan],
'd': [1, 2, 3, 4, 5]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test__sample_constraint_columns_proper(self):
"""Test the ``OneHotEncoding._sample_constraint_columns`` method.
Expected to return a table with the appropriate complementary column ``b``,
since column ``a`` is entirely defined by the ``condition`` table.
Input:
- Table data (pandas.DataFrame)
Output:
- Table where ``a`` is the same as in ``condition``
        and ``b`` is complementary (pandas.DataFrame)
"""
# Setup
data = pd.DataFrame({
'a': [1.0, 0.0] * 5,
'b': [0.0, 1.0] * 5,
})
instance = OneHotEncoding(columns=['a', 'b'])
instance.fit(data)
# Run
condition = pd.DataFrame({
'a': [1.0, 0.0, 0.0] * 5,
})
out = instance._sample_constraint_columns(condition)
# Assert
expected_out = pd.DataFrame({
'a': [1.0, 0.0, 0.0] * 5,
'b': [0.0, 1.0, 1.0] * 5,
})
pd.testing.assert_frame_equal(expected_out, out)
def test__sample_constraint_columns_one_one(self):
"""Test the ``OneHotEncoding._sample_constraint_columns`` method.
Since the condition column contains a one for all rows, expected to assign
all other columns to zeros.
Input:
- Table data (pandas.DataFrame)
Output:
        - Table where the first column contains ones and the other columns zeros (pandas.DataFrame)
"""
# Setup
data = pd.DataFrame({
'a': [1.0, 0.0] * 5,
'b': [0.0, 1.0] * 5,
'c': [0.0, 0.0] * 5
})
instance = OneHotEncoding(columns=['a', 'b', 'c'])
instance.fit(data)
# Run
condition = pd.DataFrame({
'a': [1.0] * 10
})
out = instance._sample_constraint_columns(condition)
# Assert
expected_out = pd.DataFrame({
'a': [1.0] * 10,
'b': [0.0] * 10,
'c': [0.0] * 10
})
pd.testing.assert_frame_equal(expected_out, out)
def test__sample_constraint_columns_two_ones(self):
"""Test the ``OneHotEncoding._sample_constraint_columns`` method.
Expected to raise a ``ValueError``, since the condition contains two ones
in a single row.
Input:
- Table data (pandas.DataFrame)
Raise:
- ``ValueError``
"""
# Setup
data = pd.DataFrame({
'a': [1.0, 0.0] * 5,
'b': [0.0, 1.0] * 5,
'c': [0.0, 0.0] * 5
})
instance = OneHotEncoding(columns=['a', 'b', 'c'])
instance.fit(data)
# Run
condition = pd.DataFrame({
'a': [1.0] * 10,
'b': [1.0] * 10,
'c': [0.0] * 10
})
# Assert
with pytest.raises(ValueError):
instance._sample_constraint_columns(condition)
def test__sample_constraint_columns_non_binary(self):
"""Test the ``OneHotEncoding._sample_constraint_columns`` method.
Expected to raise a ``ValueError``, since the condition contains a non binary value.
Input:
- Table data (pandas.DataFrame)
Raise:
- ``ValueError``
"""
# Setup
data = pd.DataFrame({
'a': [1.0, 0.0] * 5,
'b': [0.0, 1.0] * 5,
'c': [0.0, 0.0] * 5
})
instance = OneHotEncoding(columns=['a', 'b', 'c'])
instance.fit(data)
# Run
condition = pd.DataFrame({
'a': [0.5] * 10
})
# Assert
with pytest.raises(ValueError):
instance._sample_constraint_columns(condition)
def test__sample_constraint_columns_all_zeros(self):
"""Test the ``OneHotEncoding._sample_constraint_columns`` method.
Expected to raise a ``ValueError``, since the condition contains only zeros.
Input:
- Table data (pandas.DataFrame)
Raise:
- ``ValueError``
"""
# Setup
data = pd.DataFrame({
'a': [1, 0] * 5,
'b': [0, 1] * 5,
'c': [0, 0] * 5
})
instance = OneHotEncoding(columns=['a', 'b', 'c'])
instance.fit(data)
# Run
condition = pd.DataFrame({
'a': [0.0] * 10,
'b': [0.0] * 10,
'c': [0.0] * 10
})
# Assert
with pytest.raises(ValueError):
instance._sample_constraint_columns(condition)
def test__sample_constraint_columns_valid_condition(self):
"""Test the ``OneHotEncoding._sample_constraint_columns`` method.
Expected to generate a table where every column satisfies the ``condition``.
Input:
- Table data (pandas.DataFrame)
Output:
        - Table satisfying the ``condition`` (pandas.DataFrame)
"""
# Setup
data = pd.DataFrame({
'a': [1.0, 0.0] * 5,
'b': [0.0, 1.0] * 5,
'c': [0.0, 0.0] * 5
})
instance = OneHotEncoding(columns=['a', 'b', 'c'])
instance.fit(data)
# Run
condition = pd.DataFrame({
'a': [0.0] * 10,
'b': [1.0] * 10,
'c': [0.0] * 10
})
out = instance._sample_constraint_columns(condition)
# Assert
expected_out = pd.DataFrame({
'a': [0.0] * 10,
'b': [1.0] * 10,
'c': [0.0] * 10
})
pd.testing.assert_frame_equal(expected_out, out)
def test__sample_constraint_columns_one_zero(self):
"""Test the ``OneHotEncoding._sample_constraint_columns`` method.
Since the condition column contains only one zero, expected to randomly sample
from unset columns any valid possibility. Since the ``b`` column in ``data``
contains all the ones, it's expected to return a table where only ``b`` has ones.
Input:
- Table data (pandas.DataFrame)
Output:
        - Table where ``b`` is all ones and the other columns are all zeros (pandas.DataFrame)
"""
# Setup
data = pd.DataFrame({
'a': [0.0, 0.0] * 5,
'b': [1.0, 1.0] * 5,
'c': [0.0, 0.0] * 5
})
instance = OneHotEncoding(columns=['a', 'b', 'c'])
instance.fit(data)
# Run
condition = pd.DataFrame({
'c': [0.0] * 10
})
out = instance._sample_constraint_columns(condition)
# Assert
expected_out = pd.DataFrame({
'c': [0.0] * 10,
'a': [0.0] * 10,
'b': [1.0] * 10
})
pd.testing.assert_frame_equal(expected_out, out)
def test__sample_constraint_columns_one_zero_alt(self):
"""Test the ``OneHotEncoding._sample_constraint_columns`` method.
Since the condition column contains only one zero, expected to randomly sample
from unset columns any valid possibility.
Input:
- Table data (pandas.DataFrame)
Output:
        - Table where ``c`` is all zeros and ``b`` xor ``a`` is always one (pandas.DataFrame)
"""
# Setup
data = pd.DataFrame({
'a': [1.0, 0.0] * 5,
'b': [0.0, 1.0] * 5,
'c': [0.0, 0.0] * 5
})
instance = OneHotEncoding(columns=['a', 'b', 'c'])
instance.fit(data)
# Run
condition = pd.DataFrame({
'c': [0.0] * 10
})
out = instance._sample_constraint_columns(condition)
# Assert
assert (out['c'] == 0.0).all()
assert ((out['b'] == 1.0) ^ (out['a'] == 1.0)).all()
def test_sample_constraint_columns_list_of_conditions(self):
"""Test the ``OneHotEncoding._sample_constraint_columns`` method.
Expected to generate a table satisfying the ``condition``.
Input:
- Table data (pandas.DataFrame)
Output:
- Table satisfying the ``condition`` (pandas.DataFrame)
"""
# Setup
data = pd.DataFrame({
'a': [1.0, 0.0] * 5,
'b': [0.0, 1.0] * 5,
'c': [0.0, 0.0] * 5
})
instance = OneHotEncoding(columns=['a', 'b', 'c'])
instance.fit(data)
# Run
condition = pd.DataFrame({
'a': [0.0, 1.0] * 5,
'c': [0.0, 0.0] * 5
})
out = instance._sample_constraint_columns(condition)
# Assert
expected_output = pd.DataFrame({
'a': [0.0, 1.0] * 5,
'c': [0.0, 0.0] * 5,
'b': [1.0, 0.0] * 5
})
pd.testing.assert_frame_equal(out, expected_output)
def test_sample_constraint_columns_negative_values(self):
"""Test the ``OneHotEncoding._sample_constraint_columns`` method.
Expected to raise a ``ValueError``, since condition is not a one hot vector.
        This tests that even if the sum of a row is one, a non one-hot row still raises an error.
Input:
- Table data (pandas.DataFrame)
Raise:
- ``ValueError``
"""
# Setup
data = pd.DataFrame({
'a': [1.0] * 10,
'b': [-1.0] * 10,
'c': [1.0] * 10
})
instance = OneHotEncoding(columns=['a', 'b', 'c'])
instance.fit(data)
# Run
condition = pd.DataFrame({
'a': [1.0] * 10,
'b': [-1.0] * 10,
'c': [1.0] * 10
})
# Assert
with pytest.raises(ValueError):
instance._sample_constraint_columns(condition)
def test_sample_constraint_columns_all_zeros_but_one(self):
"""Test the ``OneHotEncoding._sample_constraint_columns`` method.
Expected to generate a table where column ``a`` is filled with ones,
and ``b`` and ``c`` filled with zeros.
Input:
- Table data (pandas.DataFrame)
Output:
- Table satisfying the ``condition`` (pandas.DataFrame)
"""
# Setup
data = pd.DataFrame({
'a': [1.0, 0.0] * 5,
'b': [0.0, 1.0] * 5,
'c': [0.0, 0.0] * 5
})
instance = OneHotEncoding(columns=['a', 'b', 'c'])
instance.fit(data)
# Run
condition = pd.DataFrame({
'a': [0.0] * 10,
'c': [0.0] * 10
})
out = instance._sample_constraint_columns(condition)
# Assert
expected_output = pd.DataFrame({
'a': [0.0] * 10,
'c': [0.0] * 10,
'b': [1.0] * 10
})
pd.testing.assert_frame_equal(out, expected_output)
class TestUnique():
def test___init__(self):
"""Test the ``Unique.__init__`` method.
The ``columns`` should be set to those provided and the
``handling_strategy`` should be set to ``'reject_sampling'``.
Input:
- column names to keep unique.
Output:
- Instance with ``columns`` set and ``transform``
and ``reverse_transform`` methods set to ``instance._identity``.
"""
# Run
instance = Unique(columns=['a', 'b'])
# Assert
assert instance.columns == ['a', 'b']
assert instance.fit_columns_model is False
assert instance.transform == instance._identity_with_validation
assert instance.reverse_transform == instance._identity
def test___init__one_column(self):
"""Test the ``Unique.__init__`` method.
The ``columns`` should be set to a list even if a string is
provided.
Input:
- string that is the name of a column.
Output:
- Instance with ``columns`` set to list of one element.
"""
# Run
instance = Unique(columns='a')
# Assert
assert instance.columns == ['a']
def test_is_valid(self):
"""Test the ``Unique.is_valid`` method.
This method should return a pd.Series where the index
        of the first occurrence of a unique combination of ``instance.columns``
        is set to ``True``, and every other occurrence is set to ``False``.
        Input:
        - DataFrame with multiple of the same combinations of columns.
        Output:
        - Series with the index of the first occurrences set to ``True``.
"""
# Setup
instance = Unique(columns=['a', 'b', 'c'])
# Run
data = pd.DataFrame({
'a': [1, 1, 2, 2, 3, 4],
'b': [5, 5, 6, 6, 7, 8],
'c': [9, 9, 10, 10, 12, 13]
})
valid = instance.is_valid(data)
# Assert
expected = pd.Series([True, False, True, False, True, True])
pd.testing.assert_series_equal(valid, expected)
def test_is_valid_custom_index_same_values(self):
"""Test the ``Unique.is_valid`` method.
This method should return a pd.Series where the index
        of the first occurrence of a unique combination of ``instance.columns``
        is set to ``True``, and every other occurrence is set to ``False``.
        Input:
        - DataFrame with multiple of the same combinations of columns.
        - DataFrame has a custom index column which is set to 0 for all rows.
        Output:
        - Series with the index of the first occurrences set to ``True``.
Github Issue:
- Problem is described in: https://github.com/sdv-dev/SDV/issues/616
"""
# Setup
instance = Unique(columns=['a', 'b', 'c'])
# Run
data = pd.DataFrame({
'a': [1, 1, 2, 2, 3],
'b': [5, 5, 6, 6, 7],
'c': [8, 8, 9, 9, 10]
}, index=[0, 0, 0, 0, 0])
valid = instance.is_valid(data)
# Assert
expected = pd.Series([True, False, True, False, True], index=[0, 0, 0, 0, 0])
pd.testing.assert_series_equal(valid, expected)
def test_is_valid_custom_index_not_sorted(self):
"""Test the ``Unique.is_valid`` method.
This method should return a pd.Series where the index
        of the first occurrence of a unique combination of ``instance.columns``
        is set to ``True``, and every other occurrence is set to ``False``.
        Input:
        - DataFrame with multiple of the same combinations of columns.
        - DataFrame has a custom index column which is set in an unsorted way.
        Output:
        - Series with the index of the first occurrences set to ``True``.
Github Issue:
- Problem is described in: https://github.com/sdv-dev/SDV/issues/617
"""
# Setup
instance = Unique(columns=['a', 'b', 'c'])
# Run
data = pd.DataFrame({
'a': [1, 1, 2, 2, 3],
'b': [5, 5, 6, 6, 7],
'c': [8, 8, 9, 9, 10]
}, index=[2, 1, 3, 5, 4])
valid = instance.is_valid(data)
# Assert
expected = pd.Series([True, False, True, False, True], index=[2, 1, 3, 5, 4])
pd.testing.assert_series_equal(valid, expected)
def test_is_valid_one_column_custom_index_not_sorted(self):
"""Test the ``Unique.is_valid`` method.
This method should return a pd.Series where the index
        of the first occurrence of a unique value of ``self.columns``
        is set to ``True``, and every other occurrence is set to ``False``.
        Input:
        - DataFrame with multiple occurrences of the same value of the
one column in ``instance.columns``.
- DataFrame has a custom index column which is set in an unsorted way.
Output:
        - Series with the index of the first occurrences set to ``True``.
Github Issue:
- Problem is described in: https://github.com/sdv-dev/SDV/issues/617
"""
# Setup
instance = Unique(columns='a')
# Run
data = pd.DataFrame({
'a': [1, 1, 1, 2, 3, 2],
'b': [1, 2, 3, 4, 5, 6],
'c': [False, False, True, False, False, True]
}, index=[2, 1, 3, 5, 4, 6])
valid = instance.is_valid(data)
# Assert
expected = pd.Series([True, False, False, True, True, False], index=[2, 1, 3, 5, 4, 6])
pd.testing.assert_series_equal(valid, expected)
def test_is_valid_one_column_custom_index_same_values(self):
"""Test the ``Unique.is_valid`` method.
This method should return a pd.Series where the index
        of the first occurrence of a unique value of ``self.columns``
        is set to ``True``, and every other occurrence is set to ``False``.
        Input:
        - DataFrame with multiple occurrences of the same value of the
one column in ``instance.columns``.
        - DataFrame has a custom index column which is set to 0 for all rows.
Output:
        - Series with the index of the first occurrences set to ``True``.
Github Issue:
- Problem is described in: https://github.com/sdv-dev/SDV/issues/616
"""
# Setup
instance = Unique(columns='a')
# Run
data = pd.DataFrame({
'a': [1, 1, 1, 2, 3, 2],
'b': [1, 2, 3, 4, 5, 6],
'c': [False, False, True, False, False, True]
}, index=[0, 0, 0, 0, 0, 0])
valid = instance.is_valid(data)
# Assert
expected = pd.Series([True, False, False, True, True, False], index=[0, 0, 0, 0, 0, 0])
pd.testing.assert_series_equal(valid, expected)
def test_is_valid_one_column(self):
"""Test the ``Unique.is_valid`` method.
This method should return a pd.Series where the index
        of the first occurrence of a unique value of ``self.columns``
        is set to ``True``, and every other occurrence is set to ``False``.
        Input:
        - DataFrame with multiple occurrences of the same value of the
one column in ``instance.columns``.
Output:
        - Series with the index of the first occurrences set to ``True``.
"""
# Setup
instance = Unique(columns='a')
# Run
data = pd.DataFrame({
'a': [1, 1, 1, 2, 3, 2],
'b': [1, 2, 3, 4, 5, 6],
'c': [False, False, True, False, False, True]
})
valid = instance.is_valid(data)
# Assert
expected = pd.Series([True, False, False, True, True, False])
pd.testing.assert_series_equal(valid, expected)
def test_is_valid_one_column_nans(self):
"""Test the ``Unique.is_valid`` method for one column with nans.
This method should return a pd.Series where the index
        of the first occurrence of a unique value of ``instance.columns``
        is set to ``True``, and every other occurrence is set to ``False``.
        ``None``, ``np.nan`` and ``float('nan')`` should be treated as the same category.
        Input:
        - DataFrame with some repeated values, some of which are NaNs.
        Output:
        - Series with the index of the first occurrences set to ``True``.
"""
# Setup
instance = Unique(columns=['a'])
# Run
data = pd.DataFrame({
'a': [1, None, 2, np.nan, float('nan'), 1],
'b': [np.nan, 1, None, float('nan'), float('nan'), 1],
})
valid = instance.is_valid(data)
# Assert
expected = pd.Series([True, True, True, False, False, False])
pd.testing.assert_series_equal(valid, expected)
def test_is_valid_multiple_columns_nans(self):
"""Test the ``Unique.is_valid`` method for multiple columns with nans.
This method should return a pd.Series where the index
        of the first occurrence of a unique combination of ``instance.columns``
        is set to ``True``, and every other occurrence is set to ``False``.
        ``None``, ``np.nan`` and ``float('nan')`` should be treated as the same category.
        Input:
        - DataFrame with multiple of the same combinations of columns, some of which are NaNs.
        Output:
        - Series with the index of the first occurrences set to ``True``.
"""
# Setup
instance = Unique(columns=['a', 'b'])
# Run
data = pd.DataFrame({
'a': [1, None, 1, np.nan, float('nan'), 1],
'b': [np.nan, 1, None, float('nan'), float('nan'), 1],
})
valid = instance.is_valid(data)
# Assert
expected = pd.Series([True, True, False, True, False, True])
pd.testing.assert_series_equal(valid, expected)
| 32.836804
| 100
| 0.551699
|
67ff72011a09cb59f5a9600e05429813dd388b81
| 11,138
|
py
|
Python
|
src/opserver/alarmgen_cfg.py
|
UbuntuEvangelist/contrail-controller
|
4e8a992230f8f8e91e4f753e19b5442d9e1b446d
|
[
"Apache-2.0"
] | null | null | null |
src/opserver/alarmgen_cfg.py
|
UbuntuEvangelist/contrail-controller
|
4e8a992230f8f8e91e4f753e19b5442d9e1b446d
|
[
"Apache-2.0"
] | null | null | null |
src/opserver/alarmgen_cfg.py
|
UbuntuEvangelist/contrail-controller
|
4e8a992230f8f8e91e4f753e19b5442d9e1b446d
|
[
"Apache-2.0"
] | 18
|
2017-01-12T09:28:44.000Z
|
2019-04-18T20:47:42.000Z
|
import argparse, os, ConfigParser, sys, re
from pysandesh.sandesh_base import *
from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
class CfgParser(object):
def __init__(self, argv):
self._devices = []
self._args = None
self.__pat = None
self._argv = argv or ' '.join(sys.argv[1:])
def parse(self):
'''
command line example
contrail-alarm-gen --log_level SYS_DEBUG
--logging_level DEBUG
--log_category test
--log_file <stdout>
--use_syslog
--syslog_facility LOG_USER
--disc_server_ip 127.0.0.1
--disc_server_port 5998
--worker_id 0
--partitions 5
--redis_password
--http_server_port 5995
--redis_server_port 6379
--redis_uve_list 127.0.0.1:6379
--alarmgen_list 127.0.0.1:0
--kafka_broker_list 127.0.0.1:9092
--zk_list 127.0.0.1:2181
--rabbitmq_server_list 127.0.0.1:5672
--conf_file /etc/contrail/contrail-alarm-gen.conf
'''
# Source any specified config/ini file
# Turn off help, so we print all options in response to -h
conf_parser = argparse.ArgumentParser(add_help=False)
conf_parser.add_argument("-c", "--conf_file", action="append",
help="Specify config file", metavar="FILE")
args, remaining_argv = conf_parser.parse_known_args(self._argv.split())
defaults = {
'host_ip' : '127.0.0.1',
'collectors' : [],
'kafka_broker_list' : ['127.0.0.1:9092'],
'log_local' : False,
'log_level' : SandeshLevel.SYS_DEBUG,
'log_category' : '',
'log_file' : Sandesh._DEFAULT_LOG_FILE,
'use_syslog' : False,
'syslog_facility' : Sandesh._DEFAULT_SYSLOG_FACILITY,
'http_server_port' : 5995,
'worker_id' : '0',
'partitions' : 15,
'zk_list' : None,
'rabbitmq_server_list' : None,
'rabbitmq_port' : 5672,
'rabbitmq_user' : 'guest',
'rabbitmq_password' : 'guest',
'rabbitmq_vhost' : None,
'rabbitmq_ha_mode' : False,
'redis_uve_list' : ['127.0.0.1:6379'],
'alarmgen_list' : ['127.0.0.1:0'],
'sandesh_send_rate_limit' : SandeshSystem.get_sandesh_send_rate_limit(),
'kafka_prefix' :'',
}
redis_opts = {
'redis_server_port' : 6379,
'redis_password' : None,
}
disc_opts = {
'disc_server_ip' : None,
'disc_server_port' : 5998,
}
keystone_opts = {
'auth_host': '127.0.0.1',
'auth_protocol': 'http',
'auth_port': 35357,
'admin_user': 'user1',
'admin_password': 'password1',
'admin_tenant_name': 'default-domain'
}
config = None
if args.conf_file:
config = ConfigParser.SafeConfigParser()
config.optionxform = str
config.read(args.conf_file)
if 'DEFAULTS' in config.sections():
defaults.update(dict(config.items('DEFAULTS')))
if 'REDIS' in config.sections():
redis_opts.update(dict(config.items('REDIS')))
if 'DISCOVERY' in config.sections():
disc_opts.update(dict(config.items('DISCOVERY')))
if 'KEYSTONE' in config.sections():
keystone_opts.update(dict(config.items('KEYSTONE')))
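        # Illustrative sketch (not from the original source) of an ini file this
        # block consumes; section names must match the checks above and the
        # values shown are placeholders only:
        #
        #   [DEFAULTS]
        #   host_ip = 10.0.0.10
        #   partitions = 15
        #
        #   [REDIS]
        #   redis_server_port = 6379
        #
        #   [DISCOVERY]
        #   disc_server_ip = 10.0.0.5
        #   disc_server_port = 5998
        #
        #   [KEYSTONE]
        #   admin_user = user1
        #   admin_tenant_name = default-domain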
# Override with CLI options
        # Don't suppress add_help here so it will handle -h
parser = argparse.ArgumentParser(
# Inherit options from config_parser
parents=[conf_parser],
# print script description with -h/--help
description=__doc__,
# Don't mess with format of description
formatter_class=argparse.RawDescriptionHelpFormatter,
)
defaults.update(redis_opts)
defaults.update(disc_opts)
defaults.update(keystone_opts)
parser.set_defaults(**defaults)
parser.add_argument("--host_ip",
help="Host IP address")
parser.add_argument("--collectors",
help="List of Collector IP addresses in ip:port format",
nargs="+")
parser.add_argument(
"--log_file",
help="Filename for the logs to be written to")
parser.add_argument("--log_local", action="store_true",
help="Enable local logging of sandesh messages")
parser.add_argument(
"--log_category",
help="Category filter for local logging of sandesh messages")
parser.add_argument(
"--log_level",
help="Severity level for local logging of sandesh messages")
parser.add_argument("--use_syslog",
action="store_true",
help="Use syslog for logging")
parser.add_argument("--syslog_facility",
help="Syslog facility to receive log lines")
parser.add_argument("--http_server_port", type=int,
help="introspect server port")
parser.add_argument("--worker_id",
help="Worker Id")
parser.add_argument("--partitions", type=int,
help="Number of partitions for hashing UVE keys")
parser.add_argument("--disc_server_ip",
help="Discovery Server IP address")
parser.add_argument("--disc_server_port",
type=int,
help="Discovery Server port")
parser.add_argument("--redis_server_port",
type=int,
help="Redis server port")
parser.add_argument("--redis_password",
help="Redis server password")
parser.add_argument("--kafka_broker_list",
help="List of bootstrap kafka brokers in ip:port format",
nargs="+")
parser.add_argument("--zk_list",
help="List of zookeepers in ip:port format",
nargs="+")
parser.add_argument("--rabbitmq_server_list", type=str,
help="List of Rabbitmq server ip address separated by comma")
parser.add_argument("--rabbitmq_port",
help="Rabbitmq server port")
parser.add_argument("--rabbitmq_user",
help="Username for Rabbitmq")
parser.add_argument("--rabbitmq_password",
help="Password for Rabbitmq")
parser.add_argument("--rabbitmq_vhost",
help="vhost for Rabbitmq")
parser.add_argument("--rabbitmq_ha_mode",
action="store_true",
help="True if the rabbitmq cluster is mirroring all queue")
parser.add_argument("--redis_uve_list",
help="List of redis-uve in ip:port format. For internal use only",
nargs="+")
parser.add_argument("--alarmgen_list",
help="List of alarmgens in ip:inst format. For internal use only",
nargs="+")
parser.add_argument("--sandesh_send_rate_limit", type=int,
help="Sandesh send rate limit in messages/sec")
parser.add_argument("--kafka_prefix",
help="System Prefix for Kafka")
parser.add_argument("--auth_host",
help="ip of keystone server")
parser.add_argument("--auth_protocol",
help="keystone authentication protocol")
parser.add_argument("--auth_port", type=int,
help="ip of keystone server")
parser.add_argument("--admin_user",
help="Name of keystone admin user")
parser.add_argument("--admin_password",
help="Password of keystone admin user")
parser.add_argument("--admin_tenant_name",
help="Tenant name for keystone admin user")
self._args = parser.parse_args(remaining_argv)
        # Options read from a config file arrive as whitespace separated
        # strings; normalize them into lists.
        if type(self._args.collectors) is str:
            self._args.collectors = self._args.collectors.split()
        if type(self._args.kafka_broker_list) is str:
            self._args.kafka_broker_list = self._args.kafka_broker_list.split()
        if type(self._args.zk_list) is str:
            self._args.zk_list = self._args.zk_list.split()
        if type(self._args.redis_uve_list) is str:
            self._args.redis_uve_list = self._args.redis_uve_list.split()
        if type(self._args.alarmgen_list) is str:
            self._args.alarmgen_list = self._args.alarmgen_list.split()
def _pat(self):
if self.__pat is None:
self.__pat = re.compile(', *| +')
return self.__pat
def _mklist(self, s):
return self._pat().split(s)
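    # Illustration (hypothetical values): _mklist() splits comma- or
    # space-separated option strings coming from the config file, e.g.
    #   self._mklist("10.0.0.1:8086, 10.0.0.2:8086 10.0.0.3:8086")
    #   -> ['10.0.0.1:8086', '10.0.0.2:8086', '10.0.0.3:8086']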
def redis_uve_list(self):
return self._args.redis_uve_list
def alarmgen_list(self):
return self._args.alarmgen_list
def discovery(self):
return {'server':self._args.disc_server_ip,
'port':self._args.disc_server_port }
def collectors(self):
return self._args.collectors
def kafka_broker_list(self):
return self._args.kafka_broker_list
def zk_list(self):
        return self._args.zk_list
def log_local(self):
return self._args.log_local
def log_category(self):
return self._args.log_category
def log_level(self):
return self._args.log_level
def log_file(self):
return self._args.log_file
def use_syslog(self):
return self._args.use_syslog
def syslog_facility(self):
return self._args.syslog_facility
def http_port(self):
return self._args.http_server_port
def worker_id(self):
return self._args.worker_id
def partitions(self):
return self._args.partitions
def redis_password(self):
return self._args.redis_password
def redis_server_port(self):
return self._args.redis_server_port
def host_ip(self):
return self._args.host_ip
def sandesh_send_rate_limit(self):
return self._args.sandesh_send_rate_limit
def kafka_prefix(self):
return self._args.kafka_prefix
def rabbitmq_params(self):
return {'servers': self._args.rabbitmq_server_list,
'port': self._args.rabbitmq_port,
'user': self._args.rabbitmq_user,
'password': self._args.rabbitmq_password,
'vhost': self._args.rabbitmq_vhost,
'ha_mode': self._args.rabbitmq_ha_mode}
def keystone_params(self):
return {'auth_host': self._args.auth_host,
'auth_protocol': self._args.auth_protocol,
'auth_port': self._args.auth_port,
'admin_user': self._args.admin_user,
'admin_password': self._args.admin_password,
'admin_tenant_name': self._args.admin_tenant_name}
| 38.406897
| 84
| 0.580356
|
99cdaeea0abf585ce66f2e7d98a1778de9913ce6
| 2,702
|
py
|
Python
|
Server/recipe/tool.py
|
it-intensive-programming2/HCI_project
|
1530c181ae2bad62bd216aa851cea60ba6594738
|
[
"MIT"
] | 2
|
2021-08-05T02:33:17.000Z
|
2021-08-19T17:10:57.000Z
|
Server/recipe/tool.py
|
it-intensive-programming2/HCI_project
|
1530c181ae2bad62bd216aa851cea60ba6594738
|
[
"MIT"
] | 1
|
2021-01-04T01:39:02.000Z
|
2021-01-04T01:39:02.000Z
|
Server/recipe/tool.py
|
it-intensive-programming2/recipe_helper
|
1530c181ae2bad62bd216aa851cea60ba6594738
|
[
"MIT"
] | null | null | null |
import os
import sys
import urllib.request
import json
from fatsecret import Fatsecret
from tqdm import tqdm
import pandas as pd
import matplotlib.pyplot as plt
from wordcloud import WordCloud
import numpy as np
import base64
from PIL import Image
import cv2
from collections import Counter
# Naver Papago translation API
def translate(word):
client_id = "l84GX3EyER6GPhqOznzE"
client_secret = "Rb1fwwcZSc"
encText = urllib.parse.quote(word)
data = "source=ko&target=en&text=" + encText
url = "https://openapi.naver.com/v1/papago/n2mt"
request = urllib.request.Request(url)
request.add_header("X-Naver-Client-Id", client_id)
request.add_header("X-Naver-Client-Secret", client_secret)
response = urllib.request.urlopen(request, data=data.encode("utf-8"))
rescode = response.getcode()
if rescode == 200:
response_body = response.read().decode("utf-8")
tmp = json.loads(response_body)
return tmp["message"]["result"]["translatedText"]
else:
print("Error Code:" + rescode)
def getCalorie(recipeID):
fs = Fatsecret(
"9b00b592903147709dc78f969353a598", "d2d2abbf973d453388512a33dd036f4a"
)
df = pd.read_csv("/var/www/Appserver3/recipe/cat2_eng.csv")
word = df[df.id == int(recipeID)].cat2.values[0]
    # Translate the category keyword to English
    search_target = translate(word)
    # Search FatSecret with the translated keyword
    foods = fs.foods_search(search_target)
    # Many results come back, so use the first search result
    nut = foods[0]["food_description"]
nut = nut.split("|")
nut[0] = nut[0].split("-")[1]
nutrient = {}
for b in nut:
i = b.split(":")
i[0] = i[0].strip()
nutrient[i[0]] = i[1].strip()
    # # Calories, carbs, fat, protein
# print(nutrient["Calories"])
# print(nutrient["Carbs"])
# print(nutrient["Fat"])
# print(nutrient["Protein"])
return [
nutrient["Calories"],
nutrient["Carbs"],
nutrient["Fat"],
nutrient["Protein"],
]
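# Example (hypothetical recipe id; the result depends on the contents of
# cat2_eng.csv and on the first FatSecret search hit):
#   calories, carbs, fat, protein = getCalorie("6889543")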
def generateWordCloud(word_list, userID):
image_dir = "/var/www/Appserver3/recipe/wc_image/" + userID + ".png"
word_list = Counter(word_list)
mask = np.array(Image.open("/var/www/Appserver3/recipe/chef.png"))
wc = WordCloud(
font_path="/var/www/Appserver3/recipe/NanumSquareRoundR.ttf",
background_color="rgba(255, 255, 255, 0)",
mode="RGBA",
max_words=2000,
mask=mask,
max_font_size=100,
)
wc = wc.generate_from_frequencies(word_list)
wc.to_file(image_dir)
img_str = ""
with open(image_dir, "rb") as img:
img_str = base64.b64encode(img.read())
with open("/var/www/Appserver3/recipe/wc_image/" + userID + ".txt", "w") as f:
f.write(str(img_str))
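# Example usage (hypothetical arguments; writes <userID>.png and <userID>.txt
# under /var/www/Appserver3/recipe/wc_image/):
#   generateWordCloud(["pasta", "pasta", "salad"], userID="demo_user")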
| 26.752475
| 82
| 0.641377
|
eb558aa16320e9940f99909f5e55630c3cace359
| 2,014
|
py
|
Python
|
tests/providers/google/suite/operators/test_sheets.py
|
troywinter/airflow
|
ba66ba0d97941c55d9f00f66329a9d3c7ad673e7
|
[
"Apache-2.0"
] | 3
|
2015-08-25T13:56:44.000Z
|
2020-03-21T10:26:58.000Z
|
tests/providers/google/suite/operators/test_sheets.py
|
troywinter/airflow
|
ba66ba0d97941c55d9f00f66329a9d3c7ad673e7
|
[
"Apache-2.0"
] | 37
|
2020-07-21T07:50:02.000Z
|
2022-03-29T22:31:28.000Z
|
tests/providers/google/suite/operators/test_sheets.py
|
troywinter/airflow
|
ba66ba0d97941c55d9f00f66329a9d3c7ad673e7
|
[
"Apache-2.0"
] | 4
|
2020-07-17T14:02:28.000Z
|
2022-02-23T04:29:58.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mock
from airflow.providers.google.suite.operators.sheets import GoogleSheetsCreateSpreadsheetOperator
GCP_CONN_ID = "test"
SPREADSHEET_URL = "https://example/sheets"
SPREADSHEET_ID = "1234567890"
class TestGoogleSheetsCreateSpreadsheet:
@mock.patch("airflow.providers.google.suite.operators.sheets.GSheetsHook")
@mock.patch(
"airflow.providers.google.suite.operators.sheets.GoogleSheetsCreateSpreadsheetOperator.xcom_push"
)
def test_execute(self, mock_xcom, mock_hook):
context = {}
spreadsheet = mock.MagicMock()
mock_hook.return_value.create_spreadsheet.return_value = {
"spreadsheetId": SPREADSHEET_ID,
"spreadsheetUrl": SPREADSHEET_URL,
}
op = GoogleSheetsCreateSpreadsheetOperator(
task_id="test_task", spreadsheet=spreadsheet, gcp_conn_id=GCP_CONN_ID
)
op.execute(context)
mock_hook.return_value.create_spreadsheet.assert_called_once_with(
spreadsheet=spreadsheet
)
calls = [
mock.call(context, "spreadsheet_id", SPREADSHEET_ID),
mock.call(context, "spreadsheet_url", SPREADSHEET_URL),
]
        mock_xcom.assert_has_calls(calls)
| 38
| 105
| 0.727408
|
ddb90783f9e38ba7b032eac08f569ba19c0085cb
| 720
|
py
|
Python
|
app/core/tests/test_commands.py
|
tsiaGeorge/recipe-app-api
|
eed9387aafb6f3efb169ff93e315a59baec8752f
|
[
"MIT"
] | null | null | null |
app/core/tests/test_commands.py
|
tsiaGeorge/recipe-app-api
|
eed9387aafb6f3efb169ff93e315a59baec8752f
|
[
"MIT"
] | 11
|
2020-02-11T23:38:02.000Z
|
2022-02-10T07:14:08.000Z
|
app/core/tests/test_commands.py
|
tsiaGeorge/recipe-app-api
|
eed9387aafb6f3efb169ff93e315a59baec8752f
|
[
"MIT"
] | null | null | null |
from unittest.mock import patch
from django.core.management import call_command
from django.db import OperationalError
from django.test import TestCase
class CommandTests(TestCase):
@patch('django.db.utils.ConnectionHandler.__getitem__', return_value=True)
def test_wait_for_db_ready(self, gi):
gi.return_value = True
call_command('wait_for_db')
self.assertEqual(gi.call_count, 1)
@patch('time.sleep', return_value=True)
@patch('django.db.utils.ConnectionHandler.__getitem__',
side_effect=[OperationalError] * 5 + [True])
def test_wait_for_db_ready_after_5_attempts(self, gi, ts):
call_command('wait_for_db')
self.assertEqual(gi.call_count, 6)
| 32.727273
| 78
| 0.731944
|
670cbfd0185dcaef0fd19fdc1d58f30afd4cd157
| 1,232
|
py
|
Python
|
sscutils/tests/test_validation.py
|
papsebestyen/sscutils
|
dff8b62ab31c9dfe1494264f9319e287945762bc
|
[
"MIT"
] | null | null | null |
sscutils/tests/test_validation.py
|
papsebestyen/sscutils
|
dff8b62ab31c9dfe1494264f9319e287945762bc
|
[
"MIT"
] | 21
|
2021-09-15T15:31:22.000Z
|
2022-03-20T17:10:50.000Z
|
sscutils/tests/test_validation.py
|
papsebestyen/sscutils
|
dff8b62ab31c9dfe1494264f9319e287945762bc
|
[
"MIT"
] | 2
|
2021-09-08T14:12:00.000Z
|
2021-09-29T10:58:08.000Z
|
import pytest
from sscutils.artifact_context import ArtifactContext
from sscutils.exceptions import NotAnArtifactException
from sscutils.utils import cd_into
from sscutils.validation_functions import is_repo_name, is_step_name
@pytest.mark.parametrize(
"repo_name,is_valid",
[
("abc", True),
("abc123", False),
("abc-a", True),
("abc--a", False),
("-abc", False),
("abc-", False),
],
)
def test_repo_name_valid(repo_name, is_valid):
if is_valid:
is_repo_name(repo_name)
else:
with pytest.raises(NameError):
is_repo_name(repo_name)
@pytest.mark.parametrize(
"step_name,is_valid",
[
("abc", True),
("abc123", False),
("abc_a", True),
("abc__a", False),
("_abc", False),
("abc_", False),
],
)
def test_step_name_valid(step_name, is_valid):
if is_valid:
is_step_name(step_name)
else:
with pytest.raises(NameError):
is_step_name(step_name)
def test_failing_config(tmp_path):
with cd_into(tmp_path):
with pytest.raises(NotAnArtifactException):
ArtifactContext()
def test_missing_fk(tmp_path):
# TODO
pass
| 22
| 68
| 0.62013
|
902118645f30e4b792be4fd623738f85b904fdca
| 116
|
py
|
Python
|
library/admin.py
|
Chenthan/Onlinelibrary
|
69c76d48702276ba23fbe229ec68db4ef571fef5
|
[
"BSD-3-Clause"
] | null | null | null |
library/admin.py
|
Chenthan/Onlinelibrary
|
69c76d48702276ba23fbe229ec68db4ef571fef5
|
[
"BSD-3-Clause"
] | null | null | null |
library/admin.py
|
Chenthan/Onlinelibrary
|
69c76d48702276ba23fbe229ec68db4ef571fef5
|
[
"BSD-3-Clause"
] | null | null | null |
from django.contrib import admin
from .models import reader
# Register your models here.
admin.site.register(reader)
| 29
| 32
| 0.818966
|
ed00ab1478a55d8ff5477fc1e634512d4fc1fb1d
| 7,243
|
py
|
Python
|
torchmetrics/utilities/data.py
|
pranjaldatta/metrics
|
078671fa0b28d7d0a9c505b077d9acc8ac28a69d
|
[
"Apache-2.0"
] | 1
|
2021-05-09T16:11:48.000Z
|
2021-05-09T16:11:48.000Z
|
torchmetrics/utilities/data.py
|
pranjaldatta/metrics
|
078671fa0b28d7d0a9c505b077d9acc8ac28a69d
|
[
"Apache-2.0"
] | null | null | null |
torchmetrics/utilities/data.py
|
pranjaldatta/metrics
|
078671fa0b28d7d0a9c505b077d9acc8ac28a69d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Callable, List, Mapping, Optional, Sequence, Union
import torch
from torch import Tensor, tensor
from torchmetrics.utilities.prints import rank_zero_warn
METRIC_EPS = 1e-6
def dim_zero_cat(x: Union[Tensor, List[Tensor]]) -> Tensor:
x = x if isinstance(x, (list, tuple)) else [x]
x = [y.unsqueeze(0) if y.numel() == 1 and y.ndim == 0 else y for y in x]
return torch.cat(x, dim=0)
def dim_zero_sum(x: Tensor) -> Tensor:
return torch.sum(x, dim=0)
def dim_zero_mean(x: Tensor) -> Tensor:
return torch.mean(x, dim=0)
def _flatten(x):
return [item for sublist in x for item in sublist]
def to_onehot(
label_tensor: Tensor,
num_classes: Optional[int] = None,
) -> Tensor:
"""
Converts a dense label tensor to one-hot format
Args:
label_tensor: dense label tensor, with shape [N, d1, d2, ...]
num_classes: number of classes C
Returns:
A sparse label tensor with shape [N, C, d1, d2, ...]
Example:
>>> x = torch.tensor([1, 2, 3])
>>> to_onehot(x)
tensor([[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
"""
if num_classes is None:
num_classes = int(label_tensor.max().detach().item() + 1)
tensor_onehot = torch.zeros(
label_tensor.shape[0],
num_classes,
*label_tensor.shape[1:],
dtype=label_tensor.dtype,
device=label_tensor.device,
)
index = label_tensor.long().unsqueeze(1).expand_as(tensor_onehot)
return tensor_onehot.scatter_(1, index, 1.0)
def select_topk(prob_tensor: Tensor, topk: int = 1, dim: int = 1) -> Tensor:
"""
Convert a probability tensor to binary by selecting top-k highest entries.
Args:
prob_tensor: dense tensor of shape ``[..., C, ...]``, where ``C`` is in the
position defined by the ``dim`` argument
topk: number of highest entries to turn into 1s
dim: dimension on which to compare entries
Returns:
A binary tensor of the same shape as the input tensor of type torch.int32
Example:
>>> x = torch.tensor([[1.1, 2.0, 3.0], [2.0, 1.0, 0.5]])
>>> select_topk(x, topk=2)
tensor([[0, 1, 1],
[1, 1, 0]], dtype=torch.int32)
"""
zeros = torch.zeros_like(prob_tensor)
topk_tensor = zeros.scatter(dim, prob_tensor.topk(k=topk, dim=dim).indices, 1.0)
return topk_tensor.int()
def to_categorical(tensor: Tensor, argmax_dim: int = 1) -> Tensor:
"""
Converts a tensor of probabilities to a dense label tensor
Args:
tensor: probabilities to get the categorical label [N, d1, d2, ...]
argmax_dim: dimension to apply
Return:
A tensor with categorical labels [N, d2, ...]
Example:
>>> x = torch.tensor([[0.2, 0.5], [0.9, 0.1]])
>>> to_categorical(x)
tensor([1, 0])
"""
return torch.argmax(tensor, dim=argmax_dim)
def get_num_classes(
preds: Tensor,
target: Tensor,
num_classes: Optional[int] = None,
) -> int:
"""
Calculates the number of classes for a given prediction and target tensor.
Args:
preds: predicted values
target: true labels
num_classes: number of classes if known
Return:
An integer that represents the number of classes.
"""
num_target_classes = int(target.max().detach().item() + 1)
num_pred_classes = int(preds.max().detach().item() + 1)
num_all_classes = max(num_target_classes, num_pred_classes)
if num_classes is None:
num_classes = num_all_classes
elif num_classes != num_all_classes:
rank_zero_warn(
f"You have set {num_classes} number of classes which is"
f" different from predicted ({num_pred_classes}) and"
f" target ({num_target_classes}) number of classes",
RuntimeWarning,
)
return num_classes
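# Example (hypothetical tensors): if ``preds`` contains classes {0, 1} and
# ``target`` contains classes {0, 1, 2}, get_num_classes(preds, target)
# returns 3, and a RuntimeWarning is emitted if a different ``num_classes``
# is passed explicitly.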
def apply_to_collection(
data: Any,
dtype: Union[type, tuple],
function: Callable,
*args,
wrong_dtype: Optional[Union[type, tuple]] = None,
**kwargs,
) -> Any:
"""
Recursively applies a function to all elements of a certain dtype.
Args:
data: the collection to apply the function to
dtype: the given function will be applied to all elements of this dtype
function: the function to apply
*args: positional arguments (will be forwarded to calls of ``function``)
wrong_dtype: the given function won't be applied if this type is specified and the given collections is of
the :attr:`wrong_type` even if it is of type :attr`dtype`
**kwargs: keyword arguments (will be forwarded to calls of ``function``)
Returns:
the resulting collection
Example:
>>> apply_to_collection(torch.tensor([8, 0, 2, 6, 7]), dtype=Tensor, function=lambda x: x ** 2)
tensor([64, 0, 4, 36, 49])
>>> apply_to_collection([8, 0, 2, 6, 7], dtype=int, function=lambda x: x ** 2)
[64, 0, 4, 36, 49]
>>> apply_to_collection(dict(abc=123), dtype=int, function=lambda x: x ** 2)
{'abc': 15129}
"""
elem_type = type(data)
# Breaking condition
if isinstance(data, dtype) and (wrong_dtype is None or not isinstance(data, wrong_dtype)):
return function(data, *args, **kwargs)
# Recursively apply to collection items
if isinstance(data, Mapping):
return elem_type({k: apply_to_collection(v, dtype, function, *args, **kwargs) for k, v in data.items()})
if isinstance(data, tuple) and hasattr(data, '_fields'): # named tuple
return elem_type(*(apply_to_collection(d, dtype, function, *args, **kwargs) for d in data))
if isinstance(data, Sequence) and not isinstance(data, str):
return elem_type([apply_to_collection(d, dtype, function, *args, **kwargs) for d in data])
# data is neither of dtype, nor a collection
return data
def get_group_indexes(indexes: Tensor) -> List[Tensor]:
"""
Given an integer `torch.Tensor` `indexes`, return a `torch.Tensor` of indexes for
each different value in `indexes`.
Args:
indexes: a `torch.Tensor`
Return:
A list of integer `torch.Tensor`s
Example:
>>> indexes = torch.tensor([0, 0, 0, 1, 1, 1, 1])
>>> get_group_indexes(indexes)
[tensor([0, 1, 2]), tensor([3, 4, 5, 6])]
"""
res = dict()
for i, _id in enumerate(indexes):
_id = _id.item()
if _id in res:
res[_id] += [i]
else:
res[_id] = [i]
return [tensor(x, dtype=torch.long) for x in res.values()]
| 31.354978
| 114
| 0.627226
|
f6db116d38364861f5c10d0885c6e59f65964bea
| 7,247
|
py
|
Python
|
pson.py
|
antonioluna/proyecto_JSON
|
b413a2e699116f17cba60d6966001d5876494e19
|
[
"Unlicense"
] | null | null | null |
pson.py
|
antonioluna/proyecto_JSON
|
b413a2e699116f17cba60d6966001d5876494e19
|
[
"Unlicense"
] | null | null | null |
pson.py
|
antonioluna/proyecto_JSON
|
b413a2e699116f17cba60d6966001d5876494e19
|
[
"Unlicense"
] | null | null | null |
#-*- coding: utf-8 -*-
import json
# Gets the district codes
def recorrer(dato1, dato2):
datos = {}
for x in educacionr:
datos[x[dato1]] = x[dato2]
return datos
# Gets the institutions in a district
def instituciones(codigo):
datos = {}
for x in educacionr:
if x[cod_dist] == codigo:
datos[x[cod_est]] = x[nom_inst]
return datos
# Gets the managing department of a specific institution
def gestor(cod):
datos = []
gestor = []
institucion = []
for x in educacionr:
if x[cod_est] == cod:
gestor.extend([x[cod_dept], x[nom_dept]])
institucion.extend([x[cod_inst], x[nom_inst]])
datos.extend([gestor])
datos.extend([institucion])
return datos
# Gets the management type of the institutions in a district
def tipo(codigo):
datos = [""]
publica = {}
privada = {}
concertada = {}
    for x in educacionr:
        if x[cod_dist] == codigo:
            datos[0] = x[nom_dist]
if x[gestion] == "OFICIAL":
publica[x[cod_est]] = x[nom_inst]
if x[gestion] == "PRIVADA":
privada[x[cod_est]] = x[nom_inst]
if x[gestion] == "PRIV.SUBVENCIONADA":
concertada[x[cod_est]] = x[nom_inst]
datos.extend([publica])
datos.extend([privada])
datos.extend([concertada])
return datos
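# Illustration (hypothetical district code): tipo("15001") returns a list of
# the form [district_name, {official}, {private}, {subsidised}], where each
# dict maps establishment code -> institution name.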
##########################################################
# #
#                 Opening the JSON file                  #
# #
##########################################################
with open("./DATA/educacion.json", "r") as educacion:
educacionr = json.load(educacion)
##########################################################
# #
#                 JSON file field names                  #
# #
##########################################################
cod_dept = "codigo_departamento"
cod_dist = "codigo_distrito"
cod_est = "codigo_establecimiento"
cod_inst = "codigo_institucion"
cod_zona = "codigo_zona"
nom_barrio = "nombre_barrio_localidad"
nom_dept = "nombre_departamento"
nom_dist = "nombre_distrito"
nom_inst = "nombre_institucion"
nom_zona = "nombre_zona"
gestion = "sector_o_tipo_gestion"
##########################################################
# #
#                       Main menu                        #
# #
##########################################################
print "\nBienvenido a mi proyecto de JSON"
while True:
print "\n¿Que quieres hacer?\n\n1 Listar distritos\n2 Listar de \
instituciones de un distrito\n3 Ver el departamento gestor de una \
institución\n4 Ver la cantidad de instituciones privadas / públicas de un \
distrito\n5 Ver información detallada sobre una institución en concreto\n6 \
Salir"
opcion = raw_input("\nElija opción: ")
##########################################################
# #
#             Printing the list of districts             #
# #
##########################################################
if opcion == "1":
distritos = recorrer(cod_dist, nom_dist)
print "\nCód --- Nombre dst\n"
for x in distritos:
print x + " --- " + distritos[x]
##########################################################
# #
#           Printing the list of institutions            #
# #
##########################################################
if opcion == "2":
elec = raw_input("\nIntroduzca el código del distrito: ")
insti = instituciones(elec)
print "En el distrito %s se encuentran las siguientes instituciones:"\
% (elec)
print "\nCód --- Nombre dst\n"
for x in insti:
print x + " --- " + insti[x]
if opcion == "3":
cod = raw_input("\nIntroduzca el código de la institución: ")
gest = gestor(cod)
print "\nInstitución: %s - %s\nGestor: %s - %s"\
% (str(gest[1][0]), str(gest[1][1]),
str(gest[0][0]), str(gest[0][1]))
##########################################################
# #
#                 Public/private counts                  #
# #
##########################################################
if opcion == "4":
num = raw_input("\nIntroduzca el código del distrito: ")
ppc = tipo(num)
print "Distrito: %s --- %s:" % (num, ppc[0])
print "\nInstituciones Oficiales: %d\n" % (len(ppc[1]))
print " Cod --- Nombre Ins."
for ofi in ppc[1]:
print ofi + " --- " + ppc[1][ofi]
print "\nInstituciones Privadas: %d" % (len(ppc[2]))
print " Cod --- Nombre Ins."
for pri in ppc[2]:
print pri + " --- " + ppc[2][pri]
print "\nInstituciones Concertadas: %d" % (len(ppc[3]))
print " Cod --- Nombre Ins."
for con in ppc[3]:
print con + " --- " + ppc[3][con]
##########################################################
# #
#            Detailed institution information            #
# #
##########################################################
if opcion == "5":
num = raw_input("\nIntroduzca el código de la institución: ")
for institucion in educacionr:
if institucion[cod_est] == num:
print "\nCódigo de distrito: %s"\
% (str(institucion[cod_dist]))
print "Nombre de distrito: %s"\
% (str(institucion[nom_dist]))
print "Tipo de gestión: %s"\
% (str(institucion[gestion]))
print "Código de departamento gestor: %s"\
% (str(institucion[cod_dept]))
print "Nombre de departamento gestor: %s"\
% (str(institucion[nom_dept]))
print "Código de institucion gestora: %s"\
% (str(institucion[cod_inst]))
print "Nombre de institución: %s"\
% (str(institucion[nom_inst]))
print "Código de institución: %s"\
% (str(institucion[cod_est]))
print "Código de zona: %s"\
% (str(institucion[cod_zona]))
print "Zona: %s"\
% (str(institucion[nom_zona]))
print "Nombre del barrio: %s"\
% (str(institucion[nom_barrio]))
if opcion == "6":
print "\n\nAdiós\n\n"
break
| 35.699507
| 78
| 0.412723
|
cdfc8c420400dfac25a6e569fe313402509af689
| 11,149
|
py
|
Python
|
Raws-Maji/Lapis ReLiGHTs [BD]/lapis_mv02.py
|
Ichunjo/encode-script
|
389a9f497e637eaade6f99acee816636856961d4
|
[
"MIT"
] | 36
|
2019-11-08T20:50:07.000Z
|
2022-03-23T05:43:55.000Z
|
Raws-Maji/Lapis ReLiGHTs [BD]/lapis_mv02.py
|
Ichunjo/encode-script
|
389a9f497e637eaade6f99acee816636856961d4
|
[
"MIT"
] | 1
|
2019-11-08T21:26:16.000Z
|
2019-11-08T21:26:16.000Z
|
Raws-Maji/Lapis ReLiGHTs [BD]/lapis_mv02.py
|
Ichunjo/encode-script
|
389a9f497e637eaade6f99acee816636856961d4
|
[
"MIT"
] | 7
|
2019-11-08T21:10:47.000Z
|
2022-03-28T21:57:04.000Z
|
"""Lapis script"""
__author__ = 'Vardë'
import os
import sys
import shlex
import subprocess
from typing import NamedTuple, Optional, Dict, Any
from pathlib import Path
from functools import partial
from adptvgrnMod import adptvgrnMod
import vardefunc as vdf
import muvsfunc as muvf
import mvsfunc as mvf
import G41Fun as gf
from vsutil import depth, get_y, iterate, get_w, insert_clip
import lvsfunc as lvf
import vapoursynth as vs
core = vs.core
core.num_threads = 12
class InfosBD(NamedTuple):
path: str
src: str
src_clip: vs.VideoNode
frame_start: int
frame_end: int
src_cut: vs.VideoNode
a_src: str
a_src_cut: str
a_enc_cut: str
name: str
output: str
chapter: str
output_final: str
def infos_bd(path, frame_start, frame_end) -> InfosBD:
src = path + '.m2ts'
src_clip = lvf.src(src, stream_index=0, ff_loglevel=4)
src_cut = src_clip[frame_start:frame_end] if (frame_start or frame_end) else src_clip
a_src = path + '_track_{}.wav'
a_src_cut = path + '_cut_track_{}.wav'
a_enc_cut = path + '_track_{}.m4a'
name = Path(sys.argv[0]).stem
output = name + '.265'
chapter = 'chapters/' + name + '.txt'
output_final = name + '.mkv'
return InfosBD(path, src, src_clip, frame_start, frame_end,
src_cut, a_src, a_src_cut, a_enc_cut,
name, output, chapter, output_final)
JPBD = infos_bd(r'[BDMV][201223][GNXA-2293][Lapis_Re_LiGHTs][vol.3 Fin]\LAPIS_RE_LIGHTS_3\BDMV\STREAM\00018', 24, 2477)
def hybrid_denoise(clip: vs.VideoNode, knlm_h: float = 0.5, sigma: float = 2,
knlm_args: Optional[Dict[str, Any]] = None,
bm3d_args: Optional[Dict[str, Any]] = None)-> vs.VideoNode:
"""Denoise luma with BM3D and chroma with knlmeansCL
Args:
clip (vs.VideoNode): Source clip.
knlm_h (float, optional): h parameter in knlm.KNLMeansCL. Defaults to 0.5.
sigma (float, optional): Sigma parameter in mvf.BM3D. Defaults to 2.
knlm_args (Optional[Dict[str, Any]], optional): Optional extra arguments for knlm.KNLMeansCL. Defaults to None.
bm3d_args (Optional[Dict[str, Any]], optional): Optional extra arguments for mvf.BM3D. Defaults to None.
Returns:
vs.VideoNode: Denoised clip
"""
knargs = dict(a=2, d=3, device_type='gpu', device_id=0, channels='UV')
if knlm_args is not None:
knargs.update(knlm_args)
b3args = dict(radius1=1, profile1='fast')
if bm3d_args is not None:
b3args.update(bm3d_args)
luma = get_y(clip)
luma = mvf.BM3D(luma, sigma, **b3args)
chroma = core.knlm.KNLMeansCL(clip, h=knlm_h, **knargs)
return vdf.merge_chroma(luma, chroma)
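# Example usage (hypothetical parameters, assuming a YUV clip named `clip`):
#   den = hybrid_denoise(clip, knlm_h=0.4, sigma=1.5, knlm_args=dict(d=2))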
def eedi3_upscale(clip: vs.VideoNode, correct_shift: bool = True)-> vs.VideoNode:
nnargs: Dict[str, Any] = dict(nsize=4, nns=4, qual=2, etype=1, pscrn=1)
eeargs: Dict[str, Any] = dict(alpha=0.2, beta=0.25, gamma=1000, nrad=2, mdis=20)
clip = clip.std.Transpose()
clip = clip.eedi3m.EEDI3(0, True, sclip=clip.nnedi3.nnedi3(0, True, **nnargs), **eeargs)
clip = clip.std.Transpose()
clip = clip.eedi3m.EEDI3(0, True, sclip=clip.nnedi3.nnedi3(0, True, **nnargs), **eeargs)
return core.resize.Bicubic(clip, src_top=.5, src_left=.5) if correct_shift else clip
def sraa_eedi3(clip: vs.VideoNode, rep: Optional[int] = None, **eedi3_args: Any)-> vs.VideoNode:
"""Drop half the field with eedi3+nnedi3 and interpolate them.
Args:
clip (vs.VideoNode): Source clip.
rep (Optional[int], optional): Repair mode. Defaults to None.
Returns:
vs.VideoNode: AA'd clip
"""
nnargs: Dict[str, Any] = dict(nsize=0, nns=3, qual=1)
eeargs: Dict[str, Any] = dict(alpha=0.2, beta=0.6, gamma=40, nrad=2, mdis=20)
eeargs.update(eedi3_args)
eedi3_fun, nnedi3_fun = core.eedi3m.EEDI3, core.nnedi3cl.NNEDI3CL
flt = core.std.Transpose(clip)
flt = eedi3_fun(flt, 0, False, sclip=nnedi3_fun(flt, 0, False, False, **nnargs), **eeargs)
flt = core.std.Transpose(flt)
flt = eedi3_fun(flt, 0, False, sclip=nnedi3_fun(flt, 0, False, False, **nnargs), **eeargs)
if rep:
flt = core.rgsf.Repair(flt, clip, rep)
return flt
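# Example usage (hypothetical clip and parameters):
#   aa = sraa_eedi3(upscaled, rep=13, alpha=0.3, beta=0.5, gamma=40)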
def do_filter():
"""Vapoursynth filtering"""
src = JPBD.src_cut
src = depth(src, 16)
out = src
h = 846
w = get_w(h)
cubic_filters = ['catrom', 'mitchell', 'robidoux', 'robidoux sharp']
cubic_filters = [vdf.get_bicubic_params(cf) for cf in cubic_filters]
degrain = hybrid_denoise(out, 0.35, 1.2, dict(a=2, d=1))
out = degrain
y = get_y(out)
y32 = depth(y, 32)
lineart = vdf.edge_detect(y32, 'kirsch', 0.055, (1, 1)).std.Median().std.Inflate()
descale_clips = [core.descale.Debicubic(y32, w, h, b, c) for b, c in cubic_filters]
descale = core.std.Expr(descale_clips, 'x y z a min min min x y z max max min')
conv = core.std.Convolution(descale, [1, 2, 1, 2, 0, 2, 1, 2, 1])
thr, coef = 0.013, 3
descale_fix = core.std.Expr([descale, conv], f'x y - abs {thr} < y x ?').std.PlaneStats()
adapt_mask = core.adg.Mask(descale_fix, 12).std.Invert().std.Expr(f'x 0.5 - {coef} * 0.5 + 0 max 1 min')
descale = core.std.MaskedMerge(descale, descale_fix, adapt_mask)
upscale = vdf.fsrcnnx_upscale(descale, w*2, h*2, r'shaders\FSRCNNX_x2_56-16-4-1.glsl', upscaler_smooth=eedi3_upscale,
profile='zastin', sharpener=partial(gf.DetailSharpen, sstr=1.25, power=4))
aa_strong = sraa_eedi3(upscale, 13, alpha=0.3, beta=0.5, gamma=40)
aa = aa_strong
down = muvf.SSIM_downsample(aa, src.width, src.height, filter_param_a=0, filter_param_b=0)
upscale = depth(
core.std.MaskedMerge(y32, down, lineart), 16
)
merged = vdf.merge_chroma(upscale, out)
out = merged
y = get_y(out)
detail_light_mask = lvf.denoise.detail_mask(y, brz_a=2500, brz_b=1200)
pf = iterate(out, core.std.Maximum, 2).std.Convolution([10] * 9, planes=0)
diff = core.std.MakeDiff(out, pf)
deband = core.f3kdb.Deband(pf, 17, 36, 36, 36, 12, 12, 2, keep_tv_range=True, output_depth=16)
deband = core.std.MergeDiff(deband, diff)
deband = core.std.MaskedMerge(deband, out, detail_light_mask)
out = deband
grain = adptvgrnMod(out, 0.25, 0.15, size=out.height/h, sharp=80, luma_scaling=10, static=True)
out = grain
# # Restore 1080p stuff
ref = src
rescale_mask = vdf.drm(ref, h, mthr=65, sw=4, sh=4)
credit = out
credit = core.std.MaskedMerge(credit, ref, rescale_mask, 0)
credit = lvf.rfs(credit, ref, [(719, 806), (2357, src.num_frames-1)])
out = credit
return depth(out, 10).std.Limiter(16<<2, [235<<2, 240<<2])
def sec_to_time(secs):
hours = secs / 3600
minutes = (secs % 3600) / 60
secs = secs % 60
return "%02d:%02d:%05.4f" % (hours, minutes, secs)
def do_encode(clip):
"""Compression with x26X"""
# vdf.generate_keyframes(JPBD.src_cut, JPBD.name + '_keyframes.txt')
if not os.path.isfile(JPBD.output):
print('\n\n\nVideo encoding')
# bits = clip.format.bits_per_sample
# x265_cmd = f'x265 -o {JPBD.output} - --y4m' + ' '
# x265_cmd += f'--csv {JPBD.name}_log_x265.csv --csv-log-level 2' + ' '
# x265_cmd += '--preset veryslow' + ' '
# x265_cmd += f'--frames {clip.num_frames} --fps 24000/1001 --output-depth 10' + ' '
# x265_cmd += '--high-tier --ref 6' + ' '
# x265_cmd += '--rd 6 --ctu 64 --min-cu-size 8 --limit-refs 0 --no-limit-modes --rect --amp --no-early-skip --rskip 0 --tu-intra-depth 3 --tu-inter-depth 3 --rd-refine --rdoq-level 2 --limit-tu 0' + ' '
# x265_cmd += '--max-merge 5 --me star --subme 7 --merange 57 --weightb' + ' '
# x265_cmd += '--no-strong-intra-smoothing' + ' '
# x265_cmd += '--psy-rd 2.0 --psy-rdoq 1.5 --no-open-gop --keyint 360 --min-keyint 24 --scenecut 45 --rc-lookahead 120 --b-adapt 2 --bframes 16' + ' '
# x265_cmd += '--crf 15 --aq-mode 3 --aq-strength 1.0 --cutree --qcomp 0.70' + ' '
# x265_cmd += '--deblock=-1:-1 --no-sao --no-sao-non-deblock' + ' '
# x265_cmd += '--sar 1 --range limited --colorprim 1 --transfer 1 --colormatrix 1 --min-luma 64 --max-luma 940' + ' '
# print("Encoder command: ", " ".join(shlex.split(x265_cmd)), "\n")
# process = subprocess.Popen(shlex.split(x265_cmd), stdin=subprocess.PIPE)
# clip.output(process.stdin, y4m=True, progress_update=lambda value, endvalue:
# print(f"\rVapourSynth: {value}/{endvalue} ~ {100 * value // endvalue}% || Encoder: ", end=""))
# process.communicate()
ffv1_args = [
'ffmpeg', '-i', '-', '-vcodec', 'ffv1', '-coder', '1', '-context', '0',
'-g', '1', '-level', '3', '-threads', '8',
'-slices', '24', '-slicecrc', '1', JPBD.name + "_lossless.mkv"
]
print("Encoder command: ", " ".join(ffv1_args), "\n")
process = subprocess.Popen(ffv1_args, stdin=subprocess.PIPE)
clip.output(process.stdin, y4m=True, progress_update=lambda value, endvalue:
print(f"\rVapourSynth: {value}/{endvalue} ~ {100 * value // endvalue}% || Encoder: ", end=""))
process.communicate()
print('\n\n\nAudio extraction')
eac3to_args = ['eac3to', JPBD.src, '2:', JPBD.a_src.format(1), '-log=NUL']
subprocess.run(eac3to_args, text=True, check=True, encoding='utf-8')
qaac_args = ['--no-delay', '--no-optimize', '--threading', '--ignorelength',
'--start', sec_to_time(JPBD.frame_start / (clip.fps_num/clip.fps_den)),
'--end', sec_to_time(JPBD.frame_end / (clip.fps_num/clip.fps_den))]
qaac_args_more = ['qaac', JPBD.a_src.format(1), '-V', '127', *qaac_args, '-o', JPBD.a_enc_cut.format(1)]
subprocess.run(qaac_args_more, text=True, check=True, encoding='utf-8')
ffprobe_args = ['ffprobe', '-loglevel', 'quiet', '-show_entries', 'format_tags=encoder', '-print_format', 'default=nokey=1:noprint_wrappers=1', JPBD.a_enc_cut.format(1)]
encoder_name = subprocess.check_output(ffprobe_args, shell=True, encoding='utf-8')
f = open("tags_aac.xml", 'w')
f.writelines(['<?xml version="1.0"?>', '<Tags>', '<Tag>', '<Targets>', '</Targets>',
'<Simple>', '<Name>ENCODER</Name>', f'<String>{encoder_name}</String>', '</Simple>',
'</Tag>', '</Tags>'])
f.close()
print('\nFinal muxing')
mkv_args = ['mkvmerge', '-o', JPBD.output_final,
'--track-name', '0:HEVC BDRip by Vardë@Raws-Maji', '--language', '0:jpn', JPBD.output,
'--tags', '0:tags_aac.xml', '--track-name', '0:AAC 2.0', '--language', '0:jpn', JPBD.a_enc_cut.format(1)]
subprocess.run(mkv_args, text=True, check=True, encoding='utf-8')
# Clean up
files = [JPBD.a_src, JPBD.a_src_cut.format(1),
JPBD.a_enc_cut.format(1), 'tags_aac.xml']
for file in files:
if os.path.exists(file):
os.remove(file)
if __name__ == '__main__':
FILTERED = do_filter()
do_encode(FILTERED)
| 37.039867
| 210
| 0.622388
|
a6ad55baa5266055ab5f388c430976a0ff7119d2
| 9
|
py
|
Python
|
test.py
|
eaeschl/Web-Python-html-css
|
36fe0c5676477e4a963932d35961565f5e016d6b
|
[
"MIT"
] | null | null | null |
test.py
|
eaeschl/Web-Python-html-css
|
36fe0c5676477e4a963932d35961565f5e016d6b
|
[
"MIT"
] | null | null | null |
test.py
|
eaeschl/Web-Python-html-css
|
36fe0c5676477e4a963932d35961565f5e016d6b
|
[
"MIT"
] | null | null | null |
print()
| 4.5
| 8
| 0.555556
|
61129eaeb84785803d8c3d796b2d60d5d4e77c9f
| 7,800
|
py
|
Python
|
GitSvnServer/server.py
|
slonopotamus/git_svn_server
|
80889d9557c99873ceaa58e5260b8edfa30bcffb
|
[
"BSD-3-Clause"
] | 1
|
2016-01-24T11:59:25.000Z
|
2016-01-24T11:59:25.000Z
|
GitSvnServer/server.py
|
slonopotamus/git_svn_server
|
80889d9557c99873ceaa58e5260b8edfa30bcffb
|
[
"BSD-3-Clause"
] | null | null | null |
GitSvnServer/server.py
|
slonopotamus/git_svn_server
|
80889d9557c99873ceaa58e5260b8edfa30bcffb
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
import re
from SocketServer import *
import signal
import sys
import os
import socket
import traceback
from GitSvnServer.config import load_config
import auth
import client
import command
import editor
import report
import generate as gen
from errors import *
addr_family = socket.AF_INET
all_interfaces = "0.0.0.0"
if socket.has_ipv6:
addr_family = socket.AF_INET6
all_interfaces = "::"
ipv4_re = re.compile(r'\d{1,3}(\.\d{1,3}){3,3}')
def is_ipv4_addr(ip):
return ipv4_re.match(ip) is not None
def get_address(ip, port):
if ip is None:
ip = all_interfaces
elif socket.has_ipv6 and is_ipv4_addr(ip):
ip = '::ffff:%s' % ip
if socket.has_ipv6:
address = (ip, port, 0, 0)
else:
address = (ip, port)
return address
class SvnServer(ThreadingTCPServer):
address_family = addr_family
allow_reuse_address = True
url_re = re.compile(r'^svn://(?P<host>[^/]+)/(?P<path>.*?)\s*$')
def __init__(self, options):
self.options = options
self.log = options.log
self.repo_map, self.users = load_config(options.config)
address = get_address(options.ip, options.port)
ThreadingTCPServer.__init__(self, address, SvnRequestHandler)
def start(self):
if self.options.debug:
self.log = None
if self.options.foreground or self.options.debug:
if self.options.pidfile is not None:
pf = open(self.options.pidfile, 'w')
pf.write('%d\n' % os.getpid())
pf.close()
return self.run()
pid = os.fork()
if pid == 0:
self.run()
os._exit(0)
if self.options.pidfile is not None:
pf = open(self.options.pidfile, 'w')
pf.write('%d\n' % pid)
pf.close()
def stop(self, *args):
print 'stopped serving'
if self.log is not None:
sys.stdout.close()
sys.exit(0)
def run(self):
signal.signal(signal.SIGTERM, self.stop)
if self.log is not None:
sys.stdout = open(self.log, 'a')
sys.stderr = sys.stdout
print 'start serving'
try:
self.serve_forever()
except KeyboardInterrupt:
pass
print 'stopped serving'
if self.log is not None:
sys.stdout.close()
def find_repo(self, url):
url_m = self.url_re.match(url)
if url_m is None:
return None
host = url_m.group('host')
path = url_m.group('path')
for base, repo in self.repo_map.items():
if path.startswith(base + '/'):
return repo, path[len(base) + 1:], 'svn://%s/%s' % (host, base)
elif path == base:
return repo, '', 'svn://%s/%s' % (host, base)
return None, None, None
class SvnRequestHandler(StreamRequestHandler):
def __init__(self, request, client_address, server):
"""
:type server: SvnServer
"""
self.mode = 'connect'
self.client_caps = None
self.repos = None
self.server = server
self.auth = None
self.data = None
self.base_url = None
self.url = None
self.user = None
self.command = None
self.options = server.options
StreamRequestHandler.__init__(self, request, client_address, server)
def debug(self, msg, send=False):
if not self.options.show_messages:
return
d = '<'
if send:
d = '>'
max_dbg_mlen = self.options.max_message_debug_len
if max_dbg_mlen > 0 and len(msg) > max_dbg_mlen:
sys.stderr.write('%d%s%s...\n' % (os.getpid(), d, msg[:max_dbg_mlen]))
else:
sys.stderr.write('%d%s%s\n' % (os.getpid(), d, msg))
def set_mode(self, mode):
if mode not in ['connect', 'auth', 'announce',
'command', 'editor', 'report']:
raise ModeError("Unknown mode '%s'" % mode)
self.mode = mode
def read_msg(self):
t = self.rfile.read(1)
while t in [' ', '\n', '\r']:
t = self.rfile.read(1)
if len(t) == 0:
raise EOF()
if t != '(':
raise ReadError(t)
depth = 1
while depth > 0:
ch = self.rfile.read(1)
if ch == '(':
depth += 1
if ch == ')':
depth -= 1
t += ch
return t
def read_str(self):
ch = self.rfile.read(1)
if len(ch) == 0:
raise EOF
l = ""
while ch not in [':', '']:
l += ch
ch = self.rfile.read(1)
bytes = int(l)
data = ''
while len(data) < bytes:
s = self.rfile.read(bytes - len(data))
if len(s) == 0:
raise EOF
data += s
self.debug(data)
return data
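    # Illustration: for the wire form "5:hello", read_str() first reads the
    # length prefix up to ':' and then exactly that many bytes, returning
    # "hello".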
def send_msg(self, msg):
msg = '%s\n' % msg
self.debug(msg, send=True)
self.wfile.write(msg)
def send_server_id(self):
self.send_msg(gen.success(gen.string(self.repos.uuid),
gen.string(self.base_url)))
def handle(self):
sys.stderr.write('%d: -- NEW CONNECTION --\n' % os.getpid())
msg = None
try:
while True:
try:
if self.mode == 'connect':
self.url, self.client_caps, self.repos, self.base_url = client.connect(self)
if self.client_caps is None or self.repos is None:
return
self.mode = 'auth'
elif self.mode == 'auth':
if self.user is None:
self.user = auth.perform_auth(self, self.server.users)
self.mode = 'announce'
else:
self.send_msg(gen.success(gen.list(), gen.string('')))
self.mode = self.data
self.data = None
if self.user is None:
return
elif self.mode == 'announce':
self.send_server_id()
self.mode = 'command'
elif self.mode == 'command':
if self.command is None:
self.command = command.process(self)
else:
self.command = self.command.process()
elif self.mode == 'editor':
editor.process(self)
elif self.mode == 'report':
report.process(self)
else:
raise ModeError("unknown mode '%s'" % self.mode)
except ChangeMode as cm:
self.mode = cm.args[0]
if len(cm.args) > 1:
self.data = cm.args[1]
except ClientError as e:
self.send_msg(gen.error(210001, str(e)))
except EOF:
msg = 'EOF'
except socket.error as e:
errno, msg = e
except Exception:
try:
self.send_msg(gen.error(235000, traceback.format_exc()))
except Exception as e1:
print e1
raise
sys.stderr.write('%d: -- CLOSE CONNECTION (%s) --\n' %
(os.getpid(), msg))
def finish(self):
try:
StreamRequestHandler.finish(self)
except socket.error:
pass
| 26.712329
| 100
| 0.488077
|
dfc5a92b608d2e4a264e67abe17b6ef588d1bb07
| 1,343
|
py
|
Python
|
pyro_models/arm/radon_intercept.py
|
jpchen/pyro-models
|
b9e6ae6271e6cd622fbb4d34d67c450d5a954c9b
|
[
"Apache-2.0"
] | 30
|
2019-02-22T03:03:18.000Z
|
2022-01-22T15:57:37.000Z
|
pyro_models/arm/radon_intercept.py
|
jpchen/pyro-models
|
b9e6ae6271e6cd622fbb4d34d67c450d5a954c9b
|
[
"Apache-2.0"
] | 7
|
2019-02-26T18:28:57.000Z
|
2021-06-11T17:21:06.000Z
|
pyro_models/arm/radon_intercept.py
|
jpchen/pyro-models
|
b9e6ae6271e6cd622fbb4d34d67c450d5a954c9b
|
[
"Apache-2.0"
] | 8
|
2019-02-25T22:06:14.000Z
|
2022-02-18T23:19:49.000Z
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
# model file: example-models/ARM/Ch.12/radon_intercept.stan
import torch
import pyro
import pyro.distributions as dist
def init_vector(name, dims=None):
return pyro.sample(name, dist.Normal(torch.zeros(dims), 0.2 * torch.ones(dims)).to_event(1))
def validate_data_def(data):
assert 'N' in data, 'variable not found in data: key=N'
assert 'J' in data, 'variable not found in data: key=J'
assert 'county' in data, 'variable not found in data: key=county'
assert 'y' in data, 'variable not found in data: key=y'
# initialize data
N = data["N"]
J = data["J"]
county = data["county"]
y = data["y"]
def init_params(data):
params = {}
return params
def model(data, params):
# initialize data
N = data["N"]
J = data["J"]
county = data["county"].long() - 1
y = data["y"]
# initialize transformed parameters
sigma_y = pyro.sample("sigma", dist.HalfCauchy(2.5))
mu_a = pyro.sample("mu_a", dist.Normal(0., 1.))
sigma_a = pyro.sample("sigma_b", dist.HalfCauchy(2.5))
with pyro.plate("J", J):
a = pyro.sample("a", dist.Normal(mu_a, sigma_a))
with pyro.plate("data", N):
y_hat = a[county]
y = pyro.sample('y', dist.Normal(y_hat, sigma_y), obs=y)
| 29.844444
| 96
| 0.638868
|
08b4cc80bf936c0bf8043a431d40f01fdf94e409
| 2,338
|
py
|
Python
|
aliyun-python-sdk-rds/aliyunsdkrds/request/v20140815/ModifyReadonlyInstanceDelayReplicationTimeRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 1,001
|
2015-07-24T01:32:41.000Z
|
2022-03-25T01:28:18.000Z
|
aliyun-python-sdk-rds/aliyunsdkrds/request/v20140815/ModifyReadonlyInstanceDelayReplicationTimeRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 363
|
2015-10-20T03:15:00.000Z
|
2022-03-08T12:26:19.000Z
|
aliyun-python-sdk-rds/aliyunsdkrds/request/v20140815/ModifyReadonlyInstanceDelayReplicationTimeRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 682
|
2015-09-22T07:19:02.000Z
|
2022-03-22T09:51:46.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkrds.endpoint import endpoint_data
class ModifyReadonlyInstanceDelayReplicationTimeRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Rds', '2014-08-15', 'ModifyReadonlyInstanceDelayReplicationTime','rds')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_ReadSQLReplicationTime(self):
return self.get_query_params().get('ReadSQLReplicationTime')
def set_ReadSQLReplicationTime(self,ReadSQLReplicationTime):
self.add_query_param('ReadSQLReplicationTime',ReadSQLReplicationTime)
def get_DBInstanceId(self):
return self.get_query_params().get('DBInstanceId')
def set_DBInstanceId(self,DBInstanceId):
self.add_query_param('DBInstanceId',DBInstanceId)
| 37.709677
| 101
| 0.785287
|
4fb2dd2106244e3a5d6eb55bfba8341189f75677
| 4,457
|
py
|
Python
|
data/utils.py
|
Khoale1096/stupidNMT
|
894536c16dc7ff958aa5571828a89ecabfcb72d7
|
[
"BSD-3-Clause"
] | 31
|
2020-05-05T21:56:45.000Z
|
2022-02-24T19:08:19.000Z
|
data/utils.py
|
Khoale1096/stupidNMT
|
894536c16dc7ff958aa5571828a89ecabfcb72d7
|
[
"BSD-3-Clause"
] | 2
|
2019-10-02T15:23:55.000Z
|
2019-10-16T02:38:25.000Z
|
data/utils.py
|
Khoale1096/stupidNMT
|
894536c16dc7ff958aa5571828a89ecabfcb72d7
|
[
"BSD-3-Clause"
] | 5
|
2020-05-06T06:04:40.000Z
|
2020-12-29T14:09:31.000Z
|
'''
Utilities useful for datasets
'''
import pdb
import os
from functools import partial
from urllib.request import urlretrieve
import requests
from tqdm import tqdm
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.sampler import BatchSampler, RandomSampler, SequentialSampler
from data.sampler import SequenceLengthSampler
# See https://github.com/tqdm/tqdm#hooks-and-callbacks
class DownloadProgressBar(tqdm):
"""Provides `update_to(n)` which uses `tqdm.update(delta_n)`."""
def __init__(self, filename):
''' '''
super(DownloadProgressBar, self).__init__(
unit='B', unit_scale=True, miniters=1, desc=filename)
def update_to(self, blocks=1, block_size=1, total_size=None):
"""
blocks : int, optional
Number of blocks transferred so far [default: 1].
block_size : int, optional
Size of each block (in tqdm units) [default: 1].
total_size : int, optional
Total size (in tqdm units). If [default: None] remains unchanged.
"""
if total_size:
self.total = total_size
self.update(blocks * block_size - self.n) # will also set self.n = blocks * block_size
def maybe_download(filepath, url):
''' Download the requested URL to the requested path if it does not already exist '''
directory = os.path.dirname(filepath)
if not os.path.exists(directory):
os.makedirs(directory)
if os.path.exists(filepath):
return filepath
if 'drive.google.com' in url:
return download_from_google_drive(filepath, url)
else:
return download_url(filepath, url)
def download_url(filepath, url):
''' Downloads the given url to the specified file path. '''
filename = os.path.basename(filepath)
with DownloadProgressBar(filename) as progress:
urlretrieve(url, filepath, reporthook=progress.update_to)
return filepath
def download_from_google_drive(filepath, url):
'''
Downloads a file from Google Drive.
Apparently Google Drive may issue a warning about scanning for viruses and require confirmation
to continue the download.
'''
confirmation_token = None
session = requests.Session()
response = session.get(url, stream=True)
for key, value in response.cookies.items():
if key.startswith("download_warning"):
confirmation_token = value
if confirmation_token:
url = url + "&confirm=" + confirmation_token
response = session.get(url, stream=True)
total_size = int(response.headers.get('content-length', 0))
block_size = 16 * 1024
filename = os.path.basename(filepath)
with open(filepath, "wb") as file:
with DownloadProgressBar(filename) as progress:
blocks = iter(
file.write(block)
for block in response.iter_content(block_size)
if block
)
for i, block in enumerate(blocks):
progress.update_to(i, block_size, total_size)
return filepath
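# Example (hypothetical file id and local path):
#   maybe_download('data/corpus.tar.gz',
#                  'https://drive.google.com/uc?export=download&id=<FILE_ID>')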
def get_dataloader(config, worker_init_fn=None, pin_memory=True, num_devices=1, shuffle=False):
''' Utility function that gets a data loader '''
#pdb.set_trace()
dataset = config.dataset(config, split=config.split).load()
if config.batch_method == 'token':
# Calculate batch sizes for each device. Potentially reduce the batch size on device 0 as
# the optimization step (all the gradients from all devices) happens on device 0.
batch_sizes = [config.batch_size - config.batch_size_buffer]
batch_sizes += [config.batch_size] * (num_devices - 1)
batch_sampler = SequenceLengthSampler(
batch_sizes,
[(len(d['input']), len(d['target'])) for d in dataset.data],
shuffle=shuffle,
granularity=config.token_bucket_granularity
)
elif config.batch_method == 'example':
sampler_fn = RandomSampler if shuffle else SequentialSampler
batch_sampler = BatchSampler(
sampler_fn(dataset),
config.batch_size,
False
)
else:
raise ValueError('Unknown batch method!')
return DataLoader(
dataset,
batch_sampler=batch_sampler,
collate_fn=partial(dataset.collate, sort=True),
num_workers=num_devices,
pin_memory=pin_memory,
worker_init_fn=worker_init_fn
)
| 33.261194
| 99
| 0.661656
|
e1efef18ae62e8d751740269df59186ecf836dd8
| 13,548
|
py
|
Python
|
ncachefactory/cachemanager.py
|
luckylyk/ncachefactory
|
3b26866e552bf6462024ce3febef31f4eec2ab69
|
[
"BSD-3-Clause-Clear"
] | 1
|
2019-09-25T17:16:06.000Z
|
2019-09-25T17:16:06.000Z
|
ncachefactory/cachemanager.py
|
luckylyk/ncachemanager
|
3b26866e552bf6462024ce3febef31f4eec2ab69
|
[
"BSD-3-Clause-Clear"
] | 1
|
2020-03-15T10:34:34.000Z
|
2020-05-22T19:20:26.000Z
|
ncachefactory/cachemanager.py
|
luckylyk/ncachefactory
|
3b26866e552bf6462024ce3febef31f4eec2ab69
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
"""
This module sits on top of ncache and versioning.
It combines both to work in a defined workspace.
This is the main api used by the ui and can be used from external scripts as
well.
"""
import os
import shutil
from datetime import datetime
from functools import partial
import subprocess
from maya import cmds
import maya.api.OpenMaya as om2
from ncachefactory.versioning import (
create_cacheversion, ensure_workspace_folder_exists, find_file_match,
clear_cacheversion_content, cacheversion_contains_node,
move_playblast_to_cacheversion, extract_xml_attributes)
from ncachefactory.mesh import (
create_mesh_for_geo_cache, attach_geo_cache,
is_deformed_mesh_too_stretched)
from ncachefactory.ncloth import (
find_input_mesh_dagpath, clean_inputmesh_connection,
find_output_mesh_dagpath)
from ncachefactory.ncache import (
import_ncache, record_ncache, DYNAMIC_NODES, clear_cachenodes,
list_connected_cachefiles, list_connected_cacheblends, append_ncache)
from ncachefactory.playblast import (
start_playblast_record, stop_playblast_record)
from ncachefactory.attributes import (
save_pervertex_maps, list_node_attributes_values,
clean_namespaces_in_attributes_dict, ORIGINAL_INPUTSHAPE_ATTRIBUTE,
filter_invisible_nodes_for_manager)
from ncachefactory.optionvars import MEDIAPLAYER_PATH_OPTIONVAR
ALTERNATE_INPUTSHAPE_GROUP = "alternative_inputshapes"
ALTERNATE_RESTSHAPE_GROUP = "alternative_restshapes"
INPUTSHAPE_SUFFIX = "_alternate_inputshape"
RESTSHAPE_SUFFIX = "_alternate_restshapes"
CACHENODENAME_SUFFIX = "_CN000"
def create_and_record_cacheversion(
workspace, start_frame, end_frame, comment=None, name=None,
nodes=None, behavior=0, evaluate_every_frame=1.0, save_every_evaluation=1,
playblast=False, playblast_viewport_options=None):
cloth_nodes = cmds.ls(nodes, type="nCloth")
nodes = nodes or cmds.ls(type=DYNAMIC_NODES)
nodes = filter_invisible_nodes_for_manager(nodes)
workspace = ensure_workspace_folder_exists(workspace)
cacheversion = create_cacheversion(
workspace=workspace,
name=name,
comment=comment,
nodes=nodes,
start_frame=start_frame,
end_frame=end_frame,
timespent=None)
if playblast is True:
start_playblast_record(
directory=cacheversion.directory, **playblast_viewport_options)
save_pervertex_maps(nodes=cloth_nodes, directory=cacheversion.directory)
start_time = datetime.now()
record_ncache(
nodes=nodes,
start_frame=start_frame,
end_frame=end_frame,
output=cacheversion.directory,
behavior=behavior,
evaluate_every_frame=evaluate_every_frame,
save_every_evaluation=save_every_evaluation)
end_time = datetime.now()
timespent = (end_time - start_time).total_seconds()
time = cmds.currentTime(query=True)
cacheversion.set_range(nodes, start_frame=start_frame, end_frame=time)
cacheversion.set_timespent(nodes=nodes, seconds=timespent)
if playblast is True:
temp_path = stop_playblast_record(cacheversion.directory)
move_playblast_to_cacheversion(temp_path, cacheversion)
return cacheversion
def record_in_existing_cacheversion(
cacheversion, start_frame, end_frame, nodes=None, behavior=0,
evaluate_every_frame=1.0, save_every_evaluation=1, playblast=False,
playblast_viewport_options=None):
if playblast is True:
start_playblast_record(
directory=cacheversion.directory,
**playblast_viewport_options)
cloth_nodes = cmds.ls(nodes, type="nCloth")
nodes = nodes or cmds.ls(type=DYNAMIC_NODES)
nodes = filter_invisible_nodes_for_manager(nodes)
save_pervertex_maps(nodes=cloth_nodes, directory=cacheversion.directory)
start_time = datetime.now()
record_ncache(
nodes=nodes,
start_frame=start_frame,
end_frame=end_frame,
output=cacheversion.directory,
behavior=behavior,
evaluate_every_frame=evaluate_every_frame,
save_every_evaluation=save_every_evaluation)
end_time = datetime.now()
timespent = (end_time - start_time).total_seconds()
time = cmds.currentTime(query=True)
cacheversion.set_range(nodes, start_frame=start_frame, end_frame=time)
cacheversion.set_timespent(nodes=nodes, seconds=timespent)
cacheversion.update_modification_time()
if playblast is True:
temp_path = stop_playblast_record(cacheversion.directory)
move_playblast_to_cacheversion(temp_path, cacheversion)
def append_to_cacheversion(
cacheversion, nodes=None, evaluate_every_frame=1.0,
save_every_evaluation=1, playblast=False, playblast_viewport_options=None):
if playblast is True:
start_playblast_record(
directory=cacheversion.directory,
**playblast_viewport_options)
nodes = nodes or cmds.ls(type=DYNAMIC_NODES)
nodes = filter_invisible_nodes_for_manager(nodes)
start_time = datetime.now()
append_ncache(
nodes=nodes,
evaluate_every_frame=evaluate_every_frame,
save_every_evaluation=save_every_evaluation)
end_time = datetime.now()
    # Add the seconds spent on the append cache to the time spent that was
    # already recorded.
timespent = (end_time - start_time).total_seconds()
for node in cacheversion.infos.get('nodes'):
if node not in nodes:
continue
seconds = cacheversion.infos.get('nodes')[node]["timespent"] + timespent
cacheversion.set_timespent(nodes=[node], seconds=seconds)
cacheversion.update_modification_time()
    # Update the cached range in the cache infos if the appended cache
    # finished beyond the end of the original cache range.
time = cmds.currentTime(query=True)
end_frame = cacheversion.infos.get('nodes')[node]['range'][1]
if time > end_frame:
cacheversion.set_range(nodes=nodes, end_frame=time)
if playblast is True:
temp_path = stop_playblast_record(cacheversion.directory)
move_playblast_to_cacheversion(temp_path, cacheversion)
def plug_cacheversion(cacheversion, groupname, suffix, inattr, nodes=None):
""" This function will plug a ncache to a given attribute.
Basically, it create a static mesh based on the dynamic node input.
Import the ncache as geo cache file and drive the created mesh with.
And finally connect it to the input attribute given.
"""
if not cmds.objExists(groupname):
cmds.group(name=groupname, world=True, empty=True)
group_content = cmds.listRelatives(groupname)
group_content = cmds.ls(
group_content,
shapes=True,
dag=True,
noIntermediate=True)
nodes = nodes or cmds.ls(type='nCloth')
new_input_meshes = []
for node in nodes:
if not cacheversion_contains_node(node, cacheversion):
continue
ensure_original_input_is_stored(node)
input_mesh = get_orignial_input_mesh(node)
mesh = create_mesh_for_geo_cache(input_mesh, suffix)
new_input_meshes.append(cmds.listRelatives(mesh, parent=True)[0])
xml_file = find_file_match(node, cacheversion, extension='xml')
attach_geo_cache(mesh, xml_file)
clean_inputmesh_connection(node, inattr)
cmds.connectAttr(mesh + '.worldMesh[0]', node + '.' + inattr)
cmds.parent(new_input_meshes, groupname)
    # Parse the original group content and clean up all the shapes which are
    # no longer used.
content_to_clean = [
cmds.listRelatives(node, parent=True)[0] for node in group_content
if not cmds.ls(cmds.listConnections(node, type='nCloth'))]
if content_to_clean:
cmds.delete(content_to_clean)
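# A minimal usage sketch (the node name below is hypothetical, not part of
# this module): the two wrappers that follow call plug_cacheversion() with the
# proper group, suffix and input attribute, e.g.
#   plug_cacheversion_to_inputmesh(cacheversion, nodes=["cloth_shirtShape"])
#   plug_cacheversion_to_restshape(cacheversion, nodes=["cloth_shirtShape"])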
def plug_cacheversion_to_inputmesh(cacheversion, nodes=None):
plug_cacheversion(
cacheversion=cacheversion,
groupname=ALTERNATE_INPUTSHAPE_GROUP,
suffix=INPUTSHAPE_SUFFIX,
inattr='inputMesh',
nodes=nodes)
def plug_cacheversion_to_restshape(cacheversion, nodes=None):
plug_cacheversion(
cacheversion=cacheversion,
groupname=ALTERNATE_RESTSHAPE_GROUP,
suffix=RESTSHAPE_SUFFIX,
inattr='restShapeMesh',
nodes=nodes)
def connect_cacheversion(cacheversion, nodes=None, behavior=0):
nodes = nodes or cmds.ls(type=DYNAMIC_NODES)
nodes = filter_invisible_nodes_for_manager(nodes)
for node in nodes:
if not cacheversion_contains_node(node, cacheversion):
continue
xml_file = find_file_match(node, cacheversion, extension='xml')
if not xml_file:
cmds.warning("no cache to connect for {}".format(xml_file))
continue
cachefile = import_ncache(node, xml_file, behavior=behavior)
        cmds.rename(cachefile, cacheversion.name + CACHENODENAME_SUFFIX)
def delete_cacheversion(cacheversion):
cachenames = [f[:-4] for f in cacheversion.get_files('mcc')]
clear_cachenodes(cachenames=cachenames, workspace=cacheversion.workspace)
clear_cacheversion_content(cacheversion)
def filter_connected_cacheversions(nodes=None, cacheversions=None):
assert cacheversions is not None
nodes = nodes or []
blends = list_connected_cacheblends(nodes) or []
cachenodes = list_connected_cachefiles(nodes) or []
cachenodes += list_connected_cachefiles(blends) or []
directories = list({cmds.getAttr(n + '.cachePath') for n in cachenodes})
directories = [os.path.normpath(directory) for directory in directories]
return [
cacheversion for cacheversion in cacheversions
if os.path.normpath(cacheversion.directory) in directories]
def compare_node_and_version(node, cacheversion):
filename = find_file_match(node, cacheversion, extension='xml')
xml_attributes = extract_xml_attributes(filename)
xml_attributes = clean_namespaces_in_attributes_dict(xml_attributes)
node_attributes = list_node_attributes_values(node)
node_attributes = clean_namespaces_in_attributes_dict(node_attributes)
differences = {}
for key, value in xml_attributes.items():
current_value = node_attributes.get(key)
        # in case the value is stored in a format like "-1e5", it comes back
        # as a string instead of a float, so convert it back to float
        if isinstance(value, str):
            value = float(value)
        # values in the xml are slightly less precise than the current values
        # in maya, so compare within a tolerance rather than for exact equality
if current_value is None or abs(current_value - value) < 1e-6:
continue
differences[key] = (current_value, value)
return differences
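# Illustrative example of the mapping returned by compare_node_and_version()
# (attribute names and values below are hypothetical):
#   {"nClothShape1.stretchResistance": (20.0, 35.0)}
# where each tuple is (current value in the scene, value stored in the xml).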
def recover_original_inputmesh(nodes):
""" this function replug the original input in a cloth node if this one as
an alternate input connected. As an other simulation mesh """
nodes_to_clean = []
for node in nodes:
store_plug = node + '.' + ORIGINAL_INPUTSHAPE_ATTRIBUTE
stored_input_plugs = cmds.listConnections(
store_plug,
plugs=True,
connections=True)
if not stored_input_plugs:
cmds.warning('no stored input for ' + node)
continue
inputmeshattr = node + '.inputMesh'
current_inputs = cmds.listConnections(
inputmeshattr,
plugs=True,
connections=True)
if current_inputs:
cmds.disconnectAttr(current_inputs[1], inputmeshattr)
cmds.connectAttr(stored_input_plugs[1], inputmeshattr)
disconnected_node = current_inputs[1].split('.')[0]
if not cmds.listConnections(disconnected_node, source=True):
nodes_to_clean.append(disconnected_node)
if nodes_to_clean:
cmds.delete(nodes_to_clean)
def apply_settings(cacheversion, nodes):
for node in nodes:
filename = find_file_match(node, cacheversion, extension='xml')
xml_attributes = extract_xml_attributes(filename)
xml_attributes = clean_namespaces_in_attributes_dict(xml_attributes)
for key, value in xml_attributes.items():
attributes = cmds.ls([key, "*" + key, "*:" + key, "*:*:" + key])
for attribute in attributes:
try:
cmds.setAttr(attribute, value)
except RuntimeError:
msg = (
attribute + " is locked, connected, invalid or "
"doesn't in current scene. This attribute is skipped")
cmds.warning(msg)
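# Illustrative sketch of the wildcard lookup used in apply_settings() above:
# for a stored key such as "nClothShape1.stretchResistance" (hypothetical
# name), cmds.ls() is also queried with "*nClothShape1.stretchResistance",
# "*:nClothShape1.stretchResistance" and "*:*:nClothShape1.stretchResistance"
# so namespaced copies of the node are matched as well.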
def ensure_original_input_is_stored(dynamicnode):
store_plug = dynamicnode + '.' + ORIGINAL_INPUTSHAPE_ATTRIBUTE
if cmds.listConnections(store_plug):
# original input already saved
return
input_plug = dynamicnode + '.inputMesh'
input_mesh_connections = cmds.listConnections(input_plug, plugs=True)
if not input_mesh_connections:
raise ValueError("No input attract mesh found for " + dynamicnode)
cmds.connectAttr(input_mesh_connections[0], store_plug)
def get_orignial_input_mesh(dynamicnode):
store_plug = dynamicnode + '.' + ORIGINAL_INPUTSHAPE_ATTRIBUTE
connections = cmds.listConnections(store_plug, shapes=True)
if connections:
return connections[0]
return
if __name__ == "__main__":
create_and_record_cacheversion(
workspace="C:/test/chrfx",
nodes=None,
start_frame=0,
end_frame=100,
behavior=2,
name="Cache",
comment="salut")
| 39.043228
| 83
| 0.714275
|
1597ca52d7ead48a76678cbcd9f3401996623874
| 4,694
|
py
|
Python
|
heartbeat/heartbeat.py
|
WATORACE/dds-tunnel
|
85726c9742eed41befb7cde96bd3d58389b7a82f
|
[
"MIT"
] | null | null | null |
heartbeat/heartbeat.py
|
WATORACE/dds-tunnel
|
85726c9742eed41befb7cde96bd3d58389b7a82f
|
[
"MIT"
] | null | null | null |
heartbeat/heartbeat.py
|
WATORACE/dds-tunnel
|
85726c9742eed41befb7cde96bd3d58389b7a82f
|
[
"MIT"
] | null | null | null |
import argparse
import asyncio
import os
import rticonnextdds_connector as rti
import sys
import threading
from enum import IntEnum
from time import sleep, time
try:
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
except NameError:
SCRIPT_DIR = os.getcwd()
DOMAIN_ID_ENV_VAR = 'HEARTBEAT_DOMAIN_ID'
# rti connector is not thread-safe. Use an RLock to provide mutual exclusion between threads.
# https://community.rti.com/static/documentation/connector/1.0.0/api/python/threading.html
connector_lock = threading.RLock()
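# Sketch of the guarded-access pattern used throughout this script (the
# actual calls live in publishHeartbeat/subscribeToAck/responder below):
#   with connector_lock:
#       writer.instance.set_number("seq", 0)
#       writer.write()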
class MessageType(IntEnum):
"""
The type of the heartbeat. This must be synchronized with Heartbeat.xml.
"""
HEARTBEAT = 0
ACK = 1
async def publishHeartbeat(writer, writelog, period=1):
"""
In a loop: publish heartbeat, then wait `period` seconds
"""
current_seq = 0
while True:
print(f"Sending heartbeat seq {current_seq}")
with connector_lock:
writer.instance.set_number("seq", current_seq)
writer.instance.set_number("type", MessageType.HEARTBEAT)
writelog[current_seq] = time()
writer.write()
current_seq += 1
await asyncio.sleep(period)
async def subscribeToAck(reader, writelog):
while True:
current_time = time()
with connector_lock:
reader.take()
for sample in reader.samples.valid_data_iter:
msg_type = sample.get_number('type')
seq = int(sample.get_number("seq"))
if msg_type != MessageType.ACK:
continue
                outgoing_time = writelog.pop(seq, None)
if outgoing_time is None:
print(f"ACK: seq {seq}")
else:
print(f"ACK: seq {seq}, roundtrip time: {(current_time - outgoing_time) * 1000:.2f} ms")
await asyncio.sleep(0.001)
async def run(*coroutines):
await asyncio.gather(*coroutines)
def initiator(reader, writer):
# TODO: writelog does not remove old entries. This will continue to eat up memory.
writelog = {}
writer_coroutine = publishHeartbeat(writer, writelog)
reader_coroutine = subscribeToAck(reader, writelog)
runnable = run(writer_coroutine, reader_coroutine)
asyncio.run(runnable)
def responder(reader, writer):
while True:
try:
reader.wait(500) # milliseconds
except rti.TimeoutError:
pass
with connector_lock:
reader.take()
for sample in reader.samples.valid_data_iter:
msg_type = sample.get_number('type')
seq = int(sample.get_number("seq"))
if msg_type != MessageType.HEARTBEAT:
continue
print(f"HEARTBEAT: seq {seq}")
writer.instance.set_number("seq", seq)
writer.instance.set_number("type", MessageType.ACK)
writer.write()
def main():
parser = argparse.ArgumentParser(description='process start-tunnel arguments')
parser.add_argument("action", choices=["initiator", "responder"])
parser.add_argument("--domain_id", "-d", type=int,
help="The domain ID where the heartbeat will be sent/read from (Default 0)", default=0)
args = parser.parse_args()
print(args)
if os.name == 'nt':
# Setting os.environ does not propagate env into the connector on Windows. Prompt user to manually set the env.
env_domain_id = os.getenv(DOMAIN_ID_ENV_VAR)
if env_domain_id != str(args.domain_id):
sys.exit("Automatically setting domain_id in Windows is not supported. Please run `set {}={}` in cmd.exe (current value is {})".format(
DOMAIN_ID_ENV_VAR, args.domain_id, env_domain_id))
else:
os.environ[DOMAIN_ID_ENV_VAR] = str(args.domain_id)
with rti.open_connector(
config_name="HeartbeatParticipantLibrary::HeartbeatParticipant",
url=os.path.join(SCRIPT_DIR, "Heartbeat.xml")) as connector:
writer = connector.get_output("HeartbeatPublisher::HeartbeatWriter")
reader = connector.get_input("HeartbeatSubscriber::HeartbeatReader")
try:
print(f"Starting {args.action} on domain {os.getenv(DOMAIN_ID_ENV_VAR)}")
if args.action == 'initiator':
initiator(reader, writer)
elif args.action == 'responder':
responder(reader, writer)
else:
print(f"Unknown command {args.action}")
except KeyboardInterrupt:
print("Stopping...")
if __name__ == '__main__':
main()
| 34.514706
| 147
| 0.629527
|
1b38735078606e8ab55563bd6ab367910eed1669
| 3,818
|
py
|
Python
|
Code/splines_projection.py
|
AndreasMadsen/grace
|
bf472d30a2fac76145d3f68e819c92da4a1970ba
|
[
"MIT"
] | 1
|
2016-05-17T22:52:19.000Z
|
2016-05-17T22:52:19.000Z
|
Code/splines_projection.py
|
AndreasMadsen/grace
|
bf472d30a2fac76145d3f68e819c92da4a1970ba
|
[
"MIT"
] | null | null | null |
Code/splines_projection.py
|
AndreasMadsen/grace
|
bf472d30a2fac76145d3f68e819c92da4a1970ba
|
[
"MIT"
] | null | null | null |
import grace
import grace.times
import grace.ols
import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
worldImage = mpimg.imread('equirectangular.png')
days = grace.ols.time_vector()
days_all = np.linspace(np.min(days), np.max(days), np.max(days) - np.min(days))
X_all = grace.ols.design_matrix(days_all, frequencies = 3, splines = True)
H = grace.ols.hat_matrix(X_all, frequencies = 3, splines = True)
description = grace.ols.theta_description(frequencies = 3, splines = True)
initial = (24, 134)
# The complexity in this code, comes from the clickability on the world map
def onclick(event):
"""
This is the function executed when the window is clicked on. The main purpose
here is to filter invalid clicks and then call the redraw with the lat and
lon index.
"""
    if (event.xdata is None or event.ydata is None or event.button != 1 or
            event.xdata >= 360 or event.ydata >= 180): return
redraw(int(event.ydata), int(event.xdata))
def histval(X):
"""
Computes histogram values for a data series. plt.hist was not used
as this is not as updateable as just using plt.bars
"""
(frequencies, pos) = np.histogram(X, bins=np.arange(-3, 1, 0.05))
return (frequencies, (pos[:-1] + pos[1:]) / 2)
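# Illustrative sketch (values assumed, not computed here): with bin edges
# np.arange(-3, 1, 0.05), histval(np.zeros(10)) returns a count of 10 in the
# bin centered near 0.025 and zeros elsewhere, alongside the bin centers.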
# Create figure and attach mouse click handler
fig = plt.figure(figsize=(12, 9.3))
fig.canvas.mpl_connect('button_press_event', onclick)
# None of the code below plots anything (the histogram is a funny exception);
# the purpose is to initialize the window and then let redraw(lat, lon) do
# the actual data plotting.
# World map
plt.subplot(3,1,1)
plt.imshow(worldImage)
(point,) = plt.plot([], [], 'r*')
plt.ylim(179, 0)
plt.xlim(0, 359)
plt.xticks(np.arange(0, 360, 40), np.arange(-179.5, +179.5, 40))
plt.yticks(np.arange(0, 180, 20), np.arange(89.5, -89.5, -20))
plt.ylabel('latitude')
plt.xlabel('longitude')
plt.gca().xaxis.set_label_position('top')
# Scatter plot
plt.subplot(3,1,2)
(observe,) = plt.plot([], [], 'ro', label='Observations')
(estimat,) = plt.plot([], [], 'k-', label='Estimations')
plt.xlim(np.min(days), np.max(days))
date_ticks = np.linspace(np.min(days), np.max(days), 6).astype('int')
plt.xticks(date_ticks, grace.times.days_to_str(date_ticks))
plt.ylabel('EWH [m]')
# Histogram
plt.subplot(3,2,5)
(frequencies, pos) = histval(grace.grids[initial[0], initial[1], :])
hist_rects = plt.bar(pos, frequencies, pos[1] - pos[0], color='tomato')
plt.xlim(-3, 1)
plt.ylabel('frequency')
plt.xlabel('EWH [m]')
# Theta bars
plt.subplot(3,2,6)
theta_rects = plt.bar( np.arange(0, len(description)) + 0.5, np.zeros([len(description)]) )
plt.xlim(0, len(description) + 1)
plt.xticks(np.arange(1, len(description) + 1), description, fontsize = 9)
plt.setp(plt.xticks()[1], rotation=-90)
def redraw(latIndex, lonIndex):
"""
Sets the (x, y) data for the plots updates some
axies and then redraw the window canvas.
"""
print (latIndex, lonIndex)
# Get EWH values for this position
Y = grace.grids[latIndex, lonIndex, :].ravel()
Theta = grace.ols.theta_vector(Y)
# Update star on world map
plt.subplot(3,1,1)
point.set_ydata([latIndex])
point.set_xdata([lonIndex])
# Update scatter plot
plt.subplot(3,1,2)
observe.set_ydata(Y)
observe.set_xdata(days)
estimat.set_ydata((H * np.asmatrix(Y).T).A.ravel())
estimat.set_xdata(days_all)
plt.ylim(np.min(Y)*1.2, np.max(Y)*1.2)
    # Update histogram
plt.subplot(3,2,5)
(frequencies, pos) = histval(Y)
for rect, f in zip(hist_rects, frequencies):
rect.set_height(f)
plt.ylim(0, np.max(frequencies) * 1.1)
    # Update theta bars
plt.subplot(3,2,6)
for rect, v in zip(theta_rects, Theta.A.ravel()):
rect.set_height(v)
plt.ylim(np.min(Theta)*1.2, np.max(Theta)*1.2)
# Redraw window
fig.canvas.draw()
# Do inital redraw also
redraw(initial[0], initial[1])
# Show window
plt.show()
| 28.924242
| 91
| 0.701676
|
81f7252a16c7b660b0788cc1e584d8be47ad6c81
| 1,212
|
py
|
Python
|
clients/kratos/python/test/test_admin_create_identity_body.py
|
kolotaev/sdk
|
0dda1becd70be8d7b9d678321ebe780c1ba00485
|
[
"Apache-2.0"
] | null | null | null |
clients/kratos/python/test/test_admin_create_identity_body.py
|
kolotaev/sdk
|
0dda1becd70be8d7b9d678321ebe780c1ba00485
|
[
"Apache-2.0"
] | null | null | null |
clients/kratos/python/test/test_admin_create_identity_body.py
|
kolotaev/sdk
|
0dda1becd70be8d7b9d678321ebe780c1ba00485
|
[
"Apache-2.0"
] | null | null | null |
"""
Ory Kratos API
    Documentation for all public and administrative Ory Kratos APIs. Public and administrative APIs are exposed on different ports. Public APIs can face the public internet without any protection while administrative APIs should never be exposed without prior authorization. To protect the administrative API port you should use something like Nginx, Ory Oathkeeper, or any other technology capable of authorizing incoming requests. # noqa: E501
The version of the OpenAPI document: v0.7.0-alpha.1
Contact: hi@ory.sh
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import ory_kratos_client
from ory_kratos_client.model.admin_create_identity_body import AdminCreateIdentityBody
class TestAdminCreateIdentityBody(unittest.TestCase):
"""AdminCreateIdentityBody unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testAdminCreateIdentityBody(self):
"""Test AdminCreateIdentityBody"""
# FIXME: construct object with mandatory attributes with example values
# model = AdminCreateIdentityBody() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 32.756757
| 446
| 0.749175
|
e3229314b62acbba0f69bb94935aa371efd3144c
| 3,145
|
py
|
Python
|
basicts/options/GMAN/GMAN_PEMS-BAY.py
|
zezhishao/GuanCang_BasicTS
|
bbf82b9d08e82db78d4e9e9b11f43a676b54ad7c
|
[
"Apache-2.0"
] | null | null | null |
basicts/options/GMAN/GMAN_PEMS-BAY.py
|
zezhishao/GuanCang_BasicTS
|
bbf82b9d08e82db78d4e9e9b11f43a676b54ad7c
|
[
"Apache-2.0"
] | null | null | null |
basicts/options/GMAN/GMAN_PEMS-BAY.py
|
zezhishao/GuanCang_BasicTS
|
bbf82b9d08e82db78d4e9e9b11f43a676b54ad7c
|
[
"Apache-2.0"
] | null | null | null |
import os
from easydict import EasyDict
# architecture
from basicts.archs.GMAN_arch import GMAN
# runner
from basicts.runners.GMAN_runner import GMANRunner
from basicts.data.base_dataset import BaseDataset
from basicts.metrics.mae import masked_mae
from basicts.metrics.mape import masked_mape
from basicts.metrics.rmse import masked_rmse
from basicts.losses.losses import masked_l1_loss
from basicts.utils.serialization import load_node2vec_emb
CFG = EasyDict()
# ================= general ================= #
CFG.DESCRIPTION = 'GMAN model configuration'
CFG.RUNNER = GMANRunner
CFG.DATASET_CLS = BaseDataset
CFG.DATASET_NAME = "PEMS-BAY"
CFG.DATASET_TYPE = 'Traffic speed'
CFG.GPU_NUM = 1
CFG.METRICS = {
"MAE": masked_mae,
"RMSE": masked_rmse,
"MAPE": masked_mape
}
# ================= environment ================= #
CFG.ENV = EasyDict()
CFG.ENV.SEED = 1
CFG.ENV.CUDNN = EasyDict()
CFG.ENV.CUDNN.ENABLED = True
# ================= model ================= #
CFG.MODEL = EasyDict()
CFG.MODEL.NAME = 'GMAN'
CFG.MODEL.ARCH = GMAN
spatial_embed = load_node2vec_emb("datasets/" + CFG.DATASET_NAME + "/node2vec_emb.txt")
CFG.MODEL.PARAM = {
"SE": spatial_embed,
"L" : 1,
"K" : 8,
"d" : 8,
"num_his" : 12,
"bn_decay": 0.1
}
CFG.MODEL.FROWARD_FEATURES = [0, 1, 2]
CFG.MODEL.TARGET_FEATURES = [0]
# ================= optim ================= #
CFG.TRAIN = EasyDict()
CFG.TRAIN.LOSS = masked_l1_loss
CFG.TRAIN.OPTIM = EasyDict()
CFG.TRAIN.OPTIM.TYPE = "Adam"
CFG.TRAIN.OPTIM.PARAM= {
"lr":0.002,
"weight_decay":0.0001,
}
CFG.TRAIN.LR_SCHEDULER = EasyDict()
CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR"
CFG.TRAIN.LR_SCHEDULER.PARAM= {
"milestones":[1, 50, 80],
"gamma":0.5
}
# ================= train ================= #
CFG.TRAIN.CLIP = 5
CFG.TRAIN.NUM_EPOCHS = 100
CFG.TRAIN.CKPT_SAVE_DIR = os.path.join(
'checkpoints',
'_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)])
)
# train data
CFG.TRAIN.DATA = EasyDict()
CFG.TRAIN.NULL_VAL = 0.0
## read data
CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
## dataloader args, optional
CFG.TRAIN.DATA.BATCH_SIZE = 16
CFG.TRAIN.DATA.PREFETCH = False
CFG.TRAIN.DATA.SHUFFLE = True
CFG.TRAIN.DATA.NUM_WORKERS = 2
CFG.TRAIN.DATA.PIN_MEMORY = False
# ================= validate ================= #
CFG.VAL = EasyDict()
CFG.VAL.INTERVAL = 1
# validating data
CFG.VAL.DATA = EasyDict()
## read data
CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
## dataloader args, optional
CFG.VAL.DATA.BATCH_SIZE = 32
CFG.VAL.DATA.PREFETCH = False
CFG.VAL.DATA.SHUFFLE = False
CFG.VAL.DATA.NUM_WORKERS = 2
CFG.VAL.DATA.PIN_MEMORY = False
# ================= test ================= #
CFG.TEST = EasyDict()
CFG.TEST.INTERVAL = 1
# test data
CFG.TEST.DATA = EasyDict()
## read data
CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
## dataloader args, optional
CFG.TEST.DATA.BATCH_SIZE = 32
CFG.TEST.DATA.PREFETCH = False
CFG.TEST.DATA.SHUFFLE = False
CFG.TEST.DATA.NUM_WORKERS = 2
CFG.TEST.DATA.PIN_MEMORY = False
| 27.831858
| 89
| 0.635612
|
adfa0c6184b1ecdfee4857e80706325c950e40f4
| 1,626
|
py
|
Python
|
gridpath/objective/system/reliability/local_capacity/aggregate_local_capacity_violation_penalties.py
|
souissim/gridpath
|
4eeca2be24b485edc56026e38cfda83f4a6b27ea
|
[
"Apache-2.0"
] | null | null | null |
gridpath/objective/system/reliability/local_capacity/aggregate_local_capacity_violation_penalties.py
|
souissim/gridpath
|
4eeca2be24b485edc56026e38cfda83f4a6b27ea
|
[
"Apache-2.0"
] | null | null | null |
gridpath/objective/system/reliability/local_capacity/aggregate_local_capacity_violation_penalties.py
|
souissim/gridpath
|
4eeca2be24b485edc56026e38cfda83f4a6b27ea
|
[
"Apache-2.0"
] | 1
|
2021-12-21T20:44:21.000Z
|
2021-12-21T20:44:21.000Z
|
# Copyright 2016-2020 Blue Marble Analytics LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyomo.environ import Expression
from gridpath.auxiliary.dynamic_components import cost_components
def add_model_components(m, d, scenario_directory, subproblem, stage):
"""
    :param m: the Pyomo model
    :param d: the dynamic components
    :return:

    Aggregate the local capacity shortage penalty costs and add them to the
    objective function's cost components.
"""
def total_penalty_costs_rule(mod):
return sum(
mod.Local_Capacity_Shortage_MW_Expression[z, p]
* mod.local_capacity_violation_penalty_per_mw[z]
* mod.number_years_represented[p]
* mod.discount_factor[p]
for (z, p) in mod.LOCAL_CAPACITY_ZONE_PERIODS_WITH_REQUIREMENT
)
m.Total_Local_Capacity_Shortage_Penalty_Costs = Expression(
rule=total_penalty_costs_rule
)
record_dynamic_components(dynamic_components=d)
def record_dynamic_components(dynamic_components):
"""
:param dynamic_components:
Add local capacity shortage penalty costs to cost components
"""
getattr(dynamic_components, cost_components).append(
"Total_Local_Capacity_Shortage_Penalty_Costs"
)
| 30.111111
| 74
| 0.728167
|
801c1b102ed3f845dd8b1bf138933334e33a686d
| 28,312
|
py
|
Python
|
sgml/SGMLLexer.py
|
leon332157/Grail-browser
|
5b357c9414a2e0139779724439afbc917ef2fb60
|
[
"CNRI-Jython"
] | 8
|
2015-02-18T18:50:50.000Z
|
2022-03-15T22:21:03.000Z
|
sgml/SGMLLexer.py
|
leon332157/Grail-browser
|
5b357c9414a2e0139779724439afbc917ef2fb60
|
[
"CNRI-Jython"
] | null | null | null |
sgml/SGMLLexer.py
|
leon332157/Grail-browser
|
5b357c9414a2e0139779724439afbc917ef2fb60
|
[
"CNRI-Jython"
] | 3
|
2016-04-04T23:54:07.000Z
|
2020-10-29T04:25:42.000Z
|
"""A lexer for SGML, using derived classes as parser and DTD.
This module provides a transparent interface allowing the use of
alternate lexical analyzers without modifying higher levels of SGML
or HTML support.
"""
__version__ = "$Revision: 1.45 $"
# These constants are not used in this module, but are provided to
# allow other modules to know about the concrete syntax we support.
COM = "--" # comment start or end
CRO = "&#" # character reference open
REFC = ";" # reference close
DSO = "[" # declaration subset open
DSC = "]" # declaration subset close
ERO = "&" # entity reference open
LIT = '"' # literal start or end
LITA = "'" # literal start or end (alternative)
MDO = "<!" # markup declaration open
MDC = ">" # markup declaration close
MSC = "]]" # marked section close
NET = "/" # null end tag
PIO = "<?" # processing instruciton open
PIC = ">" # processing instruction close
STAGO = "<" # start tag open
ETAGO = "</" # end tag open
TAGC = ">" # tag close
VI = "=" # value indicator
whitespace = '\\t\\n\x0b\x0c\\r '
# XXX There should be a way to distinguish between PCDATA (parsed
# character data -- the normal case), RCDATA (replaceable character
# data -- only char and entity references and end tags are special)
# and CDATA (character data -- only end tags are special).
import re
import string
try:
class SGMLError(Exception):
pass
except TypeError:
class SGMLError:
pass
# SGML lexer base class -- find tags and call handler functions.
# Usage: p = SGMLLexer(); p.feed(data); ...; p.close().
# The data between tags is passed to the parser by calling
# self.lex_data() with some data as argument (the data may be split up
# in arbutrary chunks). Entity references are passed by calling
# self.lex_entityref() with the entity reference as argument.
class SGMLLexerBase:
# This is a "dummy" base class which provides documentation on the
# lexer API; this can be used by tools which can extract missing
# method documentation from base classes.
def feed(self, input_data):
"""Feed some data to the parser.
input_data
Input data to be fed to the scanner. An empty string
indicates end-of-input.
Call this as often as you want, with as little or as much text
as you want (may include '\n').
"""
pass
def close(self):
"""Terminate the input stream.
If any data remains unparsed or any events have not been
dispatched, they must be forced to do so by this method before
returning.
"""
pass
def line(self):
"""Return the current line number if known.
"""
def normalize(self, norm):
"""Control normalization of name tokens.
norm
Boolean indicating new setting of case normalization.
If `norm' is true, names tokens will be converted to lower
case before being based to the `lex_*()' interfaces described
below. Otherwise, names will be reported in the case in which
they are found in the input stream. Tokens which are affected
include tag names, attribute names, and named character
references. Note that general entity references are not
affected.
A boolean indicating the previous value is returned.
"""
pass
def reset(self):
"""Attempt to reset the lexical analyzer.
"""
pass
def restrict(self, strict):
"""Control recognition of particular constructs.
"""
pass
# The rest of the methods of this class are intended to be overridden
# by parser subclasses interested in different events on the input
# stream. They are called by the implementation of the lexer object.
def lex_data(self, data_string):
"""Process data characters.
"""
pass
def lex_starttag(self, tagname, attributes):
"""Process a start tag and attributes.
tagname
General identifier of the start tag encountered.
attributes
Dictionary of the attribute/value pairs found in the document
source.
The general identifier and attribute names are normalized to
lower case if only if normalization is enabled; all attribute
values are strings. Attribute values coded as string literals
using either LIT or LITA quoting will have the surrounding
quotation marks removed. Attributes with no value specified
in the document source will have a value of `None' in the
dictionary passed to this method.
"""
pass
def lex_endtag(self, tagname):
"""Process an end tag.
tagname
General identifier of the end tag found.
"""
pass
def lex_charref(self, ordinal, terminator):
"""Process a numeric character reference.
"""
pass
def lex_namedcharref(self, refname, terminator):
"""Process a named character reference.
"""
pass
def lex_entityref(self, refname, terminator):
"""Process a general entity reference.
"""
pass
def lex_pi(self, pi_data):
"""Process a processing instruction.
"""
pass
def lex_comment(self, comment_string):
"""Process a comment string.
If a markup declaration consists entirely of comments, each comment
is passed to this method in sequence. The parser has no way of
knowing whether multiple comments received in sequence are part of
a single markup declaration or originated in multiple declarations.
Empty comments ('<!>') are ignored. Comments embedded in other
markup declarations are not handled via this method.
"""
pass
def lex_declaration(self, declaration_info):
"""Process a markup declaration other than a comment.
declaration_info
List of strings. The first string will be the name of the
declaration (doctype, etc.), followed by each additional
name, nametoken, quoted literal, or comment in the
declaration.
Literals and comments will include the quotation marks or
comment delimiters to allow the client to process each
correctly. Normalization of names and nametokens will be
handled as for general identifiers.
"""
pass
def lex_error(self, error_string):
"""Process an error packet.
error_string
String which describes a lexical error in the input stream.
Values passed to this method may be affected by the current
scanning mode. Further callbacks may show symptoms described
by the error described by `error_string'.
"""
pass
def lex_limitation(self, limit_string):
"""Process a limitation packet.
limit_string
String which describes a lexical limitation in the current
scanning mode.
Further callbacks may show symptoms determined by the limitation
described by `limit_string'.
"""
pass
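# A minimal sketch of a concrete parser built on the SGMLLexer class defined
# below (hypothetical subclass, not part of this module):
#
#   class TagCounter(SGMLLexer):
#       def __init__(self):
#           SGMLLexer.__init__(self)
#           self.count = 0
#       def lex_starttag(self, tagname, attributes):
#           self.count = self.count + 1
#
#   p = TagCounter()
#   p.feed('<html><body>hi</body></html>')
#   p.close()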
class SGMLLexer(SGMLLexerBase):
entitydefs = {}
_in_parse = 0
_finish_parse = 0
def __init__(self):
self.reset()
def strict_p(self):
return self._strict
def cleanup(self):
pass
rawdata = ''
def reset(self):
self.stack = []
self.lasttag = '???'
self.nomoretags = 0
self.literal = 0
self._normfunc = lambda s: s
self._strict = 0
def close(self):
if not self._in_parse:
self.goahead(1)
self.cleanup()
else:
self._finish_parse = 1
def line(self):
return None
def feed(self, data):
self.rawdata = self.rawdata + data
if not self._in_parse:
self._in_parse = 1
self.goahead(0)
self._in_parse = 0
if self._finish_parse:
self.cleanup()
def normalize(self, norm):
prev = ((self._normfunc is string.lower) and 1) or 0
self._normfunc = (norm and string.lower) or (lambda s: s)
return prev
def restrict(self, constrain):
prev = not self._strict
self._strict = not ((constrain and 1) or 0)
return prev
    def setliteral(self, tag):
        self.literal = 1
        # use a different name from the `re' module to avoid shadowing it
        pattern = "%s%s[%s]*%s" % (ETAGO, tag, whitespace, TAGC)
        if self._normfunc is string.lower:
            self._lit_etag_re = re.compile(pattern, re.IGNORECASE)
        else:
            self._lit_etag_re = re.compile(pattern)
def setnomoretags(self):
self.nomoretags = 1
# Internal -- handle data as far as reasonable. May leave state
# and data to be processed by a subsequent call. If 'end' is
# true, force handling all data as if followed by EOF marker.
def goahead(self, end):
#print "goahead", self.rawdata
i = 0
n = len(self.rawdata)
while i < n:
rawdata = self.rawdata # pick up any appended data
n = len(rawdata)
if self.nomoretags:
self.lex_data(rawdata[i:n])
i = n
break
if self.literal:
match = self._lit_etag_re.search(rawdata, i)
if match:
pos = match.start()
# found end
self.lex_data(rawdata[i:pos])
i = pos + len(match.group(0))
self.literal = 0
continue
else:
pos = string.rfind(rawdata, "<", i)
if pos >= 0:
self.lex_data(rawdata[i:pos])
i = pos
break
# pick up self._finish_parse as soon as possible:
end = end or self._finish_parse
match = interesting.search(rawdata, i)
if match: j = match.start()
else: j = n
if i < j: self.lex_data(rawdata[i:j])
i = j
if i == n: break
#print "interesting", j, i
if rawdata[i] == '<':
#print "<", self.literal, rawdata[i:20]
if starttagopen.match(rawdata, i):
#print "open"
if self.literal:
self.lex_data(rawdata[i])
i = i+1
continue
#print "parse_starttag", self.parse_starttag
k = self.parse_starttag(i)
if k < 0: break
i = k
continue
if endtagopen.match(rawdata, i):
k = self.parse_endtag(i)
if k < 0: break
i = k
self.literal = 0
continue
if commentopen.match(rawdata, i):
if self.literal:
self.lex_data(rawdata[i])
i = i+1
continue
k = self.parse_comment(i, end)
if k < 0: break
i = i + k
continue
match = processinginstruction.match(rawdata, i)
if match:
k = match.start()
# Processing instruction:
if self._strict:
self.lex_pi(match.group(1))
i = match.end()
else:
self.lex_data(rawdata[i])
i = i + 1
continue
match = special.match(rawdata, i)
if match:
                    k = match.end()
if k-i == 3:
self.lex_declaration([])
i = i + 3
continue
if self._strict:
if rawdata[i+2] in string.letters:
k = self.parse_declaration(i)
if k > -1:
i = i + k
else:
self.lex_data('<!')
i = i + 2
else:
# Pretend it's data:
if self.literal:
self.lex_data(rawdata[i])
                            i = i + 1
                        else:
                            i = match.end()
continue
elif rawdata[i] == '&':
charref = (self._strict and legalcharref) or simplecharref
match = charref.match(rawdata, i)
if match:
k = match.end()
if rawdata[k-1] not in ';\n':
k = k-1
terminator = ''
else:
terminator = rawdata[k-1]
name = match.group(1)[:-1]
postchar = ''
if terminator == '\n' and not self._strict:
postchar = '\n'
terminator = ''
if name[0] in '0123456789':
# Character reference:
try:
self.lex_charref(string.atoi(name), terminator)
except ValueError:
self.lex_data("&#%s%s" % (name, terminator))
else:
# Named character reference:
self.lex_namedcharref(self._normfunc(name),
terminator)
if postchar:
self.lex_data(postchar)
i = k
continue
match = entityref.match(rawdata, i)
if match:
k = match.end()
# General entity reference:
#k = i+k
if rawdata[k-1] not in ';\n':
k = k-1
terminator = ''
else:
terminator = rawdata[k-1]
name = match.group(1)
self.lex_entityref(name, terminator)
i = k
continue
else:
raise RuntimeError, 'neither < nor & ??'
# We get here only if incomplete matches but
# nothing else
match = incomplete.match(rawdata, i)
if not match:
self.lex_data(rawdata[i])
i = i+1
continue
k = match.end()
j = k
if j == n:
break # Really incomplete
self.lex_data(rawdata[i:j])
i = j
# end while
if (end or self._finish_parse) and i < n:
self.lex_data(self.rawdata[i:n])
i = n
self.rawdata = self.rawdata[i:]
# Internal -- parse comment, return length or -1 if not terminated
def parse_comment(self, i, end):
#print "parse comment"
rawdata = self.rawdata
if rawdata[i:i+4] <> (MDO + COM):
raise RuntimeError, 'unexpected call to parse_comment'
if self._strict:
# stricter parsing; this requires legal SGML:
pos = i + len(MDO)
datalength = len(rawdata)
comments = []
while (pos < datalength) and rawdata[pos] != MDC:
matchlength, comment = comment_match(rawdata, pos)
if matchlength >= 0:
pos = pos + matchlength
comments.append(comment)
elif end:
self.lex_error("unexpected end of data in comment")
comments.append(rawdata[pos+2:])
pos = datalength
elif rawdata[pos] != "-":
self.lex_error("illegal character in"
" markup declaration: "
+ `rawdata[pos]`)
pos = pos + 1
else:
return -1
map(self.lex_comment, comments)
return pos + len(MDC) - i
# not strict
match = commentclose.search(rawdata, i+4)
if not match:
if end:
if MDC in rawdata[i:]:
j = string.find(rawdata, MDC, i)
self.lex_comment(rawdata[i+4: j])
return j + len(MDC) - i
self.lex_comment(rawdata[i+4:])
return len(rawdata) - i
return -1
j = match.start()
self.lex_comment(rawdata[i+4: j])
match = commentclose.match(rawdata, j)
if match:
            j = match.end()
return j - i
# Internal -- handle starttag, return length or -1 if not terminated
def parse_starttag(self, i):
rawdata = self.rawdata
#print "parse_starttag", rawdata
if self._strict and shorttagopen.match(rawdata, i):
# SGML shorthand: <tag/data/ == <tag>data</tag>
# XXX Can data contain &... (entity or char refs)? ... yes
# XXX Can data contain < or > (tag characters)? ... > yes,
# < not as delimiter-in-context
# XXX Can there be whitespace before the first /? ... no
match = shorttag.match(rawdata, i)
if not match:
self.lex_data(rawdata[i])
return i + 1
k = match.end()
tag, data = match.group(1, 2)
tag = self._normfunc(tag)
self.lex_starttag(tag, {})
self.lex_data(data) # should scan for entity refs
self.lex_endtag(tag)
return k
# XXX The following should skip matching quotes (' or ")
match = endbracket.search(rawdata, i+1)
if not match:
return -1
j = match.start(0)
#print "parse_starttag endbracket", j
# Now parse the data between i+1 and j into a tag and attrs
if rawdata[i:i+2] == '<>':
# Semantics of the empty tag are handled by lex_starttag():
if self._strict:
self.lex_starttag('', {})
else:
self.lex_data('<>')
return i + 2
#print "tagfind start", i+1
match = tagfind.match(rawdata, i+1) # matches just the GI
if not match:
raise RuntimeError, 'unexpected call to parse_starttag'
k = match.end(0)
#print "tagfind end", k
tag = self._normfunc(rawdata[i+1:k])
#print "tag", tag
# pull recognizable attributes
attrs = {}
while k < j:
match = attrfind.match(rawdata, k)
if not match: break
l = match.start(0)
k = k + l
# Break out the name[/value] pair:
attrname, rest, attrvalue = match.group(1, 2, 3)
if not rest:
attrvalue = None # was: = attrname
elif attrvalue[:1] == LITA == attrvalue[-1:] or \
attrvalue[:1] == LIT == attrvalue[-1:]:
attrvalue = attrvalue[1:-1]
if '&' in attrvalue:
from SGMLReplacer import replace
attrvalue = replace(attrvalue, self.entitydefs)
attrs[self._normfunc(attrname)] = attrvalue
k = match.end(0)
# close the start-tag
xx = tagend.match(rawdata, k)
if not xx:
# something vile
endchars = self._strict and "<>/" or "<>"
while 1:
try:
while rawdata[k] in string.whitespace:
k = k + 1
except IndexError:
return -1
if rawdata[k] not in endchars:
self.lex_error("bad character in tag")
k = k + 1
else:
break
if not self._strict:
if rawdata[k] == '<':
self.lex_limitation("unclosed start tag not supported")
elif rawdata[k] == '/':
self.lex_limitation("NET-enabling start tags"
" not supported")
else:
k = k + len(xx.group(0)) - 1
#
# Vicious hack to allow XML-style empty tags, like "<hr />".
# We don't require the space, but appearantly it's significant
# on Netscape Navigator. Only in non-strict mode.
#
c = rawdata[k]
if c == '/' and not self._strict:
if rawdata[k:k+2] == "/>":
# using XML empty-tag hack
self.lex_starttag(tag, attrs)
self.lex_endtag(tag)
return k + 2
else:
self.lex_starttag(tag, attrs)
return k + 1
if c in '>/':
k = k + 1
self.lex_starttag(tag, attrs)
return k
# Internal -- parse endtag
def parse_endtag(self, i):
rawdata = self.rawdata
if rawdata[i+2] in '<>':
if rawdata[i+2] == '<' and not self._strict:
self.lex_limitation("unclosed end tags not supported")
self.lex_data(ETAGO)
return i + 2
self.lex_endtag('')
return i + 2 + (rawdata[i+2] == TAGC)
match = endtag.match(rawdata, i)
if not match:
return -1
j = match.end(0)-1
#j = i + j - 1
if rawdata[j] == TAGC:
j = j + 1
self.lex_endtag(self._normfunc(match.group(1)))
return j
def parse_declaration(self, start):
# This only gets used in "strict" mode.
rawdata = self.rawdata
i = start
# Markup declaration, possibly illegal:
strs = []
i = i + 2
match = md_name.match(rawdata, i)
        k = match.end() - i
strs.append(self._normfunc(match.group(1)))
i = i + k
end_target = '>'
while k > 0:
# Have to check the comment pattern first so we don't get
# confused and think this is a name that starts with '--':
if rawdata[i] == '[':
self.lex_limitation("declaration subset not supported")
end_target = ']>'
break
k, comment = comment_match(rawdata, i)
if k > 0:
strs.append(comment)
i = i + k
continue
match = md_string.match(rawdata, i)
if match:
                k = match.end() - i
strs.append(match.group(1))
i = i + k
continue
match = md_name.match(rawdata, i)
if match:
                k = match.end() - i
s = match.group(1)
try:
strs.append(string.atoi(s))
except string.atoi_error:
strs.append(self._normfunc(s))
i = i + k
continue
k = string.find(rawdata, end_target, i)
if end_target == ']>':
if k < 0:
k = string.find(rawdata, '>', i)
else:
k = k + 1
if k >= 0:
i = k + 1
else:
return -1
self.lex_declaration(strs)
return i - start
# Regular expressions used for parsing:
OPTIONAL_WHITESPACE = "[%s]*" % whitespace
interesting = re.compile('[&<]')
incomplete = re.compile('&([a-zA-Z][a-zA-Z0-9]*|'
'#[0-9]*)?|'
'<([a-zA-Z][^<>]*|'
'/([a-zA-Z][^<>]*)?|'
'![^<>]*)?')
entityref = re.compile(ERO + '([a-zA-Z][-.a-zA-Z0-9]*)[^-.a-zA-Z0-9]')
simplecharref = re.compile(CRO + '([0-9]+[^0-9])')
legalcharref \
= re.compile(CRO + '([0-9]+[^0-9]|[a-zA-Z.-]+[^a-zA-Z.-])')
processinginstruction = re.compile('<\\?([^>]*)' + PIC)
starttagopen = re.compile(STAGO + '[>a-zA-Z]')
shorttagopen = re.compile(STAGO + '[a-zA-Z][-.a-zA-Z0-9]*'
+ OPTIONAL_WHITESPACE + NET)
shorttag = re.compile(STAGO + '([a-zA-Z][-.a-zA-Z0-9]*)'
+ OPTIONAL_WHITESPACE + NET + '([^/]*)' + NET)
endtagopen = re.compile(ETAGO + '[<>a-zA-Z]')
endbracket = re.compile('[<>]')
endtag = re.compile(ETAGO +
'([a-zA-Z][-.a-zA-Z0-9]*)'
'([^-.<>a-zA-Z0-9]?[^<>]*)[<>]')
special = re.compile(MDO + '[^>]*' + MDC)
markupdeclaration = re.compile(MDO +
'(([-.a-zA-Z0-9]+|'
+ LIT + '[^"]*' + LIT + '|'
+ LITA + "[^']*" + LITA + '|'
+ COM + '([^-]|-[^-])*' + COM
+ ')' + OPTIONAL_WHITESPACE
+ ')*' + MDC)
md_name = re.compile('([^>%s\'"]+)' % whitespace
+ OPTIONAL_WHITESPACE)
md_string = re.compile('("[^"]*"|\'[^\']*\')' + OPTIONAL_WHITESPACE)
commentopen = re.compile(MDO + COM)
commentclose = re.compile(COM + OPTIONAL_WHITESPACE + MDC)
tagfind = re.compile('[a-zA-Z][-_.a-zA-Z0-9]*')
attrfind = re.compile(
# comma is for compatibility
('[%s,]*([_a-zA-Z][-:.a-zA-Z_0-9]*)' % whitespace)
+ '(' + OPTIONAL_WHITESPACE + VI + OPTIONAL_WHITESPACE # VI
+ '(' + LITA + "[^']*" + LITA
+ '|' + LIT + '[^"]*' + LIT
+ '|[\-~a-zA-Z0-9,./:+*%?!\\(\\)_#=]*))?')
tagend = re.compile(OPTIONAL_WHITESPACE + '[<>/]')
# used below in comment_match()
comment_start = re.compile(COM + '([^-]*)-(.|\\n)')
comment_segment = re.compile('([^-]*)-(.|\\n)')
comment_whitespace = re.compile(OPTIONAL_WHITESPACE)
def comment_match(rawdata, start):
"""Match a legal SGML comment.
rawdata
Data buffer, as a string.
start
Starting index into buffer. This should point to the `<'
character of the Markup Declaration Open.
Analyzes SGML comments using very simple regular expressions to
ensure that the limits of the regular expression package are not
exceeded. Very long comments with embedded hyphens which cross
buffer boundaries can easily generate problems with less-than-
ideal RE implementations.
Returns the number of characters to consume from the input buffer
(*not* including the first `start' characters!) and the text of
comment located. If no comment was identified, returns -1 and
an empty string.
"""
    matcher = comment_start.match(rawdata, start)
    if not matcher:
        return -1, ''
    pos = start
    comment = ''
    matchlength = matcher.end() - pos
    while matchlength >= 0:
        if matcher.group(2) == "-":
            # skip any whitespace following the comment close
            ws = comment_whitespace.match(rawdata, pos + matchlength)
            if ws:
                ws = ws.end() - (pos + matchlength)
            else:
                ws = 0
            pos = pos + matchlength + ws
            return pos - start, comment + matcher.group(1)
        # only a partial match
        comment = "%s%s-%s" % (comment,
                               matcher.group(1), matcher.group(2))
        pos = pos + matchlength
        matcher = comment_segment.match(rawdata, pos)
        if not matcher:
            matchlength = -1
        else:
            matchlength = matcher.end() - pos
    return -1, ''
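# Illustrative example of the return convention (values assumed, not part of
# the original module): for rawdata = '-- a comment -- >' and start = 0,
# comment_match(rawdata, start) returns a positive consumed length together
# with the text ' a comment ', and (-1, '') when no well-formed comment
# starts at `start'.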
| 36.25096
| 76
| 0.492583
|
dae0187f8846482fe0cb5c8dabc2fa1adecd8e8e
| 753
|
py
|
Python
|
tests/io/stringio1.py
|
LabAixBidouille/micropython
|
11aa6ba456287d6c80598a7ebbebd2887ce8f5a2
|
[
"MIT"
] | null | null | null |
tests/io/stringio1.py
|
LabAixBidouille/micropython
|
11aa6ba456287d6c80598a7ebbebd2887ce8f5a2
|
[
"MIT"
] | null | null | null |
tests/io/stringio1.py
|
LabAixBidouille/micropython
|
11aa6ba456287d6c80598a7ebbebd2887ce8f5a2
|
[
"MIT"
] | null | null | null |
import _io as io
a = io.StringIO()
print(a.getvalue())
print(a.read())
a = io.StringIO("foobar")
print(a.getvalue())
print(a.read())
print(a.read())
a = io.StringIO()
a.write("foo")
print(a.getvalue())
a = io.StringIO("foo")
a.write("12")
print(a.getvalue())
a = io.StringIO("foo")
a.write("123")
print(a.getvalue())
a = io.StringIO("foo")
a.write("1234")
print(a.getvalue())
a = io.StringIO()
a.write("foo")
print(a.read())
a = io.StringIO()
a.close()
for f in [a.read, a.getvalue, lambda:a.write("")]:
# CPython throws for operations on closed I/O, micropython makes
# the underlying string empty unless MICROPY_CPYTHON_COMPAT defined
try:
f()
print("ValueError")
except ValueError:
print("ValueError")
| 17.928571
| 71
| 0.642762
|
45cded092f5ec52801ec3684e3568c8b5ad86154
| 2,579
|
py
|
Python
|
nova/api/openstack/compute/schemas/services.py
|
lixiaoy1/nova
|
357b8b38e88300948bb2e07d1bbaabd1e9d7b60e
|
[
"Apache-2.0"
] | 2
|
2021-10-11T04:56:25.000Z
|
2022-02-16T08:49:29.000Z
|
nova/api/openstack/compute/schemas/services.py
|
woraser/nova
|
fc3890667e4971e3f0f35ac921c2a6c25f72adec
|
[
"Apache-2.0"
] | 132
|
2017-03-27T11:31:52.000Z
|
2022-03-30T08:45:02.000Z
|
nova/api/openstack/compute/schemas/services.py
|
woraser/nova
|
fc3890667e4971e3f0f35ac921c2a6c25f72adec
|
[
"Apache-2.0"
] | 8
|
2017-03-27T07:50:38.000Z
|
2020-02-14T16:55:56.000Z
|
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.validation import parameter_types
service_update = {
'type': 'object',
'properties': {
'host': parameter_types.hostname,
'binary': {
'type': 'string', 'minLength': 1, 'maxLength': 255,
},
'disabled_reason': {
'type': 'string', 'minLength': 1, 'maxLength': 255,
}
},
'required': ['host', 'binary'],
'additionalProperties': False
}
service_update_v211 = {
'type': 'object',
'properties': {
'host': parameter_types.hostname,
'binary': {
'type': 'string', 'minLength': 1, 'maxLength': 255,
},
'disabled_reason': {
'type': 'string', 'minLength': 1, 'maxLength': 255,
},
'forced_down': parameter_types.boolean
},
'required': ['host', 'binary'],
'additionalProperties': False
}
# The 2.53 body is for updating a service's status and/or forced_down fields.
# There are no required attributes since the service is identified using a
# unique service_id on the request path, and status and/or forced_down can
# be specified in the body. If status=='disabled', then 'disabled_reason' is
# also checked in the body but is not required. Requesting status='enabled' and
# including a 'disabled_reason' results in a 400, but this is checked in code.
service_update_v2_53 = {
'type': 'object',
'properties': {
'status': {
'type': 'string',
'enum': ['enabled', 'disabled'],
},
'disabled_reason': {
'type': 'string', 'minLength': 1, 'maxLength': 255,
},
'forced_down': parameter_types.boolean
},
'additionalProperties': False
}
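# Example 2.53 request body accepted by the schema above (illustrative values
# only): {"status": "disabled", "disabled_reason": "maintenance",
# "forced_down": false}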
index_query_schema = {
'type': 'object',
'properties': {
'host': parameter_types.common_query_param,
'binary': parameter_types.common_query_param,
},
# For backward compatible changes
'additionalProperties': True
}
| 32.64557
| 79
| 0.629701
|
590cbd0d8abf43993a20424a5c92101d12ecc728
| 1,051
|
py
|
Python
|
blogposts/models.py
|
cesarrodas/cesars-blog
|
497f4c20bab24db0c0052004e6199d669dac1fe0
|
[
"MIT"
] | null | null | null |
blogposts/models.py
|
cesarrodas/cesars-blog
|
497f4c20bab24db0c0052004e6199d669dac1fe0
|
[
"MIT"
] | null | null | null |
blogposts/models.py
|
cesarrodas/cesars-blog
|
497f4c20bab24db0c0052004e6199d669dac1fe0
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.utils import timezone
# Create your models here.
class BlogPost(models.Model):
title = models.CharField(max_length=250)
description = models.CharField(max_length=300)
pub_date = models.DateTimeField()
image = models.CharField(max_length=300)
body = models.TextField()
def __str__(self):
return self.title
def pub_date_pretty(self):
return self.pub_date.strftime('%b %e %Y')
def summary(self):
return self.body[:100]
class Comment(models.Model):
username = models.CharField(max_length=100)
pub_date = models.DateTimeField()
message = models.TextField()
post_id = models.IntegerField()
@classmethod
def create(cls, username, message, post_id):
comment = cls(username=username, message=message, \
post_id=post_id, pub_date=timezone.datetime.now())
return comment
def __str__(self):
return self.message[:100]
def pub_date_pretty(self):
return self.pub_date.strftime('%b %e %Y')
| 27.657895
| 59
| 0.681256
|
343bdcdf7c2c46293277d788bcbd0c22d70bc4be
| 23,973
|
py
|
Python
|
lib/spc.py
|
rinocloud/rinobot-plugin-parse-spc
|
6bb675a646455e8b9c61bb380705875894fc52e4
|
[
"MIT"
] | null | null | null |
lib/spc.py
|
rinocloud/rinobot-plugin-parse-spc
|
6bb675a646455e8b9c61bb380705875894fc52e4
|
[
"MIT"
] | null | null | null |
lib/spc.py
|
rinocloud/rinobot-plugin-parse-spc
|
6bb675a646455e8b9c61bb380705875894fc52e4
|
[
"MIT"
] | null | null | null |
"""
spc class: main class that starts loading data from Thermo Grams *.SPC
file
author: Rohan Isaac
"""
from __future__ import division, absolute_import, unicode_literals, print_function
import struct
import numpy as np
from .sub import subFile, subFileOld
from .global_fun import read_subheader, flag_bits
class File:
"""
Starts loading the data from a .SPC spectral file using data from the
header. Stores all the attributes of a spectral file:
Data
----
content: Full raw data
    sub[i]: subFile object for each subfile
sub[i].y: y data for each subfile
x: x-data, global, or for the first subheader
Examples
--------
>>> import spc
>>> ftir_1 = spc.File('/path/to/ftir.spc')
"""
# Format strings for various parts of the file
# calculate size of strings using `struct.calcsize(string)`
head_str = "<cccciddicccci9s9sh32s130s30siicchf48sfifc187s"
old_head_str = "<cchfffcchcccc8shh28s130s30s32s"
logstc_str = "<iiiii44s"
# byte positon of various parts of the file
head_siz = 512
old_head_siz = 256
subhead_siz = 32
log_siz = 64
subhead1_pos = head_siz + subhead_siz
# ------------------------------------------------------------------------
# CONSTRUCTOR
# ------------------------------------------------------------------------
def __init__(self, filename):
        # load the entire file into memory temporarily
with open(filename, "rb") as fin:
content = fin.read()
# print "Read raw data"
self.length = len(content)
# extract first two bytes to determine file type version
self.ftflg, self.fversn = struct.unpack('<cc'.encode('utf8'), content[:2])
# --------------------------------------------
# NEW FORMAT (LSB)
# --------------------------------------------
if self.fversn == b'\x4b':
# format: new LSB 1st
# -------------
# unpack header
# -------------
# use little-endian format with standard sizes
# use naming scheme in SPC.H header file
self.ftflg, \
self.fversn, \
self.fexper, \
self.fexp, \
self.fnpts, \
self.ffirst, \
self.flast, \
self.fnsub, \
self.fxtype, \
self.fytype, \
self.fztype, \
self.fpost, \
self.fdate, \
self.fres, \
self.fsource, \
self.fpeakpt, \
self.fspare, \
self.fcmnt, \
self.fcatxt, \
self.flogoff, \
self.fmods, \
self.fprocs, \
self.flevel, \
self.fsampin, \
self.ffactor, \
self.fmethod, \
self.fzinc, \
self.fwplanes, \
self.fwinc, \
self.fwtype, \
self.freserv \
= struct.unpack(self.head_str.encode('utf8'), content[:self.head_siz])
# Flag bits
self.tsprec, \
self.tcgram, \
self.tmulti, \
self.trandm, \
self.tordrd, \
self.talabs, \
self.txyxys, \
self.txvals = flag_bits(self.ftflg)[::-1]
# fix data types if necessary
self.fnpts = int(self.fnpts) # of points should be int
self.fexp = ord(self.fexp)
self.ffirst = float(self.ffirst)
self.flast = float(self.flast)
self.flogoff = int(self.flogoff) # byte; should be int
self.fxtype = ord(self.fxtype)
self.fytype = ord(self.fytype)
self.fztype = ord(self.fztype)
self.fexper = ord(self.fexper)
self.fcmnt = str(self.fcmnt)
# Convert date time to appropriate format
d = self.fdate
self.year = d >> 20
self.month = (d >> 16) % (2**4)
self.day = (d >> 11) % (2**5)
self.hour = (d >> 6) % (2**5)
self.minute = d % (2**6)
# null terminated string, replace null characters with spaces
# split and join to remove multiple spaces
try:
self.cmnt = ' '.join((self.fcmnt.replace('\x00', ' ')).split())
except:
self.cmnt = self.fcmnt
# figure out type of file
if self.fnsub > 1:
self.dat_multi = True
if self.txyxys:
# x values are given
self.dat_fmt = '-xy'
elif self.txvals:
# only one subfile, which contains the x data
self.dat_fmt = 'x-y'
else:
# no x values are given, but they can be generated
self.dat_fmt = 'gx-y'
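            # Summary of the three layouts (informal sketch): '-xy' means every
            # subfile carries its own x array, 'x-y' means one shared x array is
            # stored in the file, and 'gx-y' means x is generated below from
            # ffirst/flast/fnpts.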
print('{}({})'.format(self.dat_fmt, self.fnsub))
sub_pos = self.head_siz
if not self.txyxys:
# txyxys don't have global x data
if self.txvals:
# if global x data is given
x_dat_pos = self.head_siz
x_dat_end = self.head_siz + (4 * self.fnpts)
self.x = np.array(
[struct.unpack_from(
'f', content[x_dat_pos:x_dat_end], 4 * i)[0]
for i in range(0, self.fnpts)])
sub_pos = x_dat_end
else:
# otherwise generate them
self.x = np.linspace(self.ffirst, self.flast, num=self.fnpts)
# make a list of subfiles
self.sub = []
# if subfile directory is given
if self.dat_fmt == '-xy' and self.fnpts > 0:
self.directory = True
# loop over entries in directory
for i in range(0, self.fnsub):
ssfposn, ssfsize, ssftime = struct.unpack(
'<iif'.encode('utf8'), content[self.fnpts + (i * 12):self.fnpts + ((i + 1) * 12)])
                    # add subfile, load defaults for npts and exp
self.sub.append(subFile(content[ssfposn:ssfposn + ssfsize], 0, 0, True, self.tsprec))
else:
# don't have directory, for each subfile
for i in range(self.fnsub):
# figure out its size
if self.txyxys:
# use points in subfile
subhead_lst = read_subheader(content[sub_pos:(sub_pos + 32)])
pts = subhead_lst[6]
# 4 bytes each for x and y, and 32 for subheader
dat_siz = (8 * pts) + 32
else:
# use global points
pts = self.fnpts
dat_siz = (4 * pts) + 32
sub_end = sub_pos + dat_siz
# read into object, add to list
self.sub.append(subFile(content[sub_pos:sub_end],
self.fnpts, self.fexp, self.txyxys, self.tsprec))
# update positions
sub_pos = sub_end
# if log data exists
# flog offset to log data offset not zero (bytes)
if self.flogoff:
log_head_end = self.flogoff + self.log_siz
self.logsizd, \
self.logsizm, \
self.logtxto, \
self.logbins, \
self.logdsks, \
self.logspar \
= struct.unpack(self.logstc_str.encode('utf8'),
content[self.flogoff:log_head_end])
log_pos = self.flogoff + self.logtxto
log_end_pos = log_pos + self.logsizd
# line endings: get rid of any '\r' and then split on '\n'
self.log_content = content[log_pos:log_end_pos].replace(b'\r', b'').split(b'\n')
# split log data into dictionary based on =
self.log_dict = dict()
self.log_other = [] # put the rest into a list
for x in self.log_content:
if x.find(b'=') >= 0:
# stop it from breaking if there is more than 1 =
key, value = x.split(b'=')[:2]
self.log_dict[key] = value
else:
self.log_other.append(x)
# spacing between data
self.spacing = (self.flast - self.ffirst) / (self.fnpts - 1)
# call functions
self.set_labels()
self.set_exp_type()
# --------------------------------------------
# NEW FORMAT (MSB)
# --------------------------------------------
elif self.fversn == b'\x4c':
# new MSB 1st
print("New MSB 1st, yet to be implemented")
pass # To be implemented
# --------------------------------------------
# OLD FORMAT
# --------------------------------------------
elif self.fversn == b'\x4d':
# old format
# oxtype -> fxtype
# oytype -> fytype
self.oftflgs, \
self.oversn, \
self.oexp, \
self.onpts, \
self.ofirst, \
self.olast, \
self.fxtype, \
self.fytype, \
self.oyear, \
self.omonth, \
self.oday, \
self.ohour, \
self.ominute, \
self.ores, \
self.opeakpt, \
self.onscans, \
self.ospare, \
self.ocmnt, \
self.ocatxt, \
self.osubh1 = struct.unpack(self.old_head_str.encode('utf8'),
content[:self.old_head_siz])
# Flag bits (assuming same)
self.tsprec, \
self.tcgram, \
self.tmulti, \
self.trandm, \
self.tordrd, \
self.talabs, \
self.txyxys, \
self.txvals = flag_bits(self.oftflgs)[::-1]
# fix data types
self.oexp = int(self.oexp)
self.onpts = int(self.onpts) # can't have floating num of pts
self.ofirst = float(self.ofirst)
self.olast = float(self.olast)
# Date information
# !! to fix !!
# Year collected (0=no date/time) - MSB 4 bits are Z type
# extracted as characters, using ord
self.omonth = ord(self.omonth)
self.oday = ord(self.oday)
self.ohour = ord(self.ohour)
self.ominute = ord(self.ominute)
# number of scans (? subfiles sometimes ?)
self.onscans = int(self.onscans)
# null terminated strings
self.ores = self.ores.split(b'\x00')[0]
self.ocmnt = self.ocmnt.split(b'\x00')[0]
# can it have separate x values ?
self.x = np.linspace(self.ofirst, self.olast, num=self.onpts)
# make a list of subfiles
self.sub = []
# already have subheader from main header, retrace steps
sub_pos = self.old_head_siz - self.subhead_siz
# for each subfile
# in the old format we don't know how many subfiles to expect,
# just looping till we run out
i = 0
while True:
try:
# read in subheader
subhead_lst = read_subheader(content[sub_pos:sub_pos + self.subhead_siz])
if subhead_lst[6] > 0:
# default to subfile points, unless it is zero
pts = subhead_lst[6]
else:
pts = self.onpts
# figure out size of subheader
dat_siz = (4 * pts)
sub_end = sub_pos + self.subhead_siz + dat_siz
# read into object, add to list
# send it pts since we have already figured that out
self.sub.append(subFileOld(
content[sub_pos:sub_end], pts, self.oexp, self.txyxys))
# update next subfile postion, and index
sub_pos = sub_end
i += 1
except:
# zero indexed, set the total number of subfile
self.fnsub = i + 1
break
# assuming it can't have separate x values
self.dat_fmt = 'gx-y'
print('{}({})'.format(self.dat_fmt, self.fnsub))
self.fxtype = ord(self.fxtype)
self.fytype = ord(self.fytype)
# need to find from year apparently
self.fztype = 0
self.set_labels()
# --------------------------------------------
# SHIMADZU
# --------------------------------------------
elif self.fversn == b'\xcf':
print("Highly experimental format, may not work ")
raw_data = content[10240:] # data starts here (maybe every time)
            # spacing between y and x data is at least 0 bytes
s_32 = chr(int('0', 2)) * 32
s_8 = chr(int('0', 2)) * 8 # zero double
dat_len = raw_data.find(s_32)
for i in range(dat_len, len(raw_data), 8):
# find first non zero double
if raw_data[i:i + 8] != s_8:
break
dat_siz = int(dat_len / 8)
self.y = struct.unpack(('<' + dat_siz * 'd').encode('utf8'), raw_data[:dat_len])
self.x = struct.unpack(('<' + dat_siz * 'd').encode('utf8'), raw_data[i:i + dat_len])
else:
print("File type %s not supported yet. Please add issue. " % hex(ord(self.fversn)))
self.content = content
# ------------------------------------------------------------------------
# Process other data
# ------------------------------------------------------------------------
def set_labels(self):
"""
Set the x, y, z axis labels using various information in file content
"""
# --------------------------
# units for x,z,w axes
# --------------------------
fxtype_op = ["Arbitrary",
"Wavenumber (cm-1)",
"Micrometers (um)",
"Nanometers (nm)",
"Seconds ",
"Minutes", "Hertz (Hz)",
"Kilohertz (KHz)",
"Megahertz (MHz) ",
"Mass (M/z)",
"Parts per million (PPM)",
"Days",
"Years",
"Raman Shift (cm-1)",
"eV",
"XYZ text labels in fcatxt (old 0x4D version only)",
"Diode Number",
"Channel",
"Degrees",
"Temperature (F)",
"Temperature (C)",
"Temperature (K)",
"Data Points",
"Milliseconds (mSec)",
"Microseconds (uSec) ",
"Nanoseconds (nSec)",
"Gigahertz (GHz)",
"Centimeters (cm)",
"Meters (m)",
"Millimeters (mm)",
"Hours"]
if self.fxtype < 30:
self.xlabel = fxtype_op[self.fxtype]
else:
self.xlabel = "Unknown"
if self.fztype < 30:
self.zlabel = fxtype_op[self.fztype]
else:
self.zlabel = "Unknown"
# --------------------------
# units y-axis
# --------------------------
fytype_op = ["Arbitrary Intensity",
"Interferogram",
"Absorbance",
"Kubelka-Munk",
"Counts",
"Volts",
"Degrees",
"Milliamps",
"Millimeters",
"Millivolts",
"Log(1/R)",
"Percent",
"Intensity",
"Relative Intensity",
"Energy",
"",
"Decibel",
"",
"",
"Temperature (F)",
"Temperature (C)",
"Temperature (K)",
"Index of Refraction [N]",
"Extinction Coeff. [K]",
"Real",
"Imaginary",
"Complex"]
fytype_op2 = ["Transmission",
"Reflectance",
"Arbitrary or Single Beam with Valley Peaks",
"Emission"]
if self.fytype < 27:
self.ylabel = fytype_op[self.fytype]
elif self.fytype > 127 and self.fytype < 132:
self.ylabel = fytype_op2[self.fytype - 128]
else:
self.ylabel = "Unknown"
# --------------------------
# check if labels are included as text
# --------------------------
# split it based on 00 string
# format x, y, z
if self.talabs:
ll = self.fcatxt.split(b'\x00')
if len(ll) > 2:
# make sure there are enough items to extract from
xl, yl, zl = ll[:3]
# overwrite only if non zero
if len(xl) > 0:
self.xlabel = xl
if len(yl) > 0:
self.ylabel = yl
if len(zl) > 0:
self.zlabel = zl
def set_exp_type(self):
""" Sets the experiment type """
fexper_op = ["General SPC",
"Gas Chromatogram",
"General Chromatogram",
"HPLC Chromatogram",
"FT-IR, FT-NIR, FT-Raman Spectrum or Igram",
"NIR Spectrum",
"UV-VIS Spectrum",
"X-ray Diffraction Spectrum",
"Mass Spectrum ",
"NMR Spectrum or FID",
"Raman Spectrum",
"Fluorescence Spectrum",
"Atomic Spectrum",
"Chromatography Diode Array Spectra"]
self.exp_type = fexper_op[self.fexper]
# ------------------------------------------------------------------------
# output
# ------------------------------------------------------------------------
    def data_txt(self, delimiter='\t', newline='\n'):
        """ Return x,y column data as a single string that can be printed to
        standard output or written to a text file.
Arguments
---------
delimiter: chr (default='\t')
delimiter character for column separation
newline: chr (default='\n')
newline character, may want to use '\r\n' for Windows based output
Example
-------
>>> f.data_txt(newline='\r\n')
"""
dat = ''
if self.fnsub == 1:
if self.dat_fmt.endswith('-xy'):
x = self.sub[0].x
else:
x = self.x
y = self.sub[0].y
for x1, y1 in zip(x, y):
dat += '{}{}{}{}'.format(x1, delimiter, y1, newline)
else:
if not self.dat_fmt.endswith('-xy'):
# does not have separate x data
for i in range(len(self.x)):
dat += '{}'.format(self.x[i])
for s in self.sub:
dat += '{}{}'.format(delimiter, s.y[i])
dat += newline
else:
# txyxy format, return one long xy file with subfiles
# separated by blank lines
for i in self.sub:
for x1, y1 in zip(i.x, i.y):
dat += '{}{}{}{}'.format(x1, delimiter, y1, newline)
dat += newline
return dat
    def write_file(self, path, delimiter='\t', newline='\n'):
        """ Output x,y data to a text file, tab separated by default.
Arguments
---------
path: str
full path to output file including extension
delimiter: chr (default='\t')
delimiter character for column separation
newline: chr (default='\n')
newline character, may want to use '\r\n' for Windows based output
Example
-------
        >>> f.write_file('/Users/home/output.txt', delimiter=',')
"""
with open(path, 'w') as f:
f.write(self.data_txt(delimiter, newline))
def print_metadata(self):
""" Print out select metadata"""
print("Scan: ", self.log_dict['Comment'], "\n",
float(self.log_dict['Start']), "to ",
float(self.log_dict['End']), "; ",
float(self.log_dict['Increment']), "cm-1;",
float(self.log_dict['Integration Time']), "s integration time")
    def plot(self):
        """ Plot the data using the stored axis labels and return the figure.
        Requires matplotlib.
Example
-------
>>> f.plot()
"""
import matplotlib.pyplot as plt
if self.dat_fmt.endswith('-xy'):
for s in self.sub:
plt.plot(s.x, s.y)
else:
x = self.x
for s in self.sub:
plt.plot(x, s.y)
plt.xlabel(self.xlabel)
plt.ylabel(self.ylabel)
return plt.gcf()
def debug_info(self):
"""
Interpret flags and header information to debug more about the file
format
Example
-------
>>> f.debug_info()
"""
print("\nDEBUG INFO\nFlags:\n")
# Flag bits
if self.tsprec:
print("16-bit y data")
if self.tcgram:
print("enable fexper")
if self.tmulti:
print("multiple traces")
if self.trandm:
print("arb time (z) values")
if self.tordrd:
print("ordered but uneven subtimes")
if self.talabs:
print("use fcatxt axis not fxtype")
if self.txyxys:
print("each subfile has own x's")
if self.txvals:
            print("floating x-value array precedes y's")
print('----\n')
# spc format version
        # fversn is stored as bytes, so compare against byte values
        if self.fversn == b'\x4b':
            self.pr_versn = "new LSB 1st"
        elif self.fversn == b'\x4c':
            self.pr_versn = "new MSB 1st"
        elif self.fversn == b'\x4d':
            self.pr_versn = "old format"
else:
self.pr_versn = "unknown version"
print("Version:", self.pr_versn)
# subfiles
if self.fnsub == 1:
print("Single file only")
else:
print("Multiple subfiles:", self.fnsub)
# multiple y values
if self.tmulti:
print("Multiple y-values")
else:
print("Single set of y-values")
# print "There are ", self.fnpts, \
# " points between ", self.ffirst, \
# " and ", self.flast, \
# " in steps of ", self.pr_spacing
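# -----------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): assuming the class
# above is exposed as spc.File and that 'sample.spc' is a readable SPC file,
# the output helpers defined above could be exercised like this. The module
# and file names are illustrative only.
# -----------------------------------------------------------------------------
# f = spc.File('sample.spc')                 # parse header, subfiles, log block
# print(f.dat_fmt, f.fnsub)                  # data layout and number of subfiles
# f.write_file('sample.txt', delimiter=',')  # dump x,y columns as delimited text
# fig = f.plot()                             # matplotlib figure with axis labels
# f.debug_info()                             # decoded flag bits and format version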
| 34.99708
| 106
| 0.435073
|
03269605c9b199d9a9f397921e3af1f6096d4a92
| 1,545
|
py
|
Python
|
release/stubs.min/System/Windows/Forms/__init___parts/UICuesEventArgs.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
release/stubs.min/System/Windows/Forms/__init___parts/UICuesEventArgs.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
release/stubs.min/System/Windows/Forms/__init___parts/UICuesEventArgs.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
class UICuesEventArgs(EventArgs):
"""
Provides data for the System.Windows.Forms.Control.ChangeUICues event.
UICuesEventArgs(uicues: UICues)
"""
@staticmethod
def __new__(self, uicues):
""" __new__(cls: type,uicues: UICues) """
pass
Changed = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets the bitwise combination of the System.Windows.Forms.UICues values.
Get: Changed(self: UICuesEventArgs) -> UICues
"""
ChangeFocus = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets a value indicating whether the state of the focus cues has changed.
Get: ChangeFocus(self: UICuesEventArgs) -> bool
"""
ChangeKeyboard = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets a value indicating whether the state of the keyboard cues has changed.
Get: ChangeKeyboard(self: UICuesEventArgs) -> bool
"""
ShowFocus = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets a value indicating whether focus rectangles are shown after the change.
Get: ShowFocus(self: UICuesEventArgs) -> bool
"""
ShowKeyboard = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets a value indicating whether keyboard cues are underlined after the change.
Get: ShowKeyboard(self: UICuesEventArgs) -> bool
"""
| 20.6
| 89
| 0.642071
|
192dcbd0dbbb52cdf7068fd0800303842ac87d51
| 1,721
|
py
|
Python
|
examples/pylab_examples/anscombe.py
|
jbbrokaw/matplotlib
|
86ec1b6fc5628bfb2d09797c58d7eed0ca8c2427
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
examples/pylab_examples/anscombe.py
|
jbbrokaw/matplotlib
|
86ec1b6fc5628bfb2d09797c58d7eed0ca8c2427
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
examples/pylab_examples/anscombe.py
|
jbbrokaw/matplotlib
|
86ec1b6fc5628bfb2d09797c58d7eed0ca8c2427
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
from __future__ import print_function
"""
Edward Tufte uses this example from Anscombe to show 4 datasets of x
and y that have the same mean, standard deviation, and regression
line, but which are qualitatively different.
matplotlib fun for a rainy day
"""
from pylab import *
x = array([10, 8, 13, 9, 11, 14, 6, 4, 12, 7, 5])
y1 = array([8.04, 6.95, 7.58, 8.81, 8.33, 9.96, 7.24, 4.26, 10.84, 4.82, 5.68])
y2 = array([9.14, 8.14, 8.74, 8.77, 9.26, 8.10, 6.13, 3.10, 9.13, 7.26, 4.74])
y3 = array([7.46, 6.77, 12.74, 7.11, 7.81, 8.84, 6.08, 5.39, 8.15, 6.42, 5.73])
x4 = array([8, 8, 8, 8, 8, 8, 8, 19, 8, 8, 8])
y4 = array([6.58, 5.76, 7.71, 8.84, 8.47, 7.04, 5.25, 12.50, 5.56, 7.91, 6.89])
def fit(x):
return 3+0.5*x
xfit = array([amin(x), amax(x)])
subplot(221)
plot(x, y1, 'ks', xfit, fit(xfit), 'r-', lw=2)
axis([2, 20, 2, 14])
setp(gca(), xticklabels=[], yticks=(4, 8, 12), xticks=(0, 10, 20))
text(3, 12, 'I', fontsize=20)
subplot(222)
plot(x, y2, 'ks', xfit, fit(xfit), 'r-', lw=2)
axis([2, 20, 2, 14])
setp(gca(), xticklabels=[], yticks=(4, 8, 12), yticklabels=[], xticks=(0, 10, 20))
text(3, 12, 'II', fontsize=20)
subplot(223)
plot(x, y3, 'ks', xfit, fit(xfit), 'r-', lw=2)
axis([2, 20, 2, 14])
text(3, 12, 'III', fontsize=20)
setp(gca(), yticks=(4, 8, 12), xticks=(0, 10, 20))
subplot(224)
xfit = array([amin(x4), amax(x4)])
plot(x4, y4, 'ks', xfit, fit(xfit), 'r-', lw=2)
axis([2, 20, 2, 14])
setp(gca(), yticklabels=[], yticks=(4, 8, 12), xticks=(0, 10, 20))
text(3, 12, 'IV', fontsize=20)
# verify the stats
pairs = (x, y1), (x, y2), (x, y3), (x4, y4)
for x, y in pairs:
print('mean=%1.2f, std=%1.2f, r=%1.2f' % (mean(y), std(y), corrcoef(x, y)[0][1]))
show()
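# Hedged addition (not part of the original example): the note above also says
# the four datasets share the same regression line; fit() encodes y = 3 + 0.5*x,
# and a quick least-squares check per pair confirms roughly those coefficients.
for x, y in pairs:
    slope, intercept = polyfit(x, y, 1)
    print('slope=%1.2f, intercept=%1.2f' % (slope, intercept))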
| 28.683333
| 85
| 0.578733
|
a1a4cdc0b6a81da470b6fc42321181ce1b72778b
| 4,272
|
py
|
Python
|
src/inference.py
|
by2101/OpenLAS
|
0acb30dae98ab89009a919ce86e064c943c51643
|
[
"Apache-2.0"
] | 6
|
2019-07-11T12:42:42.000Z
|
2020-12-25T07:24:38.000Z
|
src/inference.py
|
by2101/OpenLAS
|
0acb30dae98ab89009a919ce86e064c943c51643
|
[
"Apache-2.0"
] | null | null | null |
src/inference.py
|
by2101/OpenLAS
|
0acb30dae98ab89009a919ce86e064c943c51643
|
[
"Apache-2.0"
] | 1
|
2020-06-27T15:55:02.000Z
|
2020-06-27T15:55:02.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File  : inference
# @Author: Ye Bai
# @Date : 2019/4/2
import sys
import os
import argparse
import logging
import yaml
import torch
from data import SpeechDataset, FrameBasedSampler, Collate, load_vocab
from utils import Timer, str2bool
from encoder import BiRNN_Torch, BiRNN
from decoder import RNNDecoder
from model import LAS
import schedule
from trainer import Trainer
import utils
import pdb
logging.basicConfig(format='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s')
# logging.basicConfig(format='train.py [line:%(lineno)d] - %(levelname)s: %(message)s')
logger = logging.getLogger("train")
logger.setLevel(logging.INFO)
# logger = logging.getLogger()
# console = logging.StreamHandler(sys.stdout)
# formatter = logging.Formatter('train.py [line:%(lineno)d] - %(levelname)s: %(message)s')
# console.setFormatter(formatter)
# logger.addHandler(console)
def get_args():
    parser = argparse.ArgumentParser(description="""
        Usage: inference.py <data_file> <vocab_file> <model_file> <result_file> """)
parser.add_argument("data_file", help="path to data file (json format)")
parser.add_argument("vocab_file", help="path to vocab file")
parser.add_argument("model_file", help="path to model file")
parser.add_argument("result_file", help="path for writing decoding results.")
args = parser.parse_args()
return args
if __name__ == "__main__":
total_timer = Timer()
total_timer.tic()
args = get_args()
pkg = torch.load(args.model_file)
model_config = pkg['model_config']
vocab = load_vocab(args.vocab_file)
id2token = [None] * len(vocab)
for k, v in vocab.items():
id2token[v] = k
collate = Collate(model_config["left_context"],
model_config["right_context"],
model_config["skip_frame"],
model_config["norm_mean"],
model_config["norm_var"])
testset = SpeechDataset(args.data_file)
test_loader = torch.utils.data.DataLoader(testset, collate_fn=collate, shuffle=False)
# check dim match
if model_config["feat_dim"] != testset[0]["feat"]["dim"]:
raise ValueError(("Dim mismatch: "+
"model {} vs. feat {}.").format(model_config["feat_dim"], testset[0]["feat"]["dim"]))
model_load_timer = Timer()
model_load_timer.tic()
# build encoder and decoder
if model_config["encoder"]["type"] == "BiRNN":
encoder = BiRNN(model_config["encoder"])
elif model_config["encoder"]["type"] == "BiRNN_Torch":
encoder = BiRNN_Torch(model_config["encoder"])
else:
raise ValueError("Unknown encoder type.")
if model_config["decoder"]["type"] == "RNNDecoder":
decoder = RNNDecoder(model_config["decoder"])
else:
raise ValueError("Unknown decoder type.")
model = LAS(encoder, decoder, model_config)
model.load_state_dict(pkg['state_dict'])
model = model.cuda()
model.eval()
logger.info("Spend {:.3f} sec for building model..".format(model_load_timer.toc()))
model_load_timer.tic()
# model = model.cuda()
logger.info("Spend {:.3f} sec for loading model to gpu..".format(model_load_timer.toc()))
logger.info("Model: \n{}".format(model))
tot_utt = len(testset)
decode_cnt = 0
fw = open(args.result_file, 'w', encoding="utf8")
# batch_size is 1, only one sentence.
for utts, feats, feat_lengths, src_ids, src_lengths, tgt_ids, tgt_lengths in test_loader:
feats = feats.cuda()
feat_lengths = feat_lengths.cuda()
decode_cnt += 1
logger.info("Decoding {} [{}/{}]\n".format(utts[0], decode_cnt, tot_utt))
ref = " ".join([id2token[id] for id in tgt_ids.view(-1)])
best_hyp, ended_hyps = model.beam_search_sentence(feats, feat_lengths, id2token)
hyp = " ".join([id2token[id] for id in best_hyp['ids']])
logger.info("\nref:\t{}\nhyp:\t{}\n".format(ref, hyp))
res = "{} {}\n".format(utts[0], " ".join([id2token[id] for id in best_hyp['ids'][:-1]]))
fw.write(res)
fw.close()
logger.info("Finished. Total Time: {:.3f} hrs.".format(total_timer.toc()/3600.))
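# ------------------------------------------------------------------------------
# Hedged usage note (not part of the original script): get_args() above takes
# four positional arguments, so an invocation would look roughly like the line
# below; the file names are placeholders.
# ------------------------------------------------------------------------------
# python inference.py test.json vocab.txt final.pt results.txt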
| 32.610687
| 102
| 0.64162
|
134bd36b58d77a09cd9d844bfd485e9850dc7140
| 7,998
|
py
|
Python
|
matilda/data_pipeline/data_scapers/financial_statements_scraper/financial_statements_scraper.py
|
AlainDaccache/Quantropy
|
6cfa06ed2b764471382ebf94d40af867f10433bb
|
[
"MIT"
] | 45
|
2021-01-28T04:12:21.000Z
|
2022-02-24T13:15:50.000Z
|
matilda/data_pipeline/data_scapers/financial_statements_scraper/financial_statements_scraper.py
|
AlainDaccache/Quantropy
|
6cfa06ed2b764471382ebf94d40af867f10433bb
|
[
"MIT"
] | 32
|
2021-03-02T18:45:16.000Z
|
2022-03-12T00:53:10.000Z
|
matilda/data_pipeline/data_scapers/financial_statements_scraper/financial_statements_scraper.py
|
AlainDaccache/Quantropy
|
6cfa06ed2b764471382ebf94d40af867f10433bb
|
[
"MIT"
] | 10
|
2020-12-25T15:02:40.000Z
|
2021-12-30T11:40:15.000Z
|
import collections
import os
import re
import traceback
from datetime import datetime
import pandas as pd
from pprint import pprint
from matilda import config
import io
import json
from zope.interface import Interface
from matilda.data_pipeline.data_preparation_helpers import save_pretty_excel, read_dates_from_csv
class FinancialStatementsParserInterface(Interface):
regex_patterns: dict
def load_data_source(self, ticker: str) -> dict:
"""
Load in the file links
:param ticker:
:return: dictionary of format: frequency ('Quarterly' or 'Yearly')
-> financial statement ('Balance Sheet', 'Income Statement', 'Cash Flow Statement')
-> date (datetime)
-> link
"""
pass
def scrape_tables(self, url: str, filing_date: datetime, filing_type: str) -> dict:
"""Extract tables from the currently loaded file."""
pass
def normalize_tables(self, regex_patterns, filing_date, input_dict, visited_data_names) -> (dict, dict):
"""Standardize tables to match across years and companies"""
pass
date_regex = re.compile(r'^(0[1-9]|1[012])[- /.](0[1-9]|[12][0-9]|3[01])[- /.](19|20)\d\d$' # match mm/dd/yyyy
r'|'
r'^(0[1-9]|[12][0-9]|3[01])[- /.](0[1-9]|1[012])[- /.](19|20)\d\d$' # match dd-mm-yyyy
r'|'
r'^([^\s]+) (\d{2}),? ?(\d{4})$' # match Month D, Yr (i.e. February 17, 2009 or February 17,2009)
r'|'
r'^\d{4}$' # match year (i.e. 2011)
r'|'
                        r'Fiscal\d{4}'
r'|'
r'^Change$'
r'|'
r'(\b\d{1,2}\D{0,3})?\b(?:Jan(?:uary)?|Feb(?:ruary)?|Mar(?:ch)?|Apr(?:il)?|May|Jun(?:e)?|Jul(?:y)?|Aug(?:ust)?|Sep(?:tember)?|Oct(?:ober)?|(Nov|Dec)(?:ember)?)\D?(\d{1,2}\D?)?\D?((19[7-9]\d|20\d{2})|\d{2})')
# make sure delta between bigger and smaller is 3 months i.e. 3 and 6, 6 and 9, 9 and 12
def quarterlize_statements(smaller_period_df, bigger_period_df, existing_quarters):
global closest_smaller_period_index
df_output = pd.DataFrame()
for bigger_period_index, bigger_period_date in enumerate(bigger_period_df.columns):
if bigger_period_date not in existing_quarters:
try:
closest_smaller_period_index = next(
index for (index, item) in enumerate(smaller_period_df.columns) if item < bigger_period_date)
            except StopIteration:
                # no strictly earlier period exists; keep the last index found
                traceback.print_exc()
# print(closest_smaller_period_index)
df_output[bigger_period_date] = bigger_period_df[bigger_period_date] \
- smaller_period_df.iloc[:, closest_smaller_period_index]
# print(smaller_period_df.to_string())
return df_output
def scrape_financial_statements(scraper_interface_implementation, ticker: str, how_many_years: int = 2,
how_many_quarters: int = 8):
# if not FinancialStatementsParserInterface.implementedBy(scraper_interface_implementation):
# raise Exception
global quarterly_df
path = '{}/{}.xlsx'.format(config.FINANCIAL_STATEMENTS_DIR_PATH, ticker)
log_folder_path = os.path.join(config.DATA_DIR_PATH, 'logs')
if not os.path.exists(log_folder_path):
os.mkdir(log_folder_path)
company_log_path = os.path.join(log_folder_path, ticker)
if not os.path.exists(company_log_path):
os.mkdir(company_log_path)
dictio_period_year_table = {}
filing_dictio = scraper_interface_implementation().load_data_source(ticker=ticker)
for filing_type, statement_date_link in filing_dictio.items():
for statement, dates_and_links in statement_date_link.items():
# find missing dates from excel (this way we don't rescrape those that are there)
missing_dates_links = []
existing_dates = read_dates_from_csv(path, '{} {}'.format(statement, filing_type))
for date, link in dates_and_links:
formatted_date = datetime.strptime(date, '%Y-%m-%d')
if formatted_date not in existing_dates and formatted_date not in [x for x, y in missing_dates_links]:
missing_dates_links.append((formatted_date, statement, link))
missing_dates_links.sort(key=lambda tup: tup[0], reverse=True)
for index, (filing_date, filing_statement, link) in enumerate(missing_dates_links):
try:
if (index > how_many_years - 1 and filing_type == 'Yearly') \
or (index > how_many_quarters - 1 and filing_type == 'Quarterly'):
break
print(filing_date, link)
output = scraper_interface_implementation().scrape_tables(url=link, filing_date=filing_date,
filing_type=filing_type)
pprint(output)
for sheet_period, sheet_dict in output.items():
if sheet_period not in dictio_period_year_table.keys():
dictio_period_year_table[sheet_period] = {}
for year, title_dict in sheet_dict.items():
# if we don't have the year in our dictio that collects everything, just add all and go to next year of the output
if year not in dictio_period_year_table[sheet_period].keys():
dictio_period_year_table[sheet_period][year] = title_dict
continue
# else, we have a year, so we add up those two dicts together
for title, last_layer in title_dict.items(): # title / key:float
if title not in dictio_period_year_table[sheet_period][year].keys():
dictio_period_year_table[sheet_period][year][title] = last_layer
else:
dictio_period_year_table[sheet_period][year][title].update(last_layer)
except Exception:
traceback.print_exc()
# if same links across statements, break from loop, so you go to next filing type
with io.open(os.path.join(company_log_path, 'scraped_dictio.txt'), "w", encoding="utf-8") as f:
f.write(json.dumps(str(dictio_period_year_table)))
financials_dictio = {}
for sheet_period, sheet_dict in dictio_period_year_table.items():
visited_data_names = {}
if sheet_period not in financials_dictio.keys():
financials_dictio[sheet_period] = {}
for year, title_dict in sheet_dict.items():
if year not in financials_dictio[sheet_period].keys():
financials_dictio[sheet_period][year] = {}
visited_data_names, financials_dictio[sheet_period][year] = \
scraper_interface_implementation().normalize_tables(
regex_patterns=scraper_interface_implementation().regex_patterns, filing_date=year,
input_dict=title_dict, visited_data_names=visited_data_names)
# log = open(os.path.join(company_log_path, '{}_normalized_dictio.txt'.format(sheet_period)), "w")
# print(visited_data_names, file=log)
save_pretty_excel(path=path, financials_dictio=financials_dictio)
# if __name__ == '__main__':
# tickers = excel.get_stock_universe('DJIA')
# for ticker in ['AAPL', 'FB']:
# scrape_financial_statements(scraper_interface_implementation=html_scraper.HtmlParser,
# ticker=ticker, how_many_years=3, how_many_quarters=0)
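# ------------------------------------------------------------------------------
# Hedged sketch (not part of the original module): a minimal shape for a class
# that scrape_financial_statements() can drive. The class and its body are
# illustrative only -- real implementations (e.g. html_scraper.HtmlParser
# referenced above) live elsewhere in the package. The nested dictionary
# layouts in the comments mirror how the function above consumes them.
# ------------------------------------------------------------------------------
# class MyStatementsParser:
#     regex_patterns = {}  # passed back into normalize_tables()
#
#     def load_data_source(self, ticker):
#         # {'Yearly'|'Quarterly': {statement_name: [(date_str, link), ...]}}
#         return {}
#
#     def scrape_tables(self, url, filing_date, filing_type):
#         # {sheet_period: {year: {table_title: {entry_name: value}}}}
#         return {}
#
#     def normalize_tables(self, regex_patterns, filing_date, input_dict,
#                          visited_data_names):
#         return visited_data_names, {}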
| 49.37037
| 231
| 0.595274
|
e05697fc7062a204a1ec05348dad244e1eb321ed
| 13,179
|
py
|
Python
|
lttnganalyses/linuxautomaton/io.py
|
mjeanson/debian-lttnganalyses
|
d098edb86bd2b6c33f63aa388c305367302f37d7
|
[
"MIT"
] | null | null | null |
lttnganalyses/linuxautomaton/io.py
|
mjeanson/debian-lttnganalyses
|
d098edb86bd2b6c33f63aa388c305367302f37d7
|
[
"MIT"
] | null | null | null |
lttnganalyses/linuxautomaton/io.py
|
mjeanson/debian-lttnganalyses
|
d098edb86bd2b6c33f63aa388c305367302f37d7
|
[
"MIT"
] | null | null | null |
# The MIT License (MIT)
#
# Copyright (C) 2015 - Julien Desfossez <jdesfossez@efficios.com>
# 2015 - Antoine Busque <abusque@efficios.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import socket
from babeltrace import CTFScope
from . import sp, sv
from ..common import format_utils, trace_utils
class IoStateProvider(sp.StateProvider):
def __init__(self, state):
cbs = {
'syscall_entry': self._process_syscall_entry,
'syscall_exit': self._process_syscall_exit,
'syscall_entry_connect': self._process_connect,
'writeback_pages_written': self._process_writeback_pages_written,
'mm_vmscan_wakeup_kswapd': self._process_mm_vmscan_wakeup_kswapd,
'mm_page_free': self._process_mm_page_free
}
super().__init__(state, cbs)
def _process_syscall_entry(self, event):
# Only handle IO Syscalls
name = trace_utils.get_syscall_name(event)
if name not in sv.SyscallConsts.IO_SYSCALLS:
return
cpu_id = event['cpu_id']
if cpu_id not in self._state.cpus:
return
cpu = self._state.cpus[cpu_id]
if cpu.current_tid is None:
return
proc = self._state.tids[cpu.current_tid]
# check if we can fix the pid from a context
self._fix_context_pid(event, proc)
if name in sv.SyscallConsts.OPEN_SYSCALLS:
self._track_open(event, name, proc)
elif name in sv.SyscallConsts.CLOSE_SYSCALLS:
self._track_close(event, name, proc)
elif name in sv.SyscallConsts.READ_SYSCALLS or \
name in sv.SyscallConsts.WRITE_SYSCALLS:
self._track_read_write(event, name, proc)
elif name in sv.SyscallConsts.SYNC_SYSCALLS:
self._track_sync(event, name, proc)
def _process_syscall_exit(self, event):
cpu_id = event['cpu_id']
if cpu_id not in self._state.cpus:
return
cpu = self._state.cpus[cpu_id]
if cpu.current_tid is None:
return
proc = self._state.tids[cpu.current_tid]
current_syscall = proc.current_syscall
if current_syscall is None:
return
name = current_syscall.name
if name not in sv.SyscallConsts.IO_SYSCALLS:
return
self._track_io_rq_exit(event, proc)
proc.current_syscall = None
def _process_connect(self, event):
cpu_id = event['cpu_id']
if cpu_id not in self._state.cpus:
return
cpu = self._state.cpus[cpu_id]
if cpu.current_tid is None:
return
proc = self._state.tids[cpu.current_tid]
parent_proc = self._get_parent_proc(proc)
        # FIXME: handle on syscall_exit_connect only when successful
if 'family' in event and event['family'] == socket.AF_INET:
fd = event['fd']
if fd in parent_proc.fds:
parent_proc.fds[fd].filename = format_utils.format_ipv4(
event['v4addr'], event['dport']
)
def _process_writeback_pages_written(self, event):
for cpu in self._state.cpus.values():
if cpu.current_tid is None:
continue
current_syscall = self._state.tids[cpu.current_tid].current_syscall
if current_syscall is None:
continue
if current_syscall.io_rq:
current_syscall.io_rq.pages_written += event['pages']
def _process_mm_vmscan_wakeup_kswapd(self, event):
cpu_id = event['cpu_id']
if cpu_id not in self._state.cpus:
return
cpu = self._state.cpus[cpu_id]
if cpu.current_tid is None:
return
current_syscall = self._state.tids[cpu.current_tid].current_syscall
if current_syscall is None:
return
if current_syscall.io_rq:
current_syscall.io_rq.woke_kswapd = True
def _process_mm_page_free(self, event):
for cpu in self._state.cpus.values():
if cpu.current_tid is None:
continue
proc = self._state.tids[cpu.current_tid]
# if the current process is kswapd0, we need to
# attribute the page freed to the process that
# woke it up.
if proc.comm == 'kswapd0' and proc.prev_tid > 0:
proc = self._state.tids[proc.prev_tid]
current_syscall = proc.current_syscall
if current_syscall is None:
continue
if current_syscall.io_rq and current_syscall.io_rq.woke_kswapd:
current_syscall.io_rq.pages_freed += 1
def _track_open(self, event, name, proc):
current_syscall = proc.current_syscall
if name in sv.SyscallConsts.DISK_OPEN_SYSCALLS:
current_syscall.io_rq = sv.OpenIORequest.new_from_disk_open(
event, proc.tid)
elif name in ['accept', 'accept4']:
current_syscall.io_rq = sv.OpenIORequest.new_from_accept(
event, proc.tid)
elif name == 'socket':
current_syscall.io_rq = sv.OpenIORequest.new_from_socket(
event, proc.tid)
elif name in sv.SyscallConsts.DUP_OPEN_SYSCALLS:
self._track_dup(event, name, proc)
def _track_dup(self, event, name, proc):
current_syscall = proc.current_syscall
# If the process that triggered the io_rq is a thread,
# its FDs are that of the parent process
parent_proc = self._get_parent_proc(proc)
fds = parent_proc.fds
if name == 'dup':
oldfd = event['fildes']
elif name in ['dup2', 'dup3']:
oldfd = event['oldfd']
newfd = event['newfd']
if newfd in fds:
self._close_fd(parent_proc, newfd, event.timestamp,
event['cpu_id'])
elif name == 'fcntl':
# Only handle if cmd == F_DUPFD (0)
if event['cmd'] != 0:
return
oldfd = event['fd']
old_file = None
if oldfd in fds:
old_file = fds[oldfd]
current_syscall.io_rq = sv.OpenIORequest.new_from_old_fd(
event, proc.tid, old_file)
if name == 'dup3':
cloexec = event['flags'] & os.O_CLOEXEC == os.O_CLOEXEC
current_syscall.io_rq.cloexec = cloexec
def _track_close(self, event, name, proc):
proc.current_syscall.io_rq = sv.CloseIORequest(
event.timestamp, proc.tid, event['fd'])
def _track_read_write(self, event, name, proc):
current_syscall = proc.current_syscall
if name == 'splice':
current_syscall.io_rq = sv.ReadWriteIORequest.new_from_splice(
event, proc.tid)
return
elif name == 'sendfile64':
current_syscall.io_rq = sv.ReadWriteIORequest.new_from_sendfile64(
event, proc.tid)
return
if name in ['writev', 'pwritev', 'readv', 'preadv']:
size_key = 'vlen'
elif name == 'recvfrom':
size_key = 'size'
elif name == 'sendto':
size_key = 'len'
elif name in ['recvmsg', 'sendmsg']:
size_key = None
else:
size_key = 'count'
current_syscall.io_rq = sv.ReadWriteIORequest.new_from_fd_event(
event, proc.tid, size_key)
def _track_sync(self, event, name, proc):
current_syscall = proc.current_syscall
if name == 'sync':
current_syscall.io_rq = sv.SyncIORequest.new_from_sync(
event, proc.tid)
elif name in ['fsync', 'fdatasync']:
current_syscall.io_rq = sv.SyncIORequest.new_from_fsync(
event, proc.tid)
elif name == 'sync_file_range':
current_syscall.io_rq = sv.SyncIORequest.new_from_sync_file_range(
event, proc.tid)
def _track_io_rq_exit(self, event, proc):
ret = event['ret']
cpu_id = event['cpu_id']
io_rq = proc.current_syscall.io_rq
# io_rq can be None in the case of fcntl when cmd is not
# F_DUPFD, in which case we disregard the syscall as it did
# not open any FD
if io_rq is None:
return
io_rq.update_from_exit(event)
if ret >= 0:
self._create_fd(proc, io_rq, cpu_id)
parent_proc = self._get_parent_proc(proc)
self._state.send_notification_cb('io_rq_exit',
io_rq=io_rq,
proc=proc,
parent_proc=parent_proc,
cpu_id=cpu_id)
if isinstance(io_rq, sv.CloseIORequest) and ret == 0:
self._close_fd(proc, io_rq.fd, io_rq.end_ts, cpu_id)
def _create_fd(self, proc, io_rq, cpu_id):
parent_proc = self._get_parent_proc(proc)
if io_rq.fd is not None and io_rq.fd not in parent_proc.fds:
if isinstance(io_rq, sv.OpenIORequest):
parent_proc.fds[io_rq.fd] = sv.FD.new_from_open_rq(io_rq)
else:
parent_proc.fds[io_rq.fd] = sv.FD(io_rq.fd)
self._state.send_notification_cb('create_fd',
fd=io_rq.fd,
parent_proc=parent_proc,
timestamp=io_rq.end_ts,
cpu_id=cpu_id)
elif isinstance(io_rq, sv.ReadWriteIORequest):
if io_rq.fd_in is not None and io_rq.fd_in not in parent_proc.fds:
parent_proc.fds[io_rq.fd_in] = sv.FD(io_rq.fd_in)
self._state.send_notification_cb('create_fd',
fd=io_rq.fd_in,
parent_proc=parent_proc,
timestamp=io_rq.end_ts,
cpu_id=cpu_id)
if io_rq.fd_out is not None and \
io_rq.fd_out not in parent_proc.fds:
parent_proc.fds[io_rq.fd_out] = sv.FD(io_rq.fd_out)
self._state.send_notification_cb('create_fd',
fd=io_rq.fd_out,
parent_proc=parent_proc,
timestamp=io_rq.end_ts,
cpu_id=cpu_id)
def _close_fd(self, proc, fd, timestamp, cpu_id):
parent_proc = self._get_parent_proc(proc)
self._state.send_notification_cb('close_fd',
fd=fd,
parent_proc=parent_proc,
timestamp=timestamp,
cpu_id=cpu_id)
del parent_proc.fds[fd]
def _get_parent_proc(self, proc):
if proc.pid is not None and proc.tid != proc.pid:
parent_proc = self._state.tids[proc.pid]
else:
parent_proc = proc
return parent_proc
def _fix_context_pid(self, event, proc):
for context in event.field_list_with_scope(
CTFScope.STREAM_EVENT_CONTEXT):
if context != 'pid':
continue
# make sure the 'pid' field is not also in the event
# payload, otherwise we might clash
for context in event.field_list_with_scope(
CTFScope.EVENT_FIELDS):
if context == 'pid':
return
if proc.pid is None:
proc.pid = event['pid']
if event['pid'] != proc.tid:
proc.pid = event['pid']
parent_proc = sv.Process(proc.pid, proc.pid, proc.comm,
proc.prio)
self._state.tids[parent_proc.pid] = parent_proc
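# ------------------------------------------------------------------------------
# Hedged sketch (not part of the module): IoStateProvider follows the pattern of
# mapping tracepoint names to callbacks and handing the table to the base class.
# A hypothetical minimal provider wired the same way would look roughly like
# this; the event name and callback body are illustrative only.
# ------------------------------------------------------------------------------
# class ExampleStateProvider(sp.StateProvider):
#     def __init__(self, state):
#         cbs = {
#             'sched_switch': self._process_sched_switch,
#         }
#         super().__init__(state, cbs)
#
#     def _process_sched_switch(self, event):
#         cpu_id = event['cpu_id']
#         if cpu_id not in self._state.cpus:
#             return
#         # update self._state here, mirroring the guards used above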
| 37.979827
| 79
| 0.573716
|
0cc17f72abcfa8707ab2a24d38c907371905c221
| 21,497
|
py
|
Python
|
geomstats/geometry/discrete_curves.py
|
tfunatomi/geomstats
|
a5651680f98dea95c1f82a48af1a6dccf3e26bd1
|
[
"MIT"
] | 2
|
2020-01-23T04:01:02.000Z
|
2020-08-18T19:20:27.000Z
|
geomstats/geometry/discrete_curves.py
|
tfunatomi/geomstats
|
a5651680f98dea95c1f82a48af1a6dccf3e26bd1
|
[
"MIT"
] | null | null | null |
geomstats/geometry/discrete_curves.py
|
tfunatomi/geomstats
|
a5651680f98dea95c1f82a48af1a6dccf3e26bd1
|
[
"MIT"
] | null | null | null |
"""Parameterized curves on any given manifold."""
import math
import geomstats.backend as gs
from geomstats.geometry.euclidean import Euclidean
from geomstats.geometry.euclidean import EuclideanMetric
from geomstats.geometry.landmarks import L2Metric
from geomstats.geometry.manifold import Manifold
from geomstats.geometry.riemannian_metric import RiemannianMetric
R2 = Euclidean(dim=2)
R3 = Euclidean(dim=3)
class DiscreteCurves(Manifold):
r"""Space of discrete curves sampled at points in ambient_manifold.
Each individual curve is represented by a 2d-array of shape `[
n_sampling_points, ambient_dim]`. A Batch of curves can be passed to
all methods either as a 3d-array if all curves have the same number of
sampled points, or as a list of 2d-arrays, each representing a curve.
Parameters
----------
ambient_manifold : Manifold
Manifold in which curves take values.
Attributes
----------
ambient_manifold : Manifold
Manifold in which curves take values.
l2_metric : callable
Function that takes as argument an integer number of sampled points
and returns the corresponding L2 metric (product) metric,
a RiemannianMetric object
square_root_velocity_metric : RiemannianMetric
Square root velocity metric.
"""
def __init__(self, ambient_manifold):
super(DiscreteCurves, self).__init__(dim=math.inf)
self.ambient_manifold = ambient_manifold
self.l2_metric = lambda n: L2Metric(
self.ambient_manifold, n_landmarks=n)
self.square_root_velocity_metric = SRVMetric(self.ambient_manifold)
def belongs(self, point, atol=gs.atol):
"""Test whether a point belongs to the manifold.
Test that all points of the curve belong to the ambient manifold.
Parameters
----------
point : array-like, shape=[..., n_sampling_points, ambient_dim]
Point representing a discrete curve.
atol : float
Absolute tolerance.
Optional, default: backend atol.
Returns
-------
belongs : bool
Boolean evaluating if point belongs to the space of discrete
curves.
"""
def each_belongs(pt):
return gs.all(self.ambient_manifold.belongs(pt))
if isinstance(point, list) or point.ndim > 2:
return gs.stack([each_belongs(pt) for pt in point])
return each_belongs(point)
def is_tangent(self, vector, base_point, atol=gs.atol):
"""Check whether the vector is tangent at a curve.
A vector is tangent at a curve if it is a vector field along that
curve.
Parameters
----------
vector : array-like, shape=[..., n_sampling_points, ambient_dim]
Vector.
base_point : array-like, shape=[..., n_sampling_points, ambient_dim]
Discrete curve.
atol : float
Absolute tolerance.
Optional, default: backend atol.
Returns
-------
is_tangent : bool
Boolean denoting if vector is a tangent vector at the base point.
"""
ambient_manifold = self.ambient_manifold
shape = vector.shape
stacked_vec = gs.reshape(vector, (-1, shape[-1]))
stacked_point = gs.reshape(base_point, (-1, shape[-1]))
is_tangent = ambient_manifold.is_tangent(
stacked_vec, stacked_point, atol)
is_tangent = gs.reshape(is_tangent, shape[:-1])
return gs.all(is_tangent, axis=-1)
def to_tangent(self, vector, base_point):
"""Project a vector to a tangent space of the manifold.
As tangent vectors are vector fields along a curve, each component of
the vector is projected to the tangent space of the corresponding
point of the discrete curve. The number of sampling points should
match in the vector and the base_point.
Parameters
----------
vector : array-like, shape=[..., n_sampling_points, ambient_dim]
Vector.
base_point : array-like, shape=[..., n_sampling_points, ambient_dim]
Discrete curve.
Returns
-------
tangent_vec : array-like, shape=[..., n_sampling_points, ambient_dim]
Tangent vector at base point.
"""
ambient_manifold = self.ambient_manifold
shape = vector.shape
stacked_vec = gs.reshape(vector, (-1, shape[-1]))
stacked_point = gs.reshape(base_point, (-1, shape[-1]))
tangent_vec = ambient_manifold.to_tangent(stacked_vec, stacked_point)
tangent_vec = gs.reshape(tangent_vec, vector.shape)
return tangent_vec
def random_point(self, n_samples=1, bound=1., n_sampling_points=10):
"""Sample random curves.
If the ambient manifold is compact, a uniform distribution is used.
Parameters
----------
n_samples : int
Number of samples.
Optional, default: 1.
bound : float
Bound of the interval in which to sample for non compact
ambient manifolds.
Optional, default: 1.
n_sampling_points : int
Number of sampling points for the discrete curves.
Optional, default : 10.
Returns
-------
samples : array-like, shape=[..., n_sampling_points, {dim, [n, n]}]
            Sampled discrete curves.
"""
sample = self.ambient_manifold.random_point(
n_samples * n_sampling_points)
sample = gs.reshape(sample, (n_samples, n_sampling_points, -1))
return sample[0] if n_samples == 1 else sample
class SRVMetric(RiemannianMetric):
"""Elastic metric defined using the Square Root Velocity Function.
See [Sea2011]_ for details.
Parameters
----------
ambient_manifold : Manifold
Manifold in which curves take values.
metric : RiemannianMetric
Metric to use on the ambient manifold. If None is passed, ambient
manifold should have a metric attribute, which will be used.
Optional, default : None.
References
----------
.. [Sea2011] A. Srivastava, E. Klassen, S. H. Joshi and I. H. Jermyn,
"Shape Analysis of Elastic Curves in Euclidean Spaces,"
in IEEE Transactions on Pattern Analysis and Machine Intelligence,
vol. 33, no. 7, pp. 1415-1428, July 2011.
"""
def __init__(self, ambient_manifold, metric=None):
super(SRVMetric, self).__init__(dim=math.inf,
signature=(math.inf, 0, 0))
if metric is None:
if hasattr(ambient_manifold, 'metric'):
self.ambient_metric = ambient_manifold.metric
else:
raise ValueError('Instantiating an object of class '
'DiscreteCurves requires either a metric'
' or an ambient manifold'
' equipped with a metric.')
else:
self.ambient_metric = metric
self.l2_metric = lambda n: L2Metric(ambient_manifold, n_landmarks=n)
def pointwise_inner_product(self, tangent_vec_a, tangent_vec_b,
base_curve):
"""Compute the pointwise inner product of pair of tangent vectors.
Compute the point-wise inner-product between two tangent vectors
at a base curve.
Parameters
----------
tangent_vec_a : array-like, shape=[..., n_sampling_points, ambient_dim]
Tangent vector to discrete curve.
tangent_vec_b : array-like, shape=[..., n_sampling_points, ambient_dim]
Tangent vector to discrete curve.
base_curve : array-like, shape=[..., n_sampling_points, ambient_dim]
Point representing a discrete curve.
Returns
-------
inner_prod : array-like, shape=[..., n_sampling_points]
Point-wise inner-product.
"""
def inner_prod_aux(vec_a, vec_b, curve):
inner_prod = self.ambient_metric.inner_product(vec_a, vec_b, curve)
return gs.squeeze(inner_prod)
inner_prod = gs.vectorize(
(tangent_vec_a, tangent_vec_b, base_curve),
inner_prod_aux,
dtype=gs.float32,
multiple_args=True,
signature='(i,j),(i,j),(i,j)->(i)')
return inner_prod
def pointwise_norm(self, tangent_vec, base_curve):
"""Compute the point-wise norm of a tangent vector at a base curve.
Parameters
----------
tangent_vec : array-like, shape=[..., n_sampling_points, ambient_dim]
Tangent vector to discrete curve.
base_curve : array-like, shape=[..., n_sampling_points, ambient_dim]
Point representing a discrete curve.
Returns
-------
norm : array-like, shape=[..., n_sampling_points]
Point-wise norms.
"""
sq_norm = self.pointwise_inner_product(
tangent_vec_a=tangent_vec, tangent_vec_b=tangent_vec,
base_curve=base_curve)
return gs.sqrt(sq_norm)
def square_root_velocity(self, curve):
"""Compute the square root velocity representation of a curve.
The velocity is computed using the log map. In the case of several
curves, an index selection procedure allows to get rid of the log
between the end point of curve[k, :, :] and the starting point of
curve[k + 1, :, :].
Parameters
----------
curve : array-like, shape=[..., n_sampling_points, ambient_dim]
Discrete curve.
Returns
-------
srv : array-like, shape=[..., n_sampling_points - 1, ambient_dim]
Square-root velocity representation of a discrete curve.
"""
curve = gs.to_ndarray(curve, to_ndim=3)
n_curves, n_sampling_points, n_coords = curve.shape
srv_shape = (n_curves, n_sampling_points - 1, n_coords)
curve = gs.reshape(curve, (n_curves * n_sampling_points, n_coords))
coef = gs.cast(gs.array(n_sampling_points - 1), gs.float32)
velocity = coef * self.ambient_metric.log(point=curve[1:, :],
base_point=curve[:-1, :])
velocity_norm = self.ambient_metric.norm(velocity, curve[:-1, :])
srv = gs.einsum(
'...i,...->...i', velocity, 1. / gs.sqrt(velocity_norm))
index = gs.arange(n_curves * n_sampling_points - 1)
mask = ~((index + 1) % n_sampling_points == 0)
srv = gs.reshape(srv[mask], srv_shape)
return srv
def square_root_velocity_inverse(self, srv, starting_point):
"""Retrieve a curve from sqrt velocity rep and starting point.
Parameters
----------
srv : array-like, shape=[..., n_sampling_points - 1, ambient_dim]
Square-root velocity representation of a discrete curve.
starting_point : array-like, shape=[..., ambient_dim]
Point of the ambient manifold to use as start of the retrieved
curve.
Returns
-------
curve : array-like, shape=[..., n_sampling_points, ambient_dim]
Curve retrieved from its square-root velocity.
"""
if not isinstance(self.ambient_metric, EuclideanMetric):
raise AssertionError('The square root velocity inverse is only '
'implemented for discrete curves embedded '
'in a Euclidean space.')
if gs.ndim(srv) != gs.ndim(starting_point):
starting_point = gs.to_ndarray(
starting_point, to_ndim=srv.ndim, axis=1)
srv_shape = srv.shape
srv = gs.to_ndarray(srv, to_ndim=3)
n_curves, n_sampling_points_minus_one, n_coords = srv.shape
srv = gs.reshape(srv,
(n_curves * n_sampling_points_minus_one, n_coords))
srv_norm = self.ambient_metric.norm(srv)
delta_points = gs.einsum(
'...,...i->...i', 1 / n_sampling_points_minus_one * srv_norm, srv)
delta_points = gs.reshape(delta_points, srv_shape)
curve = gs.concatenate((starting_point, delta_points), -2)
curve = gs.cumsum(curve, -2)
return curve
def exp(self, tangent_vec, base_point):
"""Compute Riemannian exponential of tangent vector wrt to base curve.
Parameters
----------
tangent_vec : array-like, shape=[..., n_sampling_points, ambient_dim]
Tangent vector to discrete curve.
base_point : array-like, shape=[..., n_sampling_points, ambient_dim]
Discrete curve.
Return
------
end_curve : array-like, shape=[..., n_sampling_points, ambient_dim]
Discrete curve, result of the Riemannian exponential.
"""
if not isinstance(self.ambient_metric, EuclideanMetric):
raise AssertionError('The exponential map is only implemented '
'for discrete curves embedded in a '
'Euclidean space.')
base_point = gs.to_ndarray(base_point, to_ndim=3)
tangent_vec = gs.to_ndarray(tangent_vec, to_ndim=3)
n_sampling_points = base_point.shape[1]
base_curve_srv = self.square_root_velocity(base_point)
tangent_vec_derivative = (n_sampling_points - 1) * (
tangent_vec[:, 1:, :] - tangent_vec[:, :-1, :])
base_curve_velocity = (n_sampling_points - 1) * (
base_point[:, 1:, :] - base_point[:, :-1, :])
base_curve_velocity_norm = self.pointwise_norm(
base_curve_velocity, base_point[:, :-1, :])
inner_prod = self.pointwise_inner_product(
tangent_vec_derivative, base_curve_velocity, base_point[:, :-1, :])
coef_1 = 1 / gs.sqrt(base_curve_velocity_norm)
coef_2 = -1 / (2 * base_curve_velocity_norm**(5 / 2)) * inner_prod
term_1 = gs.einsum('ij,ijk->ijk', coef_1, tangent_vec_derivative)
term_2 = gs.einsum('ij,ijk->ijk', coef_2, base_curve_velocity)
srv_initial_derivative = term_1 + term_2
end_curve_srv = self.l2_metric(n_sampling_points - 1).exp(
tangent_vec=srv_initial_derivative, base_point=base_curve_srv)
end_curve_starting_point = self.ambient_metric.exp(
tangent_vec=tangent_vec[:, 0, :], base_point=base_point[:, 0, :])
end_curve = self.square_root_velocity_inverse(
end_curve_srv, end_curve_starting_point)
return end_curve
def log(self, point, base_point):
"""Compute Riemannian logarithm of a curve wrt a base curve.
Parameters
----------
point : array-like, shape=[..., n_sampling_points, ambient_dim]
Discrete curve.
base_point : array-like, shape=[..., n_sampling_points, ambient_dim]
Discrete curve to use as base point.
Returns
-------
log : array-like, shape=[..., n_sampling_points, ambient_dim]
Tangent vector to a discrete curve.
"""
if not isinstance(self.ambient_metric, EuclideanMetric):
raise AssertionError('The logarithm map is only implemented '
'for discrete curves embedded in a '
'Euclidean space.')
point = gs.to_ndarray(point, to_ndim=3)
base_point = gs.to_ndarray(base_point, to_ndim=3)
n_curves, n_sampling_points, n_coords = point.shape
curve_srv = self.square_root_velocity(point)
base_curve_srv = self.square_root_velocity(base_point)
base_curve_velocity = (n_sampling_points - 1) * (base_point[:, 1:, :] -
base_point[:, :-1, :])
base_curve_velocity_norm = self.pointwise_norm(base_curve_velocity,
base_point[:, :-1, :])
inner_prod = self.pointwise_inner_product(curve_srv - base_curve_srv,
base_curve_velocity,
base_point[:, :-1, :])
coef_1 = gs.sqrt(base_curve_velocity_norm)
coef_2 = 1 / base_curve_velocity_norm**(3 / 2) * inner_prod
term_1 = gs.einsum('ij,ijk->ijk', coef_1, curve_srv - base_curve_srv)
term_2 = gs.einsum('ij,ijk->ijk', coef_2, base_curve_velocity)
log_derivative = term_1 + term_2
log_starting_points = self.ambient_metric.log(
point=point[:, 0, :], base_point=base_point[:, 0, :])
log_starting_points = gs.to_ndarray(
log_starting_points, to_ndim=3, axis=1)
log_cumsum = gs.hstack(
[gs.zeros((n_curves, 1, n_coords)),
gs.cumsum(log_derivative, -2)])
log = log_starting_points + 1 / (n_sampling_points - 1) * log_cumsum
return log
def geodesic(self,
initial_curve,
end_curve=None,
                 initial_tangent_vec=None):
        """Compute geodesic from initial curve to end curve.
        The geodesic is specified either by an initial curve and an end curve,
        or by an initial curve and an initial tangent vector.
Parameters
----------
initial_curve : array-like, shape=[..., n_sampling_points, ambient_dim]
Discrete curve.
end_curve : array-like, shape=[..., n_sampling_points, ambient_dim]
Discrete curve. If None, an initial tangent vector must be given.
Optional, default : None
initial_tangent_vec : array-like,
shape=[..., n_sampling_points, ambient_dim]
Tangent vector at base curve, the initial speed of the geodesics.
If None, an end curve must be given and a logarithm is computed.
Optional, default : None
Returns
-------
curve_on_geodesic : callable
The time parameterized geodesic curve.
"""
if not isinstance(self.ambient_metric, EuclideanMetric):
raise AssertionError('The geodesics are only implemented for '
'discrete curves embedded in a '
'Euclidean space.')
curve_ndim = 2
initial_curve = gs.to_ndarray(initial_curve, to_ndim=curve_ndim + 1)
if end_curve is None and initial_tangent_vec is None:
raise ValueError('Specify an end curve or an initial tangent '
'vector to define the geodesic.')
if end_curve is not None:
end_curve = gs.to_ndarray(end_curve, to_ndim=curve_ndim + 1)
shooting_tangent_vec = self.log(point=end_curve,
base_point=initial_curve)
if initial_tangent_vec is not None:
if not gs.allclose(shooting_tangent_vec, initial_tangent_vec):
raise RuntimeError(
'The shooting tangent vector is too'
' far from the initial tangent vector.')
initial_tangent_vec = shooting_tangent_vec
initial_tangent_vec = gs.array(initial_tangent_vec)
initial_tangent_vec = gs.to_ndarray(initial_tangent_vec,
to_ndim=curve_ndim + 1)
def curve_on_geodesic(t):
t = gs.cast(t, gs.float32)
t = gs.to_ndarray(t, to_ndim=1)
t = gs.to_ndarray(t, to_ndim=2, axis=1)
new_initial_curve = gs.to_ndarray(initial_curve,
to_ndim=curve_ndim + 1)
new_initial_tangent_vec = gs.to_ndarray(initial_tangent_vec,
to_ndim=curve_ndim + 1)
tangent_vecs = gs.einsum('il,nkm->ikm', t, new_initial_tangent_vec)
curve_at_time_t = []
for tan_vec in tangent_vecs:
curve_at_time_t.append(
self.exp(tan_vec, new_initial_curve))
return gs.stack(curve_at_time_t)
return curve_on_geodesic
def dist(self, point_a, point_b, **kwargs):
"""Geodesic distance between two curves.
Parameters
----------
point_a : array-like, shape=[..., n_sampling_points, ambient_dim]
Discrete curve.
point_b : array-like, shape=[..., n_sampling_points, ambient_dim]
Discrete curve.
Returns
-------
dist : array-like, shape=[...,]
"""
if not isinstance(self.ambient_metric, EuclideanMetric):
raise AssertionError('The distance is only implemented for '
'discrete curves embedded in a '
'Euclidean space.')
if point_a.shape != point_b.shape:
raise ValueError('The curves need to have the same shapes.')
srv_a = self.square_root_velocity(point_a)
srv_b = self.square_root_velocity(point_b)
n_sampling_points = srv_a.shape[-2]
dist_starting_points = self.ambient_metric.dist(
point_a[0, :], point_b[0, :])
dist_srvs = self.l2_metric(n_sampling_points).dist(srv_a, srv_b)
dist = gs.sqrt(dist_starting_points**2 + dist_srvs**2)
return dist
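# ------------------------------------------------------------------------------
# Hedged usage sketch (not part of the module): with the R2 plane defined above,
# two planar curves sampled on the same grid can be compared under the square
# root velocity metric roughly as follows; the curves themselves are arbitrary
# examples.
# ------------------------------------------------------------------------------
# space = DiscreteCurves(ambient_manifold=R2)
# t = gs.linspace(0., 1., 20)
# curve_a = gs.stack([t, t ** 2], axis=-1)      # shape (n_sampling_points, 2)
# curve_b = gs.stack([t, gs.sin(t)], axis=-1)
# assert space.belongs(curve_a)
# srv_metric = space.square_root_velocity_metric
# print(srv_metric.dist(curve_a, curve_b))      # SRV geodesic distance
# geodesic = srv_metric.geodesic(initial_curve=curve_a, end_curve=curve_b)
# print(geodesic(gs.linspace(0., 1., 5)).shape)  # curves along the geodesic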
| 40.483992
| 79
| 0.600223
|
ab8574459e236c6b48f5095cb8c81840118839f4
| 8,851
|
py
|
Python
|
dosagelib/plugins/f.py
|
acaranta/dosage
|
6e14e8709b3b213fdc07a2106464860e1cb99481
|
[
"MIT"
] | null | null | null |
dosagelib/plugins/f.py
|
acaranta/dosage
|
6e14e8709b3b213fdc07a2106464860e1cb99481
|
[
"MIT"
] | null | null | null |
dosagelib/plugins/f.py
|
acaranta/dosage
|
6e14e8709b3b213fdc07a2106464860e1cb99481
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (C) 2004-2008 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam
# Copyright (C) 2015-2020 Tobias Gruetzmacher
from __future__ import absolute_import, division, print_function
from re import compile, escape
from ..util import tagre
from ..scraper import _BasicScraper, _ParserScraper
from ..helpers import indirectStarter, joinPathPartsNamer, xpath_class
from .common import _ComicControlScraper, _WPNaviIn, _WordPressScraper
class FalconTwin(_BasicScraper):
url = 'http://www.falcontwin.com/'
stripUrl = url + 'index.html?strip=%s'
firstStripUrl = stripUrl % '0'
imageSearch = compile(r'"(strips/.+?)"')
prevSearch = compile(r'"prev"><a href="(index.+?)"')
help = 'Index format: nnn'
class Faneurysm(_WPNaviIn):
url = 'http://hijinksensue.com/comic/think-only-tree/'
firstStripUrl = 'http://hijinksensue.com/comic/captains-prerogative/'
endOfLife = True
class FantasyRealms(_ParserScraper):
stripUrl = ('https://web.archive.org/web/20161204192651/'
'http://fantasyrealmsonline.com/manga/%s.php')
url = stripUrl % '091'
firstStripUrl = stripUrl % '001'
imageSearch = '//img[contains(@src, "/manga/0")]'
prevSearch = '//a[img[contains(@src, "nav-back")]]'
endOfLife = True
help = 'Index format: nnn'
class FarToTheNorth(_ComicControlScraper):
url = 'http://www.farnorthcomic.com/'
stripUrl = url + 'comic/%s'
firstStripUrl = stripUrl % 'don39t-tell'
class FauxPas(_ParserScraper):
url = 'http://www.ozfoxes.net/cgi/pl-fp1.cgi'
stripUrl = url + '?%s'
firstStripUrl = stripUrl % '1'
imageSearch = '//img[@name]'
prevSearch = '//a[img[@alt="Previous"]]'
help = 'Index format: nnn'
class FireflyCross(_WordPressScraper):
url = 'http://www.fireflycross.pensandtales.com/'
firstStripUrl = url + '?comic=05062002'
class FirstWorldProblems(_ParserScraper):
url = ('https://web.archive.org/web/20150710053456/'
'http://bradcolbow.com/archive/C5/')
stripUrl = url + '%s/'
firstStripUrl = stripUrl % 'P10'
imageSearch = '//div[{}]//img'.format(xpath_class('entry'))
prevSearch = '//a[{}]'.format(xpath_class('prev'))
multipleImagesPerStrip = True
endOfLife = True
class FlakyPastry(_BasicScraper):
baseUrl = 'http://flakypastry.runningwithpencils.com/'
url = baseUrl + 'index.php'
stripUrl = baseUrl + 'comic.php?strip_id=%s'
firstStripUrl = stripUrl % '0'
imageSearch = compile(r'<img src="(comics/.+?)"')
prevSearch = compile(r'<a href="(.+?)".+?btn_back')
help = 'Index format: nnnn'
class Flemcomics(_ParserScraper):
url = ('https://web.archive.org/web/20180414110349/'
'http://www.flemcomics.com/')
stripUrl = url + 'd/%s.html'
firstStripUrl = stripUrl % '19980101'
imageSearch = '//img[{}]'.format(xpath_class('ksc'))
prevSearch = '//a[@rel="prev"]'
endOfLife = True
help = 'Index format: yyyymmdd'
class Flipside(_ParserScraper):
url = 'http://flipside.keenspot.com/comic.php'
stripUrl = url + '?i=%s'
firstStripUrl = stripUrl % '1'
imageSearch = '//img[contains(@src, "comic/")]'
prevSearch = '//a[@rel="prev"]'
help = 'Index format: nnnn'
class FonFlatter(_ParserScraper):
url = 'https://www.fonflatter.de/'
stripUrl = url + '%s/'
firstStripUrl = url + '2005/09/20/01-begegnung-mit-batman/'
lang = 'de'
imageSearch = r'//img[re:test(@src, "/fred_\d+")]'
prevSearch = '//a[@rel="prev"]'
help = 'Index format: yyyy/mm/dd/number-stripname'
def shouldSkipUrl(self, url, data):
return url in (
self.stripUrl % "2006/11/30/adventskalender",
self.stripUrl % "2006/09/21/danke",
self.stripUrl % "2006/08/23/zgf-zuweilen-gestellte-fragen",
self.stripUrl % "2005/10/19/naq-never-asked-questions",
)
class ForestHill(_WordPressScraper):
url = 'https://www.foresthillcomic.org/'
class ForLackOfABetterComic(_BasicScraper):
url = 'http://forlackofabettercomic.com/'
rurl = r'http://(?:www\.)?forlackofabettercomic\.com/'
stripUrl = url + '?id=%s'
firstStripUrl = stripUrl % '1'
imageSearch = compile(tagre("img", "src", r'(%simg/comic/\d+[^"]+)' % rurl, after="comicimg"))
prevSearch = compile(tagre("a", "href", r'(%s\?id\=\d+)' % rurl) + r'Prev')
help = 'Index format: number'
class FoxDad(_ParserScraper):
url = 'https://foxdad.com/'
stripUrl = url + 'post/%s'
firstStripUrl = stripUrl % '149683014997/some-people-are-just-different-support-the-comic'
imageSearch = '//figure[@class="photo-hires-item"]//img'
prevSearch = '//a[@class="previous-button"]'
def namer(self, imageUrl, pageUrl):
page = self.getPage(pageUrl)
post = page.xpath('//link[@type="application/json+oembed"]')[0].get('href')
post = post.replace('https://www.tumblr.com/oembed/1.0?url=https://foxdad.com/post', '')
post = post.replace('-support-me-on-patreon', '')
return post.replace('/', '-')
class FoxTails(_ParserScraper):
stripUrl = 'http://foxtails.magickitsune.com/strips/%s.html'
url = stripUrl % 'current'
firstStripUrl = stripUrl % '20041024'
imageSearch = '//img[contains(@src, "img/2")]'
prevSearch = '//a[./img[contains(@src, "prev")]]'
endOfLife = True
def getPrevUrl(self, url, data):
# Include pre-reboot archive
if url == self.stripUrl % '20090906':
return self.stripUrl % '20090704'
return super(FoxTails, self).getPrevUrl(url, data)
class Fragile(_ParserScraper):
url = ('https://web.archive.org/web/20190308203109/'
'http://www.fragilestory.com/')
imageSearch = '//div[@id="comic_strip"]/a[@class="nobg"]/img'
prevSearch = '//div[@id="nav_comic_a"]/a[2]'
firstStripUrl = url + 'strips/chapter_01'
endOfLife = True
class FredoAndPidjin(_ParserScraper):
url = 'https://www.pidjin.net/'
stripUrl = url + '%s/'
firstStripUrl = stripUrl % '2006/02/19/goofy-monday'
imageSearch = '//div[%s]//img' % xpath_class("episode")
multipleImagesPerStrip = True
prevSearch = '//span[%s]/a' % xpath_class("prev")
latestSearch = '//section[%s]//a' % xpath_class("latest")
starter = indirectStarter
namer = joinPathPartsNamer((0, 1, 2))
class Freefall(_ParserScraper):
url = 'http://freefall.purrsia.com/'
stripUrl = url + 'ff%d/%s%05d.htm'
firstStripUrl = stripUrl % (100, 'fv', 1)
imageSearch = '//img[contains(@src, "/ff")]'
prevSearch = '//a[text()="Previous"]'
multipleImagesPerStrip = True
def getIndexStripUrl(self, index):
# Get comic strip URL from index
index = int(index)
chapter = index + 100 - (index % 100)
color = 'fc' if index > 1252 else 'fv'
return self.stripUrl % (chapter, color, index)
class FreighterTails(_ParserScraper):
url = 'http://www.mzzkiti.com/'
stripUrl = url + 'log%s.htm'
firstStripUrl = stripUrl % '001'
imageSearch = ('//img[contains(@src, "Strip")]',
'//img[contains(@src, "Caption")]')
prevSearch = '//a[./img[contains(@src, "prev")]]'
endOfLife = True
class FullFrontalNerdity(_BasicScraper):
url = 'http://ffn.nodwick.com/'
rurl = escape(url)
stripUrl = url + '?p=%s'
firstStripUrl = stripUrl % '6'
imageSearch = compile(tagre("img", "src", r'(%sffnstrips/\d+-\d+-\d+\.[^"]+)' % rurl))
prevSearch = compile(tagre("a", "href", r'(%s\?p=\d+)' % rurl, after="prev"))
help = 'Index format: number'
class FunInJammies(_WordPressScraper):
url = ('https://web.archive.org/web/20170205105241/'
'http://funinjammies.com/')
stripUrl = url + 'comic.php?issue=%s'
firstStripUrl = stripUrl % '1'
prevSearch = '//a[text()="< Prev"]'
endOfLife = True
help = 'Index format: n (unpadded)'
class FurPiled(_ParserScraper):
stripUrl = ('https://web.archive.org/web/20160404074145/'
'http://www.liondogworks.com/images/fp-%03d.jpg')
url = stripUrl % 427
firstStripUrl = stripUrl % 1
endOfLife = True
def getPrevUrl(self, url, data):
# Skip missing pages
nextStrip = int(url.rsplit('/', 1)[-1].split('.', 1)[0].replace('fp-', '')) - 1
if nextStrip in [407, 258, 131, 110, 97, 31]:
nextStrip = nextStrip - 1
return self.stripUrl % nextStrip
def fetchUrls(self, url, data, urlSearch):
# URLs are direct links to images
return [url]
class FurthiaHigh(_ParserScraper):
url = 'http://furthiahigh.concessioncomic.com/'
stripUrl = url + 'index.php?pid=%s'
firstStripUrl = stripUrl % '20080128'
imageSearch = '//img[contains(@alt, "Comic")]'
prevSearch = '//a[./img[@alt="Previous"]]'
multipleImagesPerStrip = True
| 34.439689
| 98
| 0.632358
|
fd38dbc87d8d3af01fbe1c4b069db63ac7fa2606
| 1,268
|
py
|
Python
|
examples/ifft/phenpv3_speed_test.py
|
LBJ-Wade/phenom
|
8f0fdc14099dac09cb2eef36d825e577340a8421
|
[
"MIT"
] | null | null | null |
examples/ifft/phenpv3_speed_test.py
|
LBJ-Wade/phenom
|
8f0fdc14099dac09cb2eef36d825e577340a8421
|
[
"MIT"
] | null | null | null |
examples/ifft/phenpv3_speed_test.py
|
LBJ-Wade/phenom
|
8f0fdc14099dac09cb2eef36d825e577340a8421
|
[
"MIT"
] | null | null | null |
import phenom
import numpy as np
from phenom.utils.utils import Constants, HztoMf
import lal
import lalsimulation as lalsim
#
# m1 = 90./2.
# m2 = 30./2.
# chi1x = 0.9
# chi1y = 0.
# chi1z = 0.
# chi2x = 0.
# chi2y = 0.
# chi2z = 0.
# delta_f = 1./32.
# f_min = 7.
# fRef = f_min
# inclination = np.pi/3. * 0.
m1=80.4782639
m2=16.384655
chi1x=0.062809065
chi1y=0.528722703
chi1z=-0.77006942
chi2x=-0.102698207
chi2y=-0.0977499112
chi2z=-0.0815029368
delta_f=1.0/256.
f_min=5.
fRef = 10.
inclination=2.85646439
phenompv3 = phenom.Waveform(approximant="IMRPhenomPv3")
from copy import copy
phenpv3_1 = copy(phenompv3)
phenpv3_1.input_params['m1']=m1
phenpv3_1.input_params['m2']=m2
phenpv3_1.input_params['chi1x']=chi1x
phenpv3_1.input_params['chi1y']=chi1y
phenpv3_1.input_params['chi1z']=chi1z
phenpv3_1.input_params['chi2x']=chi2x
phenpv3_1.input_params['chi2y']=chi2y
phenpv3_1.input_params['chi2z']=chi2z
phenpv3_1.input_params['inclination']=inclination
phenpv3_1.input_params['f_min']=f_min
phenpv3_1.input_params['fRef']=fRef
phenpv3_1.input_params['delta_f']=delta_f
print("starting phenompv3 generator")
#phenomp_v3 waveform generator
for i in range(10):
phenpv3_1.phenompv3(phenpv3_1.input_params)
# phenpv3_1.phenompv3(phenpv3_1.input_params)
| 20.451613
| 55
| 0.755521
|
ab1daa485cbd0d04fcd11916195b55d83abd367f
| 682
|
py
|
Python
|
templates/sphinx/conf.py
|
wapcaplet/readthedocs.org
|
a0eadad2d1c4c61c0b72a2cd801e2aca32291e49
|
[
"MIT"
] | 1
|
2015-11-08T11:31:42.000Z
|
2015-11-08T11:31:42.000Z
|
templates/sphinx/conf.py
|
wapcaplet/readthedocs.org
|
a0eadad2d1c4c61c0b72a2cd801e2aca32291e49
|
[
"MIT"
] | null | null | null |
templates/sphinx/conf.py
|
wapcaplet/readthedocs.org
|
a0eadad2d1c4c61c0b72a2cd801e2aca32291e49
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys, os
extensions = []
templates_path = ['{{ template_dir }}', 'templates', '_templates', '.templates']
source_suffix = '{{ project.suffix }}'
master_doc = 'index'
project = u'{{ project.name }}'
copyright = u'{{ project.copyright }}'
version = '{{ project.version }}'
release = '{{ project.version }}'
exclude_patterns = ['_build']
pygments_style = 'sphinx'
html_theme = '{{ project.theme }}'
html_theme_path = ['.', '_theme', '.theme']
htmlhelp_basename = '{{ project.slug }}'
file_insertion_enabled = False
latex_documents = [
('index', '{{ project.slug }}.tex', u'{{ project.name }} Documentation',
u'{{ project.copyright }}', 'manual'),
]
| 31
| 80
| 0.645161
|
5d8ea62dd314044be0c745e4673d283b2257d3fd
| 561
|
py
|
Python
|
py_file_carving/libary/plugins/__init__.py
|
wahlflo/pyFileCarving
|
7bbbbedccb551273fd4b22614c86f51bc876bd78
|
[
"MIT"
] | null | null | null |
py_file_carving/libary/plugins/__init__.py
|
wahlflo/pyFileCarving
|
7bbbbedccb551273fd4b22614c86f51bc876bd78
|
[
"MIT"
] | null | null | null |
py_file_carving/libary/plugins/__init__.py
|
wahlflo/pyFileCarving
|
7bbbbedccb551273fd4b22614c86f51bc876bd78
|
[
"MIT"
] | null | null | null |
# Private Keys
# binaries
from .binaries import PeFile
from .certificates import Certificate
# Certificates and Certificate Requests
from .certificates import CertificateRequest
from .certificates import TrustedCertificate
# PDF
from .pdf import PDF
# Pictures
from .pictures import JPG
from .pictures import PNG
from .private_keys import EncryptedPrivateKey
from .private_keys import PrivateDSAKey
from .private_keys import PrivateECKey
from .private_keys import PrivateKey
from .private_keys import PrivateRsaKey
| 29.526316
| 45
| 0.850267
|
a50aad2d56d1852110a3cf74eec61edabefa5e4f
| 5,313
|
py
|
Python
|
huaweicloud-sdk-gaussdbfornosql/huaweicloudsdkgaussdbfornosql/v3/model/slowlog_result.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 64
|
2020-06-12T07:05:07.000Z
|
2022-03-30T03:32:50.000Z
|
huaweicloud-sdk-gaussdbfornosql/huaweicloudsdkgaussdbfornosql/v3/model/slowlog_result.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 11
|
2020-07-06T07:56:54.000Z
|
2022-01-11T11:14:40.000Z
|
huaweicloud-sdk-gaussdbfornosql/huaweicloudsdkgaussdbfornosql/v3/model/slowlog_result.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 24
|
2020-06-08T11:42:13.000Z
|
2022-03-04T06:44:08.000Z
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class SlowlogResult:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'time': 'str',
'database': 'str',
'query_sample': 'str',
'type': 'str',
'start_time': 'str'
}
attribute_map = {
'time': 'time',
'database': 'database',
'query_sample': 'query_sample',
'type': 'type',
'start_time': 'start_time'
}
def __init__(self, time=None, database=None, query_sample=None, type=None, start_time=None):
"""SlowlogResult - a model defined in huaweicloud sdk"""
self._time = None
self._database = None
self._query_sample = None
self._type = None
self._start_time = None
self.discriminator = None
self.time = time
self.database = database
self.query_sample = query_sample
self.type = type
self.start_time = start_time
@property
def time(self):
"""Gets the time of this SlowlogResult.
        Execution time.
:return: The time of this SlowlogResult.
:rtype: str
"""
return self._time
@time.setter
def time(self, time):
"""Sets the time of this SlowlogResult.
        Execution time.
:param time: The time of this SlowlogResult.
:type: str
"""
self._time = time
@property
def database(self):
"""Gets the database of this SlowlogResult.
        Database the statement belongs to.
:return: The database of this SlowlogResult.
:rtype: str
"""
return self._database
@database.setter
def database(self, database):
"""Sets the database of this SlowlogResult.
        Database the statement belongs to.
:param database: The database of this SlowlogResult.
:type: str
"""
self._database = database
@property
def query_sample(self):
"""Gets the query_sample of this SlowlogResult.
        Executed statement syntax.
:return: The query_sample of this SlowlogResult.
:rtype: str
"""
return self._query_sample
@query_sample.setter
def query_sample(self, query_sample):
"""Sets the query_sample of this SlowlogResult.
        Executed statement syntax.
:param query_sample: The query_sample of this SlowlogResult.
:type: str
"""
self._query_sample = query_sample
@property
def type(self):
"""Gets the type of this SlowlogResult.
        Statement type.
:return: The type of this SlowlogResult.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this SlowlogResult.
        Statement type.
:param type: The type of this SlowlogResult.
:type: str
"""
self._type = type
@property
def start_time(self):
"""Gets the start_time of this SlowlogResult.
        Occurrence time, in UTC.
:return: The start_time of this SlowlogResult.
:rtype: str
"""
return self._start_time
@start_time.setter
def start_time(self, start_time):
"""Sets the start_time of this SlowlogResult.
        Occurrence time, in UTC.
:param start_time: The start_time of this SlowlogResult.
:type: str
"""
self._start_time = start_time
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SlowlogResult):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
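# A minimal usage sketch (the field values are illustrative, not real
# slow-log data):
#
#     entry = SlowlogResult(time="30ms", database="test_db",
#                           query_sample="find({})", type="query",
#                           start_time="2021-01-01T00:00:00Z")
#     print(entry.to_str())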
| 24.37156
| 96
| 0.548842
|
2a3df4d51c027c2f604f481244a51acf122082b3
| 8,311
|
py
|
Python
|
app.py
|
bashkirtsevich/macaque
|
9c14dc0f7eb1fde7139ec9bc894282c396c481f7
|
[
"MIT"
] | null | null | null |
app.py
|
bashkirtsevich/macaque
|
9c14dc0f7eb1fde7139ec9bc894282c396c481f7
|
[
"MIT"
] | null | null | null |
app.py
|
bashkirtsevich/macaque
|
9c14dc0f7eb1fde7139ec9bc894282c396c481f7
|
[
"MIT"
] | null | null | null |
import os
from aiohttp import web
from sqlalchemy import create_engine
from sqlalchemy_aio import ASYNCIO_STRATEGY
import api
from arg_schemas import reply_entity_validator, reply_comment_validator, edit_comment_validator, \
remove_comment_validator, read_entity_comments_validator, validate_args, ValidatorException, \
read_user_comments_validator, read_comment_replies_validator, read_entity_replies_validator, \
stream_user_comments_validator, stream_entity_replies_validator
from response_streamer import XMLStreamer
from utils import parse_datetime
async def _read_args(request):
post_data = (await request.json()) if request.method == "POST" else {}
get_data = dict(request.match_info)
return {**get_data, **post_data}
async def reply_entity(connection, data):
comment_token = await api.add_comment(
connection,
entity_type=data["type"],
entity_token=data["entity"],
user_token=data["user_token"],
text=data["text"]
)
return {"comment_token": comment_token}
async def reply_comment(connection, data):
comment_token = await api.reply_comment(
connection,
parent_comment_token=data["comment_token"],
user_token=data["user_token"],
text=data["text"]
)
return {"comment_token": comment_token}
async def edit_comment(connection, data):
revision_key = await api.edit_comment(
connection,
user_token=data["user_token"],
comment_unique_key=data["comment_token"],
text=data["text"]
)
return {"success": revision_key is not None}
async def remove_comment(connection, data):
try:
await api.remove_comment(
connection,
user_token=data["user_token"],
comment_unique_key=data["comment_token"]
)
return {"success": True}
except api.APIException:
return {"success": False}
async def read_entity_comments(connection, data):
return [
item async for item in api.get_entity_comments(
connection,
entity_type=data["type"],
entity_token=data["entity"],
limit=int(data.get("limit", "1000")),
offset=int(data.get("offset", "0")),
with_replies=False
)
]
async def read_entity_replies(connection, data):
return [
item async for item in api.get_entity_comments(
connection,
entity_type=data["type"],
entity_token=data["entity"],
limit=int(data.get("limit", "1000")),
offset=int(data.get("offset", "0")),
with_replies=True
)
]
async def read_user_comments(connection, data):
return [
item async for item in api.get_user_comments(
connection,
user_token=data["user_token"]
)
]
async def read_comment_replies(connection, data):
return [
item async for item in api.get_comment_replies(
connection,
comment_token=data["comment_token"]
)
]
arg_validators = {
reply_entity: reply_entity_validator,
reply_comment: reply_comment_validator,
edit_comment: edit_comment_validator,
remove_comment: remove_comment_validator,
read_entity_comments: read_entity_comments_validator,
read_user_comments: read_user_comments_validator,
read_comment_replies: read_comment_replies_validator,
read_entity_replies: read_entity_replies_validator
}
async def handle_request(connection, request, future):
try:
data = await _read_args(request)
validate_args(data, arg_validators[future])
result = await future(connection, data)
return web.json_response({"result": result})
except TimeoutError:
        return web.json_response({"result": "error", "reason": "Request timeout expired"}, status=500)
except api.APIException as e:
return web.json_response({"result": "error", "reason": "API error ({})".format(str(e))}, status=500)
except ValidatorException as e:
return web.json_response({"result": "error", "error": str(e)}, status=500)
except Exception as e:
return web.json_response({"error": "Internal server error ({})".format(str(e))}, status=500)
async def stream_user_comments(connection, data):
idx = 1
async for item in api.get_user_comments(
connection,
user_token=data["user_token"],
timestamp_from=parse_datetime(data.get("timestamp_from", None)),
timestamp_to=parse_datetime(data.get("timestamp_to", None))
):
yield {"record{}".format(idx): item}
idx += 1
async def stream_entity_replies(connection, data):
idx = 1
async for item in api.get_entity_comments(
connection,
entity_type=data["type"],
entity_token=data["entity"],
limit=0,
offset=0,
timestamp_from=parse_datetime(data.get("timestamp_from", None)),
timestamp_to=parse_datetime(data.get("timestamp_to", None)),
with_replies=True
):
yield {"record{}".format(idx): item}
idx += 1
streamer_arg_validators = {
stream_user_comments: stream_user_comments_validator,
stream_entity_replies: stream_entity_replies_validator
}
async def handle_stream(connection, request, future):
response = web.StreamResponse(
status=200,
reason="OK",
headers={"Content-Type": "application/octet-stream"}
)
streamer = XMLStreamer(response)
await response.prepare(request)
try:
data = await _read_args(request)
validate_args(data, streamer_arg_validators[future])
await streamer.write_head()
async for item in future(connection, data):
await streamer.write_body(item)
await streamer.write_tail()
except api.APIException as e:
response.write_eof("Error: {}".format(str(e)).encode("utf-8"))
return response
async def get_app():
db_engine = create_engine(os.getenv("DATABASE_URL"), strategy=ASYNCIO_STRATEGY)
db_connection = await db_engine.connect()
app = web.Application()
app.router.add_post(
"/api/reply/{type}/{entity}",
lambda request: handle_request(db_connection, request, reply_entity)
)
app.router.add_post(
"/api/reply/{comment_token}",
lambda request: handle_request(db_connection, request, reply_comment)
)
app.router.add_post(
"/api/edit/{comment_token}/{user_token}",
lambda request: handle_request(db_connection, request, edit_comment)
)
app.router.add_post(
"/api/remove/{comment_token}",
lambda request: handle_request(db_connection, request, remove_comment)
)
for url in ["/api/comments/{type}/{entity}",
"/api/comments/{type}/{entity}/{limit}",
"/api/comments/{type}/{entity}/{offset}/{limit}"]:
app.router.add_get(
url, lambda request: handle_request(db_connection, request, read_entity_comments)
)
app.router.add_get(
"/api/comments/{user_token}",
lambda request: handle_request(db_connection, request, read_user_comments)
)
app.router.add_get(
"/api/replies/{comment_token}",
lambda request: handle_request(db_connection, request, read_comment_replies)
)
app.router.add_get(
"/api/replies/{type}/{entity}",
lambda request: handle_request(db_connection, request, read_entity_replies)
)
for url in ["/api/user/download/{user_token}",
"/api/user/download/{user_token}/{timestamp_from}",
"/api/user/download/{user_token}/{timestamp_from}/{timestamp_to}"]:
app.router.add_get(
url,
lambda request: handle_stream(db_connection, request, stream_user_comments)
)
for url in ["/api/download/{type}/{entity}",
"/api/download/{type}/{entity}/{timestamp_from}",
"/api/download/{type}/{entity}/{timestamp_from}/{timestamp_to}"]:
app.router.add_get(
url,
lambda request: handle_stream(db_connection, request, stream_entity_replies)
)
return app
if __name__ == "__main__":
web.run_app(get_app())
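# A hedged request sketch against the routes registered above (entity names
# and tokens are illustrative):
#
#     POST /api/reply/article/42          body: {"user_token": "u1", "text": "Nice!"}
#     GET  /api/comments/article/42/10    -> first 10 comments, without replies
#     GET  /api/replies/article/42        -> comments including their replies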
| 31.127341
| 108
| 0.654554
|
2ae879620ba8bc2a069f3a3015ebb65f477d8a74
| 1,843
|
py
|
Python
|
tests/test_compute.py
|
Edubya77/baidu_thing
|
b0612e926b63c698eea9f2cd924f9f8f054de80f
|
[
"Unlicense"
] | null | null | null |
tests/test_compute.py
|
Edubya77/baidu_thing
|
b0612e926b63c698eea9f2cd924f9f8f054de80f
|
[
"Unlicense"
] | null | null | null |
tests/test_compute.py
|
Edubya77/baidu_thing
|
b0612e926b63c698eea9f2cd924f9f8f054de80f
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys
import unittest
from pathlib import Path
root = str(Path(__file__).resolve().parents[1])
sys.path.append(root)
from baidu.compute import *
class Parse_URLs(unittest.TestCase):
def setUp(self):
self.uri = 'http://www.example.com/en-us/'
def test_can_parse_domain_name(self):
self.assertNotEqual(nameParser(self.uri), self.uri)
self.assertEqual(nameParser(self.uri), 'example')
class Encoding_URLs(unittest.TestCase):
def setUp(self):
self.uri = 'https://zh.example.com/公司/价值观/'
self.encoded = 'https://zh.example.com/%E5%85%AC%E5%8F%B8/%E4%BB%B7%E5%80%BC%E8%A7%82/'
def test_can_encode_non_english_urls(self):
self.assertNotEqual(encodeUrl(self.uri), self.uri)
self.assertEqual(encodeUrl(self.uri), self.encoded)
class Prepare_base_URL_for_Baidu_endpoint(unittest.TestCase):
def setUp(self):
self.uri = 'https://zh.example.com/公司/价值观/'
def test_can_parse_Baidu_site_var(self):
self.assertEqual(prepSiteUrl(self.uri), 'zh.example.com')
# class URL_Submission_Logic_test(unittest.TestCase):
# def setUp(self):
# self.max = 2000
# self.min = 1
# self.invalid = 3000
# def test_input_is_max(self):
# test_list = []
# for x in range(self.max):
# test_list.append('http://www.example.com')
# self.assertEqual(len(prepData(test_list, self.max)), self.max)
# def test_input_is_mix(self):
# test_list = []
# for x in range(self.min):
# test_list.append('http://www.example.com')
# self.assertEqual(len(prepData(test_list, self.min)), self.min)
class NextTest(unittest.TestCase):
def test_something(self):
self.fail('Write more tests')
if __name__ == '__main__':
unittest.main()
| 29.725806
| 95
| 0.65274
|
57af2cbf577b550ace311b7a55be5954af0c9390
| 8,472
|
py
|
Python
|
neo/io/__init__.py
|
Moritz-Alexander-Kern/python-neo
|
2e00c994e46a1396fb4bdd393535604e6fc5568f
|
[
"BSD-3-Clause"
] | null | null | null |
neo/io/__init__.py
|
Moritz-Alexander-Kern/python-neo
|
2e00c994e46a1396fb4bdd393535604e6fc5568f
|
[
"BSD-3-Clause"
] | null | null | null |
neo/io/__init__.py
|
Moritz-Alexander-Kern/python-neo
|
2e00c994e46a1396fb4bdd393535604e6fc5568f
|
[
"BSD-3-Clause"
] | null | null | null |
"""
:mod:`neo.io` provides classes for reading and/or writing
electrophysiological data files.
Note that if the package dependency is not satisfied for one io, it does not
raise an error but issues a warning.
:attr:`neo.io.iolist` provides a list of successfully imported io classes.
Functions:
.. autofunction:: neo.io.get_io
Classes:
* :attr:`AlphaOmegaIO`
* :attr:`AsciiImageIO`
* :attr:`AsciiSignalIO`
* :attr:`AsciiSpikeTrainIO`
* :attr:`AxographIO`
* :attr:`AxonaIO`
* :attr:`AxonIO`
* :attr:`BCI2000IO`
* :attr:`BiocamIO`
* :attr:`BlackrockIO`
* :attr:`BlkIO`
* :attr:`BrainVisionIO`
* :attr:`BrainwareDamIO`
* :attr:`BrainwareF32IO`
* :attr:`BrainwareSrcIO`
* :attr:`CedIO`
* :attr:`ElanIO`
* :attr:`IgorIO`
* :attr:`IntanIO`
* :attr:`MEArecIO`
* :attr:`KlustaKwikIO`
* :attr:`KwikIO`
* :attr:`MaxwellIO`
* :attr:`MicromedIO`
* :attr:`NeoMatlabIO`
* :attr:`NestIO`
* :attr:`NeuralynxIO`
* :attr:`NeuroExplorerIO`
* :attr:`NeuroScopeIO`
* :attr:`NeuroshareIO`
* :attr:`NixIO`
* :attr:`NWBIO`
* :attr:`OpenEphysIO`
* :attr:`OpenEphysBinaryIO`
* :attr:`PhyIO`
* :attr:`PickleIO`
* :attr:`PlexonIO`
* :attr:`RawBinarySignalIO`
* :attr:`RawMCSIO`
* :attr:`Spike2IO`
* :attr:`SpikeGadgetsIO`
* :attr:`SpikeGLXIO`
* :attr:`StimfitIO`
* :attr:`TdtIO`
* :attr:`TiffIO`
* :attr:`WinEdrIO`
* :attr:`WinWcpIO`
.. autoclass:: neo.io.AlphaOmegaIO
.. autoattribute:: extensions
.. autoclass:: neo.io.AsciiImageIO
.. autoattribute:: extensions
.. autoclass:: neo.io.AsciiSignalIO
.. autoattribute:: extensions
.. autoclass:: neo.io.AsciiSpikeTrainIO
.. autoattribute:: extensions
.. autoclass:: neo.io.AxographIO
.. autoattribute:: extensions
.. autoclass:: neo.io.AxonaIO
.. autoattribute:: extensions
.. autoclass:: neo.io.AxonIO
.. autoattribute:: extensions
.. autoclass:: neo.io.BCI2000IO
.. autoattribute:: extensions
.. autoclass:: neo.io.BiocamIO
.. autoattribute:: extensions
.. autoclass:: neo.io.BlackrockIO
.. autoattribute:: extensions
.. autoclass:: neo.io.BlkIO
.. autoattribute:: extensions
.. autoclass:: neo.io.BrainVisionIO
.. autoattribute:: extensions
.. autoclass:: neo.io.BrainwareDamIO
.. autoattribute:: extensions
.. autoclass:: neo.io.BrainwareF32IO
.. autoattribute:: extensions
.. autoclass:: neo.io.BrainwareSrcIO
.. autoattribute:: extensions
.. autoclass:: neo.io.CedIO
.. autoattribute:: extensions
.. autoclass:: neo.io.ElanIO
.. autoattribute:: extensions
.. .. autoclass:: neo.io.ElphyIO
.. autoattribute:: extensions
.. autoclass:: neo.io.IgorIO
.. autoattribute:: extensions
.. autoclass:: neo.io.IntanIO
.. autoattribute:: extensions
.. autoclass:: neo.io.KlustaKwikIO
.. autoattribute:: extensions
.. autoclass:: neo.io.KwikIO
.. autoattribute:: extensions
.. autoclass:: neo.io.MEArecIO
.. autoattribute:: extensions
.. autoclass:: neo.io.MaxwellIO
.. autoattribute:: extensions
.. autoclass:: neo.io.MicromedIO
.. autoattribute:: extensions
.. autoclass:: neo.io.NeoMatlabIO
.. autoattribute:: extensions
.. autoclass:: neo.io.NestIO
.. autoattribute:: extensions
.. autoclass:: neo.io.NeuralynxIO
.. autoattribute:: extensions
.. autoclass:: neo.io.NeuroExplorerIO
.. autoattribute:: extensions
.. autoclass:: neo.io.NeuroScopeIO
.. autoattribute:: extensions
.. autoclass:: neo.io.NeuroshareIO
.. autoattribute:: extensions
.. autoclass:: neo.io.NixIO
.. autoattribute:: extensions
.. autoclass:: neo.io.NWBIO
.. autoattribute:: extensions
.. autoclass:: neo.io.OpenEphysIO
.. autoattribute:: extensions
.. autoclass:: neo.io.OpenEphysBinaryIO
.. autoattribute:: extensions
.. autoclass:: neo.io.PhyIO
.. autoattribute:: extensions
.. autoclass:: neo.io.PickleIO
.. autoattribute:: extensions
.. autoclass:: neo.io.PlexonIO
.. autoattribute:: extensions
.. autoclass:: neo.io.RawBinarySignalIO
.. autoattribute:: extensions
.. autoclass:: neo.io.RawMCSIO
.. autoattribute:: extensions
.. autoclass:: neo.io.Spike2IO
    .. autoattribute:: extensions
.. autoclass:: neo.io.SpikeGadgetsIO
    .. autoattribute:: extensions
.. autoclass:: neo.io.SpikeGLXIO
    .. autoattribute:: extensions
.. autoclass:: neo.io.StimfitIO
.. autoattribute:: extensions
.. autoclass:: neo.io.TdtIO
.. autoattribute:: extensions
.. autoclass:: neo.io.TiffIO
.. autoattribute:: extensions
.. autoclass:: neo.io.WinEdrIO
.. autoattribute:: extensions
.. autoclass:: neo.io.WinWcpIO
.. autoattribute:: extensions
"""
import os.path
# try to import the neuroshare library.
# if it is present, use the neuroshareapiio to load neuroshare files
# if it is not present, use the neurosharectypesio to load files
try:
import neuroshare as ns
except ImportError as err:
from neo.io.neurosharectypesio import NeurosharectypesIO as NeuroshareIO
# print("\n neuroshare library not found, loading data with ctypes" )
# print("\n to use the API be sure to install the library found at:")
# print("\n www.http://pythonhosted.org/neuroshare/")
else:
from neo.io.neuroshareapiio import NeuroshareapiIO as NeuroshareIO
# print("neuroshare library successfully imported")
# print("\n loading with API...")
from neo.io.alphaomegaio import AlphaOmegaIO
from neo.io.asciiimageio import AsciiImageIO
from neo.io.asciisignalio import AsciiSignalIO
from neo.io.asciispiketrainio import AsciiSpikeTrainIO
from neo.io.axographio import AxographIO
from neo.io.axonaio import AxonaIO
from neo.io.axonio import AxonIO
from neo.io.biocamio import BiocamIO
from neo.io.blackrockio import BlackrockIO
from neo.io.blkio import BlkIO
from neo.io.bci2000io import BCI2000IO
from neo.io.brainvisionio import BrainVisionIO
from neo.io.brainwaredamio import BrainwareDamIO
from neo.io.brainwaref32io import BrainwareF32IO
from neo.io.brainwaresrcio import BrainwareSrcIO
from neo.io.cedio import CedIO
from neo.io.elanio import ElanIO
from neo.io.elphyio import ElphyIO
from neo.io.exampleio import ExampleIO
from neo.io.igorproio import IgorIO
from neo.io.intanio import IntanIO
from neo.io.klustakwikio import KlustaKwikIO
from neo.io.kwikio import KwikIO
from neo.io.mearecio import MEArecIO
from neo.io.maxwellio import MaxwellIO
from neo.io.micromedio import MicromedIO
from neo.io.neomatlabio import NeoMatlabIO
from neo.io.nestio import NestIO
from neo.io.neuralynxio import NeuralynxIO
from neo.io.neuroexplorerio import NeuroExplorerIO
from neo.io.neuroscopeio import NeuroScopeIO
from neo.io.nixio import NixIO
from neo.io.nixio_fr import NixIO as NixIOFr
from neo.io.nwbio import NWBIO
from neo.io.openephysio import OpenEphysIO
from neo.io.openephysbinaryio import OpenEphysBinaryIO
from neo.io.phyio import PhyIO
from neo.io.pickleio import PickleIO
from neo.io.plexonio import PlexonIO
from neo.io.rawbinarysignalio import RawBinarySignalIO
from neo.io.rawmcsio import RawMCSIO
from neo.io.spike2io import Spike2IO
from neo.io.spikegadgetsio import SpikeGadgetsIO
from neo.io.spikeglxio import SpikeGLXIO
from neo.io.stimfitio import StimfitIO
from neo.io.tdtio import TdtIO
from neo.io.tiffio import TiffIO
from neo.io.winedrio import WinEdrIO
from neo.io.winwcpio import WinWcpIO
iolist = [
AlphaOmegaIO,
AsciiImageIO,
AsciiSignalIO,
AsciiSpikeTrainIO,
AxographIO,
AxonaIO,
AxonIO,
BCI2000IO,
BlackrockIO,
BlkIO,
BrainVisionIO,
BrainwareDamIO,
BrainwareF32IO,
BrainwareSrcIO,
CedIO,
ElanIO,
# ElphyIO,
ExampleIO,
IgorIO,
IntanIO,
KlustaKwikIO,
KwikIO,
MEArecIO,
MaxwellIO,
MicromedIO,
NixIO, # place NixIO before other IOs that use HDF5 to make it the default for .h5 files
NeoMatlabIO,
NestIO,
NeuralynxIO,
NeuroExplorerIO,
NeuroScopeIO,
NeuroshareIO,
NWBIO,
OpenEphysIO,
OpenEphysBinaryIO,
PhyIO,
PickleIO,
PlexonIO,
RawBinarySignalIO,
RawMCSIO,
Spike2IO,
SpikeGadgetsIO,
SpikeGLXIO,
StimfitIO,
TdtIO,
TiffIO,
WinEdrIO,
WinWcpIO
]
def get_io(filename, *args, **kwargs):
"""
Return a Neo IO instance, guessing the type based on the filename suffix.
"""
extension = os.path.splitext(filename)[1][1:]
for io in iolist:
if extension in io.extensions:
return io(filename, *args, **kwargs)
raise IOError("File extension %s not registered" % extension)
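# A hedged usage sketch of get_io(); the filename is illustrative, and
# read_block() is assumed to be provided by the IO class that gets selected:
#
#     io = get_io("data.h5")     # NixIO is picked first for .h5 files (see iolist order)
#     block = io.read_block()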
| 21.723077
| 93
| 0.717186
|
5075d20b2c2eb11ce73e519e601ada505e379e96
| 650
|
py
|
Python
|
problems/A/SuffixThree.py
|
deveshbajpai19/CodeForces
|
707b374f03012ec68054841f791d48b33ae4ef1b
|
[
"MIT"
] | 55
|
2016-06-19T05:45:15.000Z
|
2022-03-31T15:18:53.000Z
|
problems/A/SuffixThree.py
|
farhadcu/CodeForces-2
|
707b374f03012ec68054841f791d48b33ae4ef1b
|
[
"MIT"
] | null | null | null |
problems/A/SuffixThree.py
|
farhadcu/CodeForces-2
|
707b374f03012ec68054841f791d48b33ae4ef1b
|
[
"MIT"
] | 25
|
2016-07-29T13:03:15.000Z
|
2021-09-17T01:45:45.000Z
|
__author__ = 'Devesh Bajpai'
'''
https://codeforces.com/problemset/problem/1281/A
Solution: Compare the suffixes of the given string against the known patterns and return the
corresponding language.
~ Tidbit ~
Use s[-x:] to obtain the last x characters of the string s.
'''
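# A quick illustration of the slicing tidbit above (the sample strings are
# illustrative only):
#
#     'kundesu'[-4:]     # -> 'desu'  => JAPANESE
#     'salamat po'[-2:]  # -> 'po'    => FILIPINO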
def solve(s):
if s[-2:] == "po":
return "FILIPINO"
elif s[-4:] == "desu" or s[-4:] == "masu":
return "JAPANESE"
elif s[-5:] == "mnida":
return "KOREAN"
else:
return "NONE"
if __name__ == "__main__":
t = int(raw_input())
for _ in xrange(0, t):
s = raw_input()
print solve(s)
| 19.69697
| 105
| 0.593846
|
9d960a5f5996288d90ff77d6253683bfc2ca57b8
| 1,680
|
py
|
Python
|
.tools/download_emoji_variation_sequences.py
|
PredaaA/JackCogs
|
6677496ed030a219afe32843e95154b01406226a
|
[
"Apache-2.0"
] | null | null | null |
.tools/download_emoji_variation_sequences.py
|
PredaaA/JackCogs
|
6677496ed030a219afe32843e95154b01406226a
|
[
"Apache-2.0"
] | null | null | null |
.tools/download_emoji_variation_sequences.py
|
PredaaA/JackCogs
|
6677496ed030a219afe32843e95154b01406226a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018-2020 Jakub Kuczys (https://github.com/jack1142)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import textwrap
from pathlib import Path
import requests
ROOT_PATH = Path(__file__).parent.parent.absolute()
VARIATIONS_FILE = ROOT_PATH / "emojiinfo/variations.py"
r = requests.get(
"https://www.unicode.org/Public/UCD/latest/ucd/emoji/emoji-variation-sequences.txt"
)
r.raise_for_status()
backslash_emoji_reprs = []
for line in r.text.splitlines():
if not line or line.startswith("#"):
continue
variation_sequence = list(
map(
lambda x: chr(int(x, base=16)),
line.split(";", maxsplit=1)[0].strip().split(),
)
)
if variation_sequence[1] != "\N{VARIATION SELECTOR-16}":
continue
emoji = variation_sequence[0]
backslash_repr = emoji.encode("ascii", "backslashreplace").decode("utf-8")
backslash_emoji_reprs.append(backslash_repr)
inner_code = textwrap.indent(
",\n".join(f'"{backslash_repr}"' for backslash_repr in backslash_emoji_reprs),
" ",
)
code = f"EMOJIS_WITH_VARIATIONS = {{\n{inner_code},\n}}\n"
with VARIATIONS_FILE.open("w") as fp:
fp.write(code)
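# A rough sketch of what the generated emojiinfo/variations.py looks like
# (the code points shown are illustrative; the real file lists every emoji
# that has a VARIATION SELECTOR-16 sequence):
#
#     EMOJIS_WITH_VARIATIONS = {
#         "\u0023",
#         "\u2764",
#     }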
| 31.111111
| 87
| 0.704167
|
d82fd9d74d69a996e5a6eb6594e9f5456f2b6b40
| 5,215
|
py
|
Python
|
test/functional/test_framework/wallet_util.py
|
XbitCC/xbitcoin
|
2c71b1adc311680e8f3aa977b42029b1a26164f1
|
[
"MIT"
] | 2
|
2021-10-16T06:16:02.000Z
|
2022-03-26T21:48:38.000Z
|
test/functional/test_framework/wallet_util.py
|
XbitCC/xbitcoin
|
2c71b1adc311680e8f3aa977b42029b1a26164f1
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/wallet_util.py
|
XbitCC/xbitcoin
|
2c71b1adc311680e8f3aa977b42029b1a26164f1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2018-2020 The XBit Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Useful util functions for testing the wallet"""
from collections import namedtuple
from test_framework.address import (
byte_to_base58,
key_to_p2pkh,
key_to_p2sh_p2wpkh,
key_to_p2wpkh,
script_to_p2sh,
script_to_p2sh_p2wsh,
script_to_p2wsh,
)
from test_framework.key import ECKey
from test_framework.script import (
CScript,
OP_0,
OP_2,
OP_3,
OP_CHECKMULTISIG,
OP_CHECKSIG,
OP_DUP,
OP_EQUAL,
OP_EQUALVERIFY,
OP_HASH160,
hash160,
sha256,
)
from test_framework.util import hex_str_to_bytes
Key = namedtuple('Key', ['privkey',
'pubkey',
'p2pkh_script',
'p2pkh_addr',
'p2wpkh_script',
'p2wpkh_addr',
'p2sh_p2wpkh_script',
'p2sh_p2wpkh_redeem_script',
'p2sh_p2wpkh_addr'])
Multisig = namedtuple('Multisig', ['privkeys',
'pubkeys',
'p2sh_script',
'p2sh_addr',
'redeem_script',
'p2wsh_script',
'p2wsh_addr',
'p2sh_p2wsh_script',
'p2sh_p2wsh_addr'])
def get_key(node):
"""Generate a fresh key on node
    Returns a named tuple of privkey, pubkey, and all addresses and scripts."""
addr = node.getnewaddress()
pubkey = node.getaddressinfo(addr)['pubkey']
pkh = hash160(hex_str_to_bytes(pubkey))
return Key(privkey=node.dumpprivkey(addr),
pubkey=pubkey,
p2pkh_script=CScript([OP_DUP, OP_HASH160, pkh, OP_EQUALVERIFY, OP_CHECKSIG]).hex(),
p2pkh_addr=key_to_p2pkh(pubkey),
p2wpkh_script=CScript([OP_0, pkh]).hex(),
p2wpkh_addr=key_to_p2wpkh(pubkey),
p2sh_p2wpkh_script=CScript([OP_HASH160, hash160(CScript([OP_0, pkh])), OP_EQUAL]).hex(),
p2sh_p2wpkh_redeem_script=CScript([OP_0, pkh]).hex(),
p2sh_p2wpkh_addr=key_to_p2sh_p2wpkh(pubkey))
def get_generate_key():
"""Generate a fresh key
    Returns a named tuple of privkey, pubkey, and all addresses and scripts."""
eckey = ECKey()
eckey.generate()
privkey = bytes_to_wif(eckey.get_bytes())
pubkey = eckey.get_pubkey().get_bytes().hex()
pkh = hash160(hex_str_to_bytes(pubkey))
return Key(privkey=privkey,
pubkey=pubkey,
p2pkh_script=CScript([OP_DUP, OP_HASH160, pkh, OP_EQUALVERIFY, OP_CHECKSIG]).hex(),
p2pkh_addr=key_to_p2pkh(pubkey),
p2wpkh_script=CScript([OP_0, pkh]).hex(),
p2wpkh_addr=key_to_p2wpkh(pubkey),
p2sh_p2wpkh_script=CScript([OP_HASH160, hash160(CScript([OP_0, pkh])), OP_EQUAL]).hex(),
p2sh_p2wpkh_redeem_script=CScript([OP_0, pkh]).hex(),
p2sh_p2wpkh_addr=key_to_p2sh_p2wpkh(pubkey))
def get_multisig(node):
"""Generate a fresh 2-of-3 multisig on node
    Returns a named tuple of privkeys, pubkeys, and all addresses and scripts."""
addrs = []
pubkeys = []
for _ in range(3):
addr = node.getaddressinfo(node.getnewaddress())
addrs.append(addr['address'])
pubkeys.append(addr['pubkey'])
script_code = CScript([OP_2] + [hex_str_to_bytes(pubkey) for pubkey in pubkeys] + [OP_3, OP_CHECKMULTISIG])
witness_script = CScript([OP_0, sha256(script_code)])
return Multisig(privkeys=[node.dumpprivkey(addr) for addr in addrs],
pubkeys=pubkeys,
p2sh_script=CScript([OP_HASH160, hash160(script_code), OP_EQUAL]).hex(),
p2sh_addr=script_to_p2sh(script_code),
redeem_script=script_code.hex(),
p2wsh_script=witness_script.hex(),
p2wsh_addr=script_to_p2wsh(script_code),
p2sh_p2wsh_script=CScript([OP_HASH160, witness_script, OP_EQUAL]).hex(),
p2sh_p2wsh_addr=script_to_p2sh_p2wsh(script_code))
def test_address(node, address, **kwargs):
"""Get address info for `address` and test whether the returned values are as expected."""
addr_info = node.getaddressinfo(address)
for key, value in kwargs.items():
if value is None:
if key in addr_info.keys():
raise AssertionError("key {} unexpectedly returned in getaddressinfo.".format(key))
elif addr_info[key] != value:
raise AssertionError("key {} value {} did not match expected value {}".format(key, addr_info[key], value))
def bytes_to_wif(b, compressed=True):
if compressed:
b += b'\x01'
return byte_to_base58(b, 239)
def generate_wif_key():
# Makes a WIF privkey for imports
k = ECKey()
k.generate()
return bytes_to_wif(k.get_bytes(), k.is_compressed)
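# A minimal usage sketch of the node-free helper above (illustrative only;
# it relies solely on functions defined in this module):
#
#     key = get_generate_key()
#     print(key.privkey, key.p2wpkh_addr)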
| 39.507576
| 118
| 0.602685
|
d01d4b1328c684fe56afd3bfc2db9d49884b2488
| 2,356
|
py
|
Python
|
airflow/providers/cncf/kubernetes/example_dags/example_spark_kubernetes.py
|
ChaseKnowlden/airflow
|
6b71eac1997a7c0db3b8e3aed6b4e65d01871440
|
[
"Apache-2.0"
] | 15,947
|
2019-01-05T13:51:02.000Z
|
2022-03-31T23:33:16.000Z
|
airflow/providers/cncf/kubernetes/example_dags/example_spark_kubernetes.py
|
ChaseKnowlden/airflow
|
6b71eac1997a7c0db3b8e3aed6b4e65d01871440
|
[
"Apache-2.0"
] | 14,603
|
2019-01-05T09:43:19.000Z
|
2022-03-31T23:11:59.000Z
|
airflow/providers/cncf/kubernetes/example_dags/example_spark_kubernetes.py
|
ChaseKnowlden/airflow
|
6b71eac1997a7c0db3b8e3aed6b4e65d01871440
|
[
"Apache-2.0"
] | 8,429
|
2019-01-05T19:45:47.000Z
|
2022-03-31T22:13:01.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This is an example DAG which uses SparkKubernetesOperator and SparkKubernetesSensor.
In this example, we create two tasks which execute sequentially.
The first task is to submit sparkApplication on Kubernetes cluster(the example uses spark-pi application).
and the second task is to check the final state of the sparkApplication that submitted in the first state.
Spark-on-k8s operator is required to be already installed on Kubernetes
https://github.com/GoogleCloudPlatform/spark-on-k8s-operator
"""
from datetime import datetime, timedelta
# [START import_module]
# The DAG object; we'll need this to instantiate a DAG
from airflow import DAG
# Operators; we need this to operate!
from airflow.providers.cncf.kubernetes.operators.spark_kubernetes import SparkKubernetesOperator
from airflow.providers.cncf.kubernetes.sensors.spark_kubernetes import SparkKubernetesSensor
# [END import_module]
# [START instantiate_dag]
dag = DAG(
'spark_pi',
default_args={'max_active_runs': 1},
description='submit spark-pi as sparkApplication on kubernetes',
schedule_interval=timedelta(days=1),
start_date=datetime(2021, 1, 1),
catchup=False,
)
t1 = SparkKubernetesOperator(
task_id='spark_pi_submit',
namespace="default",
application_file="example_spark_kubernetes_spark_pi.yaml",
do_xcom_push=True,
dag=dag,
)
t2 = SparkKubernetesSensor(
task_id='spark_pi_monitor',
namespace="default",
application_name="{{ task_instance.xcom_pull(task_ids='spark_pi_submit')['metadata']['name'] }}",
dag=dag,
)
t1 >> t2
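# Note: "example_spark_kubernetes_spark_pi.yaml" (referenced above, not shown
# here) is expected to hold a SparkApplication manifest for the spark-on-k8s
# operator, roughly apiVersion sparkoperator.k8s.io/v1beta2, kind
# SparkApplication, with the spark-pi image and main class in its spec.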
| 35.164179
| 106
| 0.771222
|
6b1084d73e1529677b6dd775e7f5c41b3b63a3ab
| 48,704
|
py
|
Python
|
src/bruce/scripts/check_bruce_counters.py
|
czchen/debian-bruce
|
0db47b345926fefbf958a704cac476b46cb44826
|
[
"Apache-2.0"
] | 2
|
2015-05-21T01:49:10.000Z
|
2015-06-09T23:28:30.000Z
|
src/bruce/scripts/check_bruce_counters.py
|
czchen/debian-bruce
|
0db47b345926fefbf958a704cac476b46cb44826
|
[
"Apache-2.0"
] | null | null | null |
src/bruce/scripts/check_bruce_counters.py
|
czchen/debian-bruce
|
0db47b345926fefbf958a704cac476b46cb44826
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
###############################################################################
# -----------------------------------------------------------------------------
# Copyright 2013-2014 if(we)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
#
# This script requests counter reports from bruce and analyzes the counters.
# It is intended to be executed by Nagios, or manually to get more information
# on a problem reported by Nagios.
#
# Program arguments:
# -m N
# --manual N: (optional)
# Specifies that we are running in manual mode. In manual mode, instead
# of getting counters from bruce and saving them to a counter file, we
# read a previously created counter file and analyze it, giving verbose
# output for each problem found. When the script (not running in manual
# mode) reports a problem to Nagios, a human being can later run it in
# manual mode to analyze the resulting counter file and determine the
# details of the problem(s) found. When running in manual mode, we
# don't delete old counter files. The 'N' value gives a number of
# seconds since the epoch specifying a counter file to be manually
# analyzed.
#
# -d DIR
# --work_dir DIR: (required)
# Specifies a directory under the Nagios user's home directory where the
# script keeps all of its data files. This directory will be created if
# it does not already exist.
#
# -i INTERVAL
# --interval INTERVAL: (required)
# The minimum number of seconds ago we will accept when looking for a
# recent counter file to compare our metadata update count against. The
# most recent counter file whose age is at least the minimum will be
# chosen. If no such counter file exists, the metadata update count
# test is skipped.
#
# -H HISTORY
# --history HISTORY: (required unless running in manual mode)
# The number of minutes of history to preserve when deleting old counter
# files. A value of 0 means "preserve everything".
#
# -s SERVER
# --nagios_server SERVER: (required)
# If not running in manual mode, this is a unique identifier for the
# Nagios server that triggered the current execution of this script. If
# running in manual mode, this is a unique identifier for the Nagios
# server that triggered the script execution that caused creation of the
# counter file we are analyzing.
#
# -w T
# --socket_error_warn_threshold T: (required)
# If the number of socket errors indicated by the counter output exceeds
# this value, it is treated as Warning.
#
# -c T
# --socket_error_critical_threshold T: (required)
# If the number of socket errors indicated by the counter output exceeds
# this value, it is treated as Critical.
#
# -W T
# --msg_count_warn_threshold T: (required)
# If the number of outstanding messages indicated by the counter output
# exceeds this value, it is treated as Warning.
#
# -C T
# --msg_count_critical_threshold T: (required)
# If the number of outstanding messages indicated by the counter output
# exceeds this value, it is treated as Critical.
#
# -b HOST
# --bruce_host HOST: (required unless running in manual mode)
# The host running bruce that we should connect to for counter data.
#
# -p PORT
# --bruce_status_port PORT: (optional, default = 9090)
# The port to connect to when asking bruce for counter data.
#
# -u USER
# --nagios_user USER: (optional, default = 'nrpe')
# The name of the nagios user. The script will create a directory under
# the nagios user's home directory (see '-d DIR' option above).
###############################################################################
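# An illustrative invocation using the options documented above (host names,
# thresholds and the work directory are made up):
#
#   ./check_bruce_counters.py -d bruce_counters -i 300 -H 1440 -s nagios1 \
#       -w 5 -c 20 -W 1000 -C 10000 -b bruce01 -p 9090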
import bisect
import errno
import getopt
import json
import os
import subprocess
import sys
import time
from urllib2 import URLError
from urllib2 import urlopen
# Nagios exit codes
EC_SUCCESS = 0
EC_WARNING = 1
EC_CRITICAL = 2
EC_UNKNOWN = 3
# This will contain program options information.
Opts = None
###############################################################################
# Print a message and exit with the given exit code.
###############################################################################
def Die(exit_code, msg):
print msg
sys.exit(exit_code)
###############################################################################
###############################################################################
# Return the number of seconds since the epoch.
###############################################################################
def SecondsSinceEpoch():
return int(time.time())
###############################################################################
###############################################################################
# Create the directory given by 'path' if it doesn't already exist.
###############################################################################
def MakeDirExist(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
Die(EC_UNKNOWN, 'Failed to create directory ' + path + ': ' +
e.strerror)
###############################################################################
###############################################################################
# Convert the given Nagios exit code to a string and return the result.
###############################################################################
def NagiosCodeToString(nagios_code):
if nagios_code == EC_SUCCESS:
return 'Success'
if nagios_code == EC_WARNING:
return 'Warning'
if nagios_code == EC_CRITICAL:
return 'Critical'
if nagios_code != EC_UNKNOWN:
Die(EC_UNKNOWN, 'Cannot convert unknown Nagios code ' +
str(nagios_code) + ' to string')
return 'Unknown'
###############################################################################
###############################################################################
# Return true iff. we are running in manual mode.
###############################################################################
def RunningInManualMode():
return (Opts.Manual >= 0)
###############################################################################
###############################################################################
# Print a problem message preceded by a string representation of its Nagios
# code.
###############################################################################
def ReportProblem(nagios_code, msg):
print NagiosCodeToString(nagios_code) + ': ' + msg
###############################################################################
###############################################################################
# Return the home directory of the Nagios user as a string.
###############################################################################
def GetNagiosDir():
try:
p = subprocess.Popen(['/bin/bash', '-c',
'echo -n ~' + Opts.NagiosUser],
stdout=subprocess.PIPE)
out, err = p.communicate()
except OSError as e:
Die(EC_UNKNOWN, 'Failed to execute shell command to determine '
'Nagios home directory: ' + e.strerror)
if not out:
Die(EC_UNKNOWN, 'Got empty result from shell command to determine '
'Nagios home directory')
if out[0] == '~':
Die(EC_UNKNOWN, 'Nagios home directory not found')
if out[0] != '/':
Die(EC_UNKNOWN, 'Got strange output while trying to determine '
'Nagios home directory: [' + out + ']')
return out
###############################################################################
###############################################################################
# Return the value associated with string 'key' in dictionary 'counters', or
# die with an error message if no such key was found.
###############################################################################
def LookupCounter(counters, key):
try:
value = counters[key]
except KeyError:
Die(EC_UNKNOWN, 'Counter ' + key + ' not found')
return value
###############################################################################
###############################################################################
# Parse a JSON counter report and return the result.
###############################################################################
def ParseCounterReport(json_input):
try:
result = json.loads(json_input)
except ValueError:
Die(EC_UNKNOWN, 'Failed to parse counter report')
if type(result) is not dict:
Die(EC_UNKNOWN, 'Counter report is not a dictionary')
if 'now' not in result:
Die(EC_UNKNOWN, 'Counter report missing "now" item')
if type(result['now']) is not int:
Die(EC_UNKNOWN, 'Item "now" in counter report is not an integer')
if 'since' not in result:
Die(EC_UNKNOWN, 'Counter report missing "since" item')
if type(result['since']) is not int:
Die(EC_UNKNOWN, 'Item "since" in counter report is not an integer')
if 'pid' not in result:
Die(EC_UNKNOWN, 'Counter report missing "pid" item')
if type(result['pid']) is not int:
Die(EC_UNKNOWN, 'Item "pid" in counter report is not an integer')
if 'version' not in result:
Die(EC_UNKNOWN, 'Counter report missing "version" item')
if type(result['version']) is not unicode:
Die(EC_UNKNOWN,
'Item "version" in counter report is not a unicode string')
if 'counters' not in result:
Die(EC_UNKNOWN, 'Counter report missing "counters" item')
if type(result['counters']) is not list:
Die(EC_UNKNOWN, 'Item "counters" in counter report is not a list')
counter_names = set()
for item in result['counters']:
if type(item) is not dict:
Die(EC_UNKNOWN, 'Counter item is not a dictionary')
if 'name' not in item:
Die(EC_UNKNOWN, 'Counter item has no name')
if type(item['name']) is not unicode:
Die(EC_UNKNOWN, 'Counter item name is not a unicode string')
if item['name'] in counter_names:
Die(EC_UNKNOWN, 'Duplicate counter name [' + item['name'] + ']')
counter_names.add(item['name'])
if 'value' not in item:
Die(EC_UNKNOWN, 'Counter item [' + item['name'] + '] has no value')
if type(item['value']) is not int:
Die(EC_UNKNOWN, 'Value of counter item [' + item['name'] + \
'] is not an integer')
if 'location' not in item:
Die(EC_UNKNOWN, 'Counter item [' + item['name'] + \
'] has no location')
if type(item['location']) is not unicode:
Die(EC_UNKNOWN, 'Location of counter item [' + item['name'] + \
'] is not a unicode string')
return result
###############################################################################
###############################################################################
# Serialize a counter report to a JSON string and return the result.
###############################################################################
def SerializeCounterReport(report):
return json.dumps(report, sort_keys=True, indent=4,
separators=(',', ': ')) + '\n'
###############################################################################
###############################################################################
# Send a "get counters" HTTP request to bruce, parse the response, and return
# the parsed counter report.
###############################################################################
def GetCounters(url):
try:
response = urlopen(url)
except URLError as e:
# Treat this as Critical, since it indicates that bruce is probably not
# running.
Die(EC_CRITICAL, 'Failed to open counters URL: ' + str(e.reason))
json_input = ''
for line in response:
json_input += line
return ParseCounterReport(json_input)
###############################################################################
###############################################################################
# Create a file with counter data whose location is given by 'path'.
# 'counter_report' is a counter report containing the data to be written.
# Serialize the report to JSON and write it to the file.
###############################################################################
def CreateCounterFile(path, counter_report):
json_report = SerializeCounterReport(counter_report)
try:
is_open = False
outfile = open(path, 'w')
is_open = True
outfile.write(json_report)
except OSError as e:
Die(EC_UNKNOWN, 'Failed to create counter file ' + path + ': ' +
e.strerror)
except IOError as e:
Die(EC_UNKNOWN, 'Failed to create counter file ' + path + ': ' +
e.strerror)
finally:
if is_open:
outfile.close()
###############################################################################
###############################################################################
# Read counter data from file given by 'path'. Return a counter report
# containing the data.
###############################################################################
def ReadCounterFile(path):
json_input = ''
try:
is_open = False
infile = open(path, 'r')
is_open = True
for line in infile:
json_input += line
except OSError as e:
Die(EC_UNKNOWN, 'Failed to open counter file ' + path +
' for reading: ' + e.strerror)
except IOError as e:
Die(EC_UNKNOWN, 'Failed to open counter file ' + path +
' for reading: ' + e.strerror)
finally:
if is_open:
infile.close()
return ParseCounterReport(json_input)
###############################################################################
###############################################################################
# Find all names of files in the directory given by 'path' that may be counter
# files based on their names, and that are older than the current time given by
# 'now'. Return the filenames converted to integers (each representing seconds
# since the epoch) as a list. The returned list will be sorted in ascending
# order.
###############################################################################
def FindOldCounterFileTimes(path, now):
try:
file_list = os.listdir(path)
except OSError as e:
Die(EC_UNKNOWN, 'Failed to list contents of directory ' + path + ': ' +
e.strerror)
except IOError as e:
Die(EC_UNKNOWN, 'Failed to list contents of directory ' + path + ': ' +
e.strerror)
result = []
for item in file_list:
try:
value = int(item)
except ValueError:
# The filename can't be converted to an integer, so it must not be
# a counter file.
continue
if (value < now) and (value >= 0):
result.append(value)
result.sort()
return result
###############################################################################
###############################################################################
# Read the counter file for the most recent previous counter report for the
# currently running instance of bruce. Return a counter report containing the
# data, or None if no such data exists.
#
# parameters:
# current_report: The current counter report.
# work_path: The pathname of the directory containing the counter files.
# old_counter_file_times: A list of integers sorted in ascending order.
# Each value is a number of seconds since the epoch whose string
# representation gives the name of a counter file in directory
# 'work_path'.
###############################################################################
def GetLastCounterReport(current_report, work_path, old_counter_file_times):
file_count = len(old_counter_file_times)
# There are no previous counter files, so we don't yet have any history.
if file_count == 0:
return None
newest_timestamp = old_counter_file_times[file_count - 1]
path = work_path + '/' + str(newest_timestamp)
last_report = ReadCounterFile(path)
# The most recent previous counter file contains data for a different
# invocation of bruce (i.e. bruce restarted). Therefore the data is not
# applicable.
if last_report['pid'] != current_report['pid']:
return None
if last_report['now'] != newest_timestamp:
Die(EC_UNKNOWN, 'Previous counter report file ' +
str(newest_timestamp) + ' contains timestamp ' +
str(last_report['now']) + ' that differs from filename')
return last_report
###############################################################################
###############################################################################
# Read the counter file for a previous counter report for the currently running
# instance of bruce. The most recent file that was created at least a given
# number of seconds ago will be chosen. Return a counter report containing the
# data, or None if no such data exists.
#
# parameters:
# current_report: The current counter report.
# work_path: The pathname of the directory containing the counter files.
# old_counter_file_times: A list of integers sorted in ascending order.
# Each value is a number of seconds since the epoch whose string
# representation gives the name of a counter file in directory
# 'work_path'.
# min_seconds_ago: The minimum number of seconds prior to the timestamp of
# 'current_report' that the chosen report's timestamp must be.
###############################################################################
def GetOldCounterReport(current_report, work_path, old_counter_file_times,
min_seconds_ago):
file_count = len(old_counter_file_times)
if file_count == 0:
return None
max_ok_timestamp = current_report['now'] - min_seconds_ago
# Use binary search to find the right timestamp.
i = bisect.bisect_left(old_counter_file_times, max_ok_timestamp)
if i == file_count:
chosen_timestamp = old_counter_file_times[file_count - 1]
elif i == 0:
chosen_timestamp = old_counter_file_times[0]
if chosen_timestamp > max_ok_timestamp:
# All reports are too recent.
return None
else:
chosen_timestamp = old_counter_file_times[i]
if chosen_timestamp > max_ok_timestamp:
chosen_timestamp = old_counter_file_times[i - 1]
path = work_path + '/' + str(chosen_timestamp)
old_report = ReadCounterFile(path)
# The old counter file contains data for a different invocation of bruce
# (i.e. bruce restarted). Therefore the data is not applicable.
if old_report['pid'] != current_report['pid']:
return None
if old_report['now'] != chosen_timestamp:
Die(EC_UNKNOWN, 'Old counter report file ' + str(chosen_timestamp) +
' contains wrong timestamp')
return old_report
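# A worked example of the timestamp selection above (timestamps are
# illustrative): with old_counter_file_times = [100, 160, 220], a current
# 'now' of 250 and min_seconds_ago of 60, max_ok_timestamp is 190;
# bisect_left() returns 2, 220 is too recent, so the file from time 160 is
# chosen.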
###############################################################################
###############################################################################
# Input list 'in_counters' contains counter information from a counter report.
# Return a dictionary whose keys are the names of the counters in 'in_counters'
# and whose values are the corresponding counts.
###############################################################################
def CounterListToDict(in_counters):
result = {}
for item in in_counters:
result[item['name']] = item['value']
return result
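#
# Illustrative sketch (hypothetical counter values, not part of the original
# script): CounterListToDict([{'name': 'MsgCreate', 'value': 7},
#                             {'name': 'MsgDestroy', 'value': 5}])
# returns {'MsgCreate': 7, 'MsgDestroy': 5}.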
###############################################################################
###############################################################################
# Take as input two lists, 'old_counters' and 'new_counters', of counter
# information from counter reports. The input lists must have exactly the same
# sets of counter names. If 'old_counters' is None, return a dictionary whose
# keys are the names of the counters from 'new_counters' and whose values are
# the corresponding counts. Otherwise return a dictionary with keys identical
# to the counter names from 'old_counters' and 'new_counters'. For each key K
# in the returned dictionary, suppose V_old is the value of the corresponding
# counter in 'old_counters' and V_new is the value of the corresponding counter
# in 'new_counters'. The value associated with K is then V_new - V_old. Exit
# with an error if the sets of counter names from the input lists are not
# identical or the computed value for any key in the result dictionary is
# negative.
###############################################################################
def ComputeCounterDeltas(old_counters, new_counters):
new_counter_dict = CounterListToDict(new_counters)
    if old_counters is None:
return new_counter_dict
old_counter_dict = CounterListToDict(old_counters)
    old_keys = set(old_counter_dict.keys())
    new_keys = set(new_counter_dict.keys())
s = old_keys - new_keys
if s:
Die(EC_UNKNOWN, 'Old counter file has unmatched keys: ' + str(s))
s = new_keys - old_keys
if s:
Die(EC_UNKNOWN, 'New counter file has unmatched keys: ' + str(s))
result = {}
for k in new_keys:
delta = new_counter_dict[k] - old_counter_dict[k]
if delta < 0:
Die(EC_UNKNOWN, 'Key [' + k + '] decreased by ' + str(-delta) + \
' in new counters file')
result[k] = delta
return result
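#
# Illustrative sketch (hypothetical counter values): with
# old_counters = [{'name': 'SenderSocketError', 'value': 2}] and
# new_counters = [{'name': 'SenderSocketError', 'value': 5}],
# ComputeCounterDeltas(old_counters, new_counters) returns
# {'SenderSocketError': 3}.  With old_counters=None the new counts are
# returned unchanged: {'SenderSocketError': 5}.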
###############################################################################
###############################################################################
# Look up the counter whose name string is 'counter_name' in dictionary
# 'deltas'. If its value is > 'max_ok_value' then report it as a problem with
# Nagios code 'nagios_code' and return max(nagios_code, old_nagios_code).
# Otherwise return 'old_nagios_code'.
###############################################################################
def CheckDelta(deltas, counter_name, max_ok_value, nagios_code,
old_nagios_code):
new_nagios_code = old_nagios_code
count = LookupCounter(deltas, counter_name)
if count > max_ok_value:
new_nagios_code = max(new_nagios_code, nagios_code)
ReportProblem(nagios_code, counter_name + '=' + str(count))
return new_nagios_code
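#
# Illustrative sketch (hypothetical values): if deltas contains
# {'NoDiscardQuery': 1}, then
# CheckDelta(deltas, 'NoDiscardQuery', 0, EC_CRITICAL, EC_SUCCESS)
# reports the problem and returns EC_CRITICAL, since 1 > 0 and Critical
# outranks Success.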
###############################################################################
###############################################################################
# Check for socket-related errors in dictionary 'deltas' and report a problem
# if the combined socket error count is high enough. Return a nagios code
# equal to max(old_nagios_code, nagios code of reported problem) if a problem
# was reported. Otherwise return 'old_nagios_code'.
###############################################################################
def CheckSocketErrorDeltas(deltas, old_nagios_code):
new_nagios_code = old_nagios_code
counter_names = [ 'ConnectFailOnTryGetMetadata',
'MetadataResponseRead1LostTcpConnection',
'MetadataResponseRead1TimedOut',
'MetadataResponseRead2LostTcpConnection',
'MetadataResponseRead2TimedOut',
'MetadataResponseRead2UnexpectedEnd',
'ReadMetadataResponse2Fail',
'ReceiverSocketBrokerClose',
'ReceiverSocketError',
'ReceiverSocketTimeout',
'SenderConnectFail',
'SenderSocketError',
'SenderSocketTimeout',
'SendMetadataRequestFail',
'SendMetadataRequestLostTcpConnection',
'SendMetadataRequestTimedOut'
]
nonzero_counter_names = []
sum = 0
for name in counter_names:
count = LookupCounter(deltas, name)
if count > 0:
nonzero_counter_names.append(name)
sum += count
print_counters = False
if sum > Opts.SocketErrorCriticalThreshold:
new_nagios_code = max(new_nagios_code, EC_CRITICAL)
print_counters = RunningInManualMode()
ReportProblem(EC_CRITICAL, str(sum) + ' socket errors:')
elif sum > Opts.SocketErrorWarnThreshold:
new_nagios_code = max(new_nagios_code, EC_WARNING)
print_counters = RunningInManualMode()
ReportProblem(EC_WARNING, str(sum) + ' socket errors:')
if print_counters:
for name in nonzero_counter_names:
print ' ' + name + '=' + str(deltas[name])
return new_nagios_code
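#
# Illustrative sketch (hypothetical thresholds): if the socket error deltas
# sum to 12 with SocketErrorWarnThreshold=10 and
# SocketErrorCriticalThreshold=50, the function reports '12 socket errors:'
# at Warning severity and returns max(old_nagios_code, EC_WARNING).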
###############################################################################
###############################################################################
# The keys of input dictionary 'deltas' are counter names, and the values are
# differences between integer counts from the current counter report (obtained
# directly from bruce) and the previous counter report (obtained from a file).
# Check for problems, report any problems found, and return a Nagios code
# representing the maximum severity of any problems found.
###############################################################################
def AnalyzeDeltas(deltas):
# A single instance of any of these is Critical.
nagios_code = CheckDelta(deltas, 'MsgUnprocessedDestroy', 0, EC_CRITICAL,
EC_SUCCESS)
nagios_code = CheckDelta(deltas, 'NoDiscardQuery', 0, EC_CRITICAL,
nagios_code)
nagios_code = CheckDelta(deltas, 'MetadataHasEmptyTopicList', 0,
EC_CRITICAL, nagios_code)
nagios_code = CheckDelta(deltas, 'MetadataHasEmptyBrokerList', 0,
EC_CRITICAL, nagios_code)
nagios_code = CheckDelta(deltas, 'BugDispatchBatchOutOfRangeIndex', 0,
EC_CRITICAL, nagios_code)
nagios_code = CheckDelta(deltas, 'BugDispatchMsgOutOfRangeIndex', 0,
EC_CRITICAL, nagios_code)
nagios_code = CheckDelta(deltas, 'TopicHasNoAvailablePartitions', 0,
EC_CRITICAL, nagios_code)
nagios_code = CheckDelta(deltas, 'InitialGetMetadataFail', 0, EC_CRITICAL,
nagios_code)
nagios_code = CheckDelta(deltas, 'GetMetadataFail', 0, EC_CRITICAL,
nagios_code)
# Any number of instances of any of these is Warning.
nagios_code = CheckDelta(deltas, 'MetadataResponseBadTopicNameLen', 0,
EC_WARNING, nagios_code)
nagios_code = CheckDelta(deltas, 'MetadataResponseBadBrokerHostLen', 0,
EC_WARNING, nagios_code)
nagios_code = CheckDelta(deltas,
'MetadataResponseNegativeCaughtUpReplicaNodeId', 0, EC_WARNING,
nagios_code)
nagios_code = CheckDelta(deltas,
'MetadataResponseNegativePartitionCaughtUpReplicaCount', 0,
EC_WARNING, nagios_code)
nagios_code = CheckDelta(deltas, 'MetadataResponseNegativeReplicaNodeId',
0, EC_WARNING, nagios_code)
nagios_code = CheckDelta(deltas,
'MetadataResponseNegativePartitionReplicaCount', 0, EC_WARNING,
nagios_code)
nagios_code = CheckDelta(deltas, 'MetadataResponseInvalidLeaderNodeId', 0,
EC_WARNING, nagios_code)
nagios_code = CheckDelta(deltas, 'MetadataResponseNegativePartitionId', 0,
EC_WARNING, nagios_code)
nagios_code = CheckDelta(deltas, 'MetadataResponseNegativePartitionCount',
0, EC_WARNING, nagios_code)
nagios_code = CheckDelta(deltas, 'MetadataResponseNegativeTopicCount', 0,
EC_WARNING, nagios_code)
nagios_code = CheckDelta(deltas, 'MetadataResponseBadBrokerPort', 0,
EC_WARNING, nagios_code)
nagios_code = CheckDelta(deltas, 'MetadataResponseNegativeBrokerNodeId', 0,
EC_WARNING, nagios_code)
nagios_code = CheckDelta(deltas, 'MetadataResponseNegativeBrokerCount', 0,
EC_WARNING, nagios_code)
nagios_code = CheckDelta(deltas, 'MetadataResponseHasExtraJunk', 0,
EC_WARNING, nagios_code)
nagios_code = CheckDelta(deltas, 'BadMetadataResponseSize', 0, EC_WARNING,
nagios_code)
nagios_code = CheckDelta(deltas, 'BadMetadataContent', 0, EC_WARNING,
nagios_code)
nagios_code = CheckDelta(deltas, 'BadMetadataResponse', 0, EC_WARNING,
nagios_code)
nagios_code = CheckDelta(deltas, 'ProduceResponseBadPartitionCount', 0,
EC_WARNING, nagios_code)
nagios_code = CheckDelta(deltas, 'ProduceResponseBadTopicNameLength', 0,
EC_WARNING, nagios_code)
nagios_code = CheckDelta(deltas, 'ProduceResponseBadTopicCount', 0,
EC_WARNING, nagios_code)
nagios_code = CheckDelta(deltas, 'BadKafkaResponseSize', 0, EC_WARNING,
nagios_code)
nagios_code = CheckDelta(deltas, 'BugGetAckWaitQueueOutOfRangeIndex', 0,
EC_WARNING, nagios_code)
nagios_code = CheckDelta(deltas, 'ProduceResponseUnexpectedTopic', 0,
EC_WARNING, nagios_code)
nagios_code = CheckDelta(deltas, 'ProduceResponseUnexpectedPartition', 0,
EC_WARNING, nagios_code)
nagios_code = CheckDelta(deltas, 'ProduceResponseShortPartitionList', 0,
EC_WARNING, nagios_code)
nagios_code = CheckDelta(deltas, 'ProduceResponseShortTopicList', 0,
EC_WARNING, nagios_code)
nagios_code = CheckDelta(deltas, 'CorrelationIdMismatch', 0, EC_WARNING,
nagios_code)
nagios_code = CheckDelta(deltas, 'BadProduceResponseSize', 0, EC_WARNING,
nagios_code)
nagios_code = CheckDelta(deltas, 'BadProduceResponse', 0, EC_WARNING,
nagios_code)
nagios_code = CheckDelta(deltas, 'BugAllTopicsEmpty', 0, EC_WARNING,
nagios_code)
nagios_code = CheckDelta(deltas, 'BugMsgListMultipleTopics', 0, EC_WARNING,
nagios_code)
nagios_code = CheckDelta(deltas, 'BugMsgSetEmpty', 0, EC_WARNING,
nagios_code)
nagios_code = CheckDelta(deltas, 'BugMultiPartitionGroupEmpty', 0,
EC_WARNING, nagios_code)
nagios_code = CheckDelta(deltas, 'BugProduceRequestEmpty', 0, EC_WARNING,
nagios_code)
nagios_code = CheckDelta(deltas, 'MsgSetCompressionError', 0, EC_WARNING,
nagios_code)
# Check counters indicating socket-related errors. These are summed
# together and compared against thresholds for Warning and Critical.
nagios_code = CheckSocketErrorDeltas(deltas, nagios_code)
# An instance of any of these indicates that something is wrong inside
# bruce's HTTP status monitoring mechanism, which means that the integrity
# of the information received by the monitoring scripts may be compromised.
nagios_code = CheckDelta(deltas, 'MongooseUrlDecodeError', 1, EC_UNKNOWN,
nagios_code)
nagios_code = CheckDelta(deltas, 'MongooseStdException', 1, EC_UNKNOWN,
nagios_code)
nagios_code = CheckDelta(deltas, 'MongooseUnknownException', 1,
EC_UNKNOWN, nagios_code)
return nagios_code
###############################################################################
###############################################################################
# Compare the 'MsgCreate' and 'MsgDestroy' counter values from input list
# 'counters' of counters from a counter report.  Report a problem if one is
# found and return the appropriate Nagios code.
###############################################################################
def CheckOutstandingMsgCount(counters):
counter_dict = CounterListToDict(counters)
msg_create_count = LookupCounter(counter_dict, 'MsgCreate')
msg_destroy_count = LookupCounter(counter_dict, 'MsgDestroy')
if msg_destroy_count > msg_create_count:
ReportProblem(EC_CRITICAL, 'MsgDestroy counter value ' +
str(msg_destroy_count) +
' is greater than MsgCreate counter value ' +
str(msg_create_count))
return EC_CRITICAL
msg_count = msg_create_count - msg_destroy_count
if msg_count > Opts.MsgCountCriticalThreshold:
ReportProblem(EC_CRITICAL, 'MsgCreate - MsgDestroy is ' +
str(msg_count))
return EC_CRITICAL
if msg_count > Opts.MsgCountWarnThreshold:
ReportProblem(EC_WARNING, 'MsgCreate - MsgDestroy is ' +
str(msg_count))
return EC_WARNING
return EC_SUCCESS
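#
# Illustrative sketch (hypothetical values): with MsgCreate=1500,
# MsgDestroy=1000, MsgCountWarnThreshold=400, and
# MsgCountCriticalThreshold=2000, the outstanding message count is 500,
# which exceeds only the warning threshold, so the problem is reported and
# EC_WARNING is returned.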
###############################################################################
###############################################################################
# Compare counter info lists 'current_counters' and 'old_counters' and report
# a problem if counter 'GetMetadataSuccess' has not increased. Return the
# appropriate Nagios code.
###############################################################################
def CheckMetadataUpdates(current_counters, old_counters):
old_counter_dict = CounterListToDict(old_counters)
new_counter_dict = CounterListToDict(current_counters)
counter_name = 'GetMetadataSuccess'
old_count = LookupCounter(old_counter_dict, counter_name)
new_count = LookupCounter(new_counter_dict, counter_name)
if new_count > old_count:
return EC_SUCCESS
ReportProblem(EC_WARNING, counter_name + ' not increasing: old value ' +
str(old_count) + ' new value ' + str(new_count))
return EC_CRITICAL
###############################################################################
###############################################################################
# Delete any counter files whose ages exceed the configured history period.
###############################################################################
def DeleteOldCounterFiles(work_path, now, old_counter_file_times):
# A value of 0 means "don't delete anything".
if Opts.History == 0:
return
for epoch_seconds in old_counter_file_times:
# Opts.History specifies a value in minutes.
if (epoch_seconds > now) or \
((now - epoch_seconds) <= (60 * Opts.History)):
break
path = work_path + '/' + str(epoch_seconds)
try:
os.remove(path)
except OSError as e:
Die(EC_UNKNOWN, 'Failed to delete counter file ' + path + ': ' +
e.strerror)
except IOError as e:
Die(EC_UNKNOWN, 'Failed to delete counter file ' + path + ': ' +
e.strerror)
###############################################################################
###############################################################################
# Class for storing program options
#
# members:
# Manual: If we are not running in manual mode, this is -1. Otherwise,
# it gives a number of seconds since the epoch specifying a counter
# file to be manually analyzed.
# WorkDir: A subdirectory beneath the Nagios user's home directory where
# the script maintains all of its counter data.
# Interval: The minimum number of seconds ago we will accept when looking
# for a recent counter file to compare our metadata update count
# against.
# History: The number of minutes of history to preserve when deleting old
# counter files.
# NagiosServer: If not running in manual mode, this is a unique identifier
# for the Nagios server that triggered the current execution of this
# script. If running in manual mode, this is a unique identifier for
# the Nagios server that triggered the script execution that caused
# creation of the counter file we are analyzing.
# SocketErrorWarnThreshold: If the number of socket errors indicated by the
# counter output exceeds this value, it is treated as Warning.
# SocketErrorCriticalThreshold: If the number of socket errors indicated by
# the counter output exceeds this value, it is treated as Critical.
# MsgCountWarnThreshold: If the number of outstanding messages indicated by
# the counter output exceeds this value, it is treated as Warning.
# MsgCountCriticalThreshold: If the number of outstanding messages
# indicated by the counter output exceeds this value, it is treated as
# Critical.
# BruceHost: The host running bruce that we should connect to for counter
# data.
# BruceStatusPort: The port to connect to when asking bruce for counter
# data.
# NagiosUser: The name of the nagios user.
###############################################################################
class TProgramOptions(object):
'program options class'
def __init__(self, manual, work_dir, interval, history, nagios_server,
socket_error_warn_threshold,
socket_error_critical_threshold, msg_count_warn_threshold,
msg_count_critical_threshold, bruce_host, bruce_status_port,
nagios_user):
self.Manual = manual
self.WorkDir = work_dir
self.Interval = interval
self.History = history
self.NagiosServer = nagios_server
self.SocketErrorWarnThreshold = socket_error_warn_threshold
self.SocketErrorCriticalThreshold = socket_error_critical_threshold
self.MsgCountWarnThreshold = msg_count_warn_threshold
self.MsgCountCriticalThreshold = msg_count_critical_threshold
self.BruceHost = bruce_host
self.BruceStatusPort = bruce_status_port
self.NagiosUser = nagios_user
###############################################################################
###############################################################################
# Parse command line arguments provided by input list parameter 'args' and
# return a corresponding TProgramOptions object on success. If there is a
# problem with the arguments, die with an error message.
###############################################################################
def ParseArgs(args):
try:
        opts, args = getopt.getopt(args, 'm:d:i:H:s:w:c:W:C:b:p:u:',
                ['manual=', 'work_dir=', 'interval=', 'history=',
'nagios_server=', 'socket_error_warn_threshold=',
'socket_error_critical_threshold=',
'msg_count_warn_threshold=', 'msg_count_critical_threshold=',
'bruce_host=', 'bruce_status_port=', 'nagios_user='])
except getopt.GetoptError as e:
Die(EC_UNKNOWN, str(e))
opt_manual = -1
opt_work_dir = ''
opt_interval = -1
opt_history = -1
opt_nagios_server = ''
opt_socket_error_warn_threshold = -1
opt_socket_error_critical_threshold = -1
opt_msg_count_warn_threshold = -1
opt_msg_count_critical_threshold = -1
opt_bruce_host = ''
opt_bruce_status_port = 9090
opt_nagios_user = 'nrpe'
for o, a in opts:
if o in ('-m', '--manual'):
try:
opt_manual = int(a)
except ValueError:
Die(EC_UNKNOWN, 'The ' + o + ' option requires an integer')
if opt_manual < 0:
Die(EC_UNKNOWN,
'The ' + o + ' option requires a nonnegative integer')
elif o in ('-d', '--work_dir'):
opt_work_dir = a
elif o in ('-i', '--interval'):
try:
opt_interval = int(a)
except ValueError:
Die(EC_UNKNOWN, 'The ' + o + ' option requires an integer')
if opt_interval < 1:
Die(EC_UNKNOWN,
'The ' + o + ' option requires a positive integer')
elif o in ('-H', '--history'):
try:
opt_history = int(a)
except ValueError:
Die(EC_UNKNOWN, 'The ' + o + ' option requires an integer')
if opt_history < 0:
Die(EC_UNKNOWN,
'The ' + o + ' option requires a nonnegative integer')
elif o in ('-s', '--nagios_server'):
opt_nagios_server = a
elif o in ('-w', '--socket_error_warn_threshold'):
try:
opt_socket_error_warn_threshold = int(a)
except ValueError:
Die(EC_UNKNOWN, 'The ' + o + ' option requires an integer')
if opt_socket_error_warn_threshold < 0:
Die(EC_UNKNOWN,
'The ' + o + ' option requires a nonnegative integer')
elif o in ('-c', '--socket_error_critical_threshold'):
try:
opt_socket_error_critical_threshold = int(a)
except ValueError:
Die(EC_UNKNOWN, 'The ' + o + ' option requires an integer')
if opt_socket_error_critical_threshold < 0:
Die(EC_UNKNOWN,
'The ' + o + ' option requires a nonnegative integer')
elif o in ('-W', '--msg_count_warn_threshold'):
try:
opt_msg_count_warn_threshold = int(a)
except ValueError:
Die(EC_UNKNOWN, 'The ' + o + ' option requires an integer')
if opt_msg_count_warn_threshold < 0:
Die(EC_UNKNOWN,
'The ' + o + ' option requires a nonnegative integer')
elif o in ('-C', '--msg_count_critical_threshold'):
try:
opt_msg_count_critical_threshold = int(a)
except ValueError:
Die(EC_UNKNOWN, 'The ' + o + ' option requires an integer')
if opt_msg_count_critical_threshold < 0:
Die(EC_UNKNOWN,
'The ' + o + ' option requires a nonnegative integer')
elif o in ('-b', '--bruce_host'):
opt_bruce_host = a
elif o in ('-p', '--bruce_status_port'):
try:
opt_bruce_status_port = int(a)
except ValueError:
Die(EC_UNKNOWN, 'The ' + o + ' option requires an integer')
if (opt_bruce_status_port < 1) or (opt_bruce_status_port > 65535):
Die(EC_UNKNOWN,
'The ' + o +
' option requires an integer between 1 and 65535')
elif o in ('-u', '--nagios_user'):
opt_nagios_user = a
else:
Die(EC_UNKNOWN, 'Unhandled command line option')
if opt_work_dir == '':
Die(EC_UNKNOWN, '-d or --work_dir option must be specified')
if opt_interval == -1:
Die(EC_UNKNOWN, '-i or --interval option must be specified')
if opt_history == -1:
if opt_manual == -1:
Die(EC_UNKNOWN, '-H or --history option must be specified')
else:
opt_history = 0
if opt_nagios_server == '':
Die(EC_UNKNOWN, '-s or --nagios_server option must be specified')
if opt_socket_error_warn_threshold == -1:
Die(EC_UNKNOWN, '-w or --socket_error_warn_threshold option must be '
'specified')
if opt_socket_error_critical_threshold == -1:
Die(EC_UNKNOWN, '-c or --socket_error_critical_threshold option '
'must be specified')
if opt_msg_count_warn_threshold == -1:
Die(EC_UNKNOWN, '-W or --msg_count_warn_threshold option must be '
'specified')
if opt_msg_count_critical_threshold == -1:
Die(EC_UNKNOWN, '-C or --msg_count_critical_threshold option must '
'be specified')
if (opt_bruce_host == '') and (opt_manual == -1):
Die(EC_UNKNOWN, '-b or --bruce_host option must be specified')
return TProgramOptions(opt_manual, opt_work_dir, opt_interval,
opt_history, opt_nagios_server,
opt_socket_error_warn_threshold,
opt_socket_error_critical_threshold,
opt_msg_count_warn_threshold,
opt_msg_count_critical_threshold, opt_bruce_host,
opt_bruce_status_port, opt_nagios_user)
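#
# Illustrative sketch of an invocation this parser accepts (script name, host
# name, and threshold values are hypothetical):
#
#   <this_script> -d bruce_counters -i 300 -H 1440 -s nagios1 \
#       -w 10 -c 50 -W 1000 -C 10000 -b bruce01.example.com -p 9090
#
# Since -m is absent, the resulting TProgramOptions has Manual == -1
# (automatic mode).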
###############################################################################
###############################################################################
# main program
###############################################################################
def main():
# 'Opts' is a global variable containing a TProgramOptions object with all
# program options.
global Opts
Opts = ParseArgs(sys.argv[1:])
work_path = GetNagiosDir() + '/' + Opts.WorkDir + '/' + Opts.NagiosServer
MakeDirExist(work_path)
if RunningInManualMode():
now = Opts.Manual
counter_report = ReadCounterFile(work_path + '/' + str(now))
else:
counter_report = GetCounters('http://' + Opts.BruceHost + ':' + \
str(Opts.BruceStatusPort) + '/counters/json')
now = counter_report['now']
CreateCounterFile(work_path + '/' + str(now), counter_report)
old_counter_file_times = FindOldCounterFileTimes(work_path, now)
last_report = GetLastCounterReport(counter_report, work_path,
old_counter_file_times)
    if last_report is None:
last_counters = None
else:
last_counters = last_report['counters']
deltas = ComputeCounterDeltas(last_counters, counter_report['counters'])
nagios_code = AnalyzeDeltas(deltas)
nagios_code = max(nagios_code,
CheckOutstandingMsgCount(counter_report['counters']))
older_report = GetOldCounterReport(counter_report, work_path,
old_counter_file_times, Opts.Interval)
    if older_report is not None:
nagios_code = max(nagios_code,
CheckMetadataUpdates(counter_report['counters'],
older_report['counters']))
# Nagios expects some sort of output even in the case of a successful
# result.
if nagios_code == EC_SUCCESS:
print "Ok"
# Manual mode is used by a human being to view the details of problems
# previously reported by this script while being executed by Nagios. In
# this case, deleting old counter files is not a desired behavior.
if RunningInManualMode():
print 'Nagios code: ' + NagiosCodeToString(nagios_code)
else:
DeleteOldCounterFiles(work_path, now, old_counter_file_times)
sys.exit(nagios_code)
###############################################################################
try:
main()
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
# Write stack trace to standard output, since that's where Nagios expects
# error output to go.
for elem in lines:
print elem
sys.exit(EC_UNKNOWN)
| 43.485714
| 79
| 0.554841
|
c4ddb27a8c37fd130e256b1924bf871fb31577da
| 81,438
|
py
|
Python
|
monai/transforms/intensity/array.py
|
ETS-Research-Repositories/MONAI
|
ec479d5b054063de61ef7af0caed021b36325fea
|
[
"Apache-2.0"
] | null | null | null |
monai/transforms/intensity/array.py
|
ETS-Research-Repositories/MONAI
|
ec479d5b054063de61ef7af0caed021b36325fea
|
[
"Apache-2.0"
] | null | null | null |
monai/transforms/intensity/array.py
|
ETS-Research-Repositories/MONAI
|
ec479d5b054063de61ef7af0caed021b36325fea
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A collection of "vanilla" transforms for intensity adjustment
https://github.com/Project-MONAI/MONAI/wiki/MONAI_Design
"""
from abc import abstractmethod
from collections.abc import Iterable
from functools import partial
from typing import Any, Callable, List, Optional, Sequence, Tuple, Union
from warnings import warn
import numpy as np
import torch
from monai.config import DtypeLike
from monai.config.type_definitions import NdarrayOrTensor, NdarrayTensor
from monai.data.utils import get_random_patch, get_valid_patch_size
from monai.networks.layers import GaussianFilter, HilbertTransform, SavitzkyGolayFilter
from monai.transforms.transform import RandomizableTransform, Transform
from monai.transforms.utils import Fourier, equalize_hist, is_positive, rescale_array
from monai.transforms.utils_pytorch_numpy_unification import clip, percentile, where
from monai.utils import (
PT_BEFORE_1_7,
InvalidPyTorchVersionError,
convert_data_type,
convert_to_dst_type,
ensure_tuple,
ensure_tuple_rep,
ensure_tuple_size,
fall_back_tuple,
)
from monai.utils.deprecate_utils import deprecated_arg
from monai.utils.enums import TransformBackends
from monai.utils.type_conversion import convert_to_tensor, get_equivalent_dtype
__all__ = [
"RandGaussianNoise",
"RandRicianNoise",
"ShiftIntensity",
"RandShiftIntensity",
"StdShiftIntensity",
"RandStdShiftIntensity",
"RandBiasField",
"ScaleIntensity",
"RandScaleIntensity",
"NormalizeIntensity",
"ThresholdIntensity",
"ScaleIntensityRange",
"AdjustContrast",
"RandAdjustContrast",
"ScaleIntensityRangePercentiles",
"MaskIntensity",
"DetectEnvelope",
"SavitzkyGolaySmooth",
"GaussianSmooth",
"RandGaussianSmooth",
"GaussianSharpen",
"RandGaussianSharpen",
"RandHistogramShift",
"GibbsNoise",
"RandGibbsNoise",
"KSpaceSpikeNoise",
"RandKSpaceSpikeNoise",
"RandCoarseTransform",
"RandCoarseDropout",
"RandCoarseShuffle",
"HistogramNormalize",
]
class RandGaussianNoise(RandomizableTransform):
"""
Add Gaussian noise to image.
Args:
prob: Probability to add Gaussian noise.
mean: Mean or “centre” of the distribution.
std: Standard deviation (spread) of distribution.
"""
backend = [TransformBackends.TORCH, TransformBackends.NUMPY]
def __init__(self, prob: float = 0.1, mean: float = 0.0, std: float = 0.1) -> None:
RandomizableTransform.__init__(self, prob)
self.mean = mean
self.std = std
self.noise: Optional[np.ndarray] = None
def randomize(self, img: NdarrayOrTensor, mean: Optional[float] = None) -> None:
super().randomize(None)
if not self._do_transform:
return None
rand_std = self.R.uniform(0, self.std)
self.noise = self.R.normal(self.mean if mean is None else mean, rand_std, size=img.shape)
def __call__(self, img: NdarrayOrTensor, mean: Optional[float] = None, randomize: bool = True) -> NdarrayOrTensor:
"""
Apply the transform to `img`.
"""
if randomize:
self.randomize(img=img, mean=self.mean if mean is None else mean)
if not self._do_transform:
return img
if self.noise is None:
raise RuntimeError("please call the `randomize()` function first.")
noise, *_ = convert_to_dst_type(self.noise, img)
return img + noise
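# Illustrative usage sketch (not part of the original module; the array shape
# is arbitrary):
#
#     noiser = RandGaussianNoise(prob=1.0, mean=0.0, std=0.1)
#     out = noiser(np.zeros((1, 4, 4), dtype=np.float32))
#     # `out` has the same shape as the input, with Gaussian noise added.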
class RandRicianNoise(RandomizableTransform):
"""
Add Rician noise to image.
Rician noise in MRI is the result of performing a magnitude operation on complex
data with Gaussian noise of the same variance in both channels, as described in `Noise in Magnitude
Magnetic Resonance Images <https://doi.org/10.1002/cmr.a.20124>`_. This transform is adapted from
    `DIPY <https://github.com/dipy/dipy>`_. See also: `The rician distribution of noisy mri data
<https://doi.org/10.1002/mrm.1910340618>`_.
Args:
prob: Probability to add Rician noise.
mean: Mean or "centre" of the Gaussian distributions sampled to make up
the Rician noise.
std: Standard deviation (spread) of the Gaussian distributions sampled
to make up the Rician noise.
channel_wise: If True, treats each channel of the image separately.
relative: If True, the spread of the sampled Gaussian distributions will
be std times the standard deviation of the image or channel's intensity
histogram.
sample_std: If True, sample the spread of the Gaussian distributions
uniformly from 0 to std.
"""
backend = [TransformBackends.TORCH, TransformBackends.NUMPY]
def __init__(
self,
prob: float = 0.1,
mean: Union[Sequence[float], float] = 0.0,
std: Union[Sequence[float], float] = 1.0,
channel_wise: bool = False,
relative: bool = False,
sample_std: bool = True,
) -> None:
RandomizableTransform.__init__(self, prob)
self.prob = prob
self.mean = mean
self.std = std
self.channel_wise = channel_wise
self.relative = relative
self.sample_std = sample_std
self._noise1: NdarrayOrTensor
self._noise2: NdarrayOrTensor
def _add_noise(self, img: NdarrayTensor, mean: float, std: float):
dtype_np = get_equivalent_dtype(img.dtype, np.ndarray)
im_shape = img.shape
_std = self.R.uniform(0, std) if self.sample_std else std
self._noise1 = self.R.normal(mean, _std, size=im_shape).astype(dtype_np)
self._noise2 = self.R.normal(mean, _std, size=im_shape).astype(dtype_np)
if isinstance(img, torch.Tensor):
n1 = torch.tensor(self._noise1, device=img.device)
n2 = torch.tensor(self._noise2, device=img.device)
return torch.sqrt((img + n1) ** 2 + n2 ** 2)
return np.sqrt((img + self._noise1) ** 2 + self._noise2 ** 2)
def __call__(self, img: NdarrayTensor, randomize: bool = True) -> NdarrayTensor:
"""
Apply the transform to `img`.
"""
if randomize:
super().randomize(None)
if not self._do_transform:
return img
if self.channel_wise:
_mean = ensure_tuple_rep(self.mean, len(img))
_std = ensure_tuple_rep(self.std, len(img))
for i, d in enumerate(img):
img[i] = self._add_noise(d, mean=_mean[i], std=_std[i] * d.std() if self.relative else _std[i])
else:
if not isinstance(self.mean, (int, float)):
raise RuntimeError("If channel_wise is False, mean must be a float or int number.")
if not isinstance(self.std, (int, float)):
raise RuntimeError("If channel_wise is False, std must be a float or int number.")
std = self.std * img.std() if self.relative else self.std
if not isinstance(std, (int, float)):
raise RuntimeError("std must be a float or int number.")
img = self._add_noise(img, mean=self.mean, std=std)
return img
class ShiftIntensity(Transform):
"""
Shift intensity uniformly for the entire image with specified `offset`.
Args:
offset: offset value to shift the intensity of image.
"""
backend = [TransformBackends.TORCH, TransformBackends.NUMPY]
def __init__(self, offset: float) -> None:
self.offset = offset
def __call__(self, img: NdarrayOrTensor, offset: Optional[float] = None) -> NdarrayOrTensor:
"""
Apply the transform to `img`.
"""
offset = self.offset if offset is None else offset
out = img + offset
out, *_ = convert_data_type(data=out, dtype=img.dtype)
return out
class RandShiftIntensity(RandomizableTransform):
"""
Randomly shift intensity with randomly picked offset.
"""
backend = [TransformBackends.TORCH, TransformBackends.NUMPY]
def __init__(self, offsets: Union[Tuple[float, float], float], prob: float = 0.1) -> None:
"""
Args:
offsets: offset range to randomly shift.
if single number, offset value is picked from (-offsets, offsets).
prob: probability of shift.
"""
RandomizableTransform.__init__(self, prob)
if isinstance(offsets, (int, float)):
self.offsets = (min(-offsets, offsets), max(-offsets, offsets))
elif len(offsets) != 2:
raise ValueError("offsets should be a number or pair of numbers.")
else:
self.offsets = (min(offsets), max(offsets))
self._offset = self.offsets[0]
        self._shifter = ShiftIntensity(self._offset)
def randomize(self, data: Optional[Any] = None) -> None:
super().randomize(None)
if not self._do_transform:
return None
self._offset = self.R.uniform(low=self.offsets[0], high=self.offsets[1])
def __call__(self, img: NdarrayOrTensor, factor: Optional[float] = None, randomize: bool = True) -> NdarrayOrTensor:
"""
Apply the transform to `img`.
Args:
img: input image to shift intensity.
factor: a factor to multiply the random offset, then shift.
can be some image specific value at runtime, like: max(img), etc.
"""
if randomize:
self.randomize()
if not self._do_transform:
return img
        return self._shifter(img, self._offset if factor is None else self._offset * factor)
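# Illustrative usage sketch (hypothetical offset range):
#
#     shift = RandShiftIntensity(offsets=(1.0, 2.0), prob=1.0)
#     out = shift(np.ones((1, 3, 3), dtype=np.float32))
#     # every voxel of `out` lies in [2.0, 3.0]: one offset drawn from
#     # [1.0, 2.0] is added uniformly to the whole image.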
class StdShiftIntensity(Transform):
"""
Shift intensity for the image with a factor and the standard deviation of the image
by: ``v = v + factor * std(v)``.
This transform can focus on only non-zero values or the entire image,
and can also calculate the std on each channel separately.
Args:
factor: factor shift by ``v = v + factor * std(v)``.
nonzero: whether only count non-zero values.
channel_wise: if True, calculate on each channel separately. Please ensure
that the first dimension represents the channel of the image if True.
dtype: output data type, defaults to float32.
"""
backend = [TransformBackends.TORCH, TransformBackends.NUMPY]
def __init__(
self, factor: float, nonzero: bool = False, channel_wise: bool = False, dtype: DtypeLike = np.float32
) -> None:
self.factor = factor
self.nonzero = nonzero
self.channel_wise = channel_wise
self.dtype = dtype
def _stdshift(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
ones: Callable
std: Callable
if isinstance(img, torch.Tensor):
ones = torch.ones
std = partial(torch.std, unbiased=False)
else:
ones = np.ones
std = np.std
slices = (img != 0) if self.nonzero else ones(img.shape, dtype=bool)
if slices.any():
offset = self.factor * std(img[slices])
img[slices] = img[slices] + offset
return img
def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
"""
Apply the transform to `img`.
"""
img, *_ = convert_data_type(img, dtype=self.dtype)
if self.channel_wise:
for i, d in enumerate(img):
img[i] = self._stdshift(d) # type: ignore
else:
img = self._stdshift(img)
return img
class RandStdShiftIntensity(RandomizableTransform):
"""
Shift intensity for the image with a factor and the standard deviation of the image
by: ``v = v + factor * std(v)`` where the `factor` is randomly picked.
"""
backend = [TransformBackends.TORCH, TransformBackends.NUMPY]
def __init__(
self,
factors: Union[Tuple[float, float], float],
prob: float = 0.1,
nonzero: bool = False,
channel_wise: bool = False,
dtype: DtypeLike = np.float32,
) -> None:
"""
Args:
factors: if tuple, the randomly picked range is (min(factors), max(factors)).
If single number, the range is (-factors, factors).
prob: probability of std shift.
nonzero: whether only count non-zero values.
channel_wise: if True, calculate on each channel separately.
dtype: output data type, defaults to float32.
"""
RandomizableTransform.__init__(self, prob)
if isinstance(factors, (int, float)):
self.factors = (min(-factors, factors), max(-factors, factors))
elif len(factors) != 2:
raise ValueError("factors should be a number or pair of numbers.")
else:
self.factors = (min(factors), max(factors))
self.factor = self.factors[0]
self.nonzero = nonzero
self.channel_wise = channel_wise
self.dtype = dtype
def randomize(self, data: Optional[Any] = None) -> None:
super().randomize(None)
if not self._do_transform:
return None
self.factor = self.R.uniform(low=self.factors[0], high=self.factors[1])
def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor:
"""
Apply the transform to `img`.
"""
if randomize:
self.randomize()
if not self._do_transform:
return img
shifter = StdShiftIntensity(
factor=self.factor, nonzero=self.nonzero, channel_wise=self.channel_wise, dtype=self.dtype
)
return shifter(img=img)
class ScaleIntensity(Transform):
"""
Scale the intensity of input image to the given value range (minv, maxv).
If `minv` and `maxv` not provided, use `factor` to scale image by ``v = v * (1 + factor)``.
"""
backend = [TransformBackends.TORCH, TransformBackends.NUMPY]
def __init__(
self,
minv: Optional[float] = 0.0,
maxv: Optional[float] = 1.0,
factor: Optional[float] = None,
channel_wise: bool = False,
dtype: DtypeLike = np.float32,
) -> None:
"""
Args:
minv: minimum value of output data.
maxv: maximum value of output data.
factor: factor scale by ``v = v * (1 + factor)``. In order to use
this parameter, please set `minv` and `maxv` into None.
channel_wise: if True, scale on each channel separately. Please ensure
that the first dimension represents the channel of the image if True.
dtype: output data type, defaults to float32.
"""
self.minv = minv
self.maxv = maxv
self.factor = factor
self.channel_wise = channel_wise
self.dtype = dtype
def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
"""
Apply the transform to `img`.
Raises:
ValueError: When ``self.minv=None`` or ``self.maxv=None`` and ``self.factor=None``. Incompatible values.
"""
if self.minv is not None and self.maxv is not None:
if self.channel_wise:
out = [rescale_array(d, self.minv, self.maxv, dtype=self.dtype) for d in img]
return torch.stack(out) if isinstance(img, torch.Tensor) else np.stack(out) # type: ignore
return rescale_array(img, self.minv, self.maxv, dtype=self.dtype)
if self.factor is not None:
ret = img * (1 + self.factor)
ret, *_ = convert_data_type(ret, dtype=self.dtype)
return ret
raise ValueError("Incompatible values: minv=None or maxv=None and factor=None.")
class RandScaleIntensity(RandomizableTransform):
"""
Randomly scale the intensity of input image by ``v = v * (1 + factor)`` where the `factor`
is randomly picked.
"""
backend = ScaleIntensity.backend
def __init__(
self, factors: Union[Tuple[float, float], float], prob: float = 0.1, dtype: DtypeLike = np.float32
) -> None:
"""
Args:
factors: factor range to randomly scale by ``v = v * (1 + factor)``.
if single number, factor value is picked from (-factors, factors).
prob: probability of scale.
dtype: output data type, defaults to float32.
"""
RandomizableTransform.__init__(self, prob)
if isinstance(factors, (int, float)):
self.factors = (min(-factors, factors), max(-factors, factors))
elif len(factors) != 2:
raise ValueError("factors should be a number or pair of numbers.")
else:
self.factors = (min(factors), max(factors))
self.factor = self.factors[0]
self.dtype = dtype
def randomize(self, data: Optional[Any] = None) -> None:
super().randomize(None)
if not self._do_transform:
return None
self.factor = self.R.uniform(low=self.factors[0], high=self.factors[1])
def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor:
"""
Apply the transform to `img`.
"""
if randomize:
self.randomize()
if not self._do_transform:
return img
return ScaleIntensity(minv=None, maxv=None, factor=self.factor, dtype=self.dtype)(img)
class RandBiasField(RandomizableTransform):
"""
Random bias field augmentation for MR images.
The bias field is considered as a linear combination of smoothly varying basis (polynomial)
functions, as described in `Automated Model-Based Tissue Classification of MR Images of the Brain
<https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=811270>`_.
This implementation adapted from `NiftyNet
<https://github.com/NifTK/NiftyNet>`_.
Referred to `Longitudinal segmentation of age-related white matter hyperintensities
<https://www.sciencedirect.com/science/article/pii/S1361841517300257?via%3Dihub>`_.
Args:
degree: degree of freedom of the polynomials. The value should be no less than 1.
Defaults to 3.
coeff_range: range of the random coefficients. Defaults to (0.0, 0.1).
dtype: output data type, defaults to float32.
prob: probability to do random bias field.
"""
backend = [TransformBackends.NUMPY]
def __init__(
self,
degree: int = 3,
coeff_range: Tuple[float, float] = (0.0, 0.1),
dtype: DtypeLike = np.float32,
prob: float = 0.1,
) -> None:
RandomizableTransform.__init__(self, prob)
if degree < 1:
raise ValueError("degree should be no less than 1.")
self.degree = degree
self.coeff_range = coeff_range
self.dtype = dtype
self._coeff = [1.0]
def _generate_random_field(self, spatial_shape: Sequence[int], degree: int, coeff: Sequence[float]):
"""
products of polynomials as bias field estimations
"""
rank = len(spatial_shape)
coeff_mat = np.zeros((degree + 1,) * rank)
coords = [np.linspace(-1.0, 1.0, dim, dtype=np.float32) for dim in spatial_shape]
if rank == 2:
coeff_mat[np.tril_indices(degree + 1)] = coeff
return np.polynomial.legendre.leggrid2d(coords[0], coords[1], coeff_mat)
if rank == 3:
pts: List[List[int]] = [[0, 0, 0]]
for i in range(degree + 1):
for j in range(degree + 1 - i):
for k in range(degree + 1 - i - j):
pts.append([i, j, k])
if len(pts) > 1:
pts = pts[1:]
np_pts = np.stack(pts)
coeff_mat[np_pts[:, 0], np_pts[:, 1], np_pts[:, 2]] = coeff
return np.polynomial.legendre.leggrid3d(coords[0], coords[1], coords[2], coeff_mat)
raise NotImplementedError("only supports 2D or 3D fields")
def randomize(self, img_size: Sequence[int]) -> None:
super().randomize(None)
if not self._do_transform:
return None
n_coeff = int(np.prod([(self.degree + k) / k for k in range(1, len(img_size) + 1)]))
self._coeff = self.R.uniform(*self.coeff_range, n_coeff).tolist()
def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor:
"""
Apply the transform to `img`.
"""
if randomize:
self.randomize(img_size=img.shape[1:])
if not self._do_transform:
return img
num_channels, *spatial_shape = img.shape
_bias_fields = np.stack(
[
self._generate_random_field(spatial_shape=spatial_shape, degree=self.degree, coeff=self._coeff)
for _ in range(num_channels)
],
axis=0,
)
img_np, *_ = convert_data_type(img, np.ndarray)
out = img_np * np.exp(_bias_fields)
out, *_ = convert_to_dst_type(src=out, dst=img, dtype=self.dtype)
return out
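# Worked value for the coefficient count used in `randomize` above: for
# degree=3 and a 3D image, n_coeff = (3+1)/1 * (3+2)/2 * (3+3)/3 = 20, i.e.
# twenty random Legendre coefficients are drawn from `coeff_range`.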
class NormalizeIntensity(Transform):
"""
Normalize input based on provided args, using calculated mean and std if not provided.
This transform can normalize only non-zero values or entire image, and can also calculate
mean and std on each channel separately.
When `channel_wise` is True, the first dimension of `subtrahend` and `divisor` should
be the number of image channels if they are not None.
Args:
subtrahend: the amount to subtract by (usually the mean).
divisor: the amount to divide by (usually the standard deviation).
nonzero: whether only normalize non-zero values.
channel_wise: if using calculated mean and std, calculate on each channel separately
or calculate on the entire image directly.
dtype: output data type, defaults to float32.
"""
backend = [TransformBackends.TORCH, TransformBackends.NUMPY]
def __init__(
self,
subtrahend: Union[Sequence, NdarrayOrTensor, None] = None,
divisor: Union[Sequence, NdarrayOrTensor, None] = None,
nonzero: bool = False,
channel_wise: bool = False,
dtype: DtypeLike = np.float32,
) -> None:
self.subtrahend = subtrahend
self.divisor = divisor
self.nonzero = nonzero
self.channel_wise = channel_wise
self.dtype = dtype
@staticmethod
def _mean(x):
if isinstance(x, np.ndarray):
return np.mean(x)
x = torch.mean(x.float())
return x.item() if x.numel() == 1 else x
@staticmethod
def _std(x):
if isinstance(x, np.ndarray):
return np.std(x)
x = torch.std(x.float(), unbiased=False)
return x.item() if x.numel() == 1 else x
def _normalize(self, img: NdarrayOrTensor, sub=None, div=None) -> NdarrayOrTensor:
img, *_ = convert_data_type(img, dtype=torch.float32)
if self.nonzero:
slices = img != 0
else:
if isinstance(img, np.ndarray):
slices = np.ones_like(img, dtype=bool)
else:
slices = torch.ones_like(img, dtype=torch.bool)
if not slices.any():
return img
_sub = sub if sub is not None else self._mean(img[slices])
if isinstance(_sub, (torch.Tensor, np.ndarray)):
_sub, *_ = convert_to_dst_type(_sub, img)
_sub = _sub[slices]
_div = div if div is not None else self._std(img[slices])
if np.isscalar(_div):
if _div == 0.0:
_div = 1.0
elif isinstance(_div, (torch.Tensor, np.ndarray)):
_div, *_ = convert_to_dst_type(_div, img)
_div = _div[slices]
_div[_div == 0.0] = 1.0
img[slices] = (img[slices] - _sub) / _div
return img
def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
"""
        Apply the transform to `img`, assuming `img` is a channel-first array if `self.channel_wise` is True.
"""
if self.channel_wise:
if self.subtrahend is not None and len(self.subtrahend) != len(img):
raise ValueError(f"img has {len(img)} channels, but subtrahend has {len(self.subtrahend)} components.")
if self.divisor is not None and len(self.divisor) != len(img):
raise ValueError(f"img has {len(img)} channels, but divisor has {len(self.divisor)} components.")
for i, d in enumerate(img):
img[i] = self._normalize( # type: ignore
d,
sub=self.subtrahend[i] if self.subtrahend is not None else None,
div=self.divisor[i] if self.divisor is not None else None,
)
else:
img = self._normalize(img, self.subtrahend, self.divisor)
out, *_ = convert_data_type(img, dtype=self.dtype)
return out
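# Illustrative usage sketch (hypothetical values): NormalizeIntensity(
# nonzero=True) applied to a channel [0.0, 2.0, 4.0] leaves the zero voxel
# untouched and normalizes the rest with their mean (3.0) and population
# std (1.0), giving [0.0, -1.0, 1.0].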
class ThresholdIntensity(Transform):
"""
    Keep only the intensity values of the whole image that are above (or below, depending on
    `above`) the given threshold, and fill the remaining parts of the image with the `cval` value.
Args:
threshold: the threshold to filter intensity values.
above: filter values above the threshold or below the threshold, default is True.
cval: value to fill the remaining parts of the image, default is 0.
"""
backend = [TransformBackends.TORCH, TransformBackends.NUMPY]
def __init__(self, threshold: float, above: bool = True, cval: float = 0.0) -> None:
if not isinstance(threshold, (int, float)):
raise ValueError("threshold must be a float or int number.")
self.threshold = threshold
self.above = above
self.cval = cval
def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
"""
Apply the transform to `img`.
"""
mask = img > self.threshold if self.above else img < self.threshold
res = where(mask, img, self.cval)
res, *_ = convert_data_type(res, dtype=img.dtype)
return res
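# Illustrative usage sketch (hypothetical values):
# ThresholdIntensity(threshold=2.0, above=True, cval=0.0) applied to
# [1.0, 2.0, 3.0] keeps only values strictly greater than 2.0, giving
# [0.0, 0.0, 3.0].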
class ScaleIntensityRange(Transform):
"""
Apply specific intensity scaling to the whole numpy array.
Scaling from [a_min, a_max] to [b_min, b_max] with clip option.
Args:
a_min: intensity original range min.
a_max: intensity original range max.
b_min: intensity target range min.
b_max: intensity target range max.
clip: whether to perform clip after scaling.
"""
backend = [TransformBackends.TORCH, TransformBackends.NUMPY]
def __init__(self, a_min: float, a_max: float, b_min: float, b_max: float, clip: bool = False) -> None:
self.a_min = a_min
self.a_max = a_max
self.b_min = b_min
self.b_max = b_max
self.clip = clip
def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
"""
Apply the transform to `img`.
"""
if self.a_max - self.a_min == 0.0:
warn("Divide by zero (a_min == a_max)", Warning)
return img - self.a_min + self.b_min
img = (img - self.a_min) / (self.a_max - self.a_min)
img = img * (self.b_max - self.b_min) + self.b_min
if self.clip:
img = clip(img, self.b_min, self.b_max)
return img
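# Worked example (hypothetical CT-style window): ScaleIntensityRange(
# a_min=-1000, a_max=1000, b_min=0.0, b_max=1.0, clip=True) maps an input
# value of 0 to (0 - (-1000)) / 2000 = 0.5, and clips values outside
# [-1000, 1000] to the [0.0, 1.0] target range.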
class AdjustContrast(Transform):
"""
Changes image intensity by gamma. Each pixel/voxel intensity is updated as::
x = ((x - min) / intensity_range) ^ gamma * intensity_range + min
Args:
gamma: gamma value to adjust the contrast as function.
"""
backend = [TransformBackends.TORCH, TransformBackends.NUMPY]
def __init__(self, gamma: float) -> None:
if not isinstance(gamma, (int, float)):
raise ValueError("gamma must be a float or int number.")
self.gamma = gamma
def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
"""
Apply the transform to `img`.
"""
epsilon = 1e-7
img_min = img.min()
img_range = img.max() - img_min
ret: NdarrayOrTensor = ((img - img_min) / float(img_range + epsilon)) ** self.gamma * img_range + img_min
return ret
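# Worked example (hypothetical values): for an image with min=0 and max=4,
# AdjustContrast(gamma=2.0) maps the value 2 to
# ((2 - 0) / 4) ** 2 * 4 + 0 = 1.0 (ignoring the small epsilon added to the
# intensity range for numerical stability).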
class RandAdjustContrast(RandomizableTransform):
"""
Randomly changes image intensity by gamma. Each pixel/voxel intensity is updated as::
x = ((x - min) / intensity_range) ^ gamma * intensity_range + min
Args:
prob: Probability of adjustment.
gamma: Range of gamma values.
If single number, value is picked from (0.5, gamma), default is (0.5, 4.5).
"""
backend = AdjustContrast.backend
def __init__(self, prob: float = 0.1, gamma: Union[Sequence[float], float] = (0.5, 4.5)) -> None:
RandomizableTransform.__init__(self, prob)
if isinstance(gamma, (int, float)):
if gamma <= 0.5:
                raise ValueError(
                    "if gamma is a single number, it must be greater than 0.5, and the value is picked from (0.5, gamma)"
                )
self.gamma = (0.5, gamma)
elif len(gamma) != 2:
raise ValueError("gamma should be a number or pair of numbers.")
else:
self.gamma = (min(gamma), max(gamma))
self.gamma_value: Optional[float] = None
def randomize(self, data: Optional[Any] = None) -> None:
super().randomize(None)
if not self._do_transform:
return None
self.gamma_value = self.R.uniform(low=self.gamma[0], high=self.gamma[1])
def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor:
"""
Apply the transform to `img`.
"""
if randomize:
self.randomize()
if not self._do_transform:
return img
if self.gamma_value is None:
raise RuntimeError("gamma_value is not set, please call `randomize` function first.")
return AdjustContrast(self.gamma_value)(img)
class ScaleIntensityRangePercentiles(Transform):
"""
Apply range scaling to a numpy array based on the intensity distribution of the input.
By default this transform will scale from [lower_intensity_percentile, upper_intensity_percentile] to [b_min, b_max], where
{lower,upper}_intensity_percentile are the intensity values at the corresponding percentiles of ``img``.
The ``relative`` parameter can also be set to scale from [lower_intensity_percentile, upper_intensity_percentile] to the
lower and upper percentiles of the output range [b_min, b_max]
For example:
.. code-block:: python
:emphasize-lines: 11, 22
image = np.array(
[[[1, 2, 3, 4, 5],
[1, 2, 3, 4, 5],
[1, 2, 3, 4, 5],
[1, 2, 3, 4, 5],
[1, 2, 3, 4, 5],
[1, 2, 3, 4, 5]]])
# Scale from lower and upper image intensity percentiles
# to output range [b_min, b_max]
scaler = ScaleIntensityRangePercentiles(10, 90, 0, 200, False, False)
print(scaler(image))
[[[0., 50., 100., 150., 200.],
[0., 50., 100., 150., 200.],
[0., 50., 100., 150., 200.],
[0., 50., 100., 150., 200.],
[0., 50., 100., 150., 200.],
[0., 50., 100., 150., 200.]]]
# Scale from lower and upper image intensity percentiles
# to lower and upper percentiles of the output range [b_min, b_max]
rel_scaler = ScaleIntensityRangePercentiles(10, 90, 0, 200, False, True)
print(rel_scaler(image))
[[[20., 60., 100., 140., 180.],
[20., 60., 100., 140., 180.],
[20., 60., 100., 140., 180.],
[20., 60., 100., 140., 180.],
[20., 60., 100., 140., 180.],
[20., 60., 100., 140., 180.]]]
Args:
lower: lower intensity percentile.
upper: upper intensity percentile.
b_min: intensity target range min.
b_max: intensity target range max.
clip: whether to perform clip after scaling.
relative: whether to scale to the corresponding percentiles of [b_min, b_max].
"""
backend = ScaleIntensityRange.backend
def __init__(
self, lower: float, upper: float, b_min: float, b_max: float, clip: bool = False, relative: bool = False
) -> None:
if lower < 0.0 or lower > 100.0:
raise ValueError("Percentiles must be in the range [0, 100]")
if upper < 0.0 or upper > 100.0:
raise ValueError("Percentiles must be in the range [0, 100]")
self.lower = lower
self.upper = upper
self.b_min = b_min
self.b_max = b_max
self.clip = clip
self.relative = relative
def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
"""
Apply the transform to `img`.
"""
a_min: float = percentile(img, self.lower) # type: ignore
a_max: float = percentile(img, self.upper) # type: ignore
b_min = self.b_min
b_max = self.b_max
if self.relative:
b_min = ((self.b_max - self.b_min) * (self.lower / 100.0)) + self.b_min
b_max = ((self.b_max - self.b_min) * (self.upper / 100.0)) + self.b_min
scalar = ScaleIntensityRange(a_min=a_min, a_max=a_max, b_min=b_min, b_max=b_max, clip=False)
img = scalar(img)
if self.clip:
img = clip(img, self.b_min, self.b_max)
return img
class MaskIntensity(Transform):
"""
Mask the intensity values of input image with the specified mask data.
Mask data must have the same spatial size as the input image, and all
the intensity values of input image corresponding to the selected values
in the mask data will keep the original value, others will be set to `0`.
Args:
mask_data: if `mask_data` is single channel, apply to every channel
of input image. if multiple channels, the number of channels must
match the input data. the intensity values of input image corresponding
to the selected values in the mask data will keep the original value,
others will be set to `0`. if None, must specify the `mask_data` at runtime.
select_fn: function to select valid values of the `mask_data`, default is
to select `values > 0`.
"""
backend = [TransformBackends.TORCH, TransformBackends.NUMPY]
def __init__(self, mask_data: Optional[NdarrayOrTensor] = None, select_fn: Callable = is_positive) -> None:
self.mask_data = mask_data
self.select_fn = select_fn
def __call__(self, img: NdarrayOrTensor, mask_data: Optional[NdarrayOrTensor] = None) -> NdarrayOrTensor:
"""
Args:
mask_data: if mask data is single channel, apply to every channel
of input image. if multiple channels, the channel number must
match input data. mask_data will be converted to `bool` values
by `mask_data > 0` before applying transform to input image.
Raises:
- ValueError: When both ``mask_data`` and ``self.mask_data`` are None.
- ValueError: When ``mask_data`` and ``img`` channels differ and ``mask_data`` is not single channel.
"""
mask_data = self.mask_data if mask_data is None else mask_data
if mask_data is None:
raise ValueError("must provide the mask_data when initializing the transform or at runtime.")
mask_data_, *_ = convert_to_dst_type(src=mask_data, dst=img)
mask_data_ = self.select_fn(mask_data_)
if mask_data_.shape[0] != 1 and mask_data_.shape[0] != img.shape[0]:
raise ValueError(
"When mask_data is not single channel, mask_data channels must match img, "
f"got img channels={img.shape[0]} mask_data channels={mask_data_.shape[0]}."
)
return img * mask_data_
class SavitzkyGolaySmooth(Transform):
"""
Smooth the input data along the given axis using a Savitzky-Golay filter.
Args:
window_length: Length of the filter window, must be a positive odd integer.
order: Order of the polynomial to fit to each window, must be less than ``window_length``.
axis: Optional axis along which to apply the filter kernel. Default 1 (first spatial dimension).
mode: Optional padding mode, passed to convolution class. ``'zeros'``, ``'reflect'``, ``'replicate'``
or ``'circular'``. Default: ``'zeros'``. See ``torch.nn.Conv1d()`` for more information.
"""
backend = [TransformBackends.TORCH]
def __init__(self, window_length: int, order: int, axis: int = 1, mode: str = "zeros"):
if axis < 0:
raise ValueError("axis must be zero or positive.")
self.window_length = window_length
self.order = order
self.axis = axis
self.mode = mode
self.img_t: torch.Tensor = torch.tensor(0.0)
def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
"""
Args:
img: array containing input data. Must be real and in shape [channels, spatial1, spatial2, ...].
Returns:
array containing smoothed result.
"""
self.img_t = convert_to_tensor(img)
# add one to transform axis because a batch axis will be added at dimension 0
savgol_filter = SavitzkyGolayFilter(self.window_length, self.order, self.axis + 1, self.mode)
# convert to Tensor and add Batch axis expected by HilbertTransform
smoothed = savgol_filter(self.img_t.unsqueeze(0)).squeeze(0)
out, *_ = convert_to_dst_type(smoothed, dst=img)
return out
class DetectEnvelope(Transform):
"""
Find the envelope of the input data along the requested axis using a Hilbert transform.
Requires PyTorch 1.7.0+ and the PyTorch FFT module (which is not included in NVIDIA PyTorch Release 20.10).
Args:
axis: Axis along which to detect the envelope. Default 1, i.e. the first spatial dimension.
        n: FFT size. Default img.shape[axis]. Input will be zero-padded or truncated to this size along dimension
``axis``.
"""
backend = [TransformBackends.TORCH]
def __init__(self, axis: int = 1, n: Union[int, None] = None) -> None:
if PT_BEFORE_1_7:
raise InvalidPyTorchVersionError("1.7.0", self.__class__.__name__)
if axis < 0:
raise ValueError("axis must be zero or positive.")
self.axis = axis
self.n = n
def __call__(self, img: NdarrayOrTensor):
"""
Args:
img: numpy.ndarray containing input data. Must be real and in shape [channels, spatial1, spatial2, ...].
Returns:
np.ndarray containing envelope of data in img along the specified axis.
"""
img_t: torch.Tensor
img_t, *_ = convert_data_type(img, torch.Tensor) # type: ignore
# add one to transform axis because a batch axis will be added at dimension 0
hilbert_transform = HilbertTransform(self.axis + 1, self.n)
# convert to Tensor and add Batch axis expected by HilbertTransform
out = hilbert_transform(img_t.unsqueeze(0)).squeeze(0).abs()
out, *_ = convert_to_dst_type(src=out, dst=img)
return out
class GaussianSmooth(Transform):
"""
Apply Gaussian smooth to the input data based on specified `sigma` parameter.
A default value `sigma=1.0` is provided for reference.
Args:
sigma: if a list of values, must match the count of spatial dimensions of input data,
and apply every value in the list to 1 spatial dimension. if only 1 value provided,
use it for all spatial dimensions.
approx: discrete Gaussian kernel type, available options are "erf", "sampled", and "scalespace".
see also :py:meth:`monai.networks.layers.GaussianFilter`.
"""
backend = [TransformBackends.TORCH]
def __init__(self, sigma: Union[Sequence[float], float] = 1.0, approx: str = "erf") -> None:
self.sigma = sigma
self.approx = approx
def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
img_t: torch.Tensor
img_t, *_ = convert_data_type(img, torch.Tensor, dtype=torch.float) # type: ignore
sigma: Union[Sequence[torch.Tensor], torch.Tensor]
if isinstance(self.sigma, Sequence):
sigma = [torch.as_tensor(s, device=img_t.device) for s in self.sigma]
else:
sigma = torch.as_tensor(self.sigma, device=img_t.device)
gaussian_filter = GaussianFilter(img_t.ndim - 1, sigma, approx=self.approx)
out_t: torch.Tensor = gaussian_filter(img_t.unsqueeze(0)).squeeze(0)
out, *_ = convert_data_type(out_t, type(img), device=img.device if isinstance(img, torch.Tensor) else None)
return out
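# A minimal usage sketch, assuming torch as imported by this module; the helper name and values are
# hypothetical and not part of the transform API.
def _example_gaussian_smooth():
    import torch

    img = torch.rand(1, 32, 32)  # hypothetical [channel, H, W] image
    return GaussianSmooth(sigma=1.5)(img)  # isotropic smoothing over both spatial dimensions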
class RandGaussianSmooth(RandomizableTransform):
"""
Apply Gaussian smooth to the input data based on randomly selected `sigma` parameters.
Args:
sigma_x: randomly select sigma value for the first spatial dimension.
sigma_y: randomly select sigma value for the second spatial dimension, if present.
sigma_z: randomly select sigma value for the third spatial dimension, if present.
prob: probability of Gaussian smooth.
approx: discrete Gaussian kernel type, available options are "erf", "sampled", and "scalespace".
see also :py:meth:`monai.networks.layers.GaussianFilter`.
"""
backend = GaussianSmooth.backend
def __init__(
self,
sigma_x: Tuple[float, float] = (0.25, 1.5),
sigma_y: Tuple[float, float] = (0.25, 1.5),
sigma_z: Tuple[float, float] = (0.25, 1.5),
prob: float = 0.1,
approx: str = "erf",
) -> None:
RandomizableTransform.__init__(self, prob)
self.sigma_x = sigma_x
self.sigma_y = sigma_y
self.sigma_z = sigma_z
self.approx = approx
self.x = self.sigma_x[0]
self.y = self.sigma_y[0]
self.z = self.sigma_z[0]
def randomize(self, data: Optional[Any] = None) -> None:
super().randomize(None)
if not self._do_transform:
return None
self.x = self.R.uniform(low=self.sigma_x[0], high=self.sigma_x[1])
self.y = self.R.uniform(low=self.sigma_y[0], high=self.sigma_y[1])
self.z = self.R.uniform(low=self.sigma_z[0], high=self.sigma_z[1])
def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor:
if randomize:
self.randomize()
if not self._do_transform:
return img
sigma = ensure_tuple_size(tup=(self.x, self.y, self.z), dim=img.ndim - 1)
return GaussianSmooth(sigma=sigma, approx=self.approx)(img)
class GaussianSharpen(Transform):
"""
Sharpen images using the Gaussian Blur filter.
Referring to: http://scipy-lectures.org/advanced/image_processing/auto_examples/plot_sharpen.html.
The algorithm is shown as below
.. code-block:: python
blurred_f = gaussian_filter(img, sigma1)
filter_blurred_f = gaussian_filter(blurred_f, sigma2)
img = blurred_f + alpha * (blurred_f - filter_blurred_f)
A set of default values `sigma1=3.0`, `sigma2=1.0` and `alpha=30.0` is provided for reference.
Args:
sigma1: sigma parameter for the first gaussian kernel. if a list of values, must match the count
of spatial dimensions of input data, and apply every value in the list to 1 spatial dimension.
if only 1 value provided, use it for all spatial dimensions.
sigma2: sigma parameter for the second gaussian kernel. if a list of values, must match the count
of spatial dimensions of input data, and apply every value in the list to 1 spatial dimension.
if only 1 value provided, use it for all spatial dimensions.
alpha: weight parameter to compute the final result.
approx: discrete Gaussian kernel type, available options are "erf", "sampled", and "scalespace".
see also :py:meth:`monai.networks.layers.GaussianFilter`.
"""
backend = [TransformBackends.TORCH]
def __init__(
self,
sigma1: Union[Sequence[float], float] = 3.0,
sigma2: Union[Sequence[float], float] = 1.0,
alpha: float = 30.0,
approx: str = "erf",
) -> None:
self.sigma1 = sigma1
self.sigma2 = sigma2
self.alpha = alpha
self.approx = approx
def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
img_t: torch.Tensor
img_t, *_ = convert_data_type(img, torch.Tensor, dtype=torch.float32) # type: ignore
gf1, gf2 = (
GaussianFilter(img_t.ndim - 1, sigma, approx=self.approx).to(img_t.device)
for sigma in (self.sigma1, self.sigma2)
)
blurred_f = gf1(img_t.unsqueeze(0))
filter_blurred_f = gf2(blurred_f)
out_t: torch.Tensor = (blurred_f + self.alpha * (blurred_f - filter_blurred_f)).squeeze(0)
out, *_ = convert_data_type(out_t, type(img), device=img.device if isinstance(img, torch.Tensor) else None)
return out
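# A minimal usage sketch of the blur-and-subtract sharpening described in the docstring above; the
# helper name and values are hypothetical, assuming torch as imported by this module.
def _example_gaussian_sharpen():
    import torch

    img = torch.rand(1, 64, 64)
    # internally: blurred + alpha * (blurred - more_blurred), per the code block in the docstring
    return GaussianSharpen(sigma1=3.0, sigma2=1.0, alpha=30.0)(img)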
class RandGaussianSharpen(RandomizableTransform):
"""
Sharpen images using the Gaussian Blur filter based on randomly selected `sigma1`, `sigma2` and `alpha`.
The algorithm is :py:class:`monai.transforms.GaussianSharpen`.
Args:
sigma1_x: randomly select sigma value for the first spatial dimension of the first gaussian kernel.
sigma1_y: randomly select sigma value for the second spatial dimension (if present) of the first gaussian kernel.
sigma1_z: randomly select sigma value for the third spatial dimension (if present) of the first gaussian kernel.
sigma2_x: randomly select sigma value for the first spatial dimension of the second gaussian kernel.
if only one value `X` is provided, it must be smaller than `sigma1_x`, and the value is selected from [X, sigma1_x].
sigma2_y: randomly select sigma value for the second spatial dimension (if present) of the second gaussian kernel.
if only one value `Y` is provided, it must be smaller than `sigma1_y`, and the value is selected from [Y, sigma1_y].
sigma2_z: randomly select sigma value for the third spatial dimension (if present) of the second gaussian kernel.
if only one value `Z` is provided, it must be smaller than `sigma1_z`, and the value is selected from [Z, sigma1_z].
alpha: randomly select the weight parameter to compute the final result.
approx: discrete Gaussian kernel type, available options are "erf", "sampled", and "scalespace".
see also :py:meth:`monai.networks.layers.GaussianFilter`.
prob: probability of Gaussian sharpen.
"""
backend = GaussianSharpen.backend
def __init__(
self,
sigma1_x: Tuple[float, float] = (0.5, 1.0),
sigma1_y: Tuple[float, float] = (0.5, 1.0),
sigma1_z: Tuple[float, float] = (0.5, 1.0),
sigma2_x: Union[Tuple[float, float], float] = 0.5,
sigma2_y: Union[Tuple[float, float], float] = 0.5,
sigma2_z: Union[Tuple[float, float], float] = 0.5,
alpha: Tuple[float, float] = (10.0, 30.0),
approx: str = "erf",
prob: float = 0.1,
) -> None:
RandomizableTransform.__init__(self, prob)
self.sigma1_x = sigma1_x
self.sigma1_y = sigma1_y
self.sigma1_z = sigma1_z
self.sigma2_x = sigma2_x
self.sigma2_y = sigma2_y
self.sigma2_z = sigma2_z
self.alpha = alpha
self.approx = approx
self.x1: Optional[float] = None
self.y1: Optional[float] = None
self.z1: Optional[float] = None
self.x2: Optional[float] = None
self.y2: Optional[float] = None
self.z2: Optional[float] = None
self.a: Optional[float] = None
def randomize(self, data: Optional[Any] = None) -> None:
super().randomize(None)
if not self._do_transform:
return None
self.x1 = self.R.uniform(low=self.sigma1_x[0], high=self.sigma1_x[1])
self.y1 = self.R.uniform(low=self.sigma1_y[0], high=self.sigma1_y[1])
self.z1 = self.R.uniform(low=self.sigma1_z[0], high=self.sigma1_z[1])
sigma2_x = (self.sigma2_x, self.x1) if not isinstance(self.sigma2_x, Iterable) else self.sigma2_x
sigma2_y = (self.sigma2_y, self.y1) if not isinstance(self.sigma2_y, Iterable) else self.sigma2_y
sigma2_z = (self.sigma2_z, self.z1) if not isinstance(self.sigma2_z, Iterable) else self.sigma2_z
self.x2 = self.R.uniform(low=sigma2_x[0], high=sigma2_x[1])
self.y2 = self.R.uniform(low=sigma2_y[0], high=sigma2_y[1])
self.z2 = self.R.uniform(low=sigma2_z[0], high=sigma2_z[1])
self.a = self.R.uniform(low=self.alpha[0], high=self.alpha[1])
def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor:
if randomize:
self.randomize()
if not self._do_transform:
return img
if self.x2 is None or self.y2 is None or self.z2 is None or self.a is None:
raise RuntimeError("please call the `randomize()` function first.")
sigma1 = ensure_tuple_size(tup=(self.x1, self.y1, self.z1), dim=img.ndim - 1)
sigma2 = ensure_tuple_size(tup=(self.x2, self.y2, self.z2), dim=img.ndim - 1)
return GaussianSharpen(sigma1=sigma1, sigma2=sigma2, alpha=self.a, approx=self.approx)(img)
class RandHistogramShift(RandomizableTransform):
"""
Apply random nonlinear transform to the image's intensity histogram.
Args:
num_control_points: number of control points governing the nonlinear intensity mapping.
a smaller number of control points allows for larger intensity shifts. if two values are provided, the number of
control points is selected from the range (min_value, max_value).
prob: probability of histogram shift.
"""
backend = [TransformBackends.NUMPY]
def __init__(self, num_control_points: Union[Tuple[int, int], int] = 10, prob: float = 0.1) -> None:
RandomizableTransform.__init__(self, prob)
if isinstance(num_control_points, int):
if num_control_points <= 2:
raise ValueError("num_control_points should be greater than or equal to 3")
self.num_control_points = (num_control_points, num_control_points)
else:
if len(num_control_points) != 2:
raise ValueError("num_control_points should be a number or a pair of numbers")
if min(num_control_points) <= 2:
raise ValueError("num_control_points should be greater than or equal to 3")
self.num_control_points = (min(num_control_points), max(num_control_points))
self.reference_control_points: np.ndarray
self.floating_control_points: np.ndarray
def randomize(self, data: Optional[Any] = None) -> None:
super().randomize(None)
if not self._do_transform:
return None
num_control_point = self.R.randint(self.num_control_points[0], self.num_control_points[1] + 1)
self.reference_control_points = np.linspace(0, 1, num_control_point)
self.floating_control_points = np.copy(self.reference_control_points)
for i in range(1, num_control_point - 1):
self.floating_control_points[i] = self.R.uniform(
self.floating_control_points[i - 1], self.floating_control_points[i + 1]
)
def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor:
if randomize:
self.randomize()
if not self._do_transform:
return img
if self.reference_control_points is None or self.floating_control_points is None:
raise RuntimeError("please call the `randomize()` function first.")
img_np: np.ndarray
img_np, *_ = convert_data_type(img, np.ndarray) # type: ignore
img_min, img_max = img_np.min(), img_np.max()
reference_control_points_scaled = self.reference_control_points * (img_max - img_min) + img_min
floating_control_points_scaled = self.floating_control_points * (img_max - img_min) + img_min
img_np = np.asarray(
np.interp(img_np, reference_control_points_scaled, floating_control_points_scaled), dtype=img_np.dtype
)
img, *_ = convert_to_dst_type(img_np, dst=img)
return img
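# A minimal usage sketch, assuming numpy as imported by this module; prob=1.0 forces the random
# transform to apply so the effect is visible. The helper name is hypothetical.
def _example_rand_histogram_shift():
    import numpy as np

    img = np.random.rand(1, 32, 32).astype(np.float32)
    shifter = RandHistogramShift(num_control_points=5, prob=1.0)
    return shifter(img)  # intensities remapped by piecewise-linear interpolation between control points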
class GibbsNoise(Transform, Fourier):
"""
The transform applies Gibbs noise to 2D/3D MRI images. Gibbs artifacts
are one of the common types of artifacts appearing in MRI scans.
The transform is applied to all the channels in the data.
For general information on Gibbs artifacts, please refer to:
`An Image-based Approach to Understanding the Physics of MR Artifacts
<https://pubs.rsna.org/doi/full/10.1148/rg.313105115>`_.
`The AAPM/RSNA Physics Tutorial for Residents
<https://pubs.rsna.org/doi/full/10.1148/radiographics.22.4.g02jl14949>`_
Args:
alpha: Parametrizes the intensity of the Gibbs noise filter applied. Takes
values in the interval [0,1] with alpha = 0 acting as the identity mapping.
"""
backend = [TransformBackends.TORCH, TransformBackends.NUMPY]
@deprecated_arg(name="as_tensor_output", since="0.6")
def __init__(self, alpha: float = 0.1, as_tensor_output: bool = True) -> None:
if alpha > 1 or alpha < 0:
raise ValueError("alpha must take values in the interval [0, 1].")
self.alpha = alpha
def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
n_dims = len(img.shape[1:])
# FT
k = self.shift_fourier(img, n_dims)
# build and apply mask
k = self._apply_mask(k)
# map back
img = self.inv_shift_fourier(k, n_dims)
return img
def _apply_mask(self, k: NdarrayOrTensor) -> NdarrayOrTensor:
"""Builds and applies a mask on the spatial dimensions.
Args:
k: k-space version of the image.
Returns:
masked version of the k-space image.
"""
shape = k.shape[1:]
# compute masking radius and center
r = (1 - self.alpha) * np.max(shape) * np.sqrt(2) / 2.0
center = (np.array(shape) - 1) / 2
# gives list w/ len==self.dim. Each dim gives coordinate in that dimension
coords = np.ogrid[tuple(slice(0, i) for i in shape)]
# need to subtract center coord and then square for Euc distance
coords_from_center_sq = [(coord - c) ** 2 for coord, c in zip(coords, center)]
dist_from_center = np.sqrt(sum(coords_from_center_sq))
mask = dist_from_center <= r
# add channel dimension into mask
mask = np.repeat(mask[None], k.shape[0], axis=0)
if isinstance(k, torch.Tensor):
mask, *_ = convert_data_type(mask, torch.Tensor, device=k.device)
# apply binary mask
k_masked: NdarrayOrTensor
k_masked = k * mask
return k_masked
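# A minimal usage sketch, assuming torch as imported by this module; the helper name and alpha value
# are hypothetical. An alpha closer to 1 keeps only a small low-frequency disc of k-space, so the
# ringing artifact is stronger.
def _example_gibbs_noise():
    import torch

    img = torch.rand(1, 64, 64)
    return GibbsNoise(alpha=0.7)(img)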
class RandGibbsNoise(RandomizableTransform):
"""
Naturalistic image augmentation via Gibbs artifacts. The transform
randomly applies Gibbs noise to 2D/3D MRI images. Gibbs artifacts
are one of the common types of artifacts appearing in MRI scans.
The transform is applied to all the channels in the data.
For general information on Gibbs artifacts, please refer to:
https://pubs.rsna.org/doi/full/10.1148/rg.313105115
https://pubs.rsna.org/doi/full/10.1148/radiographics.22.4.g02jl14949
Args:
prob (float): probability of applying the transform.
alpha (Sequence(float)): Parametrizes the intensity of the Gibbs noise filter applied. Takes
values in the interval [0,1] with alpha = 0 acting as the identity mapping.
If a length-2 list is given as [a,b] then the value of alpha will be
sampled uniformly from the interval [a,b]. 0 <= a <= b <= 1.
"""
backend = GibbsNoise.backend
@deprecated_arg(name="as_tensor_output", since="0.6")
def __init__(self, prob: float = 0.1, alpha: Sequence[float] = (0.0, 1.0), as_tensor_output: bool = True) -> None:
if len(alpha) != 2:
raise ValueError("alpha length must be 2.")
if alpha[1] > 1 or alpha[0] < 0:
raise ValueError("alpha must take values in the interval [0, 1]")
if alpha[0] > alpha[1]:
raise ValueError("When alpha = [a,b] we need a < b.")
self.alpha = alpha
self.sampled_alpha = -1.0 # stores last alpha sampled by randomize()
RandomizableTransform.__init__(self, prob=prob)
def randomize(self, data: Any) -> None:
"""
(1) Set random variable to apply the transform.
(2) Get alpha from uniform distribution.
"""
super().randomize(None)
if not self._do_transform:
return None
self.sampled_alpha = self.R.uniform(self.alpha[0], self.alpha[1])
def __call__(self, img: NdarrayOrTensor, randomize: bool = True):
if randomize:
# randomize application and possibly alpha
self.randomize(None)
if not self._do_transform:
return img
return GibbsNoise(self.sampled_alpha)(img)
class KSpaceSpikeNoise(Transform, Fourier):
"""
Apply localized spikes in `k`-space at the given locations and intensities.
Spike (Herringbone) artifact is a type of data acquisition artifact which
may occur during MRI scans.
For general information on spike artifacts, please refer to:
`AAPM/RSNA physics tutorial for residents: fundamental physics of MR imaging
<https://pubmed.ncbi.nlm.nih.gov/16009826>`_.
`Body MRI artifacts in clinical practice: A physicist's and radiologist's
perspective <https://doi.org/10.1002/jmri.24288>`_.
Args:
loc: spatial location for the spikes. For
images with 3D spatial dimensions, the user can provide (C, X, Y, Z)
to fix which channel C is affected, or (X, Y, Z) to place the same
spike in all channels. For 2D cases, the user can provide (C, X, Y)
or (X, Y).
k_intensity: value for the log-intensity of the
`k`-space version of the image. If one location is passed to ``loc`` or the
channel is not specified, then this argument should receive a float. If
``loc`` is given a sequence of locations, then this argument should
receive a sequence of intensities. This value should be tested as it is
data-dependent. The default values are 2.5 times the mean of the
log-intensity for each channel.
Example:
When working with 4D data, ``KSpaceSpikeNoise(loc = ((3,60,64,32), (64,60,32)), k_intensity = (13,14))``
will place a spike at `[3, 60, 64, 32]` with `log-intensity = 13`, and
one spike per channel located respectively at `[: , 64, 60, 32]`
with `log-intensity = 14`.
"""
backend = [TransformBackends.TORCH, TransformBackends.NUMPY]
@deprecated_arg(name="as_tensor_output", since="0.6")
def __init__(
self,
loc: Union[Tuple, Sequence[Tuple]],
k_intensity: Optional[Union[Sequence[float], float]] = None,
as_tensor_output: bool = True,
):
self.loc = ensure_tuple(loc)
self.k_intensity = k_intensity
# assert one-to-one relationship between factors and locations
if isinstance(k_intensity, Sequence):
if not isinstance(loc[0], Sequence):
raise ValueError(
"If a sequence is passed to k_intensity, then a sequence of locations must be passed to loc"
)
if len(k_intensity) != len(loc):
raise ValueError("There must be one intensity_factor value for each tuple of indices in loc.")
if isinstance(self.loc[0], Sequence) and k_intensity is not None and not isinstance(self.k_intensity, Sequence):
raise ValueError("There must be one intensity_factor value for each tuple of indices in loc.")
def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
"""
Args:
img: image with dimensions (C, H, W) or (C, H, W, D)
"""
# checking that tuples in loc are consistent with img size
self._check_indices(img)
if len(img.shape) < 3:
raise RuntimeError("Image needs a channel direction.")
if isinstance(self.loc[0], int) and len(img.shape) == 4 and len(self.loc) == 2:
raise RuntimeError("Input images of dimension 4 need location tuple to be length 3 or 4")
if isinstance(self.loc[0], Sequence) and len(img.shape) == 4 and min(map(len, self.loc)) == 2:
raise RuntimeError("Input images of dimension 4 need location tuple to be length 3 or 4")
n_dims = len(img.shape[1:])
# FT
k = self.shift_fourier(img, n_dims)
lib = np if isinstance(k, np.ndarray) else torch
log_abs = lib.log(lib.abs(k) + 1e-10) # type: ignore
phase = lib.angle(k) # type: ignore
k_intensity = self.k_intensity
# default log intensity
if k_intensity is None:
k_intensity = tuple(lib.mean(log_abs, axis=tuple(range(-n_dims, 0))) * 2.5) # type: ignore
# highlight
if isinstance(self.loc[0], Sequence):
for idx, val in zip(self.loc, ensure_tuple(k_intensity)):
self._set_spike(log_abs, idx, val)
else:
self._set_spike(log_abs, self.loc, k_intensity)
# map back
k = lib.exp(log_abs) * lib.exp(1j * phase) # type: ignore
img, *_ = convert_to_dst_type(self.inv_shift_fourier(k, n_dims), dst=img)
return img
def _check_indices(self, img) -> None:
"""Helper method to check consistency of self.loc and input image.
Raises assertion error if any index in loc is out of bounds."""
loc = list(self.loc)
if not isinstance(loc[0], Sequence):
loc = [loc]
for i in range(len(loc)):
if len(loc[i]) < len(img.shape):
loc[i] = [0] + list(loc[i])
for i in range(len(img.shape)):
if img.shape[i] <= max(x[i] for x in loc):
raise ValueError(
f"The index value at position {i} of one of the tuples in loc = {self.loc} is out of bounds for current image."
)
def _set_spike(self, k: NdarrayOrTensor, idx: Tuple, val: Union[Sequence[float], float]):
"""
Helper function to introduce a given intensity at given location.
Args:
k: intensity array to alter.
idx: index of location where to apply change.
val: value of intensity to write in.
"""
if len(k.shape) == len(idx):
k[idx] = val[idx[0]] if isinstance(val, Sequence) else val
elif len(k.shape) == 4 and len(idx) == 3:
k[:, idx[0], idx[1], idx[2]] = val # type: ignore
elif len(k.shape) == 3 and len(idx) == 2:
k[:, idx[0], idx[1]] = val # type: ignore
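# A minimal usage sketch, assuming torch as imported by this module; the location, intensity and
# helper name are hypothetical values chosen for illustration only.
def _example_kspace_spike_noise():
    import torch

    img = torch.rand(2, 64, 64)  # hypothetical 2-channel 2D image
    # place one spike in channel 1 at k-space index (32, 32) with log-intensity 13
    return KSpaceSpikeNoise(loc=(1, 32, 32), k_intensity=13.0)(img)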
class RandKSpaceSpikeNoise(RandomizableTransform, Fourier):
"""
Naturalistic data augmentation via spike artifacts. The transform applies
localized spikes in `k`-space, and it is the random version of
:py:class:`monai.transforms.KSpaceSpikeNoise`.
Spike (Herringbone) artifact is a type of data acquisition artifact which
may occur during MRI scans. For general information on spike artifacts,
please refer to:
`AAPM/RSNA physics tutorial for residents: fundamental physics of MR imaging
<https://pubmed.ncbi.nlm.nih.gov/16009826>`_.
`Body MRI artifacts in clinical practice: A physicist's and radiologist's
perspective <https://doi.org/10.1002/jmri.24288>`_.
Args:
prob: probability of applying the transform, either on all
channels at once, or channel-wise if ``channel_wise = True``.
intensity_range: pass a tuple (a, b) to sample the log-intensity from the interval (a, b)
uniformly for all channels. Or pass sequence of intervals
((a0, b0), (a1, b1), ...) to sample for each respective channel.
In the second case, the number of 2-tuples must match the number of channels.
The default range is `(0.95x, 1.10x)`, where `x` is the mean
log-intensity for each channel.
channel_wise: treat each channel independently. True by
default.
Example:
To apply `k`-space spikes randomly with probability 0.5, and
log-intensity sampled from the interval [11, 12] for each channel
independently, one uses
``RandKSpaceSpikeNoise(prob=0.5, intensity_range=(11, 12), channel_wise=True)``
"""
backend = KSpaceSpikeNoise.backend
@deprecated_arg(name="as_tensor_output", since="0.6")
def __init__(
self,
prob: float = 0.1,
intensity_range: Optional[Sequence[Union[Sequence[float], float]]] = None,
channel_wise: bool = True,
as_tensor_output: bool = True,
):
self.intensity_range = intensity_range
self.channel_wise = channel_wise
self.sampled_k_intensity: List = []
self.sampled_locs: List[Tuple] = []
if intensity_range is not None and isinstance(intensity_range[0], Sequence) and not channel_wise:
raise ValueError("When channel_wise = False, intensity_range should be a 2-tuple (low, high) or None.")
super().__init__(prob)
def __call__(self, img: NdarrayOrTensor, randomize: bool = True):
"""
Apply transform to `img`. Assumes data is in channel-first form.
Args:
img: image with dimensions (C, H, W) or (C, H, W, D)
"""
if (
self.intensity_range is not None
and isinstance(self.intensity_range[0], Sequence)
and len(self.intensity_range) != img.shape[0]
):
raise RuntimeError(
"If intensity_range is a sequence of sequences, then there must be one (low, high) tuple for each channel."
)
self.sampled_k_intensity = []
self.sampled_locs = []
if randomize:
intensity_range = self._make_sequence(img)
self.randomize(img, intensity_range)
if not self._do_transform:
return img
return KSpaceSpikeNoise(self.sampled_locs, self.sampled_k_intensity)(img)
def randomize(self, img: NdarrayOrTensor, intensity_range: Sequence[Sequence[float]]) -> None: # type: ignore
"""
Helper method to sample both the location and intensity of the spikes.
When not working channel-wise (channel_wise=False), it uses the random
variable ``self._do_transform`` to decide whether to sample a location
and intensity.
When working channel wise, the method randomly samples a location and
intensity for each channel depending on ``self._do_transform``.
"""
super().randomize(None)
if not self._do_transform:
return None
if self.channel_wise:
# randomizing per channel
for i, chan in enumerate(img):
self.sampled_locs.append((i,) + tuple(self.R.randint(0, k) for k in chan.shape))
self.sampled_k_intensity.append(self.R.uniform(intensity_range[i][0], intensity_range[i][1]))
else:
# working with all channels together
spatial = tuple(self.R.randint(0, k) for k in img.shape[1:])
self.sampled_locs = [(i,) + spatial for i in range(img.shape[0])]
if isinstance(intensity_range[0], Sequence):
self.sampled_k_intensity = [self.R.uniform(p[0], p[1]) for p in intensity_range]
else:
self.sampled_k_intensity = [self.R.uniform(intensity_range[0], intensity_range[1])] * len(img)
def _make_sequence(self, x: NdarrayOrTensor) -> Sequence[Sequence[float]]:
"""
Formats the sequence of intensities ranges to Sequence[Sequence[float]].
"""
if self.intensity_range is None:
# set default range if one not provided
return self._set_default_range(x)
if not isinstance(self.intensity_range[0], Sequence):
return (ensure_tuple(self.intensity_range),) * x.shape[0]
return ensure_tuple(self.intensity_range)
def _set_default_range(self, img: NdarrayOrTensor) -> Sequence[Sequence[float]]:
"""
Sets default intensity ranges to be sampled.
Args:
img: image to transform.
"""
n_dims = len(img.shape[1:])
k = self.shift_fourier(img, n_dims)
mod = torch if isinstance(k, torch.Tensor) else np
log_abs = mod.log(mod.absolute(k) + 1e-10) # type: ignore
shifted_means = mod.mean(log_abs, dim=tuple(range(-n_dims, 0))) * 2.5 # type: ignore
return tuple((i * 0.95, i * 1.1) for i in shifted_means)
class RandCoarseTransform(RandomizableTransform):
"""
Randomly select coarse regions in the image, then execute transform operations for the regions.
It's the base class of all kinds of region transforms.
Refer to papers: https://arxiv.org/abs/1708.04552
Args:
holes: number of regions to dropout, if `max_holes` is not None, use this arg as the minimum number to
randomly select the expected number of regions.
spatial_size: spatial size of the regions to dropout, if `max_spatial_size` is not None, use this arg
as the minimum spatial size to randomly select size for every region.
if some components of the `spatial_size` are non-positive values, the transform will use the
corresponding components of input img size. For example, `spatial_size=(32, -1)` will be adapted
to `(32, 64)` if the second spatial dimension size of img is `64`.
max_holes: if not None, define the maximum number to randomly select the expected number of regions.
max_spatial_size: if not None, define the maximum spatial size to randomly select size for every region.
if some components of the `max_spatial_size` are non-positive values, the transform will use the
corresponding components of input img size. For example, `max_spatial_size=(32, -1)` will be adapted
to `(32, 64)` if the second spatial dimension size of img is `64`.
prob: probability of applying the transform.
"""
backend = [TransformBackends.NUMPY]
def __init__(
self,
holes: int,
spatial_size: Union[Sequence[int], int],
max_holes: Optional[int] = None,
max_spatial_size: Optional[Union[Sequence[int], int]] = None,
prob: float = 0.1,
) -> None:
RandomizableTransform.__init__(self, prob)
if holes < 1:
raise ValueError("number of holes must be greater than 0.")
self.holes = holes
self.spatial_size = spatial_size
self.max_holes = max_holes
self.max_spatial_size = max_spatial_size
self.hole_coords: List = []
def randomize(self, img_size: Sequence[int]) -> None:
super().randomize(None)
if not self._do_transform:
return None
size = fall_back_tuple(self.spatial_size, img_size)
self.hole_coords = [] # clear previously computed coords
num_holes = self.holes if self.max_holes is None else self.R.randint(self.holes, self.max_holes + 1)
for _ in range(num_holes):
if self.max_spatial_size is not None:
max_size = fall_back_tuple(self.max_spatial_size, img_size)
size = tuple(self.R.randint(low=size[i], high=max_size[i] + 1) for i in range(len(img_size)))
valid_size = get_valid_patch_size(img_size, size)
self.hole_coords.append((slice(None),) + get_random_patch(img_size, valid_size, self.R))
@abstractmethod
def _transform_holes(self, img: np.ndarray) -> np.ndarray:
"""
Transform the randomly selected `self.hole_coords` in input images.
"""
raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.")
def __call__(self, img: NdarrayOrTensor, randomize: bool = True) -> NdarrayOrTensor:
if randomize:
self.randomize(img.shape[1:])
if not self._do_transform:
return img
img_np: np.ndarray
img_np, *_ = convert_data_type(img, np.ndarray) # type: ignore
out = self._transform_holes(img=img_np)
ret, *_ = convert_to_dst_type(src=out, dst=img)
return ret
class RandCoarseDropout(RandCoarseTransform):
"""
Randomly drop out coarse rectangular regions of the image and fill them with the specified value,
or keep those regions and fill all other areas with the specified value.
Refer to papers: https://arxiv.org/abs/1708.04552, https://arxiv.org/pdf/1604.07379
And other implementation: https://albumentations.ai/docs/api_reference/augmentations/transforms/
#albumentations.augmentations.transforms.CoarseDropout.
Args:
holes: number of regions to dropout, if `max_holes` is not None, use this arg as the minimum number to
randomly select the expected number of regions.
spatial_size: spatial size of the regions to dropout, if `max_spatial_size` is not None, use this arg
as the minimum spatial size to randomly select size for every region.
if some components of the `spatial_size` are non-positive values, the transform will use the
corresponding components of input img size. For example, `spatial_size=(32, -1)` will be adapted
to `(32, 64)` if the second spatial dimension size of img is `64`.
dropout_holes: if `True`, drop out the hole regions and fill them with `fill_value`; if `False`, keep the
hole regions and fill everything outside them with `fill_value`. default to `True`.
fill_value: target value to fill the dropout regions. if a number is provided, it is used as a constant
value to fill all the regions. if a tuple of `min` and `max` is provided, a value is randomly selected
for every pixel / voxel from the range `[min, max)`. if None, the `min` and `max` of the input image
are computed and fill values are randomly selected from that range. default to None.
max_holes: if not None, define the maximum number to randomly select the expected number of regions.
max_spatial_size: if not None, define the maximum spatial size to randomly select size for every region.
if some components of the `max_spatial_size` are non-positive values, the transform will use the
corresponding components of input img size. For example, `max_spatial_size=(32, -1)` will be adapted
to `(32, 64)` if the second spatial dimension size of img is `64`.
prob: probability of applying the transform.
"""
def __init__(
self,
holes: int,
spatial_size: Union[Sequence[int], int],
dropout_holes: bool = True,
fill_value: Optional[Union[Tuple[float, float], float]] = None,
max_holes: Optional[int] = None,
max_spatial_size: Optional[Union[Sequence[int], int]] = None,
prob: float = 0.1,
) -> None:
super().__init__(
holes=holes, spatial_size=spatial_size, max_holes=max_holes, max_spatial_size=max_spatial_size, prob=prob
)
self.dropout_holes = dropout_holes
if isinstance(fill_value, (tuple, list)):
if len(fill_value) != 2:
raise ValueError("fill value should contain 2 numbers if providing the `min` and `max`.")
self.fill_value = fill_value
def _transform_holes(self, img: np.ndarray):
"""
Fill the randomly selected `self.hole_coords` in input images.
Please note that we usually only use `self.R` in `randomize()` method, here is a special case.
"""
fill_value = (img.min(), img.max()) if self.fill_value is None else self.fill_value
if self.dropout_holes:
for h in self.hole_coords:
if isinstance(fill_value, (tuple, list)):
img[h] = self.R.uniform(fill_value[0], fill_value[1], size=img[h].shape)
else:
img[h] = fill_value
ret = img
else:
if isinstance(fill_value, (tuple, list)):
ret = self.R.uniform(fill_value[0], fill_value[1], size=img.shape).astype(img.dtype)
else:
ret = np.full_like(img, fill_value)
for h in self.hole_coords:
ret[h] = img[h]
return ret
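# A minimal usage sketch, assuming numpy as imported by this module; the helper name and values are
# hypothetical. prob=1.0 forces the dropout so the holes are always produced.
def _example_rand_coarse_dropout():
    import numpy as np

    img = np.random.rand(1, 64, 64).astype(np.float32)
    dropper = RandCoarseDropout(holes=4, spatial_size=8, fill_value=0.0, prob=1.0)
    return dropper(img)  # four 8x8 regions replaced by the constant fill value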
class RandCoarseShuffle(RandCoarseTransform):
"""
Randomly select regions in the image, then shuffle the pixels within every region.
It shuffles every channel separately.
Refer to paper:
Kang, Guoliang, et al. "Patchshuffle regularization." arXiv preprint arXiv:1707.07103 (2017).
https://arxiv.org/abs/1707.07103
Args:
holes: number of regions to dropout, if `max_holes` is not None, use this arg as the minimum number to
randomly select the expected number of regions.
spatial_size: spatial size of the regions to dropout, if `max_spatial_size` is not None, use this arg
as the minimum spatial size to randomly select size for every region.
if some components of the `spatial_size` are non-positive values, the transform will use the
corresponding components of input img size. For example, `spatial_size=(32, -1)` will be adapted
to `(32, 64)` if the second spatial dimension size of img is `64`.
max_holes: if not None, define the maximum number to randomly select the expected number of regions.
max_spatial_size: if not None, define the maximum spatial size to randomly select size for every region.
if some components of the `max_spatial_size` are non-positive values, the transform will use the
corresponding components of input img size. For example, `max_spatial_size=(32, -1)` will be adapted
to `(32, 64)` if the second spatial dimension size of img is `64`.
prob: probability of applying the transform.
"""
def _transform_holes(self, img: np.ndarray):
"""
Shuffle the content of randomly selected `self.hole_coords` in input images.
Please note that we usually only use `self.R` in `randomize()` method, here is a special case.
"""
for h in self.hole_coords:
# shuffle every channel separately
for i, c in enumerate(img[h]):
patch_channel = c.flatten()
self.R.shuffle(patch_channel)
img[h][i] = patch_channel.reshape(c.shape)
return img
class HistogramNormalize(Transform):
"""
Apply the histogram normalization to input image.
Refer to: https://github.com/facebookresearch/CovidPrognosis/blob/master/covidprognosis/data/transforms.py#L83.
Args:
num_bins: number of the bins to use in histogram, default to `256`. for more details:
https://numpy.org/doc/stable/reference/generated/numpy.histogram.html.
min: the min value to normalize input image, default to `0`.
max: the max value to normalize input image, default to `255`.
mask: if provided, must be ndarray of bools or 0s and 1s, and same shape as `image`.
only points at which `mask==True` are used for the equalization.
can also provide the mask along with img at runtime.
dtype: data type of the output, default to `float32`.
"""
backend = [TransformBackends.NUMPY]
def __init__(
self,
num_bins: int = 256,
min: int = 0,
max: int = 255,
mask: Optional[NdarrayOrTensor] = None,
dtype: DtypeLike = np.float32,
) -> None:
self.num_bins = num_bins
self.min = min
self.max = max
self.mask = mask
self.dtype = dtype
def __call__(self, img: NdarrayOrTensor, mask: Optional[NdarrayOrTensor] = None) -> np.ndarray:
return equalize_hist(
img=img,
mask=mask if mask is not None else self.mask,
num_bins=self.num_bins,
min=self.min,
max=self.max,
dtype=self.dtype,
)
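# A minimal usage sketch, assuming numpy as imported by this module; the helper name is hypothetical.
def _example_histogram_normalize():
    import numpy as np

    img = np.random.rand(1, 64, 64).astype(np.float32)
    normalizer = HistogramNormalize(num_bins=256, min=0, max=255)
    return normalizer(img)  # histogram-equalized image scaled into [0, 255]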
| 40.719
| 131
| 0.632297
|
d97c22fa1c7fc02beec7148230126903c0db0ac7
| 794
|
py
|
Python
|
fixture/session.py
|
tarsic99/Python-training-
|
96da2df5f249f39370295504748b218247f2935c
|
[
"Apache-2.0"
] | null | null | null |
fixture/session.py
|
tarsic99/Python-training-
|
96da2df5f249f39370295504748b218247f2935c
|
[
"Apache-2.0"
] | null | null | null |
fixture/session.py
|
tarsic99/Python-training-
|
96da2df5f249f39370295504748b218247f2935c
|
[
"Apache-2.0"
] | null | null | null |
class SessionHelper:
def __init__(self, app):
self.app = app
def login(self, username, password):
wd = self.app.wd
self.app.open_home_page()
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys(username)
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys(password)
wd.find_element_by_xpath("//form[@id='LoginForm']/input[3]").click()
# OR wd.find_element_by_css_selector("input[type=\"submit\"]").click()
def logout(self):
wd = self.app.wd
wd.find_element_by_link_text("Logout").click()
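# A minimal usage sketch; `app` is assumed to be the application fixture used throughout this
# package, exposing a Selenium WebDriver as `app.wd` and an `open_home_page()` method. The
# credentials are hypothetical.
def example_login_logout(app):
    helper = SessionHelper(app)
    helper.login(username="admin", password="secret")
    helper.logout()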
| 36.090909
| 78
| 0.642317
|
a5e576e1fe44243e0b1cffc701441313ebf92f04
| 5,856
|
py
|
Python
|
system_permission/models.py
|
nianhuatech/bking-permission
|
1bee84e24d983274883da9741abcff48d546cc55
|
[
"Apache-2.0"
] | 4
|
2021-05-19T02:28:01.000Z
|
2021-12-14T04:02:22.000Z
|
system_permission/models.py
|
nianhuatech/bking-permission
|
1bee84e24d983274883da9741abcff48d546cc55
|
[
"Apache-2.0"
] | null | null | null |
system_permission/models.py
|
nianhuatech/bking-permission
|
1bee84e24d983274883da9741abcff48d546cc55
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云(BlueKing) available.
Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
"""
# from django.db import models
from django.db import models
from common_utils.model_to_dicts import convert_obj_to_dicts,convert_objs_to_dicts
"""
Permission management models: operator model.
"""
class BkingOperator(models.Model):
gender = (
('0', "正常"),
('1', "锁定"),
('2', "无效"),
)
op_id=models.AutoField(u"操作员ID",primary_key=True)
op_name=models.CharField(u"操作员名称",max_length=255)
login_code=models.CharField(u"登录工号",max_length=255)
op_password=models.CharField(u"工号密码",max_length=255)
bill_class=models.CharField(u"业务类别",max_length=255,null=True,blank=True)
photo=models.CharField(u"头像",max_length=255,null=True,blank=True)
region_id=models.CharField(u"地市编码",max_length=255,null=True,blank=True)
county_id=models.CharField(u"区县编码",max_length=255,null=True,blank=True)
org_id=models.CharField(u"归属组织编码",max_length=255,null=True,blank=True)
email=models.CharField(u"邮箱",max_length=255,null=True,blank=True)
phone_id=models.CharField(u"手机号",max_length=255,null=True,blank=True)
status=models.IntegerField(u"账号状态",choices=gender, default=0)
create_date=models.DateTimeField(u"创建时间",auto_now_add=True)
create_op=models.CharField(u"创建人",max_length=20)
upd_date=models.DateTimeField(u"修改时间",auto_now = True)
mark=models.CharField(u"备注",max_length=1000,null=True,blank=True)
def __str__(self):
return self.login_code+'-'+self.op_name
class Meta:
ordering = ["-create_date"]
verbose_name = "操作员"
verbose_name_plural = "操作员"
"""
Permission management models: role model.
"""
class BkingRole(models.Model):
gender = (
('0', "有效"),
('1', "无效"),
)
gender2 = (
('0', "管理员角色"),
('1', "普通角色"),
)
role_code=models.CharField(u"角色编码",max_length=255)
role_name=models.CharField(u"角色名称",max_length=255)
role_type=models.IntegerField(u"角色类型",choices=gender2,default=0)
status=models.IntegerField(u"角色状态",choices=gender, default=0)
create_date=models.DateTimeField(u"创建时间",auto_now_add=True)
create_op=models.CharField(u"创建人",max_length=10)
upd_date=models.DateTimeField(u"修改时间",auto_now = True)
mark=models.CharField(u"备注",max_length=1000,null=True,blank=True)
def __str__(self):
return self.role_code+'-'+self.role_name
class Meta:
ordering = ["-create_date"]
verbose_name = "角色"
verbose_name_plural = "角色"
"""
Permission management models: permission resource model.
"""
class BkingPriv(models.Model):
gender = (
('0', "有效"),
('1', "无效"),
)
priv_code=models.CharField(u"资源编码",max_length=255)
parent_priv_code=models.CharField(u"父资源编码",max_length=255)
priv_name=models.CharField(u"资源名称",max_length=255)
priv_uri=models.CharField(u"资源URI",null=True,blank=True,max_length=255)
priv_icon=models.CharField(u"资源图标",null=True,blank=True,max_length=255)
priv_sort=models.CharField(u"资源序号",null=True,blank=True,max_length=255)
priv_class=models.CharField(u"资源类别",max_length=255,default="SYSTEM_PERMISSION")
priv_type=models.CharField(u"资源类型",max_length=255)
status=models.IntegerField(u"资源状态",choices=gender, default=0)
create_date=models.DateTimeField(u"创建时间",auto_now_add=True)
create_op=models.CharField(u"创建人",max_length=10)
upd_date=models.DateTimeField(u"修改时间",auto_now = True)
mark=models.CharField(u"备注",max_length=1000,null=True,blank=True)
def __str__(self):
return self.priv_code+'-'+self.priv_name
def getChildrens(self):
try:
return BkingPriv.objects.filter(status=0,parent_priv_code=self.priv_code)
except:
pass
class Meta:
ordering = ["-priv_code"]
verbose_name = "权限资源"
verbose_name_plural = "权限资源"
"""
Permission management models: role-to-resource grant model.
"""
class BkingRolePrivGrant(models.Model):
gender = (
('0', "有效"),
('1', "无效"),
)
role_code=models.CharField(u"业务ID",max_length=255)
priv_code=models.CharField(u"应用ID",max_length=255)
create_op=models.CharField(u"创建人",max_length=10)
create_date=models.DateTimeField(u"创建时间",auto_now_add=True)
start_date=models.DateTimeField(u"授权开始时间",null=True,blank=True)
end_date=models.DateTimeField(u"授权结束时间",null=True,blank=True)
status=models.IntegerField(u"授权状态",choices=gender, default=0)
def __str__(self):
return self.role_code+'-'+self.priv_code
class Meta:
ordering = ["-create_date"]
verbose_name = "角色资源授权"
verbose_name_plural = "角色资源授权"
"""
Base data models: account-to-role grant model.
"""
class BkingOpRoleGrant(models.Model):
gender = (
('0', "有效"),
('1', "无效"),
)
login_code=models.CharField(u"登陆工号",max_length=255)
role_code=models.CharField(u"角色编码",max_length=255)
create_op=models.CharField(u"创建人",max_length=10)
start_date=models.DateTimeField(u"授权开始时间",null=True,blank=True)
end_date=models.DateTimeField(u"授权结束时间",null=True,blank=True)
status=models.IntegerField(u"授权状态",choices=gender, default=0)
create_date=models.DateTimeField(u"创建时间",auto_now_add=True)
mark=models.CharField(u"备注",max_length=1000,null=True,blank=True)
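# A minimal query sketch (a hypothetical helper, not part of the models above): it assumes the usual
# Django ORM is available and shows how an operator's active permission resources can be resolved
# through the role and grant tables defined in this module.
def example_operator_privs(login_code):
    role_codes = BkingOpRoleGrant.objects.filter(
        login_code=login_code, status=0
    ).values_list("role_code", flat=True)
    priv_codes = BkingRolePrivGrant.objects.filter(
        role_code__in=list(role_codes), status=0
    ).values_list("priv_code", flat=True)
    return BkingPriv.objects.filter(priv_code__in=list(priv_codes), status=0)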
| 37.299363
| 115
| 0.695184
|
937a776f0cc2b610033d58ac0baa1df69744f3eb
| 352
|
py
|
Python
|
SemesterProject/app.py
|
CJP-12/CMPT-120L-910-20F
|
ec74b248ed1957ceabd3f96e176e0e244220a9cd
|
[
"MIT"
] | null | null | null |
SemesterProject/app.py
|
CJP-12/CMPT-120L-910-20F
|
ec74b248ed1957ceabd3f96e176e0e244220a9cd
|
[
"MIT"
] | null | null | null |
SemesterProject/app.py
|
CJP-12/CMPT-120L-910-20F
|
ec74b248ed1957ceabd3f96e176e0e244220a9cd
|
[
"MIT"
] | null | null | null |
from flask import Flask, redirect, url_for
app = Flask(__name__)
@app.route("/")
def home():
return "<h1>Welcome to my project!</h1>"
@app.route("/<name>")
def user(name):
return f"Hello {name}!"
@app.route("/hidden/")
def admin():
return redirect(url_for("user", name="<h1>Secret Page!</h1>"))
if __name__ == "__main__":
app.run()
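# A minimal test sketch, assuming it is run in this module or after importing `app`; it uses Flask's
# built-in test client, so no server needs to be started. The helper name is hypothetical.
def example_requests():
    client = app.test_client()
    assert client.get("/").status_code == 200
    assert b"Hello Alice!" in client.get("/Alice").data
    assert client.get("/hidden/").status_code == 302  # redirect to the user route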
| 18.526316
| 66
| 0.630682
|
d300952d0f0ee537b239a971e3793ecc8e2dc33f
| 1,384
|
py
|
Python
|
misc/extract_feats_roi.py
|
Dorothylyly/SAAT
|
c0113ada53ccb6611089c9b76f342e523b4560bc
|
[
"MIT"
] | 71
|
2020-03-17T10:09:24.000Z
|
2022-03-12T01:11:30.000Z
|
misc/extract_feats_roi.py
|
Dorothylyly/SAAT
|
c0113ada53ccb6611089c9b76f342e523b4560bc
|
[
"MIT"
] | 37
|
2020-06-18T18:09:16.000Z
|
2021-07-30T00:06:35.000Z
|
misc/extract_feats_roi.py
|
Dorothylyly/SAAT
|
c0113ada53ccb6611089c9b76f342e523b4560bc
|
[
"MIT"
] | 18
|
2020-06-17T09:09:29.000Z
|
2021-08-11T09:34:46.000Z
|
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from maskrcnn_benchmark.config import cfg
from predictor import COCODemo
import cv2
import os
import h5py
config_file = "../configs/caffe2/e2e_faster_rcnn_R_101_FPN_1x_caffe2.yaml"
# update the config options with the config file
cfg.merge_from_file(config_file)
# manual override some options
cfg.merge_from_list(["MODEL.DEVICE", "cuda:0"])
coco_demo = COCODemo(
cfg,
min_image_size=800,
confidence_threshold=0.3,
)
if __name__ == '__main__':
frame_path = './Data/Frames'
feat_path = './Data/roi_feat_fc'
vid_names = os.listdir(frame_path)
roi_feats_h5 = h5py.File(os.path.join(feat_path, 'roi_feats.h5'), 'w')
roi_box_h5 = h5py.File(os.path.join(feat_path, 'roi_box.h5'), 'w')
for cnt, vid in enumerate(vid_names):
print('{}/{}'.format(cnt + 1, len(vid_names)))  # progress: current video / total videos
curr_fr_path = os.path.join(frame_path, vid)
for i in range(14, 15):
img = cv2.imread(os.path.join(curr_fr_path, str(i)+'.jpg'))
result, top_preds, top_roi_feats = coco_demo.run_on_opencv_image(img)
if top_roi_feats.shape[0] > 0:
roi_feats_h5.create_dataset(vid, data=top_roi_feats.numpy(), dtype='f4')
roi_box_h5.create_dataset(vid, data=top_preds.bbox.numpy(), dtype='f4')
print('done')
roi_feats_h5.close()
roi_box_h5.close()
| 30.086957
| 77
| 0.71026
|
82b927c82f112eab6286d021bb9d07a919a1ee01
| 1,017
|
py
|
Python
|
others/sub-array-median/median.py
|
tjeubaoit/algorithm
|
a1f2a30e0f736cc3d8b45ed845f724b9a4ed2e9a
|
[
"MIT"
] | null | null | null |
others/sub-array-median/median.py
|
tjeubaoit/algorithm
|
a1f2a30e0f736cc3d8b45ed845f724b9a4ed2e9a
|
[
"MIT"
] | null | null | null |
others/sub-array-median/median.py
|
tjeubaoit/algorithm
|
a1f2a30e0f736cc3d8b45ed845f724b9a4ed2e9a
|
[
"MIT"
] | null | null | null |
import math
def count_median(arr, i, j, n, orders):
size = j + 1 - i
med_pos = math.ceil(size/2)
is_odd = size % 2 != 0
# print(med_pos)
for k in range(0, n):
if orders[k] == med_pos:
break
if is_odd:
return k
else:
k2 = k
while k2 < n:
k2 += 1
if orders[k2] != orders[k]:
break
return (k+k2)/2
def sub_array_median(arr, n):
for i in range(0, len(arr)):
orders = [0] * n
sub_arr = []
for j in range(i, len(arr)):
sub_arr.append(arr[j])
for k in range(arr[j], n):
orders[k] += 1
# print('orders', orders)
med = count_median(arr, i, j, n, orders)
print(sub_arr, med)
def main():
sub_array_median([1, 4, 1, 2, 7, 5, 2], 10)
# arr = [1, 4, 1, 2, 7, 5, 2]
# orders = [0, 2, 4, 4, 5, 6, 6, 7, 7, 7]
# count_median(arr, 0, 7, 10, orders)
if __name__ == '__main__':
main()
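# A worked example sketch using the helpers above: for the window arr = [1, 4, 1, 2] with n = 10,
# orders[k] counts how many elements are <= k, i.e. orders == [0, 2, 3, 3, 4, 4, 4, 4, 4, 4];
# the middle position is 2, reached first at k = 1, and averaging with the next distinct value 2
# gives the median (1 + 2) / 2 = 1.5, matching sorted([1, 1, 2, 4]). The helper name is hypothetical.
def example_single_window():
    arr = [1, 4, 1, 2]
    orders = [0] * 10
    for v in arr:
        for k in range(v, 10):
            orders[k] += 1
    return count_median(arr, 0, len(arr) - 1, 10, orders)  # 1.5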
| 22.6
| 52
| 0.45821
|
f365a50231511787a4c5eac21350818b3309072f
| 6,642
|
py
|
Python
|
3algo/results_complilation/timely_sumup.py
|
allengrr/deadlock_project
|
933878077c45a7df04daa087407bb2620c064617
|
[
"MIT"
] | null | null | null |
3algo/results_complilation/timely_sumup.py
|
allengrr/deadlock_project
|
933878077c45a7df04daa087407bb2620c064617
|
[
"MIT"
] | null | null | null |
3algo/results_complilation/timely_sumup.py
|
allengrr/deadlock_project
|
933878077c45a7df04daa087407bb2620c064617
|
[
"MIT"
] | 1
|
2021-03-21T17:54:26.000Z
|
2021-03-21T17:54:26.000Z
|
from data import timely as t
import matplotlib.pyplot as plt
import numpy as np
from textwrap import wrap
fig = plt.figure()
ax1 = fig.add_subplot(411)
ax2 = fig.add_subplot(412)
ax3 = fig.add_subplot(413)
ax4 = fig.add_subplot(414)
width = 0.35
title = {2}
algo_dict = {'RMS + Bankers': r'$ALG_1$',
'EDF + Bankers': r'$ALG_2$',
'RMS + wound wait': r'$ALG_3$',
'RMS + wait die': r'$ALG_4$',
'EDF + wound wait': r'$ALG_5$',
'EDF + wait die': r'$ALG_6$'}
timely_dict = {2: {4: [t.timely4_2_4, t.timely5_2_4, t.timely6_2_4],
5: [t.timely4_2_5, t.timely5_2_5, t.timely6_2_5],
6: [t.timely4_2_6, t.timely5_2_6, t.timely6_2_6],
7: [t.timely4_2_7, t.timely5_2_7, t.timely6_2_7]},
3: {4: [t.timely4_3_4, t.timely5_3_4, t.timely6_3_4],
5: [t.timely4_3_5, t.timely5_3_5, t.timely6_3_5],
6: [t.timely4_3_6, t.timely5_3_6, t.timely6_3_6],
7: [t.timely4_3_7, t.timely5_3_7, t.timely6_3_7]},
7: {4: [t.timely4_7_4, t.timely5_7_4, t.timely6_7_4],
5: [t.timely4_7_5, t.timely5_7_5, t.timely6_7_5],
6: [t.timely4_7_6, t.timely5_7_6, t.timely6_7_6],
7: [t.timely4_7_7, t.timely5_7_7, t.timely6_7_7]},
10: {4: [t.timely4_10_4, t.timely5_10_4, t.timely6_10_4],
5: [t.timely4_10_5, t.timely5_10_5, t.timely6_10_5],
6: [t.timely4_10_6, t.timely5_10_6, t.timely6_10_6],
7: [t.timely4_10_7, t.timely5_10_7, t.timely6_10_7]},
12: {4: [t.timely4_12_4, t.timely5_12_4, t.timely6_12_4],
5: [t.timely4_12_5, t.timely5_12_5, t.timely6_12_5],
6: [t.timely4_12_6, t.timely5_12_6, t.timely6_12_6],
7: [t.timely4_12_7, t.timely5_12_7, t.timely6_12_7]},
16: {4: [t.timely4_16_4, t.timely5_16_4, t.timely6_16_4],
5: [t.timely4_16_5, t.timely5_16_5, t.timely6_16_5],
6: [t.timely4_16_6, t.timely5_16_6, t.timely6_16_6],
7: [t.timely4_16_7, t.timely5_16_7, t.timely6_16_7]}
}
untimely_dict = {2: {4: [t.untimely4_2_4, t.untimely5_2_4, t.untimely6_2_4],
5: [t.untimely4_2_5, t.untimely5_2_5, t.untimely6_2_5],
6: [t.untimely4_2_6, t.untimely5_2_6, t.untimely6_2_6],
7: [t.untimely4_2_7, t.untimely5_2_7, t.untimely6_2_7]},
3: {4: [t.untimely4_3_4, t.untimely5_3_4, t.untimely6_3_4],
5: [t.untimely4_3_5, t.untimely5_3_5, t.untimely6_3_5],
6: [t.untimely4_3_6, t.untimely5_3_6, t.untimely6_3_6],
7: [t.untimely4_3_7, t.untimely5_3_7, t.untimely6_3_7]},
7: {4: [t.untimely4_7_4, t.untimely5_7_4, t.untimely6_7_4],
5: [t.untimely4_7_5, t.untimely5_7_5, t.untimely6_7_5],
6: [t.untimely4_7_6, t.untimely5_7_6, t.untimely6_7_6],
7: [t.untimely4_7_7, t.untimely5_7_7, t.untimely6_7_7]},
10: {4: [t.untimely4_10_4, t.untimely5_10_4, t.untimely6_10_4],
5: [t.untimely4_10_5, t.untimely5_10_5, t.untimely6_10_5],
6: [t.untimely4_10_6, t.untimely5_10_6, t.untimely6_10_6],
7: [t.untimely4_10_7, t.untimely5_10_7, t.untimely6_10_7]},
12: {4: [t.untimely4_12_4, t.untimely5_12_4, t.untimely6_12_4],
5: [t.untimely4_12_5, t.untimely5_12_5, t.untimely6_12_5],
6: [t.untimely4_12_6, t.untimely5_12_6, t.untimely6_12_6],
7: [t.untimely4_12_7, t.untimely5_12_7, t.untimely6_12_7]},
16: {4: [t.untimely4_16_4, t.untimely5_16_4, t.untimely6_16_4],
5: [t.untimely4_16_5, t.untimely5_16_5, t.untimely6_16_5],
6: [t.untimely4_16_6, t.untimely5_16_6, t.untimely6_16_6],
7: [t.untimely4_16_7, t.untimely5_16_7, t.untimely6_16_7]}
}
def sum_data(data):
result = {}
for algo in data:
for no in data[algo]:
if no in result:
result[no].append(sum(data[algo][no]))
else:
result[no] = [sum(data[algo][no])]
return result
def total_data():
data_tuple = format_data()
result = {}
for no in data_tuple[0]:
for item_id in range(len(data_tuple[0][no])):
if no in result:
result[no].append(data_tuple[0][no][item_id] + data_tuple[1][no][item_id])
else:
result[no] = [data_tuple[0][no][item_id] + data_tuple[1][no][item_id]]
print(result)
def format_data():
return sum_data(timely_dict), sum_data(untimely_dict)
def percent(value, total):
if value > 0:
return round((value / total) * 100, 2)
else:
return 0
def histogram(timely, untimely, ax, no):
ind = np.arange(len(timely))
p1 = ax.bar(ind, untimely, width, color='r', alpha=0.4)
p2 = ax.bar(ind, timely, width, color='g', bottom=untimely, alpha=0.4)
ax.set_xticks(ind)
ax.set_xticklabels(algo_dict.values())
for i in timely:
j = timely.index(i)
total = i + untimely[j]
ax.text(j, timely[j] + untimely[j], '{}%'.format(percent(i, total)), rotation=0,
ha="center", va="center", bbox=dict(boxstyle="round", ec=(0., 0., 0.), fc=(0.7, 0.9, 1.), ))
ax.text(j, untimely[j], '{}%'.format(percent(untimely[j], total)), rotation=0,
ha="center", va="center", bbox=dict(boxstyle="round", ec=(1., 0.5, 0.5), fc=(1., 0.8, 0.8), ))
ax.legend((p1[0], p2[0]), ('UP', 'TP'), prop={"size":16})
# ax.set_ylabel('\n'.join(wrap(f'Plot for {no} MECs', 8))).set_rotation(0)
ax.set_ylabel("No of Processes", fontsize=15)
for label in ax.get_xticklabels():
label.set_fontsize(16)
ax.xaxis.set_tick_params(labelsize=16)
#ax.set_ylabel('\n'.join(wrap(f'{no} MECs', 8)), rotation=0, fontsize=15, labelpad=30)
axx = ax.twinx()
axx.set_yticklabels([])
axx.set_yticks([])
axx.set_ylabel('\n'.join(wrap(f'{no} MECs', 8)), rotation=0, fontsize=15, labelpad=30)
def plot_av_times():
axes = {ax1: 4, ax2: 5, ax3: 6, ax4: 7}
_data = format_data()
# print(_data)
for i in axes:
histogram(_data[0][axes[i]], _data[1][axes[i]], i, axes[i])
#fig.suptitle('MEC Execution ratio Deadlock Experiment')
plt.show()
total_data()
plot_av_times()
| 44.878378
| 110
| 0.566546
|
9ab0cf2cce40e17130df315b1ca12a8d754a6d99
| 3,747
|
py
|
Python
|
qemu-4.2.0/scripts/tracetool/format/log_stap.py
|
MisaZhu/qemu_raspi
|
50d71ce87bb39470e6725f7428e4b6b9e1ed0359
|
[
"Apache-2.0"
] | null | null | null |
qemu-4.2.0/scripts/tracetool/format/log_stap.py
|
MisaZhu/qemu_raspi
|
50d71ce87bb39470e6725f7428e4b6b9e1ed0359
|
[
"Apache-2.0"
] | null | null | null |
qemu-4.2.0/scripts/tracetool/format/log_stap.py
|
MisaZhu/qemu_raspi
|
50d71ce87bb39470e6725f7428e4b6b9e1ed0359
|
[
"Apache-2.0"
] | 1
|
2020-05-25T09:49:33.000Z
|
2020-05-25T09:49:33.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Generate .stp file that printfs log messages (DTrace with SystemTAP only).
"""
__author__ = "Daniel P. Berrange <berrange@redhat.com>"
__copyright__ = "Copyright (C) 2014-2019, Red Hat, Inc."
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Daniel Berrange"
__email__ = "berrange@redhat.com"
import re
from tracetool import out
from tracetool.backend.dtrace import binary, probeprefix
from tracetool.backend.simple import is_string
from tracetool.format.stap import stap_escape
def global_var_name(name):
return probeprefix().replace(".", "_") + "_" + name
STATE_SKIP = 0
STATE_LITERAL = 1
STATE_MACRO = 2
def c_macro_to_format(macro):
if macro.startswith("PRI"):
return macro[3]
raise Exception("Unhandled macro '%s'" % macro)
def c_fmt_to_stap(fmt):
state = 0
bits = []
literal = ""
macro = ""
escape = 0
for i in range(len(fmt)):
if fmt[i] == '\\':
if escape:
escape = 0
else:
escape = 1
if state != STATE_LITERAL:
raise Exception("Unexpected escape outside string literal")
literal = literal + fmt[i]
elif fmt[i] == '"' and not escape:
if state == STATE_LITERAL:
state = STATE_SKIP
bits.append(literal)
literal = ""
else:
if state == STATE_MACRO:
bits.append(c_macro_to_format(macro))
state = STATE_LITERAL
elif fmt[i] == ' ' or fmt[i] == '\t':
if state == STATE_MACRO:
bits.append(c_macro_to_format(macro))
macro = ""
state = STATE_SKIP
elif state == STATE_LITERAL:
literal = literal + fmt[i]
else:
escape = 0
if state == STATE_SKIP:
state = STATE_MACRO
if state == STATE_LITERAL:
literal = literal + fmt[i]
else:
macro = macro + fmt[i]
if state == STATE_MACRO:
bits.append(c_macro_to_format(macro))
elif state == STATE_LITERAL:
bits.append(literal)
fmt = re.sub("%(\d*)z(x|u|d)", "%\\1\\2", "".join(bits))
return fmt
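# A minimal self-check sketch (a hypothetical helper, not invoked by tracetool): a C format string
# built from quoted literals and PRI macros is folded into a single SystemTap format string, with
# each PRIxNN macro reduced to its conversion character.
def _example_c_fmt_to_stap():
    assert c_fmt_to_stap('"rc %d bytes %" PRIu64') == "rc %d bytes %u"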
def generate(events, backend, group):
out('/* This file is autogenerated by tracetool, do not edit. */',
'')
for event_id, e in enumerate(events):
if 'disable' in e.properties:
continue
out('probe %(probeprefix)s.log.%(name)s = %(probeprefix)s.%(name)s ?',
'{',
probeprefix=probeprefix(),
name=e.name)
# Get references to userspace strings
for type_, name in e.args:
name = stap_escape(name)
if is_string(type_):
out(' try {',
' arg%(name)s_str = %(name)s ? ' +
'user_string_n(%(name)s, 512) : "<null>"',
' } catch {}',
name=name)
# Determine systemtap's view of variable names
fields = ["pid()", "gettimeofday_ns()"]
for type_, name in e.args:
name = stap_escape(name)
if is_string(type_):
fields.append("arg" + name + "_str")
else:
fields.append(name)
# Emit the entire record in a single SystemTap printf()
arg_str = ', '.join(arg for arg in fields)
fmt_str = "%d@%d " + e.name + " " + c_fmt_to_stap(e.fmt) + "\\n"
out(' printf("%(fmt_str)s", %(arg_str)s)',
fmt_str=fmt_str, arg_str=arg_str)
out('}')
out()
| 29.976
| 78
| 0.526021
|
bec6966e7cb02cd8553d87ed49fded3fdc7ab65e
| 4,537
|
py
|
Python
|
tensorflow/python/autograph/utils/testing.py
|
EricLi404/tensorflow
|
23759800d89f7b5362c338d9a3fd72a6810c3e22
|
[
"Apache-2.0"
] | 27
|
2019-01-02T09:36:57.000Z
|
2022-02-21T06:41:51.000Z
|
tensorflow/python/autograph/utils/testing.py
|
EricLi404/tensorflow
|
23759800d89f7b5362c338d9a3fd72a6810c3e22
|
[
"Apache-2.0"
] | 3
|
2019-01-23T11:01:22.000Z
|
2022-02-24T02:53:31.000Z
|
tensorflow/python/autograph/utils/testing.py
|
EricLi404/tensorflow
|
23759800d89f7b5362c338d9a3fd72a6810c3e22
|
[
"Apache-2.0"
] | 11
|
2019-03-02T12:42:23.000Z
|
2021-02-04T12:20:10.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Testing utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import types
import unittest
from tensorflow.python.eager import def_function
from tensorflow.python.framework import op_callbacks
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class AutoGraphTestCase(test.TestCase):
"""Tests specialized for AutoGraph, which run as tf.functions.
These tests use a staged programming-like approach: most of the test code runs
as-is inside a tf.function, but the assertions are lifted outside the
function, and run with the corresponding function values instead.
For example, the test:
def test_foo(self):
baz = bar();
self.assertEqual(baz, value)
is equivalent to writing:
def test_foo(self):
@tf.function
def test_fn():
baz = bar();
return baz, value
baz_actual, value_actual = test_fn()
self.assertEqual(baz_actual, value_actual)
Only assertions that require evaluation outside the function are lifted
outside the function scope. The rest execute inline, at function creation
time.
"""
def __new__(cls, *args):
obj = super().__new__(cls)
for name in cls.__dict__:
if not name.startswith(unittest.TestLoader.testMethodPrefix):
continue
m = getattr(obj, name)
if callable(m):
wrapper = obj._run_as_tf_function(m)
setattr(obj, name, types.MethodType(wrapper, obj))
return obj
def _op_callback(
self, op_type, inputs, attrs, outputs, op_name=None, graph=None):
self.trace_log.append(op_type)
def _run_as_tf_function(self, fn):
def wrapper(self):
@def_function.function(autograph=False) # Testing autograph itself.
def fn_wrapper():
self.assertions = []
self.graph_assertions = []
self.trace_log = []
fn()
targets = [args for _, args in self.assertions]
return targets
tensors = fn_wrapper()
for assertion in self.graph_assertions:
assertion(fn_wrapper.get_concrete_function().graph)
actuals = self.evaluate(tensors)
for (assertion, _), values in zip(self.assertions, actuals):
assertion(*values)
return wrapper
def variable(self, name, value, dtype):
with ops.init_scope():
if name not in self.variables:
self.variables[name] = variables.Variable(value, dtype=dtype)
self.evaluate(self.variables[name].initializer)
return self.variables[name]
def setUp(self):
super().setUp()
self.variables = {}
self.trace_log = []
op_callbacks.add_op_callback(self._op_callback)
def tearDown(self):
op_callbacks.remove_op_callback(self._op_callback)
self.trace_log = None
self.variables = None
super().tearDown()
def assertGraphContains(self, op_regex, n):
def assertion(graph):
matches = []
for node in graph.as_graph_def().node:
if re.match(op_regex, node.name):
matches.append(node)
for fn in graph.as_graph_def().library.function:
for node_def in fn.node_def:
if re.match(op_regex, node_def.name):
matches.append(node_def)
self.assertLen(matches, n)
self.graph_assertions.append(assertion)
def assertOpCreated(self, op_type):
self.assertIn(op_type, self.trace_log)
def assertOpsNotCreated(self, op_types):
self.assertEmpty(set(op_types) & set(self.trace_log))
def assertNoOpsCreated(self):
self.assertEmpty(self.trace_log)
def assertEqual(self, *args):
self.assertions.append((super().assertEqual, list(args)))
def assertDictEqual(self, *args):
self.assertions.append((super().assertDictEqual, list(args)))
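# Sketch of how a concrete test might use AutoGraphTestCase. This is
# illustrative only; the test name, the values and the dtypes import are
# assumptions, not part of this module:
#
#   from tensorflow.python.framework import dtypes
#
#   class ExampleTest(AutoGraphTestCase):
#
#     def test_variable_addition(self):
#       v = self.variable('v', 40, dtypes.int32)
#       # Recorded while tracing, evaluated after the wrapping tf.function runs.
#       self.assertEqual(v + 2, 42)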
| 30.655405 | 80 | 0.689222 |
d6cb2a0c182c362ec2b62830dca406624cffba3d | 78 | py | Python | agent/python/perper/application/__init__.py | aph5nt/perper | ca3d63e0d0dd21b950f4ba038f16fd8e3b1ab566 | ["MIT"] | null | null | null | agent/python/perper/application/__init__.py | aph5nt/perper | ca3d63e0d0dd21b950f4ba038f16fd8e3b1ab566 | ["MIT"] | null | null | null | agent/python/perper/application/__init__.py | aph5nt/perper | ca3d63e0d0dd21b950f4ba038f16fd8e3b1ab566 | ["MIT"] | null | null | null |
from .startup import run, run_notebook
from .context import register_delegate
| 26 | 38 | 0.846154 |
ea3d33d995e67df3bd1f6e450123c15bf432870c | 3,685 | py | Python | plugins/BS440mqtt.py | meggiman/BS440 | d9d195c983f1a12fed3d95e91a24638c2eddb04e | ["MIT"] | null | null | null | plugins/BS440mqtt.py | meggiman/BS440 | d9d195c983f1a12fed3d95e91a24638c2eddb04e | ["MIT"] | null | null | null | plugins/BS440mqtt.py | meggiman/BS440 | d9d195c983f1a12fed3d95e91a24638c2eddb04e | ["MIT"] | null | null | null |
# coding: utf-8
# -----------------------------------------------------------------------------------------
# BS440 plugin BS440mqtt.py
# About:
# Send collected data via MQTT (e.g. to Home Assistant)
#
# The corresponding configuration for Home Assistant looks like:
# sensor:
# - platform: mqtt
# state_topic: "bs440/person1/"
# name: "Weight Person 1"
# unit_of_measurement: "kg"
# value_template: '{{ value_json.weight }}'
# - platform: mqtt
# state_topic: "bs440/person1/"
# name: "Body Water Person 1"
# unit_of_measurement: "%"
# value_template: '{{ value_json.tbw }}'
# - platform: mqtt
# state_topic: "bs440/person1/"
# name: "Body fat Person 1"
# unit_of_measurement: "%"
# value_template: '{{ value_json.fat }}'
# - platform: mqtt
# state_topic: "bs440/person1/"
# name: "Muscle Mass Person 1"
# unit_of_measurement: "%"
# value_template: '{{ value_json.muscle }}'
# - platform: mqtt
# state_topic: "bs440/person1/"
# name: "Bone Mass Person 1"
# unit_of_measurement: "kg"
# value_template: '{{ value_json.bone }}'
import logging
import os
import json
import ssl
from configparser import ConfigParser
import paho.mqtt.publish as publish
__author__ = 'jinnerbichler'
__email__ = "j.innerbichler@gmail.com"
__license__ = "EUPL-1.1"
__version__ = "0.0.1"
__status__ = "Development"
# ------------------------------------------------------------------------------------------
logger = logging.getLogger(__name__)
class Plugin:
def __init__(self):
""" Reads config file """
logger.info('Initialising plugin: ' + __name__)
# read ini file from same location as plugin resides, named [pluginname].ini
configfile = os.path.dirname(os.path.realpath(__file__)) + '/' + __name__ + '.ini'
plugin_config = ConfigParser()
plugin_config.read(configfile)
logger.info('Read config from: ' + configfile)
# create configuration arguments for MQTT client
mqtt_config = dict(plugin_config.items('MQTT'))
self.mqtt_args = {'client_id': mqtt_config['client_id'],
'hostname': mqtt_config['hostname'],
'port': int(mqtt_config['port']),
'retain': True}
tls = {}
if 'tls_cert' in mqtt_config:
tls['ca_certs'] = mqtt_config['tls_cert']
if 'tls_version' in mqtt_config:
tls['tls_version'] = ssl.__getattribute__(mqtt_config['tls_version'])
if len(tls) > 0:
self.mqtt_args['tls'] = tls
if 'username' in mqtt_config:
self.mqtt_args['auth'] = {'username': mqtt_config['username'],
'password': mqtt_config['password']}
publish.single(topic='bs440/init/', payload='BS440 initialised', **self.mqtt_args)
def execute(self, globalconfig, persondata, weightdata, bodydata):
""" Publishes weight and body data """
if not persondata or not weightdata or not bodydata:
logger.error('Invalid data...')
return
person_id = str(persondata[0]['person'])
# construct payload
model = globalconfig.get('Scale', 'device_model')
payload = dict(weightdata[0])
payload.update(bodydata[0])
payload.update(persondata[0])
payload['model'] = model
logger.info('Publishing data of person {}'.format(person_id))
publish.single(topic='bs440/person{}/'.format(person_id),
payload=json.dumps(payload),
**self.mqtt_args)
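# The [MQTT] section consumed by __init__ above (read from BS440mqtt.ini next
# to this plugin) is expected to look roughly like the sketch below. Key names
# come from the code; the host, port and credential values are placeholders:
#
#   [MQTT]
#   client_id = bs440
#   hostname = localhost
#   port = 1883
#   # optional:
#   # username = mqtt_user
#   # password = secret
#   # tls_cert = /path/to/ca.crt
#   # tls_version = PROTOCOL_TLSv1_2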
| 34.439252 | 92 | 0.573406 |
f311876acdeb3cd09cf748547850f28442d9d614 | 112,135 | py | Python | lib-python/3/test/test_asyncio/test_tasks.py | hollmmax/zig | d80baa5a5fcbc82b3e2294b398edc20a98737a52 | ["MIT"] | null | null | null | lib-python/3/test/test_asyncio/test_tasks.py | hollmmax/zig | d80baa5a5fcbc82b3e2294b398edc20a98737a52 | ["MIT"] | null | null | null | lib-python/3/test/test_asyncio/test_tasks.py | hollmmax/zig | d80baa5a5fcbc82b3e2294b398edc20a98737a52 | ["MIT"] | null | null | null |
"""Tests for tasks.py."""
import collections
import contextlib
import contextvars
import functools
import gc
import io
import random
import re
import sys
import types
import textwrap
import traceback
import unittest
import weakref
from unittest import mock
from types import GenericAlias
import asyncio
from asyncio import coroutines
from asyncio import futures
from asyncio import tasks
from test.test_asyncio import utils as test_utils
from test import support
from test.support.script_helper import assert_python_ok
def tearDownModule():
asyncio.set_event_loop_policy(None)
async def coroutine_function():
pass
@contextlib.contextmanager
def set_coroutine_debug(enabled):
coroutines = asyncio.coroutines
old_debug = coroutines._DEBUG
try:
coroutines._DEBUG = enabled
yield
finally:
coroutines._DEBUG = old_debug
def format_coroutine(qualname, state, src, source_traceback, generator=False):
if generator:
state = '%s' % state
else:
state = '%s, defined' % state
if source_traceback is not None:
frame = source_traceback[-1]
return ('coro=<%s() %s at %s> created at %s:%s'
% (qualname, state, src, frame[0], frame[1]))
else:
return 'coro=<%s() %s at %s>' % (qualname, state, src)
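# The two shapes produced, for reference:
#   format_coroutine('foo', 'running', 'f.py:10', None, generator=True)
#       -> "coro=<foo() running at f.py:10>"
#   format_coroutine('foo', 'running', 'f.py:10', None)
#       -> "coro=<foo() running, defined at f.py:10>"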
def get_innermost_context(exc):
"""
Return information about the innermost exception context in the chain.
"""
depth = 0
while True:
context = exc.__context__
if context is None:
break
exc = context
depth += 1
return (type(exc), exc.args, depth)
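# For example (values are illustrative):
#
#   try:
#       try:
#           raise KeyError('a')
#       except KeyError:
#           raise ValueError('b')
#   except ValueError as exc:
#       get_innermost_context(exc)  # -> (KeyError, ('a',), 1)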
class Dummy:
def __repr__(self):
return '<Dummy>'
def __call__(self, *args):
pass
class CoroLikeObject:
def send(self, v):
raise StopIteration(42)
def throw(self, *exc):
pass
def close(self):
pass
def __await__(self):
return self
class BaseTaskTests:
Task = None
Future = None
def new_task(self, loop, coro, name='TestTask'):
return self.__class__.Task(coro, loop=loop, name=name)
def new_future(self, loop):
return self.__class__.Future(loop=loop)
def setUp(self):
super().setUp()
self.loop = self.new_test_loop()
self.loop.set_task_factory(self.new_task)
self.loop.create_future = lambda: self.new_future(self.loop)
def test_generic_alias(self):
task = self.__class__.Task[str]
self.assertEqual(task.__args__, (str,))
self.assertIsInstance(task, GenericAlias)
def test_task_cancel_message_getter(self):
async def coro():
pass
t = self.new_task(self.loop, coro())
self.assertTrue(hasattr(t, '_cancel_message'))
self.assertEqual(t._cancel_message, None)
t.cancel('my message')
self.assertEqual(t._cancel_message, 'my message')
with self.assertRaises(asyncio.CancelledError):
self.loop.run_until_complete(t)
def test_task_cancel_message_setter(self):
async def coro():
pass
t = self.new_task(self.loop, coro())
t.cancel('my message')
t._cancel_message = 'my new message'
self.assertEqual(t._cancel_message, 'my new message')
with self.assertRaises(asyncio.CancelledError):
self.loop.run_until_complete(t)
def test_task_del_collect(self):
class Evil:
def __del__(self):
gc.collect()
async def run():
return Evil()
self.loop.run_until_complete(
asyncio.gather(*[
self.new_task(self.loop, run()) for _ in range(100)
]))
def test_other_loop_future(self):
other_loop = asyncio.new_event_loop()
fut = self.new_future(other_loop)
async def run(fut):
await fut
try:
with self.assertRaisesRegex(RuntimeError,
r'Task .* got Future .* attached'):
self.loop.run_until_complete(run(fut))
finally:
other_loop.close()
def test_task_awaits_on_itself(self):
async def test():
await task
task = asyncio.ensure_future(test(), loop=self.loop)
with self.assertRaisesRegex(RuntimeError,
'Task cannot await on itself'):
self.loop.run_until_complete(task)
def test_task_class(self):
async def notmuch():
return 'ok'
t = self.new_task(self.loop, notmuch())
self.loop.run_until_complete(t)
self.assertTrue(t.done())
self.assertEqual(t.result(), 'ok')
self.assertIs(t._loop, self.loop)
self.assertIs(t.get_loop(), self.loop)
loop = asyncio.new_event_loop()
self.set_event_loop(loop)
t = self.new_task(loop, notmuch())
self.assertIs(t._loop, loop)
loop.run_until_complete(t)
loop.close()
def test_ensure_future_coroutine(self):
with self.assertWarns(DeprecationWarning):
@asyncio.coroutine
def notmuch():
return 'ok'
t = asyncio.ensure_future(notmuch(), loop=self.loop)
self.loop.run_until_complete(t)
self.assertTrue(t.done())
self.assertEqual(t.result(), 'ok')
self.assertIs(t._loop, self.loop)
loop = asyncio.new_event_loop()
self.set_event_loop(loop)
t = asyncio.ensure_future(notmuch(), loop=loop)
self.assertIs(t._loop, loop)
loop.run_until_complete(t)
loop.close()
def test_ensure_future_future(self):
f_orig = self.new_future(self.loop)
f_orig.set_result('ko')
f = asyncio.ensure_future(f_orig)
self.loop.run_until_complete(f)
self.assertTrue(f.done())
self.assertEqual(f.result(), 'ko')
self.assertIs(f, f_orig)
loop = asyncio.new_event_loop()
self.set_event_loop(loop)
with self.assertRaises(ValueError):
f = asyncio.ensure_future(f_orig, loop=loop)
loop.close()
f = asyncio.ensure_future(f_orig, loop=self.loop)
self.assertIs(f, f_orig)
def test_ensure_future_task(self):
async def notmuch():
return 'ok'
t_orig = self.new_task(self.loop, notmuch())
t = asyncio.ensure_future(t_orig)
self.loop.run_until_complete(t)
self.assertTrue(t.done())
self.assertEqual(t.result(), 'ok')
self.assertIs(t, t_orig)
loop = asyncio.new_event_loop()
self.set_event_loop(loop)
with self.assertRaises(ValueError):
t = asyncio.ensure_future(t_orig, loop=loop)
loop.close()
t = asyncio.ensure_future(t_orig, loop=self.loop)
self.assertIs(t, t_orig)
def test_ensure_future_awaitable(self):
class Aw:
def __init__(self, coro):
self.coro = coro
def __await__(self):
return (yield from self.coro)
with self.assertWarns(DeprecationWarning):
@asyncio.coroutine
def coro():
return 'ok'
loop = asyncio.new_event_loop()
self.set_event_loop(loop)
fut = asyncio.ensure_future(Aw(coro()), loop=loop)
loop.run_until_complete(fut)
assert fut.result() == 'ok'
def test_ensure_future_neither(self):
with self.assertRaises(TypeError):
asyncio.ensure_future('ok')
def test_ensure_future_error_msg(self):
loop = asyncio.new_event_loop()
f = self.new_future(self.loop)
with self.assertRaisesRegex(ValueError, 'The future belongs to a '
'different loop than the one specified as '
'the loop argument'):
asyncio.ensure_future(f, loop=loop)
loop.close()
def test_get_stack(self):
T = None
async def foo():
await bar()
async def bar():
# test get_stack()
f = T.get_stack(limit=1)
try:
self.assertEqual(f[0].f_code.co_name, 'foo')
finally:
f = None
# test print_stack()
file = io.StringIO()
T.print_stack(limit=1, file=file)
file.seek(0)
tb = file.read()
self.assertRegex(tb, r'foo\(\) running')
async def runner():
nonlocal T
T = asyncio.ensure_future(foo(), loop=self.loop)
await T
self.loop.run_until_complete(runner())
def test_task_repr(self):
self.loop.set_debug(False)
async def notmuch():
return 'abc'
# test coroutine function
self.assertEqual(notmuch.__name__, 'notmuch')
self.assertRegex(notmuch.__qualname__,
r'\w+.test_task_repr.<locals>.notmuch')
self.assertEqual(notmuch.__module__, __name__)
filename, lineno = test_utils.get_function_source(notmuch)
src = "%s:%s" % (filename, lineno)
# test coroutine object
gen = notmuch()
coro_qualname = 'BaseTaskTests.test_task_repr.<locals>.notmuch'
self.assertEqual(gen.__name__, 'notmuch')
self.assertEqual(gen.__qualname__, coro_qualname)
# test pending Task
t = self.new_task(self.loop, gen)
t.add_done_callback(Dummy())
coro = format_coroutine(coro_qualname, 'running', src,
t._source_traceback, generator=True)
self.assertEqual(repr(t),
"<Task pending name='TestTask' %s cb=[<Dummy>()]>" % coro)
# test cancelling Task
t.cancel() # Does not take immediate effect!
self.assertEqual(repr(t),
"<Task cancelling name='TestTask' %s cb=[<Dummy>()]>" % coro)
# test cancelled Task
self.assertRaises(asyncio.CancelledError,
self.loop.run_until_complete, t)
coro = format_coroutine(coro_qualname, 'done', src,
t._source_traceback)
self.assertEqual(repr(t),
"<Task cancelled name='TestTask' %s>" % coro)
# test finished Task
t = self.new_task(self.loop, notmuch())
self.loop.run_until_complete(t)
coro = format_coroutine(coro_qualname, 'done', src,
t._source_traceback)
self.assertEqual(repr(t),
"<Task finished name='TestTask' %s result='abc'>" % coro)
def test_task_repr_autogenerated(self):
async def notmuch():
return 123
t1 = self.new_task(self.loop, notmuch(), None)
t2 = self.new_task(self.loop, notmuch(), None)
self.assertNotEqual(repr(t1), repr(t2))
match1 = re.match(r"^<Task pending name='Task-(\d+)'", repr(t1))
self.assertIsNotNone(match1)
match2 = re.match(r"^<Task pending name='Task-(\d+)'", repr(t2))
self.assertIsNotNone(match2)
# Autogenerated task names should have monotonically increasing numbers
self.assertLess(int(match1.group(1)), int(match2.group(1)))
self.loop.run_until_complete(t1)
self.loop.run_until_complete(t2)
def test_task_repr_name_not_str(self):
async def notmuch():
return 123
t = self.new_task(self.loop, notmuch())
t.set_name({6})
self.assertEqual(t.get_name(), '{6}')
self.loop.run_until_complete(t)
def test_task_repr_coro_decorator(self):
self.loop.set_debug(False)
with self.assertWarns(DeprecationWarning):
@asyncio.coroutine
def notmuch():
# notmuch() function doesn't use yield from: it will be wrapped by
# @coroutine decorator
return 123
# test coroutine function
self.assertEqual(notmuch.__name__, 'notmuch')
self.assertRegex(notmuch.__qualname__,
r'\w+.test_task_repr_coro_decorator'
r'\.<locals>\.notmuch')
self.assertEqual(notmuch.__module__, __name__)
# test coroutine object
gen = notmuch()
# On Python >= 3.5, generators now inherit the name of the
# function, as expected, and have a qualified name (__qualname__
# attribute).
coro_name = 'notmuch'
coro_qualname = ('BaseTaskTests.test_task_repr_coro_decorator'
'.<locals>.notmuch')
self.assertEqual(gen.__name__, coro_name)
self.assertEqual(gen.__qualname__, coro_qualname)
# test repr(CoroWrapper)
if coroutines._DEBUG:
# format the coroutine object
if coroutines._DEBUG:
filename, lineno = test_utils.get_function_source(notmuch)
frame = gen._source_traceback[-1]
coro = ('%s() running, defined at %s:%s, created at %s:%s'
% (coro_qualname, filename, lineno,
frame[0], frame[1]))
else:
code = gen.gi_code
coro = ('%s() running at %s:%s'
% (coro_qualname, code.co_filename,
code.co_firstlineno))
self.assertEqual(repr(gen), '<CoroWrapper %s>' % coro)
# test pending Task
t = self.new_task(self.loop, gen)
t.add_done_callback(Dummy())
# format the coroutine object
if coroutines._DEBUG:
src = '%s:%s' % test_utils.get_function_source(notmuch)
else:
code = gen.gi_code
src = '%s:%s' % (code.co_filename, code.co_firstlineno)
coro = format_coroutine(coro_qualname, 'running', src,
t._source_traceback,
generator=not coroutines._DEBUG)
self.assertEqual(repr(t),
"<Task pending name='TestTask' %s cb=[<Dummy>()]>" % coro)
self.loop.run_until_complete(t)
def test_task_repr_wait_for(self):
self.loop.set_debug(False)
async def wait_for(fut):
return await fut
fut = self.new_future(self.loop)
task = self.new_task(self.loop, wait_for(fut))
test_utils.run_briefly(self.loop)
self.assertRegex(repr(task),
'<Task .* wait_for=%s>' % re.escape(repr(fut)))
fut.set_result(None)
self.loop.run_until_complete(task)
def test_task_repr_partial_corowrapper(self):
# Issue #222: repr(CoroWrapper) must not fail in debug mode if the
# coroutine is a partial function
with set_coroutine_debug(True):
self.loop.set_debug(True)
async def func(x, y):
await asyncio.sleep(0)
with self.assertWarns(DeprecationWarning):
partial_func = asyncio.coroutine(functools.partial(func, 1))
task = self.loop.create_task(partial_func(2))
# make warnings quiet
task._log_destroy_pending = False
self.addCleanup(task._coro.close)
coro_repr = repr(task._coro)
expected = (
r'<coroutine object \w+\.test_task_repr_partial_corowrapper'
r'\.<locals>\.func at'
)
self.assertRegex(coro_repr, expected)
def test_task_basics(self):
async def outer():
a = await inner1()
b = await inner2()
return a+b
async def inner1():
return 42
async def inner2():
return 1000
t = outer()
self.assertEqual(self.loop.run_until_complete(t), 1042)
def test_exception_chaining_after_await(self):
# Test that when awaiting on a task when an exception is already
# active, if the task raises an exception it will be chained
# with the original.
loop = asyncio.new_event_loop()
self.set_event_loop(loop)
async def raise_error():
raise ValueError
async def run():
try:
raise KeyError(3)
except Exception as exc:
task = self.new_task(loop, raise_error())
try:
await task
except Exception as exc:
self.assertEqual(type(exc), ValueError)
chained = exc.__context__
self.assertEqual((type(chained), chained.args),
(KeyError, (3,)))
try:
task = self.new_task(loop, run())
loop.run_until_complete(task)
finally:
loop.close()
def test_exception_chaining_after_await_with_context_cycle(self):
# Check trying to create an exception context cycle:
# https://bugs.python.org/issue40696
has_cycle = None
loop = asyncio.new_event_loop()
self.set_event_loop(loop)
async def process_exc(exc):
raise exc
async def run():
nonlocal has_cycle
try:
raise KeyError('a')
except Exception as exc:
task = self.new_task(loop, process_exc(exc))
try:
await task
except BaseException as exc:
has_cycle = (exc is exc.__context__)
# Prevent a hang if has_cycle is True.
exc.__context__ = None
try:
task = self.new_task(loop, run())
loop.run_until_complete(task)
finally:
loop.close()
# This also distinguishes from the initial has_cycle=None.
self.assertEqual(has_cycle, False)
def test_cancel(self):
def gen():
when = yield
self.assertAlmostEqual(10.0, when)
yield 0
loop = self.new_test_loop(gen)
async def task():
await asyncio.sleep(10.0)
return 12
t = self.new_task(loop, task())
loop.call_soon(t.cancel)
with self.assertRaises(asyncio.CancelledError):
loop.run_until_complete(t)
self.assertTrue(t.done())
self.assertTrue(t.cancelled())
self.assertFalse(t.cancel())
def test_cancel_with_message_then_future_result(self):
# Test Future.result() after calling cancel() with a message.
cases = [
((), ()),
((None,), ()),
(('my message',), ('my message',)),
# Non-string values should roundtrip.
((5,), (5,)),
]
for cancel_args, expected_args in cases:
with self.subTest(cancel_args=cancel_args):
loop = asyncio.new_event_loop()
self.set_event_loop(loop)
async def sleep():
await asyncio.sleep(10)
async def coro():
task = self.new_task(loop, sleep())
await asyncio.sleep(0)
task.cancel(*cancel_args)
done, pending = await asyncio.wait([task])
task.result()
task = self.new_task(loop, coro())
with self.assertRaises(asyncio.CancelledError) as cm:
loop.run_until_complete(task)
exc = cm.exception
self.assertEqual(exc.args, ())
actual = get_innermost_context(exc)
self.assertEqual(actual,
(asyncio.CancelledError, expected_args, 2))
def test_cancel_with_message_then_future_exception(self):
# Test Future.exception() after calling cancel() with a message.
cases = [
((), ()),
((None,), ()),
(('my message',), ('my message',)),
# Non-string values should roundtrip.
((5,), (5,)),
]
for cancel_args, expected_args in cases:
with self.subTest(cancel_args=cancel_args):
loop = asyncio.new_event_loop()
self.set_event_loop(loop)
async def sleep():
await asyncio.sleep(10)
async def coro():
task = self.new_task(loop, sleep())
await asyncio.sleep(0)
task.cancel(*cancel_args)
done, pending = await asyncio.wait([task])
task.exception()
task = self.new_task(loop, coro())
with self.assertRaises(asyncio.CancelledError) as cm:
loop.run_until_complete(task)
exc = cm.exception
self.assertEqual(exc.args, ())
actual = get_innermost_context(exc)
self.assertEqual(actual,
(asyncio.CancelledError, expected_args, 2))
def test_cancel_with_message_before_starting_task(self):
loop = asyncio.new_event_loop()
self.set_event_loop(loop)
async def sleep():
await asyncio.sleep(10)
async def coro():
task = self.new_task(loop, sleep())
# We deliberately leave out the sleep here.
task.cancel('my message')
done, pending = await asyncio.wait([task])
task.exception()
task = self.new_task(loop, coro())
with self.assertRaises(asyncio.CancelledError) as cm:
loop.run_until_complete(task)
exc = cm.exception
self.assertEqual(exc.args, ())
actual = get_innermost_context(exc)
self.assertEqual(actual,
(asyncio.CancelledError, ('my message',), 2))
def test_cancel_yield(self):
with self.assertWarns(DeprecationWarning):
@asyncio.coroutine
def task():
yield
yield
return 12
t = self.new_task(self.loop, task())
test_utils.run_briefly(self.loop) # start coro
t.cancel()
self.assertRaises(
asyncio.CancelledError, self.loop.run_until_complete, t)
self.assertTrue(t.done())
self.assertTrue(t.cancelled())
self.assertFalse(t.cancel())
def test_cancel_inner_future(self):
f = self.new_future(self.loop)
async def task():
await f
return 12
t = self.new_task(self.loop, task())
test_utils.run_briefly(self.loop) # start task
f.cancel()
with self.assertRaises(asyncio.CancelledError):
self.loop.run_until_complete(t)
self.assertTrue(f.cancelled())
self.assertTrue(t.cancelled())
def test_cancel_both_task_and_inner_future(self):
f = self.new_future(self.loop)
async def task():
await f
return 12
t = self.new_task(self.loop, task())
test_utils.run_briefly(self.loop)
f.cancel()
t.cancel()
with self.assertRaises(asyncio.CancelledError):
self.loop.run_until_complete(t)
self.assertTrue(t.done())
self.assertTrue(f.cancelled())
self.assertTrue(t.cancelled())
def test_cancel_task_catching(self):
fut1 = self.new_future(self.loop)
fut2 = self.new_future(self.loop)
async def task():
await fut1
try:
await fut2
except asyncio.CancelledError:
return 42
t = self.new_task(self.loop, task())
test_utils.run_briefly(self.loop)
self.assertIs(t._fut_waiter, fut1) # White-box test.
fut1.set_result(None)
test_utils.run_briefly(self.loop)
self.assertIs(t._fut_waiter, fut2) # White-box test.
t.cancel()
self.assertTrue(fut2.cancelled())
res = self.loop.run_until_complete(t)
self.assertEqual(res, 42)
self.assertFalse(t.cancelled())
def test_cancel_task_ignoring(self):
fut1 = self.new_future(self.loop)
fut2 = self.new_future(self.loop)
fut3 = self.new_future(self.loop)
async def task():
await fut1
try:
await fut2
except asyncio.CancelledError:
pass
res = await fut3
return res
t = self.new_task(self.loop, task())
test_utils.run_briefly(self.loop)
self.assertIs(t._fut_waiter, fut1) # White-box test.
fut1.set_result(None)
test_utils.run_briefly(self.loop)
self.assertIs(t._fut_waiter, fut2) # White-box test.
t.cancel()
self.assertTrue(fut2.cancelled())
test_utils.run_briefly(self.loop)
self.assertIs(t._fut_waiter, fut3) # White-box test.
fut3.set_result(42)
res = self.loop.run_until_complete(t)
self.assertEqual(res, 42)
self.assertFalse(fut3.cancelled())
self.assertFalse(t.cancelled())
def test_cancel_current_task(self):
loop = asyncio.new_event_loop()
self.set_event_loop(loop)
async def task():
t.cancel()
self.assertTrue(t._must_cancel) # White-box test.
# The sleep should be cancelled immediately.
await asyncio.sleep(100)
return 12
t = self.new_task(loop, task())
self.assertFalse(t.cancelled())
self.assertRaises(
asyncio.CancelledError, loop.run_until_complete, t)
self.assertTrue(t.done())
self.assertTrue(t.cancelled())
self.assertFalse(t._must_cancel) # White-box test.
self.assertFalse(t.cancel())
def test_cancel_at_end(self):
"""coroutine end right after task is cancelled"""
loop = asyncio.new_event_loop()
self.set_event_loop(loop)
async def task():
t.cancel()
self.assertTrue(t._must_cancel) # White-box test.
return 12
t = self.new_task(loop, task())
self.assertFalse(t.cancelled())
self.assertRaises(
asyncio.CancelledError, loop.run_until_complete, t)
self.assertTrue(t.done())
self.assertTrue(t.cancelled())
self.assertFalse(t._must_cancel) # White-box test.
self.assertFalse(t.cancel())
def test_cancel_awaited_task(self):
# This tests for a relatively rare condition when
# a task cancellation is requested for a task which is not
# currently blocked, such as a task cancelling itself.
# In this situation we must ensure that whatever next future
# or task the cancelled task blocks on is cancelled correctly
# as well. See also bpo-34872.
loop = asyncio.new_event_loop()
self.addCleanup(lambda: loop.close())
task = nested_task = None
fut = self.new_future(loop)
async def nested():
await fut
async def coro():
nonlocal nested_task
# Create a sub-task and wait for it to run.
nested_task = self.new_task(loop, nested())
await asyncio.sleep(0)
# Request the current task to be cancelled.
task.cancel()
# Block on the nested task, which should be immediately
# cancelled.
await nested_task
task = self.new_task(loop, coro())
with self.assertRaises(asyncio.CancelledError):
loop.run_until_complete(task)
self.assertTrue(task.cancelled())
self.assertTrue(nested_task.cancelled())
self.assertTrue(fut.cancelled())
def assert_text_contains(self, text, substr):
if substr not in text:
raise RuntimeError(f'text {substr!r} not found in:\n>>>{text}<<<')
def test_cancel_traceback_for_future_result(self):
# When calling Future.result() on a cancelled task, check that the
# line of code that was interrupted is included in the traceback.
loop = asyncio.new_event_loop()
self.set_event_loop(loop)
async def nested():
# This will get cancelled immediately.
await asyncio.sleep(10)
async def coro():
task = self.new_task(loop, nested())
await asyncio.sleep(0)
task.cancel()
await task # search target
task = self.new_task(loop, coro())
try:
loop.run_until_complete(task)
except asyncio.CancelledError:
tb = traceback.format_exc()
self.assert_text_contains(tb, "await asyncio.sleep(10)")
# The intermediate await should also be included.
self.assert_text_contains(tb, "await task # search target")
else:
self.fail('CancelledError did not occur')
def test_cancel_traceback_for_future_exception(self):
# When calling Future.exception() on a cancelled task, check that the
# line of code that was interrupted is included in the traceback.
loop = asyncio.new_event_loop()
self.set_event_loop(loop)
async def nested():
# This will get cancelled immediately.
await asyncio.sleep(10)
async def coro():
task = self.new_task(loop, nested())
await asyncio.sleep(0)
task.cancel()
done, pending = await asyncio.wait([task])
task.exception() # search target
task = self.new_task(loop, coro())
try:
loop.run_until_complete(task)
except asyncio.CancelledError:
tb = traceback.format_exc()
self.assert_text_contains(tb, "await asyncio.sleep(10)")
# The intermediate await should also be included.
self.assert_text_contains(tb,
"task.exception() # search target")
else:
self.fail('CancelledError did not occur')
def test_stop_while_run_in_complete(self):
def gen():
when = yield
self.assertAlmostEqual(0.1, when)
when = yield 0.1
self.assertAlmostEqual(0.2, when)
when = yield 0.1
self.assertAlmostEqual(0.3, when)
yield 0.1
loop = self.new_test_loop(gen)
x = 0
async def task():
nonlocal x
while x < 10:
await asyncio.sleep(0.1)
x += 1
if x == 2:
loop.stop()
t = self.new_task(loop, task())
with self.assertRaises(RuntimeError) as cm:
loop.run_until_complete(t)
self.assertEqual(str(cm.exception),
'Event loop stopped before Future completed.')
self.assertFalse(t.done())
self.assertEqual(x, 2)
self.assertAlmostEqual(0.3, loop.time())
t.cancel()
self.assertRaises(asyncio.CancelledError, loop.run_until_complete, t)
def test_log_traceback(self):
async def coro():
pass
task = self.new_task(self.loop, coro())
with self.assertRaisesRegex(ValueError, 'can only be set to False'):
task._log_traceback = True
self.loop.run_until_complete(task)
def test_wait(self):
def gen():
when = yield
self.assertAlmostEqual(0.1, when)
when = yield 0
self.assertAlmostEqual(0.15, when)
yield 0.15
loop = self.new_test_loop(gen)
a = self.new_task(loop, asyncio.sleep(0.1))
b = self.new_task(loop, asyncio.sleep(0.15))
async def foo():
done, pending = await asyncio.wait([b, a])
self.assertEqual(done, set([a, b]))
self.assertEqual(pending, set())
return 42
res = loop.run_until_complete(self.new_task(loop, foo()))
self.assertEqual(res, 42)
self.assertAlmostEqual(0.15, loop.time())
# Doing it again should take no time and exercise a different path.
res = loop.run_until_complete(self.new_task(loop, foo()))
self.assertAlmostEqual(0.15, loop.time())
self.assertEqual(res, 42)
def test_wait_with_global_loop(self):
def gen():
when = yield
self.assertAlmostEqual(0.01, when)
when = yield 0
self.assertAlmostEqual(0.015, when)
yield 0.015
loop = self.new_test_loop(gen)
a = self.new_task(loop, asyncio.sleep(0.01))
b = self.new_task(loop, asyncio.sleep(0.015))
async def foo():
done, pending = await asyncio.wait([b, a])
self.assertEqual(done, set([a, b]))
self.assertEqual(pending, set())
return 42
asyncio.set_event_loop(loop)
res = loop.run_until_complete(
self.new_task(loop, foo()))
self.assertEqual(res, 42)
def test_wait_duplicate_coroutines(self):
with self.assertWarns(DeprecationWarning):
@asyncio.coroutine
def coro(s):
return s
c = coro('test')
task = self.new_task(
self.loop,
asyncio.wait([c, c, coro('spam')]))
with self.assertWarns(DeprecationWarning):
done, pending = self.loop.run_until_complete(task)
self.assertFalse(pending)
self.assertEqual(set(f.result() for f in done), {'test', 'spam'})
def test_wait_errors(self):
self.assertRaises(
ValueError, self.loop.run_until_complete,
asyncio.wait(set()))
# -1 is an invalid return_when value
sleep_coro = asyncio.sleep(10.0)
wait_coro = asyncio.wait([sleep_coro], return_when=-1)
self.assertRaises(ValueError,
self.loop.run_until_complete, wait_coro)
sleep_coro.close()
def test_wait_first_completed(self):
def gen():
when = yield
self.assertAlmostEqual(10.0, when)
when = yield 0
self.assertAlmostEqual(0.1, when)
yield 0.1
loop = self.new_test_loop(gen)
a = self.new_task(loop, asyncio.sleep(10.0))
b = self.new_task(loop, asyncio.sleep(0.1))
task = self.new_task(
loop,
asyncio.wait([b, a], return_when=asyncio.FIRST_COMPLETED))
done, pending = loop.run_until_complete(task)
self.assertEqual({b}, done)
self.assertEqual({a}, pending)
self.assertFalse(a.done())
self.assertTrue(b.done())
self.assertIsNone(b.result())
self.assertAlmostEqual(0.1, loop.time())
# move forward to close generator
loop.advance_time(10)
loop.run_until_complete(asyncio.wait([a, b]))
def test_wait_really_done(self):
        # there is a possibility that some tasks in the pending list
# became done but their callbacks haven't all been called yet
async def coro1():
await asyncio.sleep(0)
async def coro2():
await asyncio.sleep(0)
await asyncio.sleep(0)
a = self.new_task(self.loop, coro1())
b = self.new_task(self.loop, coro2())
task = self.new_task(
self.loop,
asyncio.wait([b, a], return_when=asyncio.FIRST_COMPLETED))
done, pending = self.loop.run_until_complete(task)
self.assertEqual({a, b}, done)
self.assertTrue(a.done())
self.assertIsNone(a.result())
self.assertTrue(b.done())
self.assertIsNone(b.result())
def test_wait_first_exception(self):
def gen():
when = yield
self.assertAlmostEqual(10.0, when)
yield 0
loop = self.new_test_loop(gen)
# first_exception, task already has exception
a = self.new_task(loop, asyncio.sleep(10.0))
async def exc():
raise ZeroDivisionError('err')
b = self.new_task(loop, exc())
task = self.new_task(
loop,
asyncio.wait([b, a], return_when=asyncio.FIRST_EXCEPTION))
done, pending = loop.run_until_complete(task)
self.assertEqual({b}, done)
self.assertEqual({a}, pending)
self.assertAlmostEqual(0, loop.time())
# move forward to close generator
loop.advance_time(10)
loop.run_until_complete(asyncio.wait([a, b]))
def test_wait_first_exception_in_wait(self):
def gen():
when = yield
self.assertAlmostEqual(10.0, when)
when = yield 0
self.assertAlmostEqual(0.01, when)
yield 0.01
loop = self.new_test_loop(gen)
# first_exception, exception during waiting
a = self.new_task(loop, asyncio.sleep(10.0))
async def exc():
await asyncio.sleep(0.01)
raise ZeroDivisionError('err')
b = self.new_task(loop, exc())
task = asyncio.wait([b, a], return_when=asyncio.FIRST_EXCEPTION)
done, pending = loop.run_until_complete(task)
self.assertEqual({b}, done)
self.assertEqual({a}, pending)
self.assertAlmostEqual(0.01, loop.time())
# move forward to close generator
loop.advance_time(10)
loop.run_until_complete(asyncio.wait([a, b]))
def test_wait_with_exception(self):
def gen():
when = yield
self.assertAlmostEqual(0.1, when)
when = yield 0
self.assertAlmostEqual(0.15, when)
yield 0.15
loop = self.new_test_loop(gen)
a = self.new_task(loop, asyncio.sleep(0.1))
async def sleeper():
await asyncio.sleep(0.15)
raise ZeroDivisionError('really')
b = self.new_task(loop, sleeper())
async def foo():
done, pending = await asyncio.wait([b, a])
self.assertEqual(len(done), 2)
self.assertEqual(pending, set())
errors = set(f for f in done if f.exception() is not None)
self.assertEqual(len(errors), 1)
loop.run_until_complete(self.new_task(loop, foo()))
self.assertAlmostEqual(0.15, loop.time())
loop.run_until_complete(self.new_task(loop, foo()))
self.assertAlmostEqual(0.15, loop.time())
def test_wait_with_timeout(self):
def gen():
when = yield
self.assertAlmostEqual(0.1, when)
when = yield 0
self.assertAlmostEqual(0.15, when)
when = yield 0
self.assertAlmostEqual(0.11, when)
yield 0.11
loop = self.new_test_loop(gen)
a = self.new_task(loop, asyncio.sleep(0.1))
b = self.new_task(loop, asyncio.sleep(0.15))
async def foo():
done, pending = await asyncio.wait([b, a], timeout=0.11)
self.assertEqual(done, set([a]))
self.assertEqual(pending, set([b]))
loop.run_until_complete(self.new_task(loop, foo()))
self.assertAlmostEqual(0.11, loop.time())
# move forward to close generator
loop.advance_time(10)
loop.run_until_complete(asyncio.wait([a, b]))
def test_wait_concurrent_complete(self):
def gen():
when = yield
self.assertAlmostEqual(0.1, when)
when = yield 0
self.assertAlmostEqual(0.15, when)
when = yield 0
self.assertAlmostEqual(0.1, when)
yield 0.1
loop = self.new_test_loop(gen)
a = self.new_task(loop, asyncio.sleep(0.1))
b = self.new_task(loop, asyncio.sleep(0.15))
done, pending = loop.run_until_complete(
asyncio.wait([b, a], timeout=0.1))
self.assertEqual(done, set([a]))
self.assertEqual(pending, set([b]))
self.assertAlmostEqual(0.1, loop.time())
# move forward to close generator
loop.advance_time(10)
loop.run_until_complete(asyncio.wait([a, b]))
def test_wait_with_iterator_of_tasks(self):
def gen():
when = yield
self.assertAlmostEqual(0.1, when)
when = yield 0
self.assertAlmostEqual(0.15, when)
yield 0.15
loop = self.new_test_loop(gen)
a = self.new_task(loop, asyncio.sleep(0.1))
b = self.new_task(loop, asyncio.sleep(0.15))
async def foo():
done, pending = await asyncio.wait(iter([b, a]))
self.assertEqual(done, set([a, b]))
self.assertEqual(pending, set())
return 42
res = loop.run_until_complete(self.new_task(loop, foo()))
self.assertEqual(res, 42)
self.assertAlmostEqual(0.15, loop.time())
def test_as_completed(self):
def gen():
yield 0
yield 0
yield 0.01
yield 0
loop = self.new_test_loop(gen)
# disable "slow callback" warning
loop.slow_callback_duration = 1.0
completed = set()
time_shifted = False
with self.assertWarns(DeprecationWarning):
@asyncio.coroutine
def sleeper(dt, x):
nonlocal time_shifted
yield from asyncio.sleep(dt)
completed.add(x)
if not time_shifted and 'a' in completed and 'b' in completed:
time_shifted = True
loop.advance_time(0.14)
return x
a = sleeper(0.01, 'a')
b = sleeper(0.01, 'b')
c = sleeper(0.15, 'c')
async def foo():
values = []
for f in asyncio.as_completed([b, c, a], loop=loop):
values.append(await f)
return values
with self.assertWarns(DeprecationWarning) as w:
res = loop.run_until_complete(self.new_task(loop, foo()))
self.assertEqual(w.warnings[0].filename, __file__)
self.assertAlmostEqual(0.15, loop.time())
self.assertTrue('a' in res[:2])
self.assertTrue('b' in res[:2])
self.assertEqual(res[2], 'c')
# Doing it again should take no time and exercise a different path.
with self.assertWarns(DeprecationWarning):
res = loop.run_until_complete(self.new_task(loop, foo()))
self.assertAlmostEqual(0.15, loop.time())
def test_as_completed_with_timeout(self):
def gen():
yield
yield 0
yield 0
yield 0.1
loop = self.new_test_loop(gen)
a = loop.create_task(asyncio.sleep(0.1, 'a'))
b = loop.create_task(asyncio.sleep(0.15, 'b'))
async def foo():
values = []
for f in asyncio.as_completed([a, b], timeout=0.12, loop=loop):
if values:
loop.advance_time(0.02)
try:
v = await f
values.append((1, v))
except asyncio.TimeoutError as exc:
values.append((2, exc))
return values
with self.assertWarns(DeprecationWarning):
res = loop.run_until_complete(self.new_task(loop, foo()))
self.assertEqual(len(res), 2, res)
self.assertEqual(res[0], (1, 'a'))
self.assertEqual(res[1][0], 2)
self.assertIsInstance(res[1][1], asyncio.TimeoutError)
self.assertAlmostEqual(0.12, loop.time())
# move forward to close generator
loop.advance_time(10)
loop.run_until_complete(asyncio.wait([a, b]))
def test_as_completed_with_unused_timeout(self):
def gen():
yield
yield 0
yield 0.01
loop = self.new_test_loop(gen)
a = asyncio.sleep(0.01, 'a')
async def foo():
for f in asyncio.as_completed([a], timeout=1, loop=loop):
v = await f
self.assertEqual(v, 'a')
with self.assertWarns(DeprecationWarning):
loop.run_until_complete(self.new_task(loop, foo()))
def test_as_completed_reverse_wait(self):
def gen():
yield 0
yield 0.05
yield 0
loop = self.new_test_loop(gen)
a = asyncio.sleep(0.05, 'a')
b = asyncio.sleep(0.10, 'b')
fs = {a, b}
with self.assertWarns(DeprecationWarning):
futs = list(asyncio.as_completed(fs, loop=loop))
self.assertEqual(len(futs), 2)
x = loop.run_until_complete(futs[1])
self.assertEqual(x, 'a')
self.assertAlmostEqual(0.05, loop.time())
loop.advance_time(0.05)
y = loop.run_until_complete(futs[0])
self.assertEqual(y, 'b')
self.assertAlmostEqual(0.10, loop.time())
def test_as_completed_concurrent(self):
def gen():
when = yield
self.assertAlmostEqual(0.05, when)
when = yield 0
self.assertAlmostEqual(0.05, when)
yield 0.05
loop = self.new_test_loop(gen)
a = asyncio.sleep(0.05, 'a')
b = asyncio.sleep(0.05, 'b')
fs = {a, b}
with self.assertWarns(DeprecationWarning):
futs = list(asyncio.as_completed(fs, loop=loop))
self.assertEqual(len(futs), 2)
waiter = asyncio.wait(futs)
# Deprecation from passing coros in futs to asyncio.wait()
with self.assertWarns(DeprecationWarning):
done, pending = loop.run_until_complete(waiter)
self.assertEqual(set(f.result() for f in done), {'a', 'b'})
def test_as_completed_duplicate_coroutines(self):
with self.assertWarns(DeprecationWarning):
@asyncio.coroutine
def coro(s):
return s
with self.assertWarns(DeprecationWarning):
@asyncio.coroutine
def runner():
result = []
c = coro('ham')
for f in asyncio.as_completed([c, c, coro('spam')],
loop=self.loop):
result.append((yield from f))
return result
with self.assertWarns(DeprecationWarning):
fut = self.new_task(self.loop, runner())
self.loop.run_until_complete(fut)
result = fut.result()
self.assertEqual(set(result), {'ham', 'spam'})
self.assertEqual(len(result), 2)
def test_sleep(self):
def gen():
when = yield
self.assertAlmostEqual(0.05, when)
when = yield 0.05
self.assertAlmostEqual(0.1, when)
yield 0.05
loop = self.new_test_loop(gen)
async def sleeper(dt, arg):
await asyncio.sleep(dt/2)
res = await asyncio.sleep(dt/2, arg)
return res
t = self.new_task(loop, sleeper(0.1, 'yeah'))
loop.run_until_complete(t)
self.assertTrue(t.done())
self.assertEqual(t.result(), 'yeah')
self.assertAlmostEqual(0.1, loop.time())
def test_sleep_cancel(self):
def gen():
when = yield
self.assertAlmostEqual(10.0, when)
yield 0
loop = self.new_test_loop(gen)
t = self.new_task(loop, asyncio.sleep(10.0, 'yeah'))
handle = None
orig_call_later = loop.call_later
def call_later(delay, callback, *args):
nonlocal handle
handle = orig_call_later(delay, callback, *args)
return handle
loop.call_later = call_later
test_utils.run_briefly(loop)
self.assertFalse(handle._cancelled)
t.cancel()
test_utils.run_briefly(loop)
self.assertTrue(handle._cancelled)
def test_task_cancel_sleeping_task(self):
def gen():
when = yield
self.assertAlmostEqual(0.1, when)
when = yield 0
self.assertAlmostEqual(5000, when)
yield 0.1
loop = self.new_test_loop(gen)
async def sleep(dt):
await asyncio.sleep(dt)
async def doit():
sleeper = self.new_task(loop, sleep(5000))
loop.call_later(0.1, sleeper.cancel)
try:
await sleeper
except asyncio.CancelledError:
return 'cancelled'
else:
return 'slept in'
doer = doit()
self.assertEqual(loop.run_until_complete(doer), 'cancelled')
self.assertAlmostEqual(0.1, loop.time())
def test_task_cancel_waiter_future(self):
fut = self.new_future(self.loop)
async def coro():
await fut
task = self.new_task(self.loop, coro())
test_utils.run_briefly(self.loop)
self.assertIs(task._fut_waiter, fut)
task.cancel()
test_utils.run_briefly(self.loop)
self.assertRaises(
asyncio.CancelledError, self.loop.run_until_complete, task)
self.assertIsNone(task._fut_waiter)
self.assertTrue(fut.cancelled())
def test_task_set_methods(self):
async def notmuch():
return 'ko'
gen = notmuch()
task = self.new_task(self.loop, gen)
with self.assertRaisesRegex(RuntimeError, 'not support set_result'):
task.set_result('ok')
with self.assertRaisesRegex(RuntimeError, 'not support set_exception'):
task.set_exception(ValueError())
self.assertEqual(
self.loop.run_until_complete(task),
'ko')
def test_step_result(self):
with self.assertWarns(DeprecationWarning):
@asyncio.coroutine
def notmuch():
yield None
yield 1
return 'ko'
self.assertRaises(
RuntimeError, self.loop.run_until_complete, notmuch())
def test_step_result_future(self):
# If coroutine returns future, task waits on this future.
class Fut(asyncio.Future):
def __init__(self, *args, **kwds):
self.cb_added = False
super().__init__(*args, **kwds)
def add_done_callback(self, *args, **kwargs):
self.cb_added = True
super().add_done_callback(*args, **kwargs)
fut = Fut(loop=self.loop)
result = None
async def wait_for_future():
nonlocal result
result = await fut
t = self.new_task(self.loop, wait_for_future())
test_utils.run_briefly(self.loop)
self.assertTrue(fut.cb_added)
res = object()
fut.set_result(res)
test_utils.run_briefly(self.loop)
self.assertIs(res, result)
self.assertTrue(t.done())
self.assertIsNone(t.result())
def test_baseexception_during_cancel(self):
def gen():
when = yield
self.assertAlmostEqual(10.0, when)
yield 0
loop = self.new_test_loop(gen)
async def sleeper():
await asyncio.sleep(10)
base_exc = SystemExit()
async def notmutch():
try:
await sleeper()
except asyncio.CancelledError:
raise base_exc
task = self.new_task(loop, notmutch())
test_utils.run_briefly(loop)
task.cancel()
self.assertFalse(task.done())
self.assertRaises(SystemExit, test_utils.run_briefly, loop)
self.assertTrue(task.done())
self.assertFalse(task.cancelled())
self.assertIs(task.exception(), base_exc)
def test_iscoroutinefunction(self):
def fn():
pass
self.assertFalse(asyncio.iscoroutinefunction(fn))
def fn1():
yield
self.assertFalse(asyncio.iscoroutinefunction(fn1))
with self.assertWarns(DeprecationWarning):
@asyncio.coroutine
def fn2():
yield
self.assertTrue(asyncio.iscoroutinefunction(fn2))
self.assertFalse(asyncio.iscoroutinefunction(mock.Mock()))
def test_yield_vs_yield_from(self):
fut = self.new_future(self.loop)
with self.assertWarns(DeprecationWarning):
@asyncio.coroutine
def wait_for_future():
yield fut
task = wait_for_future()
with self.assertRaises(RuntimeError):
self.loop.run_until_complete(task)
self.assertFalse(fut.done())
def test_yield_vs_yield_from_generator(self):
with self.assertWarns(DeprecationWarning):
@asyncio.coroutine
def coro():
yield
with self.assertWarns(DeprecationWarning):
@asyncio.coroutine
def wait_for_future():
gen = coro()
try:
yield gen
finally:
gen.close()
task = wait_for_future()
self.assertRaises(
RuntimeError,
self.loop.run_until_complete, task)
def test_coroutine_non_gen_function(self):
with self.assertWarns(DeprecationWarning):
@asyncio.coroutine
def func():
return 'test'
self.assertTrue(asyncio.iscoroutinefunction(func))
coro = func()
self.assertTrue(asyncio.iscoroutine(coro))
res = self.loop.run_until_complete(coro)
self.assertEqual(res, 'test')
def test_coroutine_non_gen_function_return_future(self):
fut = self.new_future(self.loop)
with self.assertWarns(DeprecationWarning):
@asyncio.coroutine
def func():
return fut
async def coro():
fut.set_result('test')
t1 = self.new_task(self.loop, func())
t2 = self.new_task(self.loop, coro())
res = self.loop.run_until_complete(t1)
self.assertEqual(res, 'test')
self.assertIsNone(t2.result())
def test_current_task(self):
self.assertIsNone(asyncio.current_task(loop=self.loop))
async def coro(loop):
self.assertIs(asyncio.current_task(loop=loop), task)
self.assertIs(asyncio.current_task(None), task)
self.assertIs(asyncio.current_task(), task)
task = self.new_task(self.loop, coro(self.loop))
self.loop.run_until_complete(task)
self.assertIsNone(asyncio.current_task(loop=self.loop))
def test_current_task_with_interleaving_tasks(self):
self.assertIsNone(asyncio.current_task(loop=self.loop))
fut1 = self.new_future(self.loop)
fut2 = self.new_future(self.loop)
async def coro1(loop):
self.assertTrue(asyncio.current_task(loop=loop) is task1)
await fut1
self.assertTrue(asyncio.current_task(loop=loop) is task1)
fut2.set_result(True)
async def coro2(loop):
self.assertTrue(asyncio.current_task(loop=loop) is task2)
fut1.set_result(True)
await fut2
self.assertTrue(asyncio.current_task(loop=loop) is task2)
task1 = self.new_task(self.loop, coro1(self.loop))
task2 = self.new_task(self.loop, coro2(self.loop))
self.loop.run_until_complete(asyncio.wait((task1, task2)))
self.assertIsNone(asyncio.current_task(loop=self.loop))
# Some thorough tests for cancellation propagation through
# coroutines, tasks and wait().
def test_yield_future_passes_cancel(self):
# Cancelling outer() cancels inner() cancels waiter.
proof = 0
waiter = self.new_future(self.loop)
async def inner():
nonlocal proof
try:
await waiter
except asyncio.CancelledError:
proof += 1
raise
else:
self.fail('got past sleep() in inner()')
async def outer():
nonlocal proof
try:
await inner()
except asyncio.CancelledError:
proof += 100 # Expect this path.
else:
proof += 10
f = asyncio.ensure_future(outer(), loop=self.loop)
test_utils.run_briefly(self.loop)
f.cancel()
self.loop.run_until_complete(f)
self.assertEqual(proof, 101)
self.assertTrue(waiter.cancelled())
def test_yield_wait_does_not_shield_cancel(self):
# Cancelling outer() makes wait() return early, leaves inner()
# running.
proof = 0
waiter = self.new_future(self.loop)
async def inner():
nonlocal proof
await waiter
proof += 1
async def outer():
nonlocal proof
with self.assertWarns(DeprecationWarning):
d, p = await asyncio.wait([inner()])
proof += 100
f = asyncio.ensure_future(outer(), loop=self.loop)
test_utils.run_briefly(self.loop)
f.cancel()
self.assertRaises(
asyncio.CancelledError, self.loop.run_until_complete, f)
waiter.set_result(None)
test_utils.run_briefly(self.loop)
self.assertEqual(proof, 1)
def test_shield_result(self):
inner = self.new_future(self.loop)
outer = asyncio.shield(inner)
inner.set_result(42)
res = self.loop.run_until_complete(outer)
self.assertEqual(res, 42)
def test_shield_exception(self):
inner = self.new_future(self.loop)
outer = asyncio.shield(inner)
test_utils.run_briefly(self.loop)
exc = RuntimeError('expected')
inner.set_exception(exc)
test_utils.run_briefly(self.loop)
self.assertIs(outer.exception(), exc)
def test_shield_cancel_inner(self):
inner = self.new_future(self.loop)
outer = asyncio.shield(inner)
test_utils.run_briefly(self.loop)
inner.cancel()
test_utils.run_briefly(self.loop)
self.assertTrue(outer.cancelled())
def test_shield_cancel_outer(self):
inner = self.new_future(self.loop)
outer = asyncio.shield(inner)
test_utils.run_briefly(self.loop)
outer.cancel()
test_utils.run_briefly(self.loop)
self.assertTrue(outer.cancelled())
self.assertEqual(0, 0 if outer._callbacks is None else len(outer._callbacks))
def test_shield_shortcut(self):
fut = self.new_future(self.loop)
fut.set_result(42)
res = self.loop.run_until_complete(asyncio.shield(fut))
self.assertEqual(res, 42)
def test_shield_effect(self):
# Cancelling outer() does not affect inner().
proof = 0
waiter = self.new_future(self.loop)
async def inner():
nonlocal proof
await waiter
proof += 1
async def outer():
nonlocal proof
await asyncio.shield(inner())
proof += 100
f = asyncio.ensure_future(outer(), loop=self.loop)
test_utils.run_briefly(self.loop)
f.cancel()
with self.assertRaises(asyncio.CancelledError):
self.loop.run_until_complete(f)
waiter.set_result(None)
test_utils.run_briefly(self.loop)
self.assertEqual(proof, 1)
def test_shield_gather(self):
child1 = self.new_future(self.loop)
child2 = self.new_future(self.loop)
parent = asyncio.gather(child1, child2)
outer = asyncio.shield(parent)
test_utils.run_briefly(self.loop)
outer.cancel()
test_utils.run_briefly(self.loop)
self.assertTrue(outer.cancelled())
child1.set_result(1)
child2.set_result(2)
test_utils.run_briefly(self.loop)
self.assertEqual(parent.result(), [1, 2])
def test_gather_shield(self):
child1 = self.new_future(self.loop)
child2 = self.new_future(self.loop)
inner1 = asyncio.shield(child1)
inner2 = asyncio.shield(child2)
parent = asyncio.gather(inner1, inner2)
test_utils.run_briefly(self.loop)
parent.cancel()
        # This should cancel inner1 and inner2 but not child1 and child2.
test_utils.run_briefly(self.loop)
self.assertIsInstance(parent.exception(), asyncio.CancelledError)
self.assertTrue(inner1.cancelled())
self.assertTrue(inner2.cancelled())
child1.set_result(1)
child2.set_result(2)
test_utils.run_briefly(self.loop)
def test_as_completed_invalid_args(self):
fut = self.new_future(self.loop)
# as_completed() expects a list of futures, not a future instance
self.assertRaises(TypeError, self.loop.run_until_complete,
asyncio.as_completed(fut, loop=self.loop))
coro = coroutine_function()
self.assertRaises(TypeError, self.loop.run_until_complete,
asyncio.as_completed(coro, loop=self.loop))
coro.close()
def test_wait_invalid_args(self):
fut = self.new_future(self.loop)
# wait() expects a list of futures, not a future instance
self.assertRaises(TypeError, self.loop.run_until_complete,
asyncio.wait(fut))
coro = coroutine_function()
self.assertRaises(TypeError, self.loop.run_until_complete,
asyncio.wait(coro))
coro.close()
# wait() expects at least a future
self.assertRaises(ValueError, self.loop.run_until_complete,
asyncio.wait([]))
def test_corowrapper_mocks_generator(self):
def check():
# A function that asserts various things.
# Called twice, with different debug flag values.
with self.assertWarns(DeprecationWarning):
@asyncio.coroutine
def coro():
# The actual coroutine.
self.assertTrue(gen.gi_running)
yield from fut
# A completed Future used to run the coroutine.
fut = self.new_future(self.loop)
fut.set_result(None)
# Call the coroutine.
gen = coro()
# Check some properties.
self.assertTrue(asyncio.iscoroutine(gen))
self.assertIsInstance(gen.gi_frame, types.FrameType)
self.assertFalse(gen.gi_running)
self.assertIsInstance(gen.gi_code, types.CodeType)
# Run it.
self.loop.run_until_complete(gen)
# The frame should have changed.
self.assertIsNone(gen.gi_frame)
# Test with debug flag cleared.
with set_coroutine_debug(False):
check()
# Test with debug flag set.
with set_coroutine_debug(True):
check()
def test_yield_from_corowrapper(self):
with set_coroutine_debug(True):
with self.assertWarns(DeprecationWarning):
@asyncio.coroutine
def t1():
return (yield from t2())
with self.assertWarns(DeprecationWarning):
@asyncio.coroutine
def t2():
f = self.new_future(self.loop)
self.new_task(self.loop, t3(f))
return (yield from f)
with self.assertWarns(DeprecationWarning):
@asyncio.coroutine
def t3(f):
f.set_result((1, 2, 3))
task = self.new_task(self.loop, t1())
val = self.loop.run_until_complete(task)
self.assertEqual(val, (1, 2, 3))
def test_yield_from_corowrapper_send(self):
def foo():
a = yield
return a
def call(arg):
cw = asyncio.coroutines.CoroWrapper(foo())
cw.send(None)
try:
cw.send(arg)
except StopIteration as ex:
return ex.args[0]
else:
raise AssertionError('StopIteration was expected')
self.assertEqual(call((1, 2)), (1, 2))
self.assertEqual(call('spam'), 'spam')
def test_corowrapper_weakref(self):
wd = weakref.WeakValueDictionary()
def foo(): yield from []
cw = asyncio.coroutines.CoroWrapper(foo())
wd['cw'] = cw # Would fail without __weakref__ slot.
cw.gen = None # Suppress warning from __del__.
def test_corowrapper_throw(self):
# Issue 429: CoroWrapper.throw must be compatible with gen.throw
def foo():
value = None
while True:
try:
value = yield value
except Exception as e:
value = e
exception = Exception("foo")
cw = asyncio.coroutines.CoroWrapper(foo())
cw.send(None)
self.assertIs(exception, cw.throw(exception))
cw = asyncio.coroutines.CoroWrapper(foo())
cw.send(None)
self.assertIs(exception, cw.throw(Exception, exception))
cw = asyncio.coroutines.CoroWrapper(foo())
cw.send(None)
exception = cw.throw(Exception, "foo")
self.assertIsInstance(exception, Exception)
self.assertEqual(exception.args, ("foo", ))
cw = asyncio.coroutines.CoroWrapper(foo())
cw.send(None)
exception = cw.throw(Exception, "foo", None)
self.assertIsInstance(exception, Exception)
self.assertEqual(exception.args, ("foo", ))
def test_log_destroyed_pending_task(self):
Task = self.__class__.Task
with self.assertWarns(DeprecationWarning):
@asyncio.coroutine
def kill_me(loop):
future = self.new_future(loop)
yield from future
# at this point, the only reference to kill_me() task is
# the Task._wakeup() method in future._callbacks
raise Exception("code never reached")
mock_handler = mock.Mock()
self.loop.set_debug(True)
self.loop.set_exception_handler(mock_handler)
# schedule the task
coro = kill_me(self.loop)
task = asyncio.ensure_future(coro, loop=self.loop)
self.assertEqual(asyncio.all_tasks(loop=self.loop), {task})
asyncio.set_event_loop(None)
# execute the task so it waits for future
self.loop._run_once()
self.assertEqual(len(self.loop._ready), 0)
# remove the future used in kill_me(), and references to the task
del coro.gi_frame.f_locals['future']
coro = None
source_traceback = task._source_traceback
task = None
# no more reference to kill_me() task: the task is destroyed by the GC
support.gc_collect()
self.assertEqual(asyncio.all_tasks(loop=self.loop), set())
mock_handler.assert_called_with(self.loop, {
'message': 'Task was destroyed but it is pending!',
'task': mock.ANY,
'source_traceback': source_traceback,
})
mock_handler.reset_mock()
@mock.patch('asyncio.base_events.logger')
def test_tb_logger_not_called_after_cancel(self, m_log):
loop = asyncio.new_event_loop()
self.set_event_loop(loop)
async def coro():
raise TypeError
async def runner():
task = self.new_task(loop, coro())
await asyncio.sleep(0.05)
task.cancel()
task = None
loop.run_until_complete(runner())
self.assertFalse(m_log.error.called)
@mock.patch('asyncio.coroutines.logger')
def test_coroutine_never_yielded(self, m_log):
with set_coroutine_debug(True):
with self.assertWarns(DeprecationWarning):
@asyncio.coroutine
def coro_noop():
pass
tb_filename = __file__
tb_lineno = sys._getframe().f_lineno + 2
# create a coroutine object but don't use it
coro_noop()
support.gc_collect()
self.assertTrue(m_log.error.called)
message = m_log.error.call_args[0][0]
func_filename, func_lineno = test_utils.get_function_source(coro_noop)
regex = (r'^<CoroWrapper %s\(?\)? .* at %s:%s, .*> '
r'was never yielded from\n'
r'Coroutine object created at \(most recent call last, truncated to \d+ last lines\):\n'
r'.*\n'
r' File "%s", line %s, in test_coroutine_never_yielded\n'
r' coro_noop\(\)$'
% (re.escape(coro_noop.__qualname__),
re.escape(func_filename), func_lineno,
re.escape(tb_filename), tb_lineno))
self.assertRegex(message, re.compile(regex, re.DOTALL))
def test_return_coroutine_from_coroutine(self):
"""Return of @asyncio.coroutine()-wrapped function generator object
from @asyncio.coroutine()-wrapped function should have same effect as
returning generator object or Future."""
def check():
with self.assertWarns(DeprecationWarning):
@asyncio.coroutine
def outer_coro():
with self.assertWarns(DeprecationWarning):
@asyncio.coroutine
def inner_coro():
return 1
return inner_coro()
result = self.loop.run_until_complete(outer_coro())
self.assertEqual(result, 1)
# Test with debug flag cleared.
with set_coroutine_debug(False):
check()
# Test with debug flag set.
with set_coroutine_debug(True):
check()
def test_task_source_traceback(self):
self.loop.set_debug(True)
task = self.new_task(self.loop, coroutine_function())
lineno = sys._getframe().f_lineno - 1
self.assertIsInstance(task._source_traceback, list)
self.assertEqual(task._source_traceback[-2][:3],
(__file__,
lineno,
'test_task_source_traceback'))
self.loop.run_until_complete(task)
def test_cancel_gather_1(self):
"""Ensure that a gathering future refuses to be cancelled once all
children are done"""
loop = asyncio.new_event_loop()
self.addCleanup(loop.close)
fut = self.new_future(loop)
# The indirection fut->child_coro is needed since otherwise the
# gathering task is done at the same time as the child future
def child_coro():
return (yield from fut)
with self.assertWarns(DeprecationWarning):
gather_future = asyncio.gather(child_coro(), loop=loop)
gather_task = asyncio.ensure_future(gather_future, loop=loop)
cancel_result = None
def cancelling_callback(_):
nonlocal cancel_result
cancel_result = gather_task.cancel()
fut.add_done_callback(cancelling_callback)
fut.set_result(42) # calls the cancelling_callback after fut is done()
# At this point the task should complete.
loop.run_until_complete(gather_task)
# Python issue #26923: asyncio.gather drops cancellation
self.assertEqual(cancel_result, False)
self.assertFalse(gather_task.cancelled())
self.assertEqual(gather_task.result(), [42])
def test_cancel_gather_2(self):
cases = [
((), ()),
((None,), ()),
(('my message',), ('my message',)),
# Non-string values should roundtrip.
((5,), (5,)),
]
for cancel_args, expected_args in cases:
with self.subTest(cancel_args=cancel_args):
loop = asyncio.new_event_loop()
self.addCleanup(loop.close)
async def test():
time = 0
while True:
time += 0.05
with self.assertWarns(DeprecationWarning):
await asyncio.gather(asyncio.sleep(0.05),
return_exceptions=True,
loop=loop)
if time > 1:
return
async def main():
qwe = self.new_task(loop, test())
await asyncio.sleep(0.2)
qwe.cancel(*cancel_args)
await qwe
try:
loop.run_until_complete(main())
except asyncio.CancelledError as exc:
self.assertEqual(exc.args, ())
exc_type, exc_args, depth = get_innermost_context(exc)
self.assertEqual((exc_type, exc_args),
(asyncio.CancelledError, expected_args))
# The exact traceback seems to vary in CI.
self.assertIn(depth, (2, 3))
else:
self.fail('gather did not propagate the cancellation '
'request')
def test_exception_traceback(self):
# See http://bugs.python.org/issue28843
async def foo():
1 / 0
async def main():
task = self.new_task(self.loop, foo())
await asyncio.sleep(0) # skip one loop iteration
self.assertIsNotNone(task.exception().__traceback__)
self.loop.run_until_complete(main())
@mock.patch('asyncio.base_events.logger')
def test_error_in_call_soon(self, m_log):
def call_soon(callback, *args, **kwargs):
raise ValueError
self.loop.call_soon = call_soon
with self.assertWarns(DeprecationWarning):
@asyncio.coroutine
def coro():
pass
self.assertFalse(m_log.error.called)
with self.assertRaises(ValueError):
gen = coro()
try:
self.new_task(self.loop, gen)
finally:
gen.close()
gc.collect() # For PyPy or other GCs.
self.assertTrue(m_log.error.called)
message = m_log.error.call_args[0][0]
self.assertIn('Task was destroyed but it is pending', message)
self.assertEqual(asyncio.all_tasks(self.loop), set())
def test_create_task_with_noncoroutine(self):
with self.assertRaisesRegex(TypeError,
"a coroutine was expected, got 123"):
self.new_task(self.loop, 123)
# test it for the second time to ensure that caching
# in asyncio.iscoroutine() doesn't break things.
with self.assertRaisesRegex(TypeError,
"a coroutine was expected, got 123"):
self.new_task(self.loop, 123)
def test_create_task_with_oldstyle_coroutine(self):
with self.assertWarns(DeprecationWarning):
@asyncio.coroutine
def coro():
pass
task = self.new_task(self.loop, coro())
self.assertIsInstance(task, self.Task)
self.loop.run_until_complete(task)
# test it for the second time to ensure that caching
# in asyncio.iscoroutine() doesn't break things.
task = self.new_task(self.loop, coro())
self.assertIsInstance(task, self.Task)
self.loop.run_until_complete(task)
def test_create_task_with_async_function(self):
async def coro():
pass
task = self.new_task(self.loop, coro())
self.assertIsInstance(task, self.Task)
self.loop.run_until_complete(task)
# test it for the second time to ensure that caching
# in asyncio.iscoroutine() doesn't break things.
task = self.new_task(self.loop, coro())
self.assertIsInstance(task, self.Task)
self.loop.run_until_complete(task)
def test_create_task_with_asynclike_function(self):
task = self.new_task(self.loop, CoroLikeObject())
self.assertIsInstance(task, self.Task)
self.assertEqual(self.loop.run_until_complete(task), 42)
# test it for the second time to ensure that caching
# in asyncio.iscoroutine() doesn't break things.
task = self.new_task(self.loop, CoroLikeObject())
self.assertIsInstance(task, self.Task)
self.assertEqual(self.loop.run_until_complete(task), 42)
def test_bare_create_task(self):
async def inner():
return 1
async def coro():
task = asyncio.create_task(inner())
self.assertIsInstance(task, self.Task)
ret = await task
self.assertEqual(1, ret)
self.loop.run_until_complete(coro())
def test_bare_create_named_task(self):
async def coro_noop():
pass
async def coro():
task = asyncio.create_task(coro_noop(), name='No-op')
self.assertEqual(task.get_name(), 'No-op')
await task
self.loop.run_until_complete(coro())
def test_context_1(self):
cvar = contextvars.ContextVar('cvar', default='nope')
async def sub():
await asyncio.sleep(0.01)
self.assertEqual(cvar.get(), 'nope')
cvar.set('something else')
async def main():
self.assertEqual(cvar.get(), 'nope')
subtask = self.new_task(loop, sub())
cvar.set('yes')
self.assertEqual(cvar.get(), 'yes')
await subtask
self.assertEqual(cvar.get(), 'yes')
loop = asyncio.new_event_loop()
try:
task = self.new_task(loop, main())
loop.run_until_complete(task)
finally:
loop.close()
def test_context_2(self):
cvar = contextvars.ContextVar('cvar', default='nope')
async def main():
def fut_on_done(fut):
# This change must not pollute the context
# of the "main()" task.
cvar.set('something else')
self.assertEqual(cvar.get(), 'nope')
for j in range(2):
fut = self.new_future(loop)
fut.add_done_callback(fut_on_done)
cvar.set(f'yes{j}')
loop.call_soon(fut.set_result, None)
await fut
self.assertEqual(cvar.get(), f'yes{j}')
for i in range(3):
# Test that task passed its context to add_done_callback:
cvar.set(f'yes{i}-{j}')
await asyncio.sleep(0.001)
self.assertEqual(cvar.get(), f'yes{i}-{j}')
loop = asyncio.new_event_loop()
try:
task = self.new_task(loop, main())
loop.run_until_complete(task)
finally:
loop.close()
self.assertEqual(cvar.get(), 'nope')
def test_context_3(self):
# Run 100 Tasks in parallel, each modifying cvar.
cvar = contextvars.ContextVar('cvar', default=-1)
async def sub(num):
for i in range(10):
cvar.set(num + i)
await asyncio.sleep(random.uniform(0.001, 0.05))
self.assertEqual(cvar.get(), num + i)
async def main():
tasks = []
for i in range(100):
task = loop.create_task(sub(random.randint(0, 10)))
tasks.append(task)
await asyncio.gather(*tasks)
loop = asyncio.new_event_loop()
try:
loop.run_until_complete(main())
finally:
loop.close()
self.assertEqual(cvar.get(), -1)
def test_get_coro(self):
loop = asyncio.new_event_loop()
coro = coroutine_function()
try:
task = self.new_task(loop, coro)
loop.run_until_complete(task)
self.assertIs(task.get_coro(), coro)
finally:
loop.close()
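# A minimal standalone sketch (not part of the test suite above) of the behaviour
# the test_context_* cases exercise: every Task runs in a *copy* of the context
# that was current when the task was created, so a ContextVar set inside a task
# does not leak back into its creator. All names below are illustrative only.
def _contextvars_propagation_sketch():
    import asyncio
    import contextvars

    cvar = contextvars.ContextVar('cvar', default='outer')

    async def child():
        # The child sees the value the parent set before create_task() ...
        assert cvar.get() == 'parent value'
        # ... but its own writes stay confined to the task's context copy.
        cvar.set('child value')

    async def parent():
        cvar.set('parent value')
        await asyncio.create_task(child())
        assert cvar.get() == 'parent value'  # unchanged by the child

    asyncio.run(parent())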
def add_subclass_tests(cls):
BaseTask = cls.Task
BaseFuture = cls.Future
if BaseTask is None or BaseFuture is None:
return cls
class CommonFuture:
def __init__(self, *args, **kwargs):
self.calls = collections.defaultdict(lambda: 0)
super().__init__(*args, **kwargs)
def add_done_callback(self, *args, **kwargs):
self.calls['add_done_callback'] += 1
return super().add_done_callback(*args, **kwargs)
class Task(CommonFuture, BaseTask):
pass
class Future(CommonFuture, BaseFuture):
pass
def test_subclasses_ctask_cfuture(self):
fut = self.Future(loop=self.loop)
async def func():
self.loop.call_soon(lambda: fut.set_result('spam'))
return await fut
task = self.Task(func(), loop=self.loop)
result = self.loop.run_until_complete(task)
self.assertEqual(result, 'spam')
self.assertEqual(
dict(task.calls),
{'add_done_callback': 1})
self.assertEqual(
dict(fut.calls),
{'add_done_callback': 1})
# Add patched Task & Future back to the test case
cls.Task = Task
cls.Future = Future
# Add an extra unit-test
cls.test_subclasses_ctask_cfuture = test_subclasses_ctask_cfuture
# Disable the "test_task_source_traceback" test
# (the test is hardcoded for a particular call stack, which
# is slightly different for Task subclasses)
cls.test_task_source_traceback = None
return cls
class SetMethodsTest:
def test_set_result_causes_invalid_state(self):
Future = type(self).Future
self.loop.call_exception_handler = exc_handler = mock.Mock()
async def foo():
await asyncio.sleep(0.1)
return 10
coro = foo()
task = self.new_task(self.loop, coro)
Future.set_result(task, 'spam')
self.assertEqual(
self.loop.run_until_complete(task),
'spam')
exc_handler.assert_called_once()
exc = exc_handler.call_args[0][0]['exception']
with self.assertRaisesRegex(asyncio.InvalidStateError,
r'step\(\): already done'):
raise exc
coro.close()
def test_set_exception_causes_invalid_state(self):
class MyExc(Exception):
pass
Future = type(self).Future
self.loop.call_exception_handler = exc_handler = mock.Mock()
async def foo():
await asyncio.sleep(0.1)
return 10
coro = foo()
task = self.new_task(self.loop, coro)
Future.set_exception(task, MyExc())
with self.assertRaises(MyExc):
self.loop.run_until_complete(task)
exc_handler.assert_called_once()
exc = exc_handler.call_args[0][0]['exception']
with self.assertRaisesRegex(asyncio.InvalidStateError,
r'step\(\): already done'):
raise exc
coro.close()
@unittest.skipUnless(hasattr(futures, '_CFuture') and
hasattr(tasks, '_CTask'),
'requires the C _asyncio module')
class CTask_CFuture_Tests(BaseTaskTests, SetMethodsTest,
test_utils.TestCase):
Task = getattr(tasks, '_CTask', None)
Future = getattr(futures, '_CFuture', None)
@support.refcount_test
def test_refleaks_in_task___init__(self):
gettotalrefcount = support.get_attribute(sys, 'gettotalrefcount')
async def coro():
pass
task = self.new_task(self.loop, coro())
self.loop.run_until_complete(task)
refs_before = gettotalrefcount()
for i in range(100):
task.__init__(coro(), loop=self.loop)
self.loop.run_until_complete(task)
self.assertAlmostEqual(gettotalrefcount() - refs_before, 0, delta=10)
def test_del__log_destroy_pending_segfault(self):
async def coro():
pass
task = self.new_task(self.loop, coro())
self.loop.run_until_complete(task)
with self.assertRaises(AttributeError):
del task._log_destroy_pending
@unittest.skipUnless(hasattr(futures, '_CFuture') and
hasattr(tasks, '_CTask'),
'requires the C _asyncio module')
@add_subclass_tests
class CTask_CFuture_SubclassTests(BaseTaskTests, test_utils.TestCase):
Task = getattr(tasks, '_CTask', None)
Future = getattr(futures, '_CFuture', None)
@unittest.skipUnless(hasattr(tasks, '_CTask'),
'requires the C _asyncio module')
@add_subclass_tests
class CTaskSubclass_PyFuture_Tests(BaseTaskTests, test_utils.TestCase):
Task = getattr(tasks, '_CTask', None)
Future = futures._PyFuture
@unittest.skipUnless(hasattr(futures, '_CFuture'),
'requires the C _asyncio module')
@add_subclass_tests
class PyTask_CFutureSubclass_Tests(BaseTaskTests, test_utils.TestCase):
Future = getattr(futures, '_CFuture', None)
Task = tasks._PyTask
@unittest.skipUnless(hasattr(tasks, '_CTask'),
'requires the C _asyncio module')
class CTask_PyFuture_Tests(BaseTaskTests, test_utils.TestCase):
Task = getattr(tasks, '_CTask', None)
Future = futures._PyFuture
@unittest.skipUnless(hasattr(futures, '_CFuture'),
'requires the C _asyncio module')
class PyTask_CFuture_Tests(BaseTaskTests, test_utils.TestCase):
Task = tasks._PyTask
Future = getattr(futures, '_CFuture', None)
class PyTask_PyFuture_Tests(BaseTaskTests, SetMethodsTest,
test_utils.TestCase):
Task = tasks._PyTask
Future = futures._PyFuture
@add_subclass_tests
class PyTask_PyFuture_SubclassTests(BaseTaskTests, test_utils.TestCase):
Task = tasks._PyTask
Future = futures._PyFuture
@unittest.skipUnless(hasattr(tasks, '_CTask'),
'requires the C _asyncio module')
class CTask_Future_Tests(test_utils.TestCase):
def test_foobar(self):
class Fut(asyncio.Future):
@property
def get_loop(self):
raise AttributeError
async def coro():
await fut
return 'spam'
self.loop = asyncio.new_event_loop()
try:
fut = Fut(loop=self.loop)
self.loop.call_later(0.1, fut.set_result, 1)
task = self.loop.create_task(coro())
res = self.loop.run_until_complete(task)
finally:
self.loop.close()
self.assertEqual(res, 'spam')
class BaseTaskIntrospectionTests:
_register_task = None
_unregister_task = None
_enter_task = None
_leave_task = None
def test__register_task_1(self):
class TaskLike:
@property
def _loop(self):
return loop
def done(self):
return False
task = TaskLike()
loop = mock.Mock()
self.assertEqual(asyncio.all_tasks(loop), set())
self._register_task(task)
self.assertEqual(asyncio.all_tasks(loop), {task})
self._unregister_task(task)
def test__register_task_2(self):
class TaskLike:
def get_loop(self):
return loop
def done(self):
return False
task = TaskLike()
loop = mock.Mock()
self.assertEqual(asyncio.all_tasks(loop), set())
self._register_task(task)
self.assertEqual(asyncio.all_tasks(loop), {task})
self._unregister_task(task)
def test__register_task_3(self):
class TaskLike:
def get_loop(self):
return loop
def done(self):
return True
task = TaskLike()
loop = mock.Mock()
self.assertEqual(asyncio.all_tasks(loop), set())
self._register_task(task)
self.assertEqual(asyncio.all_tasks(loop), set())
self._unregister_task(task)
def test__enter_task(self):
task = mock.Mock()
loop = mock.Mock()
self.assertIsNone(asyncio.current_task(loop))
self._enter_task(loop, task)
self.assertIs(asyncio.current_task(loop), task)
self._leave_task(loop, task)
def test__enter_task_failure(self):
task1 = mock.Mock()
task2 = mock.Mock()
loop = mock.Mock()
self._enter_task(loop, task1)
with self.assertRaises(RuntimeError):
self._enter_task(loop, task2)
self.assertIs(asyncio.current_task(loop), task1)
self._leave_task(loop, task1)
def test__leave_task(self):
task = mock.Mock()
loop = mock.Mock()
self._enter_task(loop, task)
self._leave_task(loop, task)
self.assertIsNone(asyncio.current_task(loop))
def test__leave_task_failure1(self):
task1 = mock.Mock()
task2 = mock.Mock()
loop = mock.Mock()
self._enter_task(loop, task1)
with self.assertRaises(RuntimeError):
self._leave_task(loop, task2)
self.assertIs(asyncio.current_task(loop), task1)
self._leave_task(loop, task1)
def test__leave_task_failure2(self):
task = mock.Mock()
loop = mock.Mock()
with self.assertRaises(RuntimeError):
self._leave_task(loop, task)
self.assertIsNone(asyncio.current_task(loop))
def test__unregister_task(self):
task = mock.Mock()
loop = mock.Mock()
task.get_loop = lambda: loop
self._register_task(task)
self._unregister_task(task)
self.assertEqual(asyncio.all_tasks(loop), set())
def test__unregister_task_not_registered(self):
task = mock.Mock()
loop = mock.Mock()
self._unregister_task(task)
self.assertEqual(asyncio.all_tasks(loop), set())
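# Hedged sketch of the public face of the introspection hooks tested above:
# _register_task/_enter_task feed asyncio.all_tasks() and asyncio.current_task().
# Standalone illustration only; it is not used by the tests.
def _introspection_sketch():
    import asyncio

    async def worker():
        me = asyncio.current_task()
        # A running, not-yet-done task is reported by all_tasks() for its loop.
        assert me in asyncio.all_tasks()
        return me.get_name()

    return asyncio.run(worker())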
class PyIntrospectionTests(test_utils.TestCase, BaseTaskIntrospectionTests):
_register_task = staticmethod(tasks._py_register_task)
_unregister_task = staticmethod(tasks._py_unregister_task)
_enter_task = staticmethod(tasks._py_enter_task)
_leave_task = staticmethod(tasks._py_leave_task)
@unittest.skipUnless(hasattr(tasks, '_c_register_task'),
'requires the C _asyncio module')
class CIntrospectionTests(test_utils.TestCase, BaseTaskIntrospectionTests):
if hasattr(tasks, '_c_register_task'):
_register_task = staticmethod(tasks._c_register_task)
_unregister_task = staticmethod(tasks._c_unregister_task)
_enter_task = staticmethod(tasks._c_enter_task)
_leave_task = staticmethod(tasks._c_leave_task)
else:
_register_task = _unregister_task = _enter_task = _leave_task = None
class BaseCurrentLoopTests:
def setUp(self):
super().setUp()
self.loop = asyncio.new_event_loop()
self.set_event_loop(self.loop)
def new_task(self, coro):
raise NotImplementedError
def test_current_task_no_running_loop(self):
self.assertIsNone(asyncio.current_task(loop=self.loop))
def test_current_task_no_running_loop_implicit(self):
with self.assertRaises(RuntimeError):
asyncio.current_task()
def test_current_task_with_implicit_loop(self):
async def coro():
self.assertIs(asyncio.current_task(loop=self.loop), task)
self.assertIs(asyncio.current_task(None), task)
self.assertIs(asyncio.current_task(), task)
task = self.new_task(coro())
self.loop.run_until_complete(task)
self.assertIsNone(asyncio.current_task(loop=self.loop))
class PyCurrentLoopTests(BaseCurrentLoopTests, test_utils.TestCase):
def new_task(self, coro):
return tasks._PyTask(coro, loop=self.loop)
@unittest.skipUnless(hasattr(tasks, '_CTask'),
'requires the C _asyncio module')
class CCurrentLoopTests(BaseCurrentLoopTests, test_utils.TestCase):
def new_task(self, coro):
return getattr(tasks, '_CTask')(coro, loop=self.loop)
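# Tiny standalone sketch (not part of the tests) of the contract pinned down by
# BaseCurrentLoopTests above: asyncio.current_task() needs a running loop unless
# an explicit loop argument is passed, and inside a running task it returns that
# very task.
def _current_task_sketch():
    import asyncio

    try:
        asyncio.current_task()  # no running loop in this thread
    except RuntimeError:
        pass

    async def probe():
        return asyncio.current_task() is not None

    assert asyncio.run(probe()) is True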
class GenericTaskTests(test_utils.TestCase):
def test_future_subclass(self):
self.assertTrue(issubclass(asyncio.Task, asyncio.Future))
@support.cpython_only
def test_asyncio_module_compiled(self):
# Because of circular imports it's easy to make _asyncio
# module non-importable. This is a simple test that will
# fail on systems where C modules were successfully compiled
# (hence the test for _functools etc), but _asyncio somehow didn't.
try:
import _functools
import _json
import _pickle
except ImportError:
self.skipTest('C modules are not available')
else:
try:
import _asyncio
except ImportError:
self.fail('_asyncio module is missing')
class GatherTestsBase:
def setUp(self):
super().setUp()
self.one_loop = self.new_test_loop()
self.other_loop = self.new_test_loop()
self.set_event_loop(self.one_loop, cleanup=False)
def _run_loop(self, loop):
while loop._ready:
test_utils.run_briefly(loop)
def _check_success(self, **kwargs):
a, b, c = [self.one_loop.create_future() for i in range(3)]
fut = asyncio.gather(*self.wrap_futures(a, b, c), **kwargs)
cb = test_utils.MockCallback()
fut.add_done_callback(cb)
b.set_result(1)
a.set_result(2)
self._run_loop(self.one_loop)
self.assertEqual(cb.called, False)
self.assertFalse(fut.done())
c.set_result(3)
self._run_loop(self.one_loop)
cb.assert_called_once_with(fut)
self.assertEqual(fut.result(), [2, 1, 3])
def test_success(self):
self._check_success()
self._check_success(return_exceptions=False)
def test_result_exception_success(self):
self._check_success(return_exceptions=True)
def test_one_exception(self):
a, b, c, d, e = [self.one_loop.create_future() for i in range(5)]
fut = asyncio.gather(*self.wrap_futures(a, b, c, d, e))
cb = test_utils.MockCallback()
fut.add_done_callback(cb)
exc = ZeroDivisionError()
a.set_result(1)
b.set_exception(exc)
self._run_loop(self.one_loop)
self.assertTrue(fut.done())
cb.assert_called_once_with(fut)
self.assertIs(fut.exception(), exc)
# Does nothing
c.set_result(3)
d.cancel()
e.set_exception(RuntimeError())
e.exception()
def test_return_exceptions(self):
a, b, c, d = [self.one_loop.create_future() for i in range(4)]
fut = asyncio.gather(*self.wrap_futures(a, b, c, d),
return_exceptions=True)
cb = test_utils.MockCallback()
fut.add_done_callback(cb)
exc = ZeroDivisionError()
exc2 = RuntimeError()
b.set_result(1)
c.set_exception(exc)
a.set_result(3)
self._run_loop(self.one_loop)
self.assertFalse(fut.done())
d.set_exception(exc2)
self._run_loop(self.one_loop)
self.assertTrue(fut.done())
cb.assert_called_once_with(fut)
self.assertEqual(fut.result(), [3, 1, exc, exc2])
def test_env_var_debug(self):
code = '\n'.join((
'import asyncio.coroutines',
'print(asyncio.coroutines._DEBUG)'))
# Test with -E to not fail if the unit test was run with
# PYTHONASYNCIODEBUG set to a non-empty string
sts, stdout, stderr = assert_python_ok('-E', '-c', code)
self.assertEqual(stdout.rstrip(), b'False')
sts, stdout, stderr = assert_python_ok('-c', code,
PYTHONASYNCIODEBUG='',
PYTHONDEVMODE='')
self.assertEqual(stdout.rstrip(), b'False')
sts, stdout, stderr = assert_python_ok('-c', code,
PYTHONASYNCIODEBUG='1',
PYTHONDEVMODE='')
self.assertEqual(stdout.rstrip(), b'True')
sts, stdout, stderr = assert_python_ok('-E', '-c', code,
PYTHONASYNCIODEBUG='1',
PYTHONDEVMODE='')
self.assertEqual(stdout.rstrip(), b'False')
# -X dev
sts, stdout, stderr = assert_python_ok('-E', '-X', 'dev',
'-c', code)
self.assertEqual(stdout.rstrip(), b'True')
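# Standalone sketch, using only the public API, of the two gather() modes that
# _check_success()/test_return_exceptions above exercise: by default the first
# exception propagates out of the await, while return_exceptions=True collects
# exceptions into the result list in input order. Names are illustrative only.
def _gather_modes_sketch():
    import asyncio

    async def ok(value):
        return value

    async def boom():
        raise ZeroDivisionError

    async def main():
        # Default mode: the child's exception propagates.
        try:
            await asyncio.gather(ok(1), boom(), ok(3))
        except ZeroDivisionError:
            pass
        # return_exceptions=True: the exception is returned in place.
        results = await asyncio.gather(ok(1), boom(), ok(3),
                                       return_exceptions=True)
        assert results[0] == 1 and results[2] == 3
        assert isinstance(results[1], ZeroDivisionError)

    asyncio.run(main())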
class FutureGatherTests(GatherTestsBase, test_utils.TestCase):
def wrap_futures(self, *futures):
return futures
def _check_empty_sequence(self, seq_or_iter):
asyncio.set_event_loop(self.one_loop)
self.addCleanup(asyncio.set_event_loop, None)
fut = asyncio.gather(*seq_or_iter)
self.assertIsInstance(fut, asyncio.Future)
self.assertIs(fut._loop, self.one_loop)
self._run_loop(self.one_loop)
self.assertTrue(fut.done())
self.assertEqual(fut.result(), [])
with self.assertWarns(DeprecationWarning):
fut = asyncio.gather(*seq_or_iter, loop=self.other_loop)
self.assertIs(fut._loop, self.other_loop)
def test_constructor_empty_sequence(self):
self._check_empty_sequence([])
self._check_empty_sequence(())
self._check_empty_sequence(set())
self._check_empty_sequence(iter(""))
def test_constructor_heterogenous_futures(self):
fut1 = self.one_loop.create_future()
fut2 = self.other_loop.create_future()
with self.assertRaises(ValueError):
asyncio.gather(fut1, fut2)
with self.assertRaises(ValueError):
with self.assertWarns(DeprecationWarning):
asyncio.gather(fut1, loop=self.other_loop)
def test_constructor_homogenous_futures(self):
children = [self.other_loop.create_future() for i in range(3)]
fut = asyncio.gather(*children)
self.assertIs(fut._loop, self.other_loop)
self._run_loop(self.other_loop)
self.assertFalse(fut.done())
with self.assertWarns(DeprecationWarning):
fut = asyncio.gather(*children, loop=self.other_loop)
self.assertIs(fut._loop, self.other_loop)
self._run_loop(self.other_loop)
self.assertFalse(fut.done())
def test_one_cancellation(self):
a, b, c, d, e = [self.one_loop.create_future() for i in range(5)]
fut = asyncio.gather(a, b, c, d, e)
cb = test_utils.MockCallback()
fut.add_done_callback(cb)
a.set_result(1)
b.cancel()
self._run_loop(self.one_loop)
self.assertTrue(fut.done())
cb.assert_called_once_with(fut)
self.assertFalse(fut.cancelled())
self.assertIsInstance(fut.exception(), asyncio.CancelledError)
# Does nothing
c.set_result(3)
d.cancel()
e.set_exception(RuntimeError())
e.exception()
def test_result_exception_one_cancellation(self):
a, b, c, d, e, f = [self.one_loop.create_future()
for i in range(6)]
fut = asyncio.gather(a, b, c, d, e, f, return_exceptions=True)
cb = test_utils.MockCallback()
fut.add_done_callback(cb)
a.set_result(1)
zde = ZeroDivisionError()
b.set_exception(zde)
c.cancel()
self._run_loop(self.one_loop)
self.assertFalse(fut.done())
d.set_result(3)
e.cancel()
rte = RuntimeError()
f.set_exception(rte)
res = self.one_loop.run_until_complete(fut)
self.assertIsInstance(res[2], asyncio.CancelledError)
self.assertIsInstance(res[4], asyncio.CancelledError)
res[2] = res[4] = None
self.assertEqual(res, [1, zde, None, 3, None, rte])
cb.assert_called_once_with(fut)
class CoroutineGatherTests(GatherTestsBase, test_utils.TestCase):
def setUp(self):
super().setUp()
asyncio.set_event_loop(self.one_loop)
def wrap_futures(self, *futures):
coros = []
for fut in futures:
async def coro(fut=fut):
return await fut
coros.append(coro())
return coros
def _gather(self, *args, **kwargs):
async def coro():
return asyncio.gather(*args, **kwargs)
return self.one_loop.run_until_complete(coro())
def test_constructor_loop_selection(self):
async def coro():
return 'abc'
gen1 = coro()
gen2 = coro()
fut = asyncio.gather(gen1, gen2)
self.assertIs(fut._loop, self.one_loop)
self.one_loop.run_until_complete(fut)
self.set_event_loop(self.other_loop, cleanup=False)
gen3 = coro()
gen4 = coro()
with self.assertWarns(DeprecationWarning):
fut2 = asyncio.gather(gen3, gen4, loop=self.other_loop)
self.assertIs(fut2._loop, self.other_loop)
self.other_loop.run_until_complete(fut2)
def test_duplicate_coroutines(self):
with self.assertWarns(DeprecationWarning):
@asyncio.coroutine
def coro(s):
return s
c = coro('abc')
with self.assertWarns(DeprecationWarning):
fut = asyncio.gather(c, c, coro('def'), c, loop=self.one_loop)
self._run_loop(self.one_loop)
self.assertEqual(fut.result(), ['abc', 'abc', 'def', 'abc'])
def test_cancellation_broadcast(self):
# Cancelling outer() cancels all children.
proof = 0
waiter = self.one_loop.create_future()
async def inner():
nonlocal proof
await waiter
proof += 1
child1 = asyncio.ensure_future(inner(), loop=self.one_loop)
child2 = asyncio.ensure_future(inner(), loop=self.one_loop)
gatherer = None
async def outer():
nonlocal proof, gatherer
gatherer = asyncio.gather(child1, child2)
await gatherer
proof += 100
f = asyncio.ensure_future(outer(), loop=self.one_loop)
test_utils.run_briefly(self.one_loop)
self.assertTrue(f.cancel())
with self.assertRaises(asyncio.CancelledError):
self.one_loop.run_until_complete(f)
self.assertFalse(gatherer.cancel())
self.assertTrue(waiter.cancelled())
self.assertTrue(child1.cancelled())
self.assertTrue(child2.cancelled())
test_utils.run_briefly(self.one_loop)
self.assertEqual(proof, 0)
def test_exception_marking(self):
# Test for the first line marked "Mark exception retrieved."
async def inner(f):
await f
raise RuntimeError('should not be ignored')
a = self.one_loop.create_future()
b = self.one_loop.create_future()
async def outer():
await asyncio.gather(inner(a), inner(b))
f = asyncio.ensure_future(outer(), loop=self.one_loop)
test_utils.run_briefly(self.one_loop)
a.set_result(None)
test_utils.run_briefly(self.one_loop)
b.set_result(None)
test_utils.run_briefly(self.one_loop)
self.assertIsInstance(f.exception(), RuntimeError)
def test_issue46672(self):
with mock.patch(
'asyncio.base_events.BaseEventLoop.call_exception_handler',
):
async def coro(s):
return s
c = coro('abc')
with self.assertRaises(TypeError):
self._gather(c, {})
self._run_loop(self.one_loop)
# NameError should not happen:
self.one_loop.call_exception_handler.assert_not_called()
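# Standalone sketch of the cancellation broadcast that test_cancellation_broadcast
# above checks: cancelling the task that awaits a gather() also cancels the
# gathered children. Purely illustrative; names are assumptions.
def _gather_cancellation_sketch():
    import asyncio

    async def child():
        await asyncio.sleep(3600)

    async def outer(c1, c2):
        await asyncio.gather(c1, c2)

    async def main():
        c1 = asyncio.create_task(child())
        c2 = asyncio.create_task(child())
        outer_task = asyncio.create_task(outer(c1, c2))
        await asyncio.sleep(0)  # let the children and outer() start
        outer_task.cancel()
        try:
            await outer_task
        except asyncio.CancelledError:
            pass
        assert c1.cancelled() and c2.cancelled()

    asyncio.run(main())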
class RunCoroutineThreadsafeTests(test_utils.TestCase):
"""Test case for asyncio.run_coroutine_threadsafe."""
def setUp(self):
super().setUp()
self.loop = asyncio.new_event_loop()
self.set_event_loop(self.loop) # Will cleanup properly
async def add(self, a, b, fail=False, cancel=False):
"""Wait 0.05 second and return a + b."""
await asyncio.sleep(0.05)
if fail:
raise RuntimeError("Fail!")
if cancel:
asyncio.current_task(self.loop).cancel()
await asyncio.sleep(0)
return a + b
def target(self, fail=False, cancel=False, timeout=None,
advance_coro=False):
"""Run add coroutine in the event loop."""
coro = self.add(1, 2, fail=fail, cancel=cancel)
future = asyncio.run_coroutine_threadsafe(coro, self.loop)
if advance_coro:
# this is for test_run_coroutine_threadsafe_task_factory_exception;
# otherwise it spills errors and breaks **other** unittests, since
# 'target' is interacting with threads.
# With this call, `coro` will be advanced, so that
# CoroWrapper.__del__ won't do anything when asyncio tests run
# in debug mode.
self.loop.call_soon_threadsafe(coro.send, None)
try:
return future.result(timeout)
finally:
future.done() or future.cancel()
def test_run_coroutine_threadsafe(self):
"""Test coroutine submission from a thread to an event loop."""
future = self.loop.run_in_executor(None, self.target)
result = self.loop.run_until_complete(future)
self.assertEqual(result, 3)
def test_run_coroutine_threadsafe_with_exception(self):
"""Test coroutine submission from a thread to an event loop
when an exception is raised."""
future = self.loop.run_in_executor(None, self.target, True)
with self.assertRaises(RuntimeError) as exc_context:
self.loop.run_until_complete(future)
self.assertIn("Fail!", exc_context.exception.args)
def test_run_coroutine_threadsafe_with_timeout(self):
"""Test coroutine submission from a thread to an event loop
when a timeout is raised."""
callback = lambda: self.target(timeout=0)
future = self.loop.run_in_executor(None, callback)
with self.assertRaises(asyncio.TimeoutError):
self.loop.run_until_complete(future)
test_utils.run_briefly(self.loop)
# Check that there's no pending task (add has been cancelled)
for task in asyncio.all_tasks(self.loop):
self.assertTrue(task.done())
def test_run_coroutine_threadsafe_task_cancelled(self):
"""Test coroutine submission from a thread to an event loop
when the task is cancelled."""
callback = lambda: self.target(cancel=True)
future = self.loop.run_in_executor(None, callback)
with self.assertRaises(asyncio.CancelledError):
self.loop.run_until_complete(future)
def test_run_coroutine_threadsafe_task_factory_exception(self):
"""Test coroutine submission from a thread to an event loop
        when the task factory raises an exception."""
def task_factory(loop, coro):
raise NameError
run = self.loop.run_in_executor(
None, lambda: self.target(advance_coro=True))
# Set exception handler
callback = test_utils.MockCallback()
self.loop.set_exception_handler(callback)
# Set corrupted task factory
self.addCleanup(self.loop.set_task_factory,
self.loop.get_task_factory())
self.loop.set_task_factory(task_factory)
# Run event loop
with self.assertRaises(NameError) as exc_context:
self.loop.run_until_complete(run)
# Check exceptions
self.assertEqual(len(callback.call_args_list), 1)
(loop, context), kwargs = callback.call_args
self.assertEqual(context['exception'], exc_context.exception)
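# Minimal sketch of the pattern RunCoroutineThreadsafeTests exercises above: hand
# a coroutine to an event loop running in another thread and wait on the returned
# concurrent.futures.Future. Illustrative only; the thread/loop setup here is an
# assumption, not part of the test suite.
def _run_coroutine_threadsafe_sketch():
    import asyncio
    import threading

    loop = asyncio.new_event_loop()
    thread = threading.Thread(target=loop.run_forever, daemon=True)
    thread.start()

    async def add(a, b):
        await asyncio.sleep(0)
        return a + b

    try:
        future = asyncio.run_coroutine_threadsafe(add(1, 2), loop)
        assert future.result(timeout=5) == 3
    finally:
        loop.call_soon_threadsafe(loop.stop)
        thread.join()
        loop.close()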
class SleepTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = asyncio.new_event_loop()
self.set_event_loop(self.loop)
def tearDown(self):
self.loop.close()
self.loop = None
super().tearDown()
def test_sleep_zero(self):
result = 0
def inc_result(num):
nonlocal result
result += num
async def coro():
self.loop.call_soon(inc_result, 1)
self.assertEqual(result, 0)
num = await asyncio.sleep(0, result=10)
self.assertEqual(result, 1) # inc'ed by call_soon
inc_result(num) # num should be 11
self.loop.run_until_complete(coro())
self.assertEqual(result, 11)
def test_loop_argument_is_deprecated(self):
# Remove test when loop argument is removed in Python 3.10
with self.assertWarns(DeprecationWarning):
self.loop.run_until_complete(asyncio.sleep(0.01, loop=self.loop))
class WaitTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = asyncio.new_event_loop()
self.set_event_loop(self.loop)
def tearDown(self):
self.loop.close()
self.loop = None
super().tearDown()
def test_loop_argument_is_deprecated_in_wait(self):
# Remove test when loop argument is removed in Python 3.10
with self.assertWarns(DeprecationWarning):
self.loop.run_until_complete(
asyncio.wait([coroutine_function()], loop=self.loop))
def test_loop_argument_is_deprecated_in_wait_for(self):
# Remove test when loop argument is removed in Python 3.10
with self.assertWarns(DeprecationWarning):
self.loop.run_until_complete(
asyncio.wait_for(coroutine_function(), 0.01, loop=self.loop))
def test_coro_is_deprecated_in_wait(self):
# Remove test when passing coros to asyncio.wait() is removed in 3.11
with self.assertWarns(DeprecationWarning):
self.loop.run_until_complete(
asyncio.wait([coroutine_function()]))
task = self.loop.create_task(coroutine_function())
with self.assertWarns(DeprecationWarning):
self.loop.run_until_complete(
asyncio.wait([task, coroutine_function()]))
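# Sketch of the non-deprecated pattern the WaitTests above point at: wrap the
# coroutines in tasks first so asyncio.wait() only ever sees futures. Names are
# illustrative only; this helper is not used by the tests.
def _wait_on_tasks_sketch():
    import asyncio

    async def work(n):
        await asyncio.sleep(0)
        return n

    async def main():
        tasks = [asyncio.create_task(work(i)) for i in range(3)]
        done, pending = await asyncio.wait(tasks)
        assert not pending
        return sorted(t.result() for t in done)

    assert asyncio.run(main()) == [0, 1, 2]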
class CompatibilityTests(test_utils.TestCase):
# Tests for checking a bridge between old-styled coroutines
# and async/await syntax
def setUp(self):
super().setUp()
self.loop = asyncio.new_event_loop()
self.set_event_loop(self.loop)
def tearDown(self):
self.loop.close()
self.loop = None
super().tearDown()
def test_yield_from_awaitable(self):
with self.assertWarns(DeprecationWarning):
@asyncio.coroutine
def coro():
yield from asyncio.sleep(0)
return 'ok'
result = self.loop.run_until_complete(coro())
self.assertEqual('ok', result)
def test_await_old_style_coro(self):
with self.assertWarns(DeprecationWarning):
@asyncio.coroutine
def coro1():
return 'ok1'
with self.assertWarns(DeprecationWarning):
@asyncio.coroutine
def coro2():
yield from asyncio.sleep(0)
return 'ok2'
async def inner():
return await asyncio.gather(coro1(), coro2())
result = self.loop.run_until_complete(inner())
self.assertEqual(['ok1', 'ok2'], result)
def test_debug_mode_interop(self):
# https://bugs.python.org/issue32636
code = textwrap.dedent("""
import asyncio
async def native_coro():
pass
@asyncio.coroutine
def old_style_coro():
yield from native_coro()
asyncio.run(old_style_coro())
""")
assert_python_ok("-Wignore::DeprecationWarning", "-c", code,
PYTHONASYNCIODEBUG="1")
if __name__ == '__main__':
unittest.main()
| avg_line_length: 32.597384 | max_line_length: 105 | alphanum_fraction: 0.587515 |
| hexsha: 1542c04d163fe24f914047acb4e4d2eafe8162b8 | size: 1,897 | ext: py | lang: Python |
| max_stars_repo_path: sc2env/play_four_towers_friendly_unit.py | max_stars_repo_name: khlam/sc2env | max_stars_repo_head_hexsha: af0d181dd78b09e6bedda7b0e9c98064adb05641 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2019-01-22T23:36:49.000Z | max_stars_repo_stars_event_max_datetime: 2019-01-22T23:36:49.000Z |
| max_issues_repo_path: sc2env/play_four_towers_friendly_unit.py | max_issues_repo_name: khlam/sc2env | max_issues_repo_head_hexsha: af0d181dd78b09e6bedda7b0e9c98064adb05641 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 2 | max_issues_repo_issues_event_min_datetime: 2019-07-19T21:28:57.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-25T19:14:05.000Z |
| max_forks_repo_path: sc2env/play_four_towers_friendly_unit.py | max_forks_repo_name: khlam/sc2env | max_forks_repo_head_hexsha: af0d181dd78b09e6bedda7b0e9c98064adb05641 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2019-06-26T01:21:14.000Z | max_forks_repo_forks_event_max_datetime: 2019-06-26T01:21:14.000Z |
import logging
import argparse
import os
from importlib import import_module
from abp.configs import NetworkConfig, ReinforceConfig, EvaluationConfig
from abp.examples.pysc2.four_towers_friendly_units.hra import run_task
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'-f', '--folder',
help='The folder containing the config files',
required=True
)
parser.add_argument(
'-m', '--map',
help='Run the specified map',
required=False
)
parser.add_argument(
'--eval',
help="Run only evaluation task",
dest='eval',
action="store_true"
)
parser.add_argument(
'-r', '--render',
help="Render task",
dest='render',
action="store_true"
)
args = parser.parse_args()
evaluation_config_path = os.path.join(args.folder, "evaluation.yml")
evaluation_config = EvaluationConfig.load_from_yaml(evaluation_config_path)
network_config_path = os.path.join(args.folder, "network.yml")
network_config = NetworkConfig.load_from_yaml(network_config_path)
reinforce_config_path = os.path.join(args.folder, "reinforce.yml")
reinforce_config = ReinforceConfig.load_from_yaml(reinforce_config_path)
map_name = args.map
if map_name is None:
print("You are traning the agent for the default map: ")
print("FourTowersWithFriendlyUnitsFixedEnemiesFixedPosition")
else:
print("You are traning the agent for the map: ")
print(map_name)
#print(map_name)
if args.eval:
evaluation_config.training_episodes = 0
network_config.restore_network = True
if args.render:
evaluation_config.render = True
run_task(evaluation_config, network_config, reinforce_config, map_name = map_name)
return 0
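# Hedged sketch (never called by this script): driving main() programmatically
# instead of from the shell. The config folder path below is a hypothetical
# example; it is expected to contain evaluation.yml, network.yml and
# reinforce.yml as loaded in main() above.
def _example_programmatic_invocation():
    import sys
    from unittest import mock

    argv = [
        'play_four_towers_friendly_unit.py',
        '--folder', 'configs/four_towers_friendly_units',  # hypothetical path
        '--map', 'FourTowersWithFriendlyUnitsFixedEnemiesFixedPosition',
        '--eval',
    ]
    with mock.patch.object(sys, 'argv', argv):
        return main()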
if __name__ == '__main__':
main()
| avg_line_length: 26.347222 | max_line_length: 86 | alphanum_fraction: 0.67475 |
| hexsha: cf20991a2a3dcc45aef48d49c68d275e3c69d8a5 | size: 154,023 | ext: py | lang: Python |
| max_stars_repo_path: src/app.py | max_stars_repo_name: hubmapconsortium/entity-api | max_stars_repo_head_hexsha: 09caa5006a1de17a56d3fba21ef76a7480c374be | max_stars_repo_licenses: ["MIT"] | max_stars_count: 2 | max_stars_repo_stars_event_min_datetime: 2021-11-02T20:35:44.000Z | max_stars_repo_stars_event_max_datetime: 2021-12-16T14:58:41.000Z |
| max_issues_repo_path: src/app.py | max_issues_repo_name: hubmapconsortium/entity-api | max_issues_repo_head_hexsha: 09caa5006a1de17a56d3fba21ef76a7480c374be | max_issues_repo_licenses: ["MIT"] | max_issues_count: 161 | max_issues_repo_issues_event_min_datetime: 2019-11-13T16:47:02.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-30T04:05:28.000Z |
| max_forks_repo_path: src/app.py | max_forks_repo_name: hubmapconsortium/entity-api | max_forks_repo_head_hexsha: 09caa5006a1de17a56d3fba21ef76a7480c374be | max_forks_repo_licenses: ["MIT"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2021-12-03T17:24:45.000Z | max_forks_repo_forks_event_max_datetime: 2021-12-03T17:24:45.000Z |
import collections
import yaml
from datetime import datetime
from flask import Flask, g, jsonify, abort, request, Response, redirect, make_response
from neo4j.exceptions import TransactionError
import os
import re
import csv
import requests
import urllib.request
from io import StringIO
# Don't confuse urllib (Python native library) with urllib3 (3rd-party library, requests also uses urllib3)
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from pathlib import Path
import logging
import json
# Local modules
import app_neo4j_queries
import provenance
from schema import schema_manager
from schema import schema_errors
# HuBMAP commons
from hubmap_commons import string_helper
from hubmap_commons import file_helper as hm_file_helper
from hubmap_commons import neo4j_driver
from hubmap_commons import globus_groups
from hubmap_commons.hm_auth import AuthHelper
from hubmap_commons.exceptions import HTTPException
# Set logging format and level (default is warning)
# All the API logging is forwarded to the uWSGI server and gets written into the log file `uwsgo-entity-api.log`
# Log rotation is handled via logrotate on the host system with a configuration file
# Do NOT handle log file and rotation via the Python logging to avoid issues with multi-worker processes
logging.basicConfig(format='[%(asctime)s] %(levelname)s in %(module)s: %(message)s', level=logging.DEBUG, datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger(__name__)
# Specify the absolute path of the instance folder and use the config file relative to the instance path
app = Flask(__name__, instance_path=os.path.join(os.path.abspath(os.path.dirname(__file__)), 'instance'), instance_relative_config=True)
app.config.from_pyfile('app.cfg')
# Remove trailing slash / from URL base to avoid "//" caused by config with trailing slash
app.config['UUID_API_URL'] = app.config['UUID_API_URL'].strip('/')
app.config['INGEST_API_URL'] = app.config['INGEST_API_URL'].strip('/')
app.config['SEARCH_API_URL'] = app.config['SEARCH_API_URL'].strip('/')
# Suppress InsecureRequestWarning warning when requesting status on https with ssl cert verify disabled
requests.packages.urllib3.disable_warnings(category = InsecureRequestWarning)
####################################################################################################
## Register error handlers
####################################################################################################
# Error handler for 400 Bad Request with custom error message
@app.errorhandler(400)
def http_bad_request(e):
return jsonify(error=str(e)), 400
# Error handler for 401 Unauthorized with custom error message
@app.errorhandler(401)
def http_unauthorized(e):
return jsonify(error=str(e)), 401
# Error handler for 403 Forbidden with custom error message
@app.errorhandler(403)
def http_forbidden(e):
return jsonify(error=str(e)), 403
# Error handler for 404 Not Found with custom error message
@app.errorhandler(404)
def http_not_found(e):
return jsonify(error=str(e)), 404
# Error handler for 500 Internal Server Error with custom error message
@app.errorhandler(500)
def http_internal_server_error(e):
return jsonify(error=str(e)), 500
####################################################################################################
## AuthHelper initialization
####################################################################################################
# Initialize AuthHelper class and ensure singleton
try:
if AuthHelper.isInitialized() == False:
auth_helper_instance = AuthHelper.create(app.config['APP_CLIENT_ID'],
app.config['APP_CLIENT_SECRET'])
logger.info("Initialized AuthHelper class successfully :)")
else:
auth_helper_instance = AuthHelper.instance()
except Exception:
msg = "Failed to initialize the AuthHelper class"
# Log the full stack trace, prepend a line with our message
logger.exception(msg)
####################################################################################################
## Neo4j connection initialization
####################################################################################################
# The neo4j_driver (from commons package) is a singleton module
# This neo4j_driver_instance will be used for application-specific neo4j queries
# as well as being passed to the schema_manager
try:
neo4j_driver_instance = neo4j_driver.instance(app.config['NEO4J_URI'],
app.config['NEO4J_USERNAME'],
app.config['NEO4J_PASSWORD'])
logger.info("Initialized neo4j_driver module successfully :)")
except Exception:
msg = "Failed to initialize the neo4j_driver module"
# Log the full stack trace, prepend a line with our message
logger.exception(msg)
"""
Close the current neo4j connection at the end of every request
"""
@app.teardown_appcontext
def close_neo4j_driver(error):
if hasattr(g, 'neo4j_driver_instance'):
# Close the driver instance
neo4j_driver.close()
# Also remove neo4j_driver_instance from Flask's application context
g.neo4j_driver_instance = None
####################################################################################################
## Schema initialization
####################################################################################################
try:
# The schema_manager is a singleton module
# Pass in auth_helper_instance, neo4j_driver instance, and file_upload_helper instance
schema_manager.initialize(app.config['SCHEMA_YAML_FILE'],
app.config['UUID_API_URL'],
app.config['INGEST_API_URL'],
app.config['SEARCH_API_URL'],
auth_helper_instance,
neo4j_driver_instance)
logger.info("Initialized schema_manager module successfully :)")
# Use a broad catch-all here
except Exception:
msg = "Failed to initialize the schema_manager module"
# Log the full stack trace, prepend a line with our message
logger.exception(msg)
####################################################################################################
## REFERENCE DOI Redirection
####################################################################################################
## Read tsv file with the REFERENCE entity redirects
## sets the reference_redirects dict which is used
## by the /redirect method below
try:
reference_redirects = {}
url = app.config['REDIRECTION_INFO_URL']
response = requests.get(url)
resp_txt = response.content.decode('utf-8')
cr = csv.reader(resp_txt.splitlines(), delimiter='\t')
first = True
id_column = None
redir_url_column = None
for row in cr:
if first:
first = False
header = row
column = 0
for label in header:
if label == 'hubmap_id': id_column = column
if label == 'data_information_page': redir_url_column = column
column = column + 1
if id_column is None: raise Exception(f"Column hubmap_id not found in {url}")
if redir_url_column is None: raise Exception (f"Column data_information_page not found in {url}")
else:
reference_redirects[row[id_column].upper().strip()] = row[redir_url_column]
rr = redirect('abc', code = 307)
print(rr)
except Exception:
logger.exception("Failed to read tsv file with REFERENCE redirect information")
####################################################################################################
## Constants
####################################################################################################
# For now, don't use the constants from commons
# All lowercase for easy comparison
ACCESS_LEVEL_PUBLIC = 'public'
ACCESS_LEVEL_CONSORTIUM = 'consortium'
ACCESS_LEVEL_PROTECTED = 'protected'
DATASET_STATUS_PUBLISHED = 'published'
COMMA_SEPARATOR = ','
####################################################################################################
## API Endpoints
####################################################################################################
"""
The default route
Returns
-------
str
A welcome message
"""
@app.route('/', methods = ['GET'])
def index():
return "Hello! This is HuBMAP Entity API service :)"
"""
Show status of neo4j connection with the current VERSION and BUILD
Returns
-------
json
A json containing the status details
"""
@app.route('/status', methods = ['GET'])
def get_status():
status_data = {
# Use strip() to remove leading and trailing spaces, newlines, and tabs
'version': (Path(__file__).absolute().parent.parent / 'VERSION').read_text().strip(),
'build': (Path(__file__).absolute().parent.parent / 'BUILD').read_text().strip(),
'neo4j_connection': False
}
# Don't use try/except here
is_connected = app_neo4j_queries.check_connection(neo4j_driver_instance)
if is_connected:
status_data['neo4j_connection'] = True
return jsonify(status_data)
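# Hedged usage sketch (illustration only, never called by the service): how a
# monitoring client might poll the /status endpoint above. The base URL is a
# hypothetical placeholder; `requests` is already imported at the top of this module.
def _example_check_service_status(base_url = 'https://entity-api.example.org'):
    response = requests.get(f'{base_url}/status')
    response.raise_for_status()
    status = response.json()
    # True only when the API could reach its neo4j backend
    return status['neo4j_connection']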
"""
Retrieve the ancestor organ(s) of a given entity
The gateway treats this endpoint as public accessible
Parameters
----------
id : str
The HuBMAP ID (e.g. HBM123.ABCD.456) or UUID of target entity (Dataset/Sample)
Returns
-------
json
List of organs that are ancestors of the given entity
- Only dataset entities can return multiple ancestor organs
as Samples can only have one parent.
- If no organ ancestors are found an empty list is returned
- If requesting the ancestor organ of a Sample of type Organ or Donor/Collection/Upload
a 400 response is returned.
"""
@app.route('/entities/<id>/ancestor-organs', methods = ['GET'])
def get_ancestor_organs(id):
# Token is not required, but if an invalid token provided,
# we need to tell the client with a 401 error
validate_token_if_auth_header_exists(request)
# Use the internal token to query the target entity
# since public entities don't require user token
token = get_internal_token()
# Query target entity against uuid-api and neo4j and return as a dict if exists
entity_dict = query_target_entity(id, token)
normalized_entity_type = entity_dict['entity_type']
    # A bit of validation
supported_entity_types = ['Sample', 'Dataset']
if normalized_entity_type not in supported_entity_types:
bad_request_error(f"Unable to get the ancestor organs for this: {normalized_entity_type}, supported entity types: {COMMA_SEPARATOR.join(supported_entity_types)}")
if normalized_entity_type == 'Sample' and entity_dict['specimen_type'].lower() == 'organ':
bad_request_error("Unable to get the ancestor organ of an organ.")
if normalized_entity_type == 'Dataset':
# Only published/public datasets don't require token
if entity_dict['status'].lower() != DATASET_STATUS_PUBLISHED:
# Token is required and the user must belong to HuBMAP-READ group
token = get_user_token(request, non_public_access_required = True)
else:
# The `data_access_level` of Sample can only be either 'public' or 'consortium'
if entity_dict['data_access_level'] == ACCESS_LEVEL_CONSORTIUM:
token = get_user_token(request, non_public_access_required = True)
# By now, either the entity is public accessible or the user token has the correct access level
organs = app_neo4j_queries.get_ancestor_organs(neo4j_driver_instance, entity_dict['uuid'])
# Skip executing the trigger method to get Sample.direct_ancestor
properties_to_skip = ['direct_ancestor']
complete_entities_list = schema_manager.get_complete_entities_list(token, organs, properties_to_skip)
# Final result after normalization
final_result = schema_manager.normalize_entities_list_for_response(complete_entities_list)
return jsonify(final_result)
"""
Retrieve the metadata information of a given entity by id
The gateway treats this endpoint as public accessible
Result filtering is supported based on query string
For example: /entities/<id>?property=data_access_level
Parameters
----------
id : str
The HuBMAP ID (e.g. HBM123.ABCD.456) or UUID of target entity
Returns
-------
json
All the properties or filtered property of the target entity
"""
@app.route('/entities/<id>', methods = ['GET'])
def get_entity_by_id(id):
# Token is not required, but if an invalid token provided,
# we need to tell the client with a 401 error
validate_token_if_auth_header_exists(request)
# Use the internal token to query the target entity
# since public entities don't require user token
token = get_internal_token()
# Query target entity against uuid-api and neo4j and return as a dict if exists
entity_dict = query_target_entity(id, token)
normalized_entity_type = entity_dict['entity_type']
# Handle Collection retrieval using a different endpoint
if normalized_entity_type == 'Collection':
bad_request_error("Please use another API endpoint `/collections/<id>` to query a collection")
if normalized_entity_type == 'Dataset':
# Only published/public datasets don't require token
if entity_dict['status'].lower() != DATASET_STATUS_PUBLISHED:
# Token is required and the user must belong to HuBMAP-READ group
token = get_user_token(request, non_public_access_required = True)
elif normalized_entity_type == 'Upload':
# Upload doesn't have 'data_access_level' property
# Always require at least consortium group token for accessing Upload
token = get_user_token(request, non_public_access_required = True)
else:
# The `data_access_level` of Donor/Sample can only be either 'public' or 'consortium'
if entity_dict['data_access_level'] == ACCESS_LEVEL_CONSORTIUM:
token = get_user_token(request, non_public_access_required = True)
# By now, either the entity is public accessible or the user token has the correct access level
# We'll need to return all the properties including those
# generated by `on_read_trigger` to have a complete result
# E.g., the 'next_revision_uuid' and 'previous_revision_uuid' being used below
# On entity retrieval, the 'on_read_trigger' doesn't really need a token
complete_dict = schema_manager.get_complete_entity_result(token, entity_dict)
# Additional handlings on dataset revisions
# The rule is that a revision can only be made against a published dataset,
# so it should never occur that a consortium level revision is between two published revisions
# However, the very last dataset revision can be non-published
if normalized_entity_type == 'Dataset':
        # The `next_revision_uuid` is only available in complete_dict because it's generated by the 'on_read_trigger'
property_to_pop = 'next_revision_uuid'
# When the dataset is published but:
# - The nexus token is valid but the user doesn't belong to HuBMAP-READ group
# - Or the token is valid but doesn't contain group information (auth token or transfer token)
# We need to remove the `next_revision_uuid` from response
# Otherwise, we should send back the `next_revision_uuid` (if exists) when the member belongs to HuBMAP-Read group
if entity_dict['status'].lower() == DATASET_STATUS_PUBLISHED:
if not user_in_hubmap_read_group(request):
if property_to_pop in complete_dict:
revision_entity_dict = query_target_entity(complete_dict[property_to_pop], token)
# Remove the property from the resulting complete_dict
# if the revision is not published
if revision_entity_dict['status'].lower() != DATASET_STATUS_PUBLISHED:
complete_dict.pop(property_to_pop)
# Non-published dataset can NOT have next revisions
else:
if property_to_pop in complete_dict:
# Remove the `next_revision_uuid` from response if it ever exists
complete_dict.pop(property_to_pop)
# Also normalize the result based on schema
final_result = schema_manager.normalize_entity_result_for_response(complete_dict)
# Result filtering based on query string
# The `data_access_level` property is available in all entities Donor/Sample/Dataset
# and this filter is being used by gateway to check the data_access_level for file assets
# The `status` property is only available in Dataset and being used by search-api for revision
result_filtering_accepted_property_keys = ['data_access_level', 'status']
if bool(request.args):
property_key = request.args.get('property')
if property_key is not None:
# Validate the target property
if property_key not in result_filtering_accepted_property_keys:
bad_request_error(f"Only the following property keys are supported in the query string: {COMMA_SEPARATOR.join(result_filtering_accepted_property_keys)}")
if property_key == 'status' and normalized_entity_type != 'Dataset':
bad_request_error(f"Only Dataset supports 'status' property key in the query string")
# Response with the property value directly
# Don't use jsonify() on string value
return complete_dict[property_key]
else:
bad_request_error("The specified query string is not supported. Use '?property=<key>' to filter the result")
else:
# Response with the dict
return jsonify(final_result)
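# Hedged usage sketch (illustration only, not used by the service): querying the
# endpoint above with the `property` filter described in its docstring. The base
# URL, HuBMAP ID and token below are hypothetical placeholders.
def _example_get_data_access_level(base_url = 'https://entity-api.example.org',
                                   hubmap_id = 'HBM123.ABCD.456',
                                   token = None):
    headers = {'Authorization': f'Bearer {token}'} if token else {}
    response = requests.get(f'{base_url}/entities/{hubmap_id}',
                            params = {'property': 'data_access_level'},
                            headers = headers)
    response.raise_for_status()
    # With ?property=data_access_level the endpoint returns the bare value
    # rather than a JSON object (see the handler above), so read the text body.
    return response.text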
"""
Retrieve the full tree above the referenced entity and build the provenance document
The gateway treats this endpoint as public accessible
Parameters
----------
id : str
The HuBMAP ID (e.g. HBM123.ABCD.456) or UUID of target entity
Returns
-------
json
All the provenance details associated with this entity
"""
@app.route('/entities/<id>/provenance', methods = ['GET'])
def get_entity_provenance(id):
# Token is not required, but if an invalid token provided,
# we need to tell the client with a 401 error
validate_token_if_auth_header_exists(request)
# Use the internal token to query the target entity
# since public entities don't require user token
token = get_internal_token()
# Query target entity against uuid-api and neo4j and return as a dict if exists
entity_dict = query_target_entity(id, token)
uuid = entity_dict['uuid']
normalized_entity_type = entity_dict['entity_type']
    # A bit of validation to prevent Lab or Collection from being queried
supported_entity_types = ['Donor', 'Sample', 'Dataset']
if normalized_entity_type not in supported_entity_types:
bad_request_error(f"Unable to get the provenance for this {normalized_entity_type}, supported entity types: {COMMA_SEPARATOR.join(supported_entity_types)}")
if normalized_entity_type == 'Dataset':
# Only published/public datasets don't require token
if entity_dict['status'].lower() != DATASET_STATUS_PUBLISHED:
# Token is required and the user must belong to HuBMAP-READ group
token = get_user_token(request, non_public_access_required = True)
else:
# The `data_access_level` of Donor/Sample can only be either 'public' or 'consortium'
if entity_dict['data_access_level'] == ACCESS_LEVEL_CONSORTIUM:
token = get_user_token(request, non_public_access_required = True)
# By now, either the entity is public accessible or the user token has the correct access level
# Will just proceed to get the provenance information
# Get the `depth` from query string if present and it's used by neo4j query
# to set the maximum number of hops in the traversal
depth = None
if 'depth' in request.args:
depth = int(request.args.get('depth'))
# Convert neo4j json to dict
neo4j_result = app_neo4j_queries.get_provenance(neo4j_driver_instance, uuid, depth)
raw_provenance_dict = dict(neo4j_result['json'])
# Normalize the raw provenance nodes based on the yaml schema
normalized_provenance_dict = {
'relationships': raw_provenance_dict['relationships'],
'nodes': []
}
for node_dict in raw_provenance_dict['nodes']:
# The schema yaml doesn't handle Lab nodes, just leave it as is
if (node_dict['label'] == 'Entity') and (node_dict['entity_type'] != 'Lab'):
# Generate trigger data
# Skip some of the properties that are time-consuming to generate via triggers:
# direct_ancestor for Sample, and direct_ancestors for Dataset
# Also skip next_revision_uuid and previous_revision_uuid for Dataset to avoid additional
# checks when the target Dataset is public but the revisions are not public
properties_to_skip = [
'direct_ancestors',
'direct_ancestor',
'next_revision_uuid',
'previous_revision_uuid'
]
# We'll need to return all the properties (except the ones to skip from above list)
# including those generated by `on_read_trigger` to have a complete result
# The 'on_read_trigger' doesn't really need a token
complete_entity_dict = schema_manager.get_complete_entity_result(token, node_dict, properties_to_skip)
# Filter out properties not defined or not to be exposed in the schema yaml
normalized_entity_dict = schema_manager.normalize_entity_result_for_response(complete_entity_dict)
# Now the node to be used by provenance is all regulated by the schema
normalized_provenance_dict['nodes'].append(normalized_entity_dict)
elif node_dict['label'] == 'Activity':
# Normalize Activity nodes too
normalized_activity_dict = schema_manager.normalize_activity_result_for_response(node_dict)
normalized_provenance_dict['nodes'].append(normalized_activity_dict)
else:
# Entity Lab nodes (not handled by the schema) are appended as-is, without normalization
normalized_provenance_dict['nodes'].append(node_dict)
provenance_json = provenance.get_provenance_history(uuid, normalized_provenance_dict)
# Response with the provenance details
return Response(response = provenance_json, mimetype = "application/json")
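# Illustrative client sketch for the provenance endpoint above (comments only,
# not executed by this service). `base_url` and `globus_token` are placeholder
# assumptions; a token is only needed for non-public entities.
#
#   import requests
#
#   resp = requests.get(f"{base_url}/entities/HBM123.ABCD.456/provenance",
#                       params = {'depth': 2},
#                       headers = {'Authorization': f"Bearer {globus_token}"})
#   provenance_doc = resp.json()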
"""
Show all the supported entity types
The gateway treats this endpoint as public accessible
Returns
-------
json
A list of all the available entity types defined in the schema yaml
"""
@app.route('/entity-types', methods = ['GET'])
def get_entity_types():
# Token is not required, but if an invalid token provided,
# we need to tell the client with a 401 error
validate_token_if_auth_header_exists(request)
return jsonify(schema_manager.get_all_entity_types())
"""
Retrieve all the entity nodes for a given entity type
Result filtering is supported based on query string
For example: /<entity_type>/entities?property=uuid
Parameters
----------
entity_type : str
One of the supported entity types: Dataset, Sample, Donor
Collections are handled via the separate `/collections` API endpoint
Returns
-------
json
All the entity nodes in a list of the target entity type
"""
@app.route('/<entity_type>/entities', methods = ['GET'])
def get_entities_by_type(entity_type):
final_result = []
# Normalize user provided entity_type
normalized_entity_type = schema_manager.normalize_entity_type(entity_type)
# Validate the normalized_entity_type to ensure it's one of the accepted types
try:
schema_manager.validate_normalized_entity_type(normalized_entity_type)
except schema_errors.InvalidNormalizedEntityTypeException as e:
bad_request_error("Invalid entity type provided: " + entity_type)
# Handle Collections retrieval using a different endpoint
if normalized_entity_type == 'Collection':
bad_request_error("Please use another API endpoint `/collections` to query collections")
# Result filtering based on query string
if bool(request.args):
property_key = request.args.get('property')
if property_key is not None:
result_filtering_accepted_property_keys = ['uuid']
# Validate the target property
if property_key not in result_filtering_accepted_property_keys:
bad_request_error(f"Only the following property keys are supported in the query string: {COMMA_SEPARATOR.join(result_filtering_accepted_property_keys)}")
# Only return a list of the filtered property value of each entity
property_list = app_neo4j_queries.get_entities_by_type(neo4j_driver_instance, normalized_entity_type, property_key)
# Final result
final_result = property_list
else:
bad_request_error("The specified query string is not supported. Use '?property=<key>' to filter the result")
# Return all the details if no property filtering
else:
# Get user token from Authorization header
# Currently the Gateway requires a token for this endpoint
user_token = get_user_token(request)
# Get back a list of entity dicts for the given entity type
entities_list = app_neo4j_queries.get_entities_by_type(neo4j_driver_instance, normalized_entity_type)
# Generate trigger data and merge into a big dict
# Skip some of the properties that are time-consuming to generate via triggers
# direct_ancestor for Sample, direct_ancestors/collections/upload for Dataset,
# datasets for Upload
properties_to_skip = [
'direct_ancestor',
'direct_ancestors',
'collections',
'upload',
'datasets'
]
complete_entities_list = schema_manager.get_complete_entities_list(user_token, entities_list, properties_to_skip)
# Final result after normalization
final_result = schema_manager.normalize_entities_list_for_response(complete_entities_list)
# Response with the final result
return jsonify(final_result)
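# Illustrative client sketch for listing entities by type (comments only, not
# executed by this service). `base_url` and `globus_token` are placeholder
# assumptions; the second call shows the '?property=uuid' filtering.
#
#   import requests
#
#   headers = {'Authorization': f"Bearer {globus_token}"}
#   datasets = requests.get(f"{base_url}/Dataset/entities", headers = headers).json()
#   dataset_uuids = requests.get(f"{base_url}/Dataset/entities",
#                                params = {'property': 'uuid'},
#                                headers = headers).json()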
"""
Retrieve the collection detail by id
The gateway treats this endpoint as public accessible
An optional Globus nexus token can be provided in a standard Authorization Bearer header. If a valid token
is provided with group membership in the HuBMAP-Read group, any collection matching the id will be returned.
Otherwise, if no token is provided, or a valid token without HuBMAP-Read group membership is provided,
only a public collection will be returned. Public collections are defined as being published via a DOI
(collection.registered_doi not null) with at least one of the connected datasets being public
(dataset.status == 'Published'). For public collections only the connected datasets that are
public are returned with it.
Parameters
----------
id : str
The HuBMAP ID (e.g. HBM123.ABCD.456) or UUID of target collection
Returns
-------
json
The collection detail with a list of connected datasets (only public datasets
if user doesn't have the right access permission)
"""
@app.route('/collections/<id>', methods = ['GET'])
def get_collection(id):
# Token is not required, but if an invalid token provided,
# we need to tell the client with a 401 error
validate_token_if_auth_header_exists(request)
# Use the internal token to query the target collection
# since public collections don't require user token
token = get_internal_token()
# Query target entity against uuid-api and neo4j and return as a dict if exists
collection_dict = query_target_entity(id, token)
# A bit of validation
if collection_dict['entity_type'] != 'Collection':
bad_request_error("Target entity of the given id is not a collection")
# Try to get user token from Authorization header
# It's highly possible that there's no token provided
user_token = get_user_token(request)
# The user_token is flask.Response on error
# Without token, the user can only access public collections, modify the collection result
# by only returning public datasets attached to this collection
if isinstance(user_token, Response):
# When the requested collection is not public, send back 401
if ('registered_doi' not in collection_dict) or ('doi_url' not in collection_dict):
# Require a valid token in this case
unauthorized_error("The reqeusted collection is not public, a Globus token with the right access permission is required.")
# Otherwise only return the public datasets attached to this collection
# for Collection.datasets property
complete_dict = get_complete_public_collection_dict(collection_dict)
else:
# When the nexus token is valid, but the user doesn't belong to HuBMAP-READ group
# Or the token is valid but doesn't contain group information (auth token or transfer token)
# Only return the public datasets attached to this Collection
if not user_in_hubmap_read_group(request):
complete_dict = get_complete_public_collection_dict(collection_dict)
else:
# We'll need to return all the properties including those
# generated by `on_read_trigger` to have a complete result
complete_dict = schema_manager.get_complete_entity_result(user_token, collection_dict)
# Will also filter the result based on schema
normalized_complete_dict = schema_manager.normalize_entity_result_for_response(complete_dict)
# Response with the final result
return jsonify(normalized_complete_dict)
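# Illustrative client sketch for retrieving a collection (comments only, not
# executed by this service). `base_url` and `globus_token` are placeholder
# assumptions; without a token only a public collection is returned.
#
#   import requests
#
#   # Public access, no Authorization header
#   public_collection = requests.get(f"{base_url}/collections/HBM123.ABCD.456").json()
#   # With a HuBMAP-Read token the full set of attached datasets is returned
#   full_collection = requests.get(f"{base_url}/collections/HBM123.ABCD.456",
#                                  headers = {'Authorization': f"Bearer {globus_token}"}).json()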
"""
Retrieve all the public collections
The gateway treats this endpoint as public accessible
Result filtering is supported based on query string
For example: /collections?property=uuid
Only return public collections, for either
- a valid token in HuBMAP-Read group,
- a valid token with no HuBMAP-Read group or
- no token at all
Public collections are defined as being published via a DOI
(collection.registered_doi is not null) and at least one of the connected datasets is published
(dataset.status == 'Published'). For public collections only connected datasets that are
published are returned with it.
Returns
-------
json
A list of all the public collection dictionaries (with attached public datasets)
"""
@app.route('/collections', methods = ['GET'])
def get_collections():
final_result = []
# Token is not required, but if an invalid token provided,
# we need to tell the client with a 401 error
validate_token_if_auth_header_exists(request)
normalized_entity_type = 'Collection'
# Result filtering based on query string
if bool(request.args):
property_key = request.args.get('property')
if property_key is not None:
result_filtering_accepted_property_keys = ['uuid']
# Validate the target property
if property_key not in result_filtering_accepted_property_keys:
bad_request_error(f"Only the following property keys are supported in the query string: {COMMA_SEPARATOR.join(result_filtering_accepted_property_keys)}")
# Only return a list of the filtered property value of each public collection
final_result = app_neo4j_queries.get_public_collections(neo4j_driver_instance, property_key)
else:
bad_request_error("The specified query string is not supported. Use '?property=<key>' to filter the result")
# Return all the details if no property filtering
else:
# Use the internal token since no user token is required to access public collections
token = get_internal_token()
# Get back a list of public collections dicts
collections_list = app_neo4j_queries.get_public_collections(neo4j_driver_instance)
# Modify the Collection.datasets property for each collection dict
# to contain only public datasets
for collection_dict in collections_list:
# Only return the public datasets attached to this collection for Collection.datasets property
collection_dict = get_complete_public_collection_dict(collection_dict)
# Generate trigger data and merge into a big dict
# and skip some of the properties that are time-consuming to generate via triggers
properties_to_skip = ['datasets']
complete_collections_list = schema_manager.get_complete_entities_list(token, collections_list, properties_to_skip)
# Final result after normalization
final_result = schema_manager.normalize_entities_list_for_response(complete_collections_list)
# Response with the final result
return jsonify(final_result)
"""
Create an entity of the target type in neo4j
Parameters
----------
entity_type : str
One of the target entity types (case-insensitive since will be normalized): Dataset, Donor, Sample, Upload
Returns
-------
json
All the properties of the newly created entity
"""
@app.route('/entities/<entity_type>', methods = ['POST'])
def create_entity(entity_type):
# Get user token from Authorization header
user_token = get_user_token(request)
# Normalize user provided entity_type
normalized_entity_type = schema_manager.normalize_entity_type(entity_type)
# Validate the normalized_entity_type to make sure it's one of the accepted types
try:
schema_manager.validate_normalized_entity_type(normalized_entity_type)
except schema_errors.InvalidNormalizedEntityTypeException as e:
bad_request_error(f"Invalid entity type provided: {entity_type}")
# Execute entity level validator defined in schema yaml before entity creation
# Currently only Dataset and Upload creation require an application header
try:
schema_manager.execute_entity_level_validator('before_entity_create_validator', normalized_entity_type, request)
except schema_errors.MissingApplicationHeaderException as e:
bad_request_error(e)
except schema_errors.InvalidApplicationHeaderException as e:
bad_request_error(e)
# Always expect a json body
require_json(request)
# Parse incoming json string into json data(python dict object)
json_data_dict = request.get_json()
# Validate request json against the yaml schema
try:
schema_manager.validate_json_data_against_schema(json_data_dict, normalized_entity_type)
except schema_errors.SchemaValidationException as e:
# No need to log the validation errors
bad_request_error(str(e))
# Execute property level validators defined in schema yaml before entity property creation
# Use empty dict {} to indicate there's no existing_data_dict
try:
schema_manager.execute_property_level_validators('before_property_create_validators', normalized_entity_type, request, {}, json_data_dict)
# Currently only ValueError
except ValueError as e:
bad_request_error(e)
# Sample and Dataset: additional validation, create entity, after_create_trigger
# Donor and Upload: create entity
if normalized_entity_type == 'Sample':
# A bit more validation to ensure that if the `organ` code is set, the `specimen_type` must be set to "organ"
# Vice versa, if `specimen_type` is set to "organ", an `organ` code is required
if ('specimen_type' in json_data_dict) and (json_data_dict['specimen_type'].lower() == 'organ'):
if ('organ' not in json_data_dict) or (json_data_dict['organ'].strip() == ''):
bad_request_error("A valid organ code is required when the specimen_type is organ")
else:
if 'organ' in json_data_dict:
bad_request_error("The specimen_type must be organ when an organ code is provided")
# A bit more validation for new sample to be linked to existing source entity
direct_ancestor_uuid = json_data_dict['direct_ancestor_uuid']
# Check existence of the direct ancestor (either another Sample or Donor)
direct_ancestor_dict = query_target_entity(direct_ancestor_uuid, user_token)
# Creating the ids requires an organ code to be specified for the samples to be created when the
# sample's direct ancestor is a Donor.
# Must be one of the codes from: https://github.com/hubmapconsortium/search-api/blob/test-release/src/search-schema/data/definitions/enums/organ_types.yaml
if direct_ancestor_dict['entity_type'] == 'Donor':
# `specimen_type` is required on create
if json_data_dict['specimen_type'].lower() != 'organ':
bad_request_error("The specimen_type must be organ since the direct ancestor is a Donor")
# Currently we don't validate the provided organ code though
if ('organ' not in json_data_dict) or (json_data_dict['organ'].strip() == ''):
bad_request_error("A valid organ code is required when the direct ancestor is a Donor")
# Generate 'before_create_trigger' data and create the entity details in Neo4j
merged_dict = create_entity_details(request, normalized_entity_type, user_token, json_data_dict)
elif normalized_entity_type == 'Dataset':
# `direct_ancestor_uuids` is required for creating new Dataset
# Check existence of those direct ancestors
for direct_ancestor_uuid in json_data_dict['direct_ancestor_uuids']:
direct_ancestor_dict = query_target_entity(direct_ancestor_uuid, user_token)
# Also check existence of the previous revision dataset if specified
if 'previous_revision_uuid' in json_data_dict:
previous_version_dict = query_target_entity(json_data_dict['previous_revision_uuid'], user_token)
# Make sure the previous version entity is either a Dataset or Sample
if previous_version_dict['entity_type'] not in ['Dataset', 'Sample']:
bad_request_error(f"The previous_revision_uuid specified for this dataset must be either a Dataset or Sample")
# Also need to validate whether the given 'previous_revision_uuid' already has
# an existing next revision
# Only return a list of the uuids, no need to get back the list of dicts
next_revisions_list = app_neo4j_queries.get_next_revisions(neo4j_driver_instance, previous_version_dict['uuid'], 'uuid')
# As long as the list is not empty, tell the users to use a different 'previous_revision_uuid'
if next_revisions_list:
bad_request_error(f"The previous_revision_uuid specified for this dataset has already had a next revision")
# Only published datasets can have revisions made of them. Verify that the status of the Dataset specified
# by previous_revision_uuid is published; otherwise, return a bad request error.
if previous_version_dict['status'].lower() != DATASET_STATUS_PUBLISHED:
bad_request_error(f"The previous_revision_uuid specified for this dataset must be 'Published' in order to create a new revision from it")
# Generate 'before_create_trigger' data and create the entity details in Neo4j
merged_dict = create_entity_details(request, normalized_entity_type, user_token, json_data_dict)
else:
# Generate 'before_create_trigger' data and create the entity details in Neo4j
merged_dict = create_entity_details(request, normalized_entity_type, user_token, json_data_dict)
# For Donor: link to parent Lab node
# For Sample: link to existing direct ancestor
# For Dataset: link to direct ancestors
# For Upload: link to parent Lab node
after_create(normalized_entity_type, user_token, merged_dict)
# We'll need to return all the properties including those
# generated by `on_read_trigger` to have a complete result
complete_dict = schema_manager.get_complete_entity_result(user_token, merged_dict)
# Will also filter the result based on schema
normalized_complete_dict = schema_manager.normalize_entity_result_for_response(complete_dict)
# Also index the new entity node in elasticsearch via search-api
reindex_entity(complete_dict['uuid'], user_token)
return jsonify(normalized_complete_dict)
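# Illustrative client sketch for creating a Sample whose direct ancestor is a
# Donor, matching the validation above (comments only, not executed by this
# service). `base_url`, `globus_token` and all field values are placeholder
# assumptions; note the organ code itself is not validated by this endpoint.
#
#   import requests
#
#   payload = {
#       'specimen_type': 'organ',
#       'organ': 'HT',                            # example organ code
#       'direct_ancestor_uuid': '<donor-uuid>'    # uuid of an existing Donor
#   }
#   resp = requests.post(f"{base_url}/entities/sample", json = payload,
#                        headers = {'Authorization': f"Bearer {globus_token}"})
#   new_sample = resp.json()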
"""
Create multiple samples from the same source entity
Parameters
----------
count : str
The number of samples to be created
Returns
-------
json
A list of the generated id dicts for the newly created samples
"""
@app.route('/entities/multiple-samples/<count>', methods = ['POST'])
def create_multiple_samples(count):
# Get user token from Authorization header
user_token = get_user_token(request)
# Normalize user provided entity_type
normalized_entity_type = 'Sample'
# Always expect a json body
require_json(request)
# Parse incoming json string into json data(python dict object)
json_data_dict = request.get_json()
# Validate request json against the yaml schema
try:
schema_manager.validate_json_data_against_schema(json_data_dict, normalized_entity_type)
except schema_errors.SchemaValidationException as e:
# No need to log the validation errors
bad_request_error(str(e))
# `direct_ancestor_uuid` is required on create
# Check existence of the direct ancestor (either another Sample or Donor)
direct_ancestor_dict = query_target_entity(json_data_dict['direct_ancestor_uuid'], user_token)
# Creating the ids requires an organ code to be specified for the samples to be created when the
# sample's direct ancestor is a Donor.
# Must be one of the codes from: https://github.com/hubmapconsortium/search-api/blob/test-release/src/search-schema/data/definitions/enums/organ_types.yaml
if direct_ancestor_dict['entity_type'] == 'Donor':
# `specimen_type` is required on create
if json_data_dict['specimen_type'].lower() != 'organ':
bad_request_error("The specimen_type must be organ since the direct ancestor is a Donor")
# Currently we don't validate the provided organ code though
if ('organ' not in json_data_dict) or (not json_data_dict['organ']):
bad_request_error("A valid organ code is required since the direct ancestor is a Donor")
# Generate 'before_create_trigger' data and create the entity details in Neo4j
generated_ids_dict_list = create_multiple_samples_details(request, normalized_entity_type, user_token, json_data_dict, count)
# Also index each new Sample node in elasticsearch via search-api
for id_dict in generated_ids_dict_list:
reindex_entity(id_dict['uuid'], user_token)
return jsonify(generated_ids_dict_list)
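# Illustrative client sketch for creating multiple samples from the same
# source entity (comments only, not executed by this service). `base_url`,
# `globus_token` and the field values are placeholder assumptions; the
# response is a list of generated id dicts, one per new sample.
#
#   import requests
#
#   payload = {
#       'specimen_type': 'organ',
#       'organ': 'HT',                            # example organ code
#       'direct_ancestor_uuid': '<donor-uuid>'    # uuid of an existing Donor
#   }
#   resp = requests.post(f"{base_url}/entities/multiple-samples/5", json = payload,
#                        headers = {'Authorization': f"Bearer {globus_token}"})
#   generated_ids = resp.json()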
"""
Update the properties of a given entity, excluding any Collection-specific handling
The target entity type (Dataset, Collection, Sample, Donor, or Upload) is determined from the stored entity
Parameters
----------
id : str
The HuBMAP ID (e.g. HBM123.ABCD.456) or UUID of target entity
Returns
-------
json
All the updated properties of the target entity
"""
@app.route('/entities/<id>', methods = ['PUT'])
def update_entity(id):
# Get user token from Authorization header
user_token = get_user_token(request)
# Always expect a json body
require_json(request)
# Parse incoming json string into json data(python dict object)
json_data_dict = request.get_json()
# Get target entity and return as a dict if exists
entity_dict = query_target_entity(id, user_token)
# Normalize user provided entity_type
normalized_entity_type = schema_manager.normalize_entity_type(entity_dict['entity_type'])
# Note, we don't support entity level validators on entity update via PUT
# Only entity create via POST is supported at the entity level
# Validate request json against the yaml schema
# Pass in the entity_dict for missing required key check, this is different from creating new entity
try:
schema_manager.validate_json_data_against_schema(json_data_dict, normalized_entity_type, existing_entity_dict = entity_dict)
except schema_errors.SchemaValidationException as e:
# No need to log the validation errors
bad_request_error(str(e))
# Execute property level validators defined in schema yaml before entity property update
try:
schema_manager.execute_property_level_validators('before_property_update_validators', normalized_entity_type, request, entity_dict, json_data_dict)
except (schema_errors.MissingApplicationHeaderException,
schema_errors.InvalidApplicationHeaderException,
KeyError,
ValueError) as e:
bad_request_error(e)
# Sample, Dataset, and Upload: additional validation, update entity, after_update_trigger
# Collection and Donor: update entity
if normalized_entity_type == 'Sample':
# A bit more validation for updating the sample and the linkage to existing source entity
has_direct_ancestor_uuid = False
if ('direct_ancestor_uuid' in json_data_dict) and json_data_dict['direct_ancestor_uuid']:
has_direct_ancestor_uuid = True
direct_ancestor_uuid = json_data_dict['direct_ancestor_uuid']
# Check existence of the source entity
direct_ancestor_dict = query_target_entity(direct_ancestor_uuid, user_token)
# Also make sure it's either another Sample or a Donor
if direct_ancestor_dict['entity_type'] not in ['Donor', 'Sample']:
bad_request_error(f"The uuid: {direct_ancestor_uuid} is not a Donor neither a Sample, cannot be used as the direct ancestor of this Sample")
# Generate 'before_update_trigger' data and update the entity details in Neo4j
merged_updated_dict = update_entity_details(request, normalized_entity_type, user_token, json_data_dict, entity_dict)
# Handle linkages update via `after_update_trigger` methods
if has_direct_ancestor_uuid:
after_update(normalized_entity_type, user_token, merged_updated_dict)
elif normalized_entity_type == 'Dataset':
# A bit more validation if `direct_ancestor_uuids` provided
has_direct_ancestor_uuids = False
if ('direct_ancestor_uuids' in json_data_dict) and (json_data_dict['direct_ancestor_uuids']):
has_direct_ancestor_uuids = True
# Check existence of those source entities
for direct_ancestor_uuid in json_data_dict['direct_ancestor_uuids']:
direct_ancestor_dict = query_target_entity(direct_ancestor_uuid, user_token)
# Generate 'before_update_trigger' data and update the entity details in Neo4j
merged_updated_dict = update_entity_details(request, normalized_entity_type, user_token, json_data_dict, entity_dict)
# Handle linkages update via `after_update_trigger` methods
if has_direct_ancestor_uuids:
after_update(normalized_entity_type, user_token, merged_updated_dict)
elif normalized_entity_type == 'Upload':
has_dataset_uuids_to_link = False
if ('dataset_uuids_to_link' in json_data_dict) and (json_data_dict['dataset_uuids_to_link']):
has_dataset_uuids_to_link = True
# Check existence of those datasets to be linked
# If one of the datasets to be linked appears to be already linked,
# neo4j query won't create the new linkage due to the use of `MERGE`
for dataset_uuid in json_data_dict['dataset_uuids_to_link']:
dataset_dict = query_target_entity(dataset_uuid, user_token)
# Also make sure it's a Dataset
if dataset_dict['entity_type'] != 'Dataset':
bad_request_error(f"The uuid: {dataset_uuid} is not a Dataset, cannot be linked to this Upload")
has_dataset_uuids_to_unlink = False
if ('dataset_uuids_to_unlink' in json_data_dict) and (json_data_dict['dataset_uuids_to_unlink']):
has_dataset_uuids_to_unlink = True
# Check existence of those datasets to be unlinked
# If one of the datasets to be unlinked appears to be not linked at all,
# the neo4j cypher will simply skip it because it won't match the "MATCH" clause
# So no need to tell the end users that this dataset is not linked
# Let alone checking the entity type to ensure it's a Dataset
for dataset_uuid in json_data_dict['dataset_uuids_to_unlink']:
dataset_dict = query_target_entity(dataset_uuid, user_token)
# Generate 'before_update_trigger' data and update the entity details in Neo4j
merged_updated_dict = update_entity_details(request, normalized_entity_type, user_token, json_data_dict, entity_dict)
# Handle linkages update via `after_update_trigger` methods
if has_dataset_uuids_to_link or has_dataset_uuids_to_unlink:
after_update(normalized_entity_type, user_token, merged_updated_dict)
else:
# Generate 'before_update_trigger' data and update the entity details in Neo4j
merged_updated_dict = update_entity_details(request, normalized_entity_type, user_token, json_data_dict, entity_dict)
# We'll need to return all the properties including those
# generated by `on_read_trigger` to have a complete result
complete_dict = schema_manager.get_complete_entity_result(user_token, merged_updated_dict)
# Will also filter the result based on schema
normalized_complete_dict = schema_manager.normalize_entity_result_for_response(complete_dict)
# How to handle reindex collection?
# Also reindex the updated entity node in elasticsearch via search-api
reindex_entity(entity_dict['uuid'], user_token)
return jsonify(normalized_complete_dict)
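# Illustrative client sketch for a partial entity update via PUT (comments
# only, not executed by this service). `base_url`, `globus_token` and the
# 'description' field are placeholder assumptions; only the provided fields
# are validated and updated, and linkage fields additionally run the
# after_update triggers above.
#
#   import requests
#
#   payload = {'description': 'Updated description text'}
#   resp = requests.put(f"{base_url}/entities/HBM123.ABCD.456", json = payload,
#                       headers = {'Authorization': f"Bearer {globus_token}"})
#   updated_entity = resp.json()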
"""
Get all ancestors of the given entity
The gateway treats this endpoint as public accessible
Result filtering based on query string
For example: /ancestors/<id>?property=uuid
Parameters
----------
id : str
The HuBMAP ID (e.g. HBM123.ABCD.456) or UUID of given entity
Returns
-------
json
A list of all the ancestors of the target entity
"""
@app.route('/ancestors/<id>', methods = ['GET'])
def get_ancestors(id):
final_result = []
# Token is not required, but if an invalid token provided,
# we need to tell the client with a 401 error
validate_token_if_auth_header_exists(request)
# Use the internal token to query the target entity
# since public entities don't require user token
token = get_internal_token()
# Make sure the id exists in uuid-api and
# the corresponding entity also exists in neo4j
entity_dict = query_target_entity(id, token)
normalized_entity_type = entity_dict['entity_type']
uuid = entity_dict['uuid']
# Collection doesn't have ancestors via Activity nodes
if normalized_entity_type == 'Collection':
bad_request_error(f"Unsupported entity type of id {id}: {normalized_entity_type}")
if normalized_entity_type == 'Dataset':
# Only published/public datasets don't require token
if entity_dict['status'].lower() != DATASET_STATUS_PUBLISHED:
# Token is required and the user must belong to HuBMAP-READ group
token = get_user_token(request, non_public_access_required = True)
elif normalized_entity_type == 'Sample':
# The `data_access_level` of Sample can only be either 'public' or 'consortium'
if entity_dict['data_access_level'] == ACCESS_LEVEL_CONSORTIUM:
token = get_user_token(request, non_public_access_required = True)
else:
# Donor and Upload will always get back an empty list
# because their direct ancestor is Lab, which is being skipped by Neo4j query
# So no need to execute the code below
return jsonify(final_result)
# By now, either the entity is public accessible or the user token has the correct access level
# Result filtering based on query string
if bool(request.args):
property_key = request.args.get('property')
if property_key is not None:
result_filtering_accepted_property_keys = ['uuid']
# Validate the target property
if property_key not in result_filtering_accepted_property_keys:
bad_request_error(f"Only the following property keys are supported in the query string: {COMMA_SEPARATOR.join(result_filtering_accepted_property_keys)}")
# Only return a list of the filtered property value of each entity
property_list = app_neo4j_queries.get_ancestors(neo4j_driver_instance, uuid, property_key)
# Final result
final_result = property_list
else:
bad_request_error("The specified query string is not supported. Use '?property=<key>' to filter the result")
# Return all the details if no property filtering
else:
ancestors_list = app_neo4j_queries.get_ancestors(neo4j_driver_instance, uuid)
# Generate trigger data
# Skip some of the properties that are time-consuming to generate via triggers:
# direct_ancestor for Sample, direct_ancestors/collections/upload for Dataset
# Also skip next_revision_uuid and previous_revision_uuid for Dataset to avoid additional
# checks when the target Dataset is public but the revisions are not public
properties_to_skip = [
'direct_ancestor',
'direct_ancestors',
'collections',
'upload',
'next_revision_uuid',
'previous_revision_uuid'
]
complete_entities_list = schema_manager.get_complete_entities_list(token, ancestors_list, properties_to_skip)
# Final result after normalization
final_result = schema_manager.normalize_entities_list_for_response(complete_entities_list)
return jsonify(final_result)
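# Illustrative client sketch for the lineage endpoints (comments only, not
# executed by this service). `base_url` is a placeholder assumption; the same
# '?property=uuid' filter applies to /ancestors, /descendants, /parents and
# /children, though /descendants and /children always require a user token.
#
#   import requests
#
#   ancestors = requests.get(f"{base_url}/ancestors/HBM123.ABCD.456").json()
#   ancestor_uuids = requests.get(f"{base_url}/ancestors/HBM123.ABCD.456",
#                                 params = {'property': 'uuid'}).json()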
"""
Get all descendants of the given entity
Result filtering based on query string
For example: /descendants/<id>?property=uuid
Parameters
----------
id : str
The HuBMAP ID (e.g. HBM123.ABCD.456) or UUID of given entity
Returns
-------
json
A list of all the descendants of the target entity
"""
@app.route('/descendants/<id>', methods = ['GET'])
def get_descendants(id):
final_result = []
# Get user token from Authorization header
user_token = get_user_token(request)
# Make sure the id exists in uuid-api and
# the corresponding entity also exists in neo4j
entity_dict = query_target_entity(id, user_token)
uuid = entity_dict['uuid']
# Collection and Upload don't have descendants via Activity nodes
# No need to check, it'll always return empty list
# Result filtering based on query string
if bool(request.args):
property_key = request.args.get('property')
if property_key is not None:
result_filtering_accepted_property_keys = ['uuid']
# Validate the target property
if property_key not in result_filtering_accepted_property_keys:
bad_request_error(f"Only the following property keys are supported in the query string: {COMMA_SEPARATOR.join(result_filtering_accepted_property_keys)}")
# Only return a list of the filtered property value of each entity
property_list = app_neo4j_queries.get_descendants(neo4j_driver_instance, uuid, property_key)
# Final result
final_result = property_list
else:
bad_request_error("The specified query string is not supported. Use '?property=<key>' to filter the result")
# Return all the details if no property filtering
else:
descendants_list = app_neo4j_queries.get_descendants(neo4j_driver_instance, uuid)
# Generate trigger data and merge into a big dict
# and skip some of the properties that are time-consuming to generate via triggers
# direct_ancestor for Sample, and
# direct_ancestors/collections/upload/next_revision_uuid/previous_revision_uuid for Dataset
properties_to_skip = [
'direct_ancestor',
'direct_ancestors',
'collections',
'upload',
'next_revision_uuid',
'previous_revision_uuid'
]
complete_entities_list = schema_manager.get_complete_entities_list(user_token, descendants_list, properties_to_skip)
# Final result after normalization
final_result = schema_manager.normalize_entities_list_for_response(complete_entities_list)
return jsonify(final_result)
"""
Get all parents of the given entity
The gateway treats this endpoint as public accessible
Result filtering based on query string
For example: /parents/<id>?property=uuid
Parameters
----------
id : str
The HuBMAP ID (e.g. HBM123.ABCD.456) or UUID of given entity
Returns
-------
json
A list of all the parents of the target entity
"""
@app.route('/parents/<id>', methods = ['GET'])
def get_parents(id):
final_result = []
# Token is not required, but if an invalid token provided,
# we need to tell the client with a 401 error
validate_token_if_auth_header_exists(request)
# Use the internal token to query the target entity
# since public entities don't require user token
token = get_internal_token()
# Make sure the id exists in uuid-api and
# the corresponding entity also exists in neo4j
entity_dict = query_target_entity(id, token)
normalized_entity_type = entity_dict['entity_type']
uuid = entity_dict['uuid']
# Collection doesn't have ancestors via Activity nodes
if normalized_entity_type == 'Collection':
bad_request_error(f"Unsupported entity type of id {id}: {normalized_entity_type}")
if normalized_entity_type == 'Dataset':
# Only published/public datasets don't require token
if entity_dict['status'].lower() != DATASET_STATUS_PUBLISHED:
# Token is required and the user must belong to HuBMAP-READ group
token = get_user_token(request, non_public_access_required = True)
elif normalized_entity_type == 'Sample':
# The `data_access_level` of Sample can only be either 'public' or 'consortium'
if entity_dict['data_access_level'] == ACCESS_LEVEL_CONSORTIUM:
token = get_user_token(request, non_public_access_required = True)
else:
# Donor and Upload will always get back an empty list
# because their direct ancestor is Lab, which is being skipped by Neo4j query
# So no need to execute the code below
return jsonify(final_result)
# By now, either the entity is public accessible or the user token has the correct access level
# Result filtering based on query string
if bool(request.args):
property_key = request.args.get('property')
if property_key is not None:
result_filtering_accepted_property_keys = ['uuid']
# Validate the target property
if property_key not in result_filtering_accepted_property_keys:
bad_request_error(f"Only the following property keys are supported in the query string: {COMMA_SEPARATOR.join(result_filtering_accepted_property_keys)}")
# Only return a list of the filtered property value of each entity
property_list = app_neo4j_queries.get_parents(neo4j_driver_instance, uuid, property_key)
# Final result
final_result = property_list
else:
bad_request_error("The specified query string is not supported. Use '?property=<key>' to filter the result")
# Return all the details if no property filtering
else:
parents_list = app_neo4j_queries.get_parents(neo4j_driver_instance, uuid)
# Generate trigger data
# Skip some of the properties that are time-consuming to generate via triggers:
# direct_ancestor for Sample, direct_ancestors/collections/upload for Dataset
# Also skip next_revision_uuid and previous_revision_uuid for Dataset to avoid additional
# checks when the target Dataset is public but the revisions are not public
properties_to_skip = [
'direct_ancestor',
'direct_ancestors',
'collections',
'upload',
'next_revision_uuid',
'previous_revision_uuid'
]
complete_entities_list = schema_manager.get_complete_entities_list(token, parents_list, properties_to_skip)
# Final result after normalization
final_result = schema_manager.normalize_entities_list_for_response(complete_entities_list)
return jsonify(final_result)
"""
Get all children of the given entity
Result filtering based on query string
For example: /children/<id>?property=uuid
Parameters
----------
id : str
The HuBMAP ID (e.g. HBM123.ABCD.456) or UUID of given entity
Returns
-------
json
A list of all the children of the target entity
"""
@app.route('/children/<id>', methods = ['GET'])
def get_children(id):
final_result = []
# Get user token from Authorization header
user_token = get_user_token(request)
# Make sure the id exists in uuid-api and
# the corresponding entity also exists in neo4j
entity_dict = query_target_entity(id, user_token)
uuid = entity_dict['uuid']
# Collection and Upload don't have children via Activity nodes
# No need to check, it'll always return empty list
# Result filtering based on query string
if bool(request.args):
property_key = request.args.get('property')
if property_key is not None:
result_filtering_accepted_property_keys = ['uuid']
# Validate the target property
if property_key not in result_filtering_accepted_property_keys:
bad_request_error(f"Only the following property keys are supported in the query string: {COMMA_SEPARATOR.join(result_filtering_accepted_property_keys)}")
# Only return a list of the filtered property value of each entity
property_list = app_neo4j_queries.get_children(neo4j_driver_instance, uuid, property_key)
# Final result
final_result = property_list
else:
bad_request_error("The specified query string is not supported. Use '?property=<key>' to filter the result")
# Return all the details if no property filtering
else:
children_list = app_neo4j_queries.get_children(neo4j_driver_instance, uuid)
# Generate trigger data and merge into a big dict
# and skip some of the properties that are time-consuming to generate via triggers
# direct_ancestor for Sample, and
# direct_ancestors/collections/upload/next_revision_uuid/previous_revision_uuid for Dataset
properties_to_skip = [
'direct_ancestor',
'direct_ancestors',
'collections',
'upload',
'next_revision_uuid',
'previous_revision_uuid'
]
complete_entities_list = schema_manager.get_complete_entities_list(user_token, children_list, properties_to_skip)
# Final result after normalization
final_result = schema_manager.normalize_entities_list_for_response(complete_entities_list)
return jsonify(final_result)
"""
Get all previous revisions of the given entity
Result filtering based on query string
For example: /previous_revisions/<id>?property=uuid
Parameters
----------
id : str
The HuBMAP ID (e.g. HBM123.ABCD.456) or UUID of given entity
Returns
-------
json
A list of entities that are the previous revisions of the target entity
"""
@app.route('/previous_revisions/<id>', methods = ['GET'])
def get_previous_revisions(id):
# Get user token from Authorization header
user_token = get_user_token(request)
# Make sure the id exists in uuid-api and
# the corresponding entity also exists in neo4j
entity_dict = query_target_entity(id, user_token)
uuid = entity_dict['uuid']
# Result filtering based on query string
if bool(request.args):
property_key = request.args.get('property')
if property_key is not None:
result_filtering_accepted_property_keys = ['uuid']
# Validate the target property
if property_key not in result_filtering_accepted_property_keys:
bad_request_error(f"Only the following property keys are supported in the query string: {COMMA_SEPARATOR.join(result_filtering_accepted_property_keys)}")
# Only return a list of the filtered property value of each entity
property_list = app_neo4j_queries.get_previous_revisions(neo4j_driver_instance, uuid, property_key)
# Final result
final_result = property_list
else:
bad_request_error("The specified query string is not supported. Use '?property=<key>' to filter the result")
# Return all the details if no property filtering
else:
descendants_list = app_neo4j_queries.get_previous_revisions(neo4j_driver_instance, uuid)
# Generate trigger data and merge into a big dict
# and skip some of the properties that are time-consuming to generate via triggers
# datasets for Collection, direct_ancestor for Sample, and direct_ancestors for Dataset
properties_to_skip = ['collections', 'upload', 'direct_ancestors']
complete_entities_list = schema_manager.get_complete_entities_list(user_token, descendants_list, properties_to_skip)
# Final result after normalization
final_result = schema_manager.normalize_entities_list_for_response(complete_entities_list)
return jsonify(final_result)
"""
Get all next revisions of the given entity
Result filtering based on query string
For example: /next_revisions/<id>?property=uuid
Parameters
----------
id : str
The HuBMAP ID (e.g. HBM123.ABCD.456) or UUID of given entity
Returns
-------
json
A list of entities that are the next revisions of the target entity
"""
@app.route('/next_revisions/<id>', methods = ['GET'])
def get_next_revisions(id):
# Get user token from Authorization header
user_token = get_user_token(request)
# Make sure the id exists in uuid-api and
# the corresponding entity also exists in neo4j
entity_dict = query_target_entity(id, user_token)
uuid = entity_dict['uuid']
# Result filtering based on query string
if bool(request.args):
property_key = request.args.get('property')
if property_key is not None:
result_filtering_accepted_property_keys = ['uuid']
# Validate the target property
if property_key not in result_filtering_accepted_property_keys:
bad_request_error(f"Only the following property keys are supported in the query string: {COMMA_SEPARATOR.join(result_filtering_accepted_property_keys)}")
# Only return a list of the filtered property value of each entity
property_list = app_neo4j_queries.get_next_revisions(neo4j_driver_instance, uuid, property_key)
# Final result
final_result = property_list
else:
bad_request_error("The specified query string is not supported. Use '?property=<key>' to filter the result")
# Return all the details if no property filtering
else:
descendants_list = app_neo4j_queries.get_next_revisions(neo4j_driver_instance, uuid)
# Generate trigger data and merge into a big dict
# and skip some of the properties that are time-consuming to generate via triggers
# datasets for Collection, direct_ancestor for Sample, and direct_ancestors for Dataset
properties_to_skip = ['collections', 'upload', 'direct_ancestors']
complete_entities_list = schema_manager.get_complete_entities_list(user_token, descendants_list, properties_to_skip)
# Final result after normalization
final_result = schema_manager.normalize_entities_list_for_response(complete_entities_list)
return jsonify(final_result)
"""
Link the given list of datasets to the target collection
JSON request body example:
{
"dataset_uuids": [
"fb6757b606ac35be7fa85062fde9c2e1",
"81a9fa68b2b4ea3e5f7cb17554149473",
"3ac0768d61c6c84f0ec59d766e123e05",
"0576b972e074074b4c51a61c3d17a6e3"
]
}
Parameters
----------
collection_uuid : str
The UUID of target collection
Returns
-------
json
JSON string containing a success message with 200 status code
"""
@app.route('/collections/<collection_uuid>/add-datasets', methods = ['PUT'])
def add_datasets_to_collection(collection_uuid):
# Get user token from Authorization header
user_token = get_user_token(request)
# Query target entity against uuid-api and neo4j and return as a dict if exists
entity_dict = query_target_entity(collection_uuid, user_token)
if entity_dict['entity_type'] != 'Collection':
bad_request_error(f"The UUID provided in URL is not a Collection: {collection_uuid}")
# Always expect a json body
require_json(request)
# Parse incoming json string into json data (python dict object)
json_data_dict = request.get_json()
if 'dataset_uuids' not in json_data_dict:
bad_request_error("Missing 'dataset_uuids' key in the request JSON.")
if not json_data_dict['dataset_uuids']:
bad_request_error("JSON field 'dataset_uuids' can not be empty list.")
# Now we have a list of uuids
dataset_uuids_list = json_data_dict['dataset_uuids']
# Make sure all the given uuids are datasets
for dataset_uuid in dataset_uuids_list:
entity_dict = query_target_entity(dataset_uuid, user_token)
if entity_dict['entity_type'] != 'Dataset':
bad_request_error(f"The UUID provided in JSON is not a Dataset: {dataset_uuid}")
try:
app_neo4j_queries.add_datasets_to_collection(neo4j_driver_instance, collection_uuid, dataset_uuids_list)
except TransactionError:
msg = "Failed to create the linkage between the given datasets and the target collection"
# Log the full stack trace, prepend a line with our message
logger.exception(msg)
# Terminate and let the users know
internal_server_error(msg)
# Send response with success message
return jsonify(message = "Successfully added all the specified datasets to the target collection")
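# Illustrative client sketch for linking datasets to a collection (comments
# only, not executed by this service). `base_url` and `globus_token` are
# placeholder assumptions and the uuids are fabricated examples; the body
# shape matches the JSON documented above.
#
#   import requests
#
#   payload = {'dataset_uuids': ['<dataset-uuid-1>', '<dataset-uuid-2>']}
#   resp = requests.put(f"{base_url}/collections/<collection-uuid>/add-datasets",
#                       json = payload,
#                       headers = {'Authorization': f"Bearer {globus_token}"})
#   print(resp.json()['message'])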
"""
Redirect a request from a doi service for a dataset or collection
The gateway treats this endpoint as public accessible
Parameters
----------
id : str
The HuBMAP ID (e.g. HBM123.ABCD.456) or UUID of the target entity
"""
# To continue supporting the already published collection DOIs
@app.route('/collection/redirect/<id>', methods = ['GET'])
# New route
@app.route('/doi/redirect/<id>', methods = ['GET'])
def doi_redirect(id):
# Use the internal token to query the target entity
# since public entities don't require user token
token = get_internal_token()
# Query target entity against uuid-api and neo4j and return as a dict if exists
entity_dict = query_target_entity(id, token)
entity_type = entity_dict['entity_type']
# Only for Collection and Dataset
if entity_type not in ['Collection', 'Dataset']:
bad_request_error("The target entity of the specified id must be a Collection or Dataset")
uuid = entity_dict['uuid']
# URL template
redirect_url = app.config['DOI_REDIRECT_URL']
if (redirect_url.lower().find('<entity_type>') == -1) or (redirect_url.lower().find('<identifier>') == -1):
# Log the full stack trace, prepend a line with our message
msg = "Incorrect configuration value for 'DOI_REDIRECT_URL'"
logger.exception(msg)
internal_server_error(msg)
rep_entity_type_pattern = re.compile(re.escape('<entity_type>'), re.RegexFlag.IGNORECASE)
redirect_url = rep_entity_type_pattern.sub(entity_type.lower(), redirect_url)
rep_identifier_pattern = re.compile(re.escape('<identifier>'), re.RegexFlag.IGNORECASE)
redirect_url = rep_identifier_pattern.sub(uuid, redirect_url)
resp = Response("page has moved", 307)
resp.headers['Location'] = redirect_url
return resp
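# Illustrative client sketch for following the DOI redirect (comments only,
# not executed by this service). `base_url` is a placeholder assumption; the
# endpoint answers with a 307 and a Location header built from the configured
# DOI_REDIRECT_URL template.
#
#   import requests
#
#   resp = requests.get(f"{base_url}/doi/redirect/HBM123.ABCD.456",
#                       allow_redirects = False)
#   target_url = resp.headers['Location']   # resp.status_code == 307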
"""
Redirection method created for REFERENCE organ DOI redirection, but can be used for others if needed
The gateway treats this endpoint as public accessible
Parameters
----------
hmid : str
The HuBMAP ID (e.g. HBM123.ABCD.456)
"""
@app.route('/redirect/<hmid>', methods = ['GET'])
def redirect(hmid):
cid = hmid.upper().strip()
if cid in reference_redirects:
redir_url = reference_redirects[cid]
resp = Response("page has moved", 307)
resp.headers['Location'] = redir_url
return resp
else:
return Response(f"{hmid} not found.", 404)
"""
Get the Globus URL to the given Dataset or Upload
The gateway treats this endpoint as public accessible
It will provide a Globus URL to the dataset/upload directory in one of three Globus endpoints based on the access
level of the user (public, consortium or protected); public only, of course, if no token is provided.
If a dataset/upload isn't found a 404 will be returned. There is a chance that a 500 can be returned, but not
likely under normal circumstances, only for a misconfigured or otherwise failing endpoint.
If the Auth token is provided but is expired or invalid a 401 is returned. If access to the dataset/upload
is not allowed for the user (or lack of user) a 403 is returned.
Parameters
----------
id : str
The HuBMAP ID (e.g. HBM123.ABCD.456) or UUID of given entity
Returns
-------
Response
200 with the Globus Application URL to the directory of dataset/upload
404 Dataset/Upload not found
403 Access Forbidden
401 Unauthorized (bad or expired token)
500 Unexpected server or other error
"""
# The old routes for backward compatibility - will be deprecated eventually
@app.route('/entities/dataset/globus-url/<id>', methods = ['GET'])
@app.route('/dataset/globus-url/<id>', methods = ['GET'])
# New route
@app.route('/entities/<id>/globus-url', methods = ['GET'])
def get_globus_url(id):
# Token is not required, but if an invalid token provided,
# we need to tell the client with a 401 error
validate_token_if_auth_header_exists(request)
# Use the internal token to query the target entity
# since public entities don't require user token
token = get_internal_token()
# Query target entity against uuid-api and neo4j and return as a dict if exists
# Then retrieve the allowable data access level (public, protected or consortium)
# for the dataset and HuBMAP Component ID that the dataset belongs to
entity_dict = query_target_entity(id, token)
uuid = entity_dict['uuid']
normalized_entity_type = entity_dict['entity_type']
# Only for Dataset and Upload
if normalized_entity_type not in ['Dataset', 'Upload']:
bad_request_error("The target entity of the specified id is not a Dataset nor a Upload")
# Upload doesn't have this 'data_access_level' property, we treat it as 'protected'
# For Dataset, if no access level is present, default to protected too
if not 'data_access_level' in entity_dict or string_helper.isBlank(entity_dict['data_access_level']):
entity_data_access_level = ACCESS_LEVEL_PROTECTED
else:
entity_data_access_level = entity_dict['data_access_level']
# Get the globus groups info based on the groups json file in commons package
globus_groups_info = globus_groups.get_globus_groups_info()
groups_by_id_dict = globus_groups_info['by_id']
if not 'group_uuid' in entity_dict or string_helper.isBlank(entity_dict['group_uuid']):
msg = f"The 'group_uuid' property is not set for {normalized_entity_type} with uuid: {uuid}"
logger.exception(msg)
internal_server_error(msg)
group_uuid = entity_dict['group_uuid']
# Validate the group_uuid
try:
schema_manager.validate_entity_group_uuid(group_uuid)
except schema_errors.NoDataProviderGroupException:
msg = f"Invalid 'group_uuid': {group_uuid} for {normalized_entity_type} with uuid: {uuid}"
logger.exception(msg)
internal_server_error(msg)
group_name = groups_by_id_dict[group_uuid]['displayname']
try:
# Get user data_access_level based on token if provided
# If no Authorization header, default user_info['data_access_level'] == 'public'
# The user_info contains HIGHEST access level of the user based on the token
# This call raises an HTTPException with a 401 if any auth issues encountered
user_info = auth_helper_instance.getUserDataAccessLevel(request)
# If returns HTTPException with a 401, expired/invalid token
except HTTPException:
unauthorized_error("The provided token is invalid or expired")
# The user is in the Globus group with full access to this dataset,
# so they have protected level access to it
if ('hmgroupids' in user_info) and (group_uuid in user_info['hmgroupids']):
user_data_access_level = ACCESS_LEVEL_PROTECTED
else:
if not 'data_access_level' in user_info:
msg = f"Unexpected error, data access level could not be found for user trying to access {normalized_entity_type} id: {id}"
logger.exception(msg)
return internal_server_error(msg)
user_data_access_level = user_info['data_access_level'].lower()
# Construct the Globus URL based on the highest level of access that the user has
# and the level of access allowed for the dataset.
# The first "if" checks to see if the user is a member of the Consortium group
# that allows all access to this dataset; if so, send them to the "protected"
# endpoint even if the user doesn't have full access to all protected data
globus_server_uuid = None
dir_path = ''
# Note: `entity_data_access_level` for Upload is always default to 'protected'
# public access
if entity_data_access_level == ACCESS_LEVEL_PUBLIC:
globus_server_uuid = app.config['GLOBUS_PUBLIC_ENDPOINT_UUID']
access_dir = access_level_prefix_dir(app.config['PUBLIC_DATA_SUBDIR'])
dir_path = dir_path + access_dir + "/"
# consortium access
elif (entity_data_access_level == ACCESS_LEVEL_CONSORTIUM) and (not user_data_access_level == ACCESS_LEVEL_PUBLIC):
globus_server_uuid = app.config['GLOBUS_CONSORTIUM_ENDPOINT_UUID']
access_dir = access_level_prefix_dir(app.config['CONSORTIUM_DATA_SUBDIR'])
dir_path = dir_path + access_dir + group_name + "/"
# protected access
elif (entity_data_access_level == ACCESS_LEVEL_PROTECTED) and (user_data_access_level == ACCESS_LEVEL_PROTECTED):
globus_server_uuid = app.config['GLOBUS_PROTECTED_ENDPOINT_UUID']
access_dir = access_level_prefix_dir(app.config['PROTECTED_DATA_SUBDIR'])
dir_path = dir_path + access_dir + group_name + "/"
if globus_server_uuid is None:
forbidden_error("Access not granted")
dir_path = dir_path + uuid + "/"
dir_path = urllib.parse.quote(dir_path, safe='')
#https://app.globus.org/file-manager?origin_id=28bbb03c-a87d-4dd7-a661-7ea2fb6ea631&origin_path=%2FIEC%20Testing%20Group%2F03584b3d0f8b46de1b629f04be156879%2F
url = hm_file_helper.ensureTrailingSlashURL(app.config['GLOBUS_APP_BASE_URL']) + "file-manager?origin_id=" + globus_server_uuid + "&origin_path=" + dir_path
return Response(url, 200)
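# Illustrative client sketch for the globus-url endpoint (comments only, not
# executed by this service). `base_url` and `globus_token` are placeholder
# assumptions; the response body is the plain Globus file-manager URL rather
# than JSON, so read it from resp.text.
#
#   import requests
#
#   resp = requests.get(f"{base_url}/entities/HBM123.ABCD.456/globus-url",
#                       headers = {'Authorization': f"Bearer {globus_token}"})
#   globus_url = resp.text   # 200 on success; 401/403/404 otherwise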
"""
Retrieve the latest (newest) revision of a Dataset
Public/Consortium access rules apply - if no token/consortium access then
must be for a public dataset and the returned Dataset must be the latest public version.
Parameters
----------
id : str
The HuBMAP ID (e.g. HBM123.ABCD.456) or UUID of target entity
Returns
-------
json
The detail of the latest revision dataset if exists
Otherwise an empty JSON object {}
"""
@app.route('/datasets/<id>/latest-revision', methods = ['GET'])
def get_dataset_latest_revision(id):
# Token is not required, but if an invalid token provided,
# we need to tell the client with a 401 error
validate_token_if_auth_header_exists(request)
# Use the internal token to query the target entity
# since public entities don't require user token
token = get_internal_token()
# Query target entity against uuid-api and neo4j and return as a dict if exists
entity_dict = query_target_entity(id, token)
normalized_entity_type = entity_dict['entity_type']
uuid = entity_dict['uuid']
# Only for Dataset
if normalized_entity_type != 'Dataset':
bad_request_error("The entity of given id is not a Dataset")
latest_revision_dict = {}
# Only published/public datasets don't require token
if entity_dict['status'].lower() != DATASET_STATUS_PUBLISHED:
# Token is required and the user must belong to HuBMAP-READ group
token = get_user_token(request, non_public_access_required = True)
latest_revision_dict = app_neo4j_queries.get_dataset_latest_revision(neo4j_driver_instance, uuid)
else:
# Default to the latest "public" revision dataset
# when no token or not a valid HuBMAP-Read token
latest_revision_dict = app_neo4j_queries.get_dataset_latest_revision(neo4j_driver_instance, uuid, public = True)
# Send back the real latest revision dataset if a valid HuBMAP-Read token presents
if user_in_hubmap_read_group(request):
latest_revision_dict = app_neo4j_queries.get_dataset_latest_revision(neo4j_driver_instance, uuid)
# We'll need to return all the properties including those
# generated by `on_read_trigger` to have a complete result
# E.g., the 'previous_revision_uuid'
# Here we skip the 'next_revision_uuid' property because when the "public" latest revision dataset
# is not the real latest revision, we don't want the users to see it
properties_to_skip = [
'next_revision_uuid'
]
# On entity retrieval, the 'on_read_trigger' doesn't really need a token
complete_dict = schema_manager.get_complete_entity_result(token, latest_revision_dict, properties_to_skip)
# Also normalize the result based on schema
final_result = schema_manager.normalize_entity_result_for_response(complete_dict)
# Response with the dict
return jsonify(final_result)
"""
Retrieve the calculated revision number of a Dataset
The calculated revision number is based on the [:REVISION_OF] relationships
to the oldest dataset in a revision chain.
The oldest dataset = 1 and each newer version is incremented by one (1, 2, 3 ...)
Public/Consortium access rules apply: if this is for a non-public dataset
and no token, or a token without membership in the HuBMAP-Read group, is sent with the request
then a 403 response should be returned.
Parameters
----------
id : str
The HuBMAP ID (e.g. HBM123.ABCD.456) or UUID of target entity
Returns
-------
int
The calculated revision number
"""
@app.route('/datasets/<id>/revision', methods = ['GET'])
def get_dataset_revision_number(id):
# Token is not required, but if an invalid token provided,
# we need to tell the client with a 401 error
validate_token_if_auth_header_exists(request)
# Use the internal token to query the target entity
# since public entities don't require user token
token = get_internal_token()
# Query target entity against uuid-api and neo4j and return as a dict if exists
entity_dict = query_target_entity(id, token)
normalized_entity_type = entity_dict['entity_type']
# Only for Dataset
if normalized_entity_type != 'Dataset':
bad_request_error("The entity of given id is not a Dataset")
# Only published/public datasets don't require token
if entity_dict['status'].lower() != DATASET_STATUS_PUBLISHED:
# Token is required and the user must belong to HuBMAP-READ group
token = get_user_token(request, non_public_access_required = True)
# By now, either the entity is public accessible or
# the user token has the correct access level
revision_number = app_neo4j_queries.get_dataset_revision_number(neo4j_driver_instance, entity_dict['uuid'])
# Response with the integer
return jsonify(revision_number)
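# Illustrative usage sketch (made-up id, not part of any contract):
#   GET /datasets/HBM123.ABCD.456/revision
#   -> 3
# The response body is just the calculated revision number serialized as JSON.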
"""
Retract a published dataset with a retraction reason and sub status
Takes as input a json body with required fields "retraction_reason" and "sub_status".
Authorization handled by gateway. Only a token of the HuBMAP-Data-Admin group can use this call.
Technically, the same can be achieved by making a PUT call to the generic entity update endpoint
using a HuBMAP-Data-Admin group token. But doing so is strongly discouraged because we'd
need to add more validators to ensure that when "retraction_reason" is provided, there must also be a
"sub_status" field and vice versa. So consider this call a special use case of entity update.
Parameters
----------
id : str
The HuBMAP ID (e.g. HBM123.ABCD.456) or UUID of target dataset
Returns
-------
dict
The updated dataset details
"""
@app.route('/datasets/<id>/retract', methods=['PUT'])
def retract_dataset(id):
# Always expect a json body
require_json(request)
# Parse incoming json string into json data(python dict object)
json_data_dict = request.get_json()
# Use the below application-level validations to avoid complicating schema validators
# The 'retraction_reason' and `sub_status` are the only required/allowed fields. No other fields allowed.
# Must enforce this rule otherwise we'll need to run after update triggers if any other fields
# get passed in (which should be done using the generic entity update call)
if 'retraction_reason' not in json_data_dict:
bad_request_error("Missing required field: retraction_reason")
if 'sub_status' not in json_data_dict:
bad_request_error("Missing required field: sub_status")
if len(json_data_dict) > 2:
bad_request_error("Only retraction_reason and sub_status are allowed fields")
# Must be a HuBMAP-Data-Admin group token
token = get_user_token(request)
# Retrieves the neo4j data for a given entity based on the id supplied.
# The normalized entity-type from this entity is checked to be a dataset
# If the entity is not a dataset and the dataset is not published, cannot retract
entity_dict = query_target_entity(id, token)
normalized_entity_type = entity_dict['entity_type']
# A bit more application-level validation
if normalized_entity_type != 'Dataset':
bad_request_error("The entity of given id is not a Dataset")
# Validate request json against the yaml schema
# The given value of `sub_status` is being validated at this step
try:
schema_manager.validate_json_data_against_schema(json_data_dict, normalized_entity_type, existing_entity_dict = entity_dict)
except schema_errors.SchemaValidationException as e:
# No need to log the validation errors
bad_request_error(str(e))
# Execute property level validators defined in schema yaml before entity property update
try:
schema_manager.execute_property_level_validators('before_property_update_validators', normalized_entity_type, request, entity_dict, json_data_dict)
except (schema_errors.MissingApplicationHeaderException,
schema_errors.InvalidApplicationHeaderException,
KeyError,
ValueError) as e:
bad_request_error(e)
# No need to call after_update() afterwards because retraction doesn't call any after_update_trigger methods
merged_updated_dict = update_entity_details(request, normalized_entity_type, token, json_data_dict, entity_dict)
complete_dict = schema_manager.get_complete_entity_result(token, merged_updated_dict)
# Will also filter the result based on schema
normalized_complete_dict = schema_manager.normalize_entity_result_for_response(complete_dict)
# Also reindex the updated entity node in elasticsearch via search-api
reindex_entity(entity_dict['uuid'], token)
return jsonify(normalized_complete_dict)
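# Illustrative request sketch (the field values below are assumptions, not a definitive contract):
#   PUT /datasets/<id>/retract
#   {
#       "retraction_reason": "Data was submitted in error",
#       "sub_status": "Retracted"
#   }
# Only these two fields are accepted; any additional field results in a 400 response.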
"""
Retrieve a list of all revisions of a dataset from the id of any dataset in the chain.
E.g: If there are 5 revisions, and the id for revision 4 is given, a list of revisions
1-5 will be returned in reverse order (newest first). Non-public access is only required to
retrieve information on non-published datasets. Output will be a list of dictionaries. Each dictionary
contains the dataset revision number and its uuid. Optionally, the full dataset can be included for each.
By default, only the revision number and uuid is included. To include the full dataset, the query
parameter "include_dataset" can be given with the value of "true". If this parameter is not included or
is set to false, the dataset will not be included. For example, to include the full datasets for each revision,
use '/datasets/<id>/revisions?include_dataset=true'. To omit the datasets, either set include_dataset=false, or
simply do not include this parameter.
Parameters
----------
id : str
The HuBMAP ID (e.g. HBM123.ABCD.456) or UUID of target dataset
Returns
-------
list
The list of revision datasets
"""
@app.route('/datasets/<id>/revisions', methods=['GET'])
def get_revisions_list(id):
# By default, do not return the full dataset. Only return it if include_dataset is true
show_dataset = False
if bool(request.args):
include_dataset = request.args.get('include_dataset')
if (include_dataset is not None) and (include_dataset.lower() == 'true'):
show_dataset = True
# Token is not required, but if an invalid token provided,
# we need to tell the client with a 401 error
validate_token_if_auth_header_exists(request)
# Use the internal token to query the target entity
# since public entities don't require user token
token = get_internal_token()
# Query target entity against uuid-api and neo4j and return as a dict if exists
entity_dict = query_target_entity(id, token)
normalized_entity_type = entity_dict['entity_type']
# Only for Dataset
if normalized_entity_type != 'Dataset':
bad_request_error("The entity of given id is not a Dataset")
# Only published/public datasets don't require token
if entity_dict['status'].lower() != DATASET_STATUS_PUBLISHED:
# Token is required and the user must belong to HuBMAP-READ group
token = get_user_token(request, non_public_access_required=True)
# By now, either the entity is public accessible or
# the user token has the correct access level
# Get the all the sorted (DESC based on creation timestamp) revisions
sorted_revisions_list = app_neo4j_queries.get_sorted_revisions(neo4j_driver_instance, entity_dict['uuid'])
# Skip some of the properties that are time-consuming to generate via triggers
# direct_ancestors, collections, and upload for Dataset
properties_to_skip = [
'direct_ancestors',
'collections',
'upload'
]
complete_revisions_list = schema_manager.get_complete_entities_list(token, sorted_revisions_list, properties_to_skip)
normalized_revisions_list = schema_manager.normalize_entities_list_for_response(complete_revisions_list)
# Only check the very last revision (the first revision dict since normalized_revisions_list is already sorted DESC)
# to determine whether to send it back or not
if not user_in_hubmap_read_group(request):
latest_revision = normalized_revisions_list[0]
if latest_revision['status'].lower() != DATASET_STATUS_PUBLISHED:
normalized_revisions_list.pop(0)
# Also hide the 'next_revision_uuid' of the second last revision from response
if 'next_revision_uuid' in normalized_revisions_list[0]:
normalized_revisions_list[0].pop('next_revision_uuid')
# Now all we need to do is to compose the result list
results = []
revision_number = len(normalized_revisions_list)
for revision in normalized_revisions_list:
result = {
'revision_number': revision_number,
'dataset_uuid': revision['uuid']
}
if show_dataset:
result['dataset'] = revision
results.append(result)
revision_number -= 1
return jsonify(results)
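# Illustrative response sketch for GET /datasets/<id>/revisions (uuids are made-up placeholders):
#   [
#       {"revision_number": 2, "dataset_uuid": "<uuid-of-newest-revision>"},
#       {"revision_number": 1, "dataset_uuid": "<uuid-of-oldest-revision>"}
#   ]
# With ?include_dataset=true each item also carries the full normalized dataset under a "dataset" key.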
"""
Get all organs associated with a given dataset
The gateway treats this endpoint as public accessible
Parameters
----------
id : str
The HuBMAP ID (e.g. HBM123.ABCD.456) or UUID of given entity
Returns
-------
json
a list of all the organs associated with the target dataset
"""
@app.route('/datasets/<id>/organs', methods=['GET'])
def get_associated_organs_from_dataset(id):
# Token is not required, but if an invalid token provided,
# we need to tell the client with a 401 error
validate_token_if_auth_header_exists(request)
# Use the internal token to query the target entity
# since public entities don't require user token
token = get_internal_token()
# Query target entity against uuid-api and neo4j and return as a dict if exists
entity_dict = query_target_entity(id, token)
normalized_entity_type = entity_dict['entity_type']
# Only for Dataset
if normalized_entity_type != 'Dataset':
bad_request_error("The entity of given id is not a Dataset")
# published/public datasets don't require token
if entity_dict['status'].lower() != DATASET_STATUS_PUBLISHED:
# Token is required and the user must belong to HuBMAP-READ group
token = get_user_token(request, non_public_access_required=True)
# By now, either the entity is public accessible or
# the user token has the correct access level
associated_organs = app_neo4j_queries.get_associated_organs_from_dataset(neo4j_driver_instance, entity_dict['uuid'])
# If the list of associated organs is empty, then there are no associated
# organs and a 404 will be returned.
if len(associated_organs) < 1:
not_found_error("the dataset does not have any associated organs")
complete_entities_list = schema_manager.get_complete_entities_list(token, associated_organs)
# Final result after normalization
final_result = schema_manager.normalize_entities_list_for_response(complete_entities_list)
return jsonify(final_result)
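# Illustrative usage sketch:
#   GET /datasets/<id>/organs
# Returns a JSON list of the normalized organ entities, or a 404 when the dataset
# has no associated organs.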
"""
Get the complete provenance info for all datasets
Authorization handled by gateway. HuBMAP-Read group is required for this call.
Returns
-------
json
an array of each datatset's provenance info
tsv
a text file of tab separated values where each row is a dataset and the columns include all its prov info
"""
@app.route('/datasets/prov-info', methods=['GET'])
def get_prov_info():
# String constants from app.cfg
GLOBUS_GROUPS_URL = 'https://raw.githubusercontent.com/hubmapconsortium/commons/test-release/hubmap_commons/hubmap-globus-groups.json'
HEADER_DATASET_UUID = 'dataset_uuid'
HEADER_DATASET_HUBMAP_ID = 'dataset_hubmap_id'
HEADER_DATASET_STATUS = 'dataset_status'
HEADER_DATASET_GROUP_NAME = 'dataset_group_name'
HEADER_DATASET_GROUP_UUID = 'dataset_group_uuid'
HEADER_DATASET_DATE_TIME_CREATED = 'dataset_date_time_created'
HEADER_DATASET_CREATED_BY_EMAIL = 'dataset_created_by_email'
HEADER_DATASET_DATE_TIME_MODIFIED = 'dataset_date_time_modified'
HEADER_DATASET_MODIFIED_BY_EMAIL = 'dataset_modified_by_email'
HEADER_DATASET_LAB_ID = 'dataset_lab_id'
HEADER_DATASET_DATA_TYPES = 'dataset_data_types'
HEADER_DATASET_PORTAL_URL = 'dataset_portal_url'
HEADER_FIRST_SAMPLE_HUBMAP_ID = 'first_sample_hubmap_id'
HEADER_FIRST_SAMPLE_SUBMISSION_ID = 'first_sample_submission_id'
HEADER_FIRST_SAMPLE_UUID = 'first_sample_uuid'
HEADER_FIRST_SAMPLE_TYPE = 'first_sample_type'
HEADER_FIRST_SAMPLE_PORTAL_URL = 'first_sample_portal_url'
HEADER_ORGAN_HUBMAP_ID = 'organ_hubmap_id'
HEADER_ORGAN_SUBMISSION_ID = 'organ_submission_id'
HEADER_ORGAN_UUID = 'organ_uuid'
HEADER_ORGAN_TYPE = 'organ_type'
HEADER_DONOR_HUBMAP_ID = 'donor_hubmap_id'
HEADER_DONOR_SUBMISSION_ID = 'donor_submission_id'
HEADER_DONOR_UUID = 'donor_uuid'
HEADER_DONOR_GROUP_NAME = 'donor_group_name'
HEADER_RUI_LOCATION_HUBMAP_ID = 'rui_location_hubmap_id'
HEADER_RUI_LOCATION_SUBMISSION_ID = 'rui_location_submission_id'
HEADER_RUI_LOCATION_UUID = 'rui_location_uuid'
HEADER_SAMPLE_METADATA_HUBMAP_ID = 'sample_metadata_hubmap_id'
HEADER_SAMPLE_METADATA_SUBMISSION_ID = 'sample_metadata_submission_id'
HEADER_SAMPLE_METADATA_UUID = 'sample_metadata_uuid'
headers = [
HEADER_DATASET_UUID, HEADER_DATASET_HUBMAP_ID, HEADER_DATASET_STATUS, HEADER_DATASET_GROUP_NAME,
HEADER_DATASET_GROUP_UUID, HEADER_DATASET_DATE_TIME_CREATED, HEADER_DATASET_CREATED_BY_EMAIL,
HEADER_DATASET_DATE_TIME_MODIFIED, HEADER_DATASET_MODIFIED_BY_EMAIL, HEADER_DATASET_LAB_ID,
HEADER_DATASET_DATA_TYPES, HEADER_DATASET_PORTAL_URL, HEADER_FIRST_SAMPLE_HUBMAP_ID,
HEADER_FIRST_SAMPLE_SUBMISSION_ID, HEADER_FIRST_SAMPLE_UUID, HEADER_FIRST_SAMPLE_TYPE,
HEADER_FIRST_SAMPLE_PORTAL_URL, HEADER_ORGAN_HUBMAP_ID, HEADER_ORGAN_SUBMISSION_ID, HEADER_ORGAN_UUID,
HEADER_ORGAN_TYPE, HEADER_DONOR_HUBMAP_ID, HEADER_DONOR_SUBMISSION_ID, HEADER_DONOR_UUID,
HEADER_DONOR_GROUP_NAME, HEADER_RUI_LOCATION_HUBMAP_ID, HEADER_RUI_LOCATION_SUBMISSION_ID,
HEADER_RUI_LOCATION_UUID, HEADER_SAMPLE_METADATA_HUBMAP_ID, HEADER_SAMPLE_METADATA_SUBMISSION_ID,
HEADER_SAMPLE_METADATA_UUID
]
# Processing and validating query parameters
accepted_arguments = ['format', 'organ', 'has_rui_info', 'dataset_status', 'group_uuid']
return_json = False
param_dict = {}
if bool(request.args):
for argument in request.args:
if argument not in accepted_arguments:
bad_request_error(f"{argument} is an unrecognized argument.")
return_format = request.args.get('format')
if return_format is not None:
if return_format.lower() not in ['json', 'tsv']:
bad_request_error(
"Invalid Format. Accepted formats are json and tsv. If no format is given, TSV will be the default")
if return_format.lower() == 'json':
return_json = True
group_uuid = request.args.get('group_uuid')
if group_uuid is not None:
groups_by_id_dict = globus_groups.get_globus_groups_info()['by_id']
if group_uuid not in groups_by_id_dict:
bad_request_error(
f"Invalid Group UUID. Group must be located at {GLOBUS_GROUPS_URL}")
if not groups_by_id_dict[group_uuid]['data_provider']:
bad_request_error(f"Invalid Group UUID. Group must be a data provider")
param_dict['group_uuid'] = group_uuid
organ = request.args.get('organ')
if organ is not None:
validate_organ_code(organ)
param_dict['organ'] = organ
has_rui_info = request.args.get('has_rui_info')
if has_rui_info is not None:
if has_rui_info.lower() not in ['true', 'false']:
bad_request_error("Invalid value for 'has_rui_info'. Only values of true or false are acceptable")
param_dict['has_rui_info'] = has_rui_info
dataset_status = request.args.get('dataset_status')
if dataset_status is not None:
if dataset_status.lower() not in ['new', 'qa', 'published']:
bad_request_error("Invalid Dataset Status. Must be 'new', 'qa', or 'published' Case-Insensitive")
param_dict['dataset_status'] = dataset_status
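# Illustrative query sketch combining the accepted parameters (parameter values are assumptions):
#   GET /datasets/prov-info?format=json&organ=HT&has_rui_info=true&dataset_status=published&group_uuid=<group-uuid>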
# Instantiation of the list dataset_prov_list
dataset_prov_list = []
# Call to app_neo4j_queries to prepare and execute the database query
prov_info = app_neo4j_queries.get_prov_info(neo4j_driver_instance, param_dict)
# Each dataset's provenance info is placed into a dictionary
for dataset in prov_info:
internal_dict = collections.OrderedDict()
internal_dict[HEADER_DATASET_UUID] = dataset['uuid']
internal_dict[HEADER_DATASET_HUBMAP_ID] = dataset['hubmap_id']
internal_dict[HEADER_DATASET_STATUS] = dataset['status']
internal_dict[HEADER_DATASET_GROUP_NAME] = dataset['group_name']
internal_dict[HEADER_DATASET_GROUP_UUID] = dataset['group_uuid']
internal_dict[HEADER_DATASET_DATE_TIME_CREATED] = datetime.fromtimestamp(int(dataset['created_timestamp']/1000.0))
internal_dict[HEADER_DATASET_CREATED_BY_EMAIL] = dataset['created_by_user_email']
internal_dict[HEADER_DATASET_DATE_TIME_MODIFIED] = datetime.fromtimestamp(int(dataset['last_modified_timestamp']/1000.0))
internal_dict[HEADER_DATASET_MODIFIED_BY_EMAIL] = dataset['last_modified_user_email']
internal_dict[HEADER_DATASET_DATA_TYPES] = dataset['data_types']
# If return_format was not equal to json, json arrays must be converted into comma separated lists for the tsv
if return_json is False:
internal_dict[HEADER_DATASET_DATA_TYPES] = ",".join(dataset['data_types'])
internal_dict[HEADER_DATASET_PORTAL_URL] = app.config['DOI_REDIRECT_URL'].replace('<entity_type>', 'dataset').replace('<identifier>', dataset['uuid'])
# first_sample properties are retrieved from its own dictionary
if dataset['first_sample'] is not None:
first_sample_hubmap_id_list = []
first_sample_submission_id_list = []
first_sample_uuid_list = []
first_sample_type_list = []
first_sample_portal_url_list = []
for item in dataset['first_sample']:
first_sample_hubmap_id_list.append(item['hubmap_id'])
first_sample_submission_id_list.append(item['submission_id'])
first_sample_uuid_list.append(item['uuid'])
first_sample_type_list.append(item['specimen_type'])
first_sample_portal_url_list.append(app.config['DOI_REDIRECT_URL'].replace('<entity_type>', 'sample').replace('<identifier>', item['uuid']))
internal_dict[HEADER_FIRST_SAMPLE_HUBMAP_ID] = first_sample_hubmap_id_list
internal_dict[HEADER_FIRST_SAMPLE_SUBMISSION_ID] = first_sample_submission_id_list
internal_dict[HEADER_FIRST_SAMPLE_UUID] = first_sample_uuid_list
internal_dict[HEADER_FIRST_SAMPLE_TYPE] = first_sample_type_list
internal_dict[HEADER_FIRST_SAMPLE_PORTAL_URL] = first_sample_portal_url_list
if return_json is False:
internal_dict[HEADER_FIRST_SAMPLE_HUBMAP_ID] = ",".join(first_sample_hubmap_id_list)
internal_dict[HEADER_FIRST_SAMPLE_SUBMISSION_ID] = ",".join(first_sample_submission_id_list)
internal_dict[HEADER_FIRST_SAMPLE_UUID] = ",".join(first_sample_uuid_list)
internal_dict[HEADER_FIRST_SAMPLE_TYPE] = ",".join(first_sample_type_list)
internal_dict[HEADER_FIRST_SAMPLE_PORTAL_URL] = ",".join(first_sample_portal_url_list)
# distinct_organ properties are retrieved from its own dictionary
if dataset['distinct_organ'] is not None:
distinct_organ_hubmap_id_list = []
distinct_organ_submission_id_list = []
distinct_organ_uuid_list = []
distinct_organ_type_list = []
for item in dataset['distinct_organ']:
distinct_organ_hubmap_id_list.append(item['hubmap_id'])
distinct_organ_submission_id_list.append(item['submission_id'])
distinct_organ_uuid_list.append(item['uuid'])
distinct_organ_type_list.append(item['organ'])
internal_dict[HEADER_ORGAN_HUBMAP_ID] = distinct_organ_hubmap_id_list
internal_dict[HEADER_ORGAN_SUBMISSION_ID] = distinct_organ_submission_id_list
internal_dict[HEADER_ORGAN_UUID] = distinct_organ_uuid_list
internal_dict[HEADER_ORGAN_TYPE] = distinct_organ_type_list
if return_json is False:
internal_dict[HEADER_ORGAN_HUBMAP_ID] = ",".join(distinct_organ_hubmap_id_list)
internal_dict[HEADER_ORGAN_SUBMISSION_ID] = ",".join(distinct_organ_submission_id_list)
internal_dict[HEADER_ORGAN_UUID] = ",".join(distinct_organ_uuid_list)
internal_dict[HEADER_ORGAN_TYPE] = ",".join(distinct_organ_type_list)
# distinct_donor properties are retrieved from its own dictionary
if dataset['distinct_donor'] is not None:
distinct_donor_hubmap_id_list = []
distinct_donor_submission_id_list = []
distinct_donor_uuid_list = []
distinct_donor_group_name_list = []
for item in dataset['distinct_donor']:
distinct_donor_hubmap_id_list.append(item['hubmap_id'])
distinct_donor_submission_id_list.append(item['submission_id'])
distinct_donor_uuid_list.append(item['uuid'])
distinct_donor_group_name_list.append(item['group_name'])
internal_dict[HEADER_DONOR_HUBMAP_ID] = distinct_donor_hubmap_id_list
internal_dict[HEADER_DONOR_SUBMISSION_ID] = distinct_donor_submission_id_list
internal_dict[HEADER_DONOR_UUID] = distinct_donor_uuid_list
internal_dict[HEADER_DONOR_GROUP_NAME] = distinct_donor_group_name_list
if return_json is False:
internal_dict[HEADER_DONOR_HUBMAP_ID] = ",".join(distinct_donor_hubmap_id_list)
internal_dict[HEADER_DONOR_SUBMISSION_ID] = ",".join(distinct_donor_submission_id_list)
internal_dict[HEADER_DONOR_UUID] = ",".join(distinct_donor_uuid_list)
internal_dict[HEADER_DONOR_GROUP_NAME] = ",".join(distinct_donor_group_name_list)
# distinct_rui_sample properties are retrieved from its own dictionary
if dataset['distinct_rui_sample'] is not None:
rui_location_hubmap_id_list = []
rui_location_submission_id_list = []
rui_location_uuid_list = []
for item in dataset['distinct_rui_sample']:
rui_location_hubmap_id_list.append(item['hubmap_id'])
rui_location_submission_id_list.append(item['submission_id'])
rui_location_uuid_list.append(item['uuid'])
internal_dict[HEADER_RUI_LOCATION_HUBMAP_ID] = rui_location_hubmap_id_list
internal_dict[HEADER_RUI_LOCATION_SUBMISSION_ID] = rui_location_submission_id_list
internal_dict[HEADER_RUI_LOCATION_UUID] = rui_location_uuid_list
if return_json is False:
internal_dict[HEADER_RUI_LOCATION_HUBMAP_ID] = ",".join(rui_location_hubmap_id_list)
internal_dict[HEADER_RUI_LOCATION_SUBMISSION_ID] = ",".join(rui_location_submission_id_list)
internal_dict[HEADER_RUI_LOCATION_UUID] = ",".join(rui_location_uuid_list)
# distinct_metasample properties are retrieved from its own dictionary
if dataset['distinct_metasample'] is not None:
metasample_hubmap_id_list = []
metasample_submission_id_list = []
metasample_uuid_list = []
for item in dataset['distinct_metasample']:
metasample_hubmap_id_list.append(item['hubmap_id'])
metasample_submission_id_list.append(item['submission_id'])
metasample_uuid_list.append(item['uuid'])
internal_dict[HEADER_SAMPLE_METADATA_HUBMAP_ID] = metasample_hubmap_id_list
internal_dict[HEADER_SAMPLE_METADATA_SUBMISSION_ID] = metasample_submission_id_list
internal_dict[HEADER_SAMPLE_METADATA_UUID] = metasample_uuid_list
if return_json is False:
internal_dict[HEADER_SAMPLE_METADATA_HUBMAP_ID] = ",".join(metasample_hubmap_id_list)
internal_dict[HEADER_SAMPLE_METADATA_SUBMISSION_ID] = ",".join(metasample_submission_id_list)
internal_dict[HEADER_SAMPLE_METADATA_UUID] = ",".join(metasample_uuid_list)
# Each dataset's dictionary is added to the list to be returned
dataset_prov_list.append(internal_dict)
# if return_json is true, this dictionary is ready to be returned already
if return_json:
return jsonify(dataset_prov_list)
# if return_json is false, the data must be converted to be returned as a tsv
else:
new_tsv_file = StringIO()
writer = csv.DictWriter(new_tsv_file, fieldnames=headers, delimiter='\t')
writer.writeheader()
writer.writerows(dataset_prov_list)
new_tsv_file.seek(0)
output = Response(new_tsv_file, mimetype='text/tsv')
output.headers['Content-Disposition'] = 'attachment; filename=prov-info.tsv'
return output
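"""
Get the complete provenance info for a given dataset
Public/Consortium access rules apply: a HuBMAP-Read group token is only required
when the target dataset is not published
Parameters
----------
id : str
The HuBMAP ID (e.g. HBM123.ABCD.456) or UUID of target dataset
Returns
-------
json
the provenance info for the target dataset
tsv
a text file of tab separated values where the single row is the target dataset and the columns include all its prov info
"""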
@app.route('/datasets/<id>/prov-info', methods=['GET'])
def get_prov_info_for_dataset(id):
# Token is not required, but if an invalid token provided,
# we need to tell the client with a 401 error
validate_token_if_auth_header_exists(request)
# Use the internal token to query the target entity
# since public entities don't require user token
token = get_internal_token()
# Query target entity against uuid-api and neo4j and return as a dict if exists
entity_dict = query_target_entity(id, token)
normalized_entity_type = entity_dict['entity_type']
# Only for Dataset
if normalized_entity_type != 'Dataset':
bad_request_error("The entity of given id is not a Dataset")
# published/public datasets don't require token
if entity_dict['status'].lower() != DATASET_STATUS_PUBLISHED:
# Token is required and the user must belong to HuBMAP-READ group
token = get_user_token(request, non_public_access_required=True)
return_json = False
dataset_prov_list = []
if bool(request.args):
return_format = request.args.get('format')
if (return_format is not None) and (return_format.lower() == 'json'):
return_json = True
HEADER_DATASET_UUID = 'dataset_uuid'
HEADER_DATASET_HUBMAP_ID = 'dataset_hubmap_id'
HEADER_DATASET_STATUS = 'dataset_status'
HEADER_DATASET_GROUP_NAME = 'dataset_group_name'
HEADER_DATASET_GROUP_UUID = 'dataset_group_uuid'
HEADER_DATASET_DATE_TIME_CREATED = 'dataset_date_time_created'
HEADER_DATASET_CREATED_BY_EMAIL = 'dataset_created_by_email'
HEADER_DATASET_DATE_TIME_MODIFIED = 'dataset_date_time_modified'
HEADER_DATASET_MODIFIED_BY_EMAIL = 'dataset_modified_by_email'
HEADER_DATASET_LAB_ID = 'dataset_lab_id'
HEADER_DATASET_DATA_TYPES = 'dataset_data_types'
HEADER_DATASET_PORTAL_URL = 'dataset_portal_url'
HEADER_FIRST_SAMPLE_HUBMAP_ID = 'first_sample_hubmap_id'
HEADER_FIRST_SAMPLE_SUBMISSION_ID = 'first_sample_submission_id'
HEADER_FIRST_SAMPLE_UUID = 'first_sample_uuid'
HEADER_FIRST_SAMPLE_TYPE = 'first_sample_type'
HEADER_FIRST_SAMPLE_PORTAL_URL = 'first_sample_portal_url'
HEADER_ORGAN_HUBMAP_ID = 'organ_hubmap_id'
HEADER_ORGAN_SUBMISSION_ID = 'organ_submission_id'
HEADER_ORGAN_UUID = 'organ_uuid'
HEADER_ORGAN_TYPE = 'organ_type'
HEADER_DONOR_HUBMAP_ID = 'donor_hubmap_id'
HEADER_DONOR_SUBMISSION_ID = 'donor_submission_id'
HEADER_DONOR_UUID = 'donor_uuid'
HEADER_DONOR_GROUP_NAME = 'donor_group_name'
HEADER_RUI_LOCATION_HUBMAP_ID = 'rui_location_hubmap_id'
HEADER_RUI_LOCATION_SUBMISSION_ID = 'rui_location_submission_id'
HEADER_RUI_LOCATION_UUID = 'rui_location_uuid'
HEADER_SAMPLE_METADATA_HUBMAP_ID = 'sample_metadata_hubmap_id'
HEADER_SAMPLE_METADATA_SUBMISSION_ID = 'sample_metadata_submission_id'
HEADER_SAMPLE_METADATA_UUID = 'sample_metadata_uuid'
headers = [
HEADER_DATASET_UUID, HEADER_DATASET_HUBMAP_ID, HEADER_DATASET_STATUS, HEADER_DATASET_GROUP_NAME,
HEADER_DATASET_GROUP_UUID, HEADER_DATASET_DATE_TIME_CREATED, HEADER_DATASET_CREATED_BY_EMAIL,
HEADER_DATASET_DATE_TIME_MODIFIED, HEADER_DATASET_MODIFIED_BY_EMAIL, HEADER_DATASET_LAB_ID,
HEADER_DATASET_DATA_TYPES, HEADER_DATASET_PORTAL_URL, HEADER_FIRST_SAMPLE_HUBMAP_ID,
HEADER_FIRST_SAMPLE_SUBMISSION_ID, HEADER_FIRST_SAMPLE_UUID, HEADER_FIRST_SAMPLE_TYPE,
HEADER_FIRST_SAMPLE_PORTAL_URL, HEADER_ORGAN_HUBMAP_ID, HEADER_ORGAN_SUBMISSION_ID, HEADER_ORGAN_UUID,
HEADER_ORGAN_TYPE, HEADER_DONOR_HUBMAP_ID, HEADER_DONOR_SUBMISSION_ID, HEADER_DONOR_UUID,
HEADER_DONOR_GROUP_NAME, HEADER_RUI_LOCATION_HUBMAP_ID, HEADER_RUI_LOCATION_SUBMISSION_ID,
HEADER_RUI_LOCATION_UUID, HEADER_SAMPLE_METADATA_HUBMAP_ID, HEADER_SAMPLE_METADATA_SUBMISSION_ID,
HEADER_SAMPLE_METADATA_UUID
]
hubmap_ids = schema_manager.get_hubmap_ids(id, token)
# Get the target uuid if all good
uuid = hubmap_ids['hm_uuid']
dataset = app_neo4j_queries.get_individual_prov_info(neo4j_driver_instance, uuid)
if dataset is None:
bad_request_error("Query For this Dataset Returned no Records. Make sure this is a Primary Dataset")
internal_dict = collections.OrderedDict()
internal_dict[HEADER_DATASET_HUBMAP_ID] = dataset['hubmap_id']
internal_dict[HEADER_DATASET_UUID] = dataset['uuid']
internal_dict[HEADER_DATASET_STATUS] = dataset['status']
internal_dict[HEADER_DATASET_GROUP_NAME] = dataset['group_name']
internal_dict[HEADER_DATASET_GROUP_UUID] = dataset['group_uuid']
internal_dict[HEADER_DATASET_DATE_TIME_CREATED] = datetime.fromtimestamp(int(dataset['created_timestamp'] / 1000.0))
internal_dict[HEADER_DATASET_CREATED_BY_EMAIL] = dataset['created_by_user_email']
internal_dict[HEADER_DATASET_DATE_TIME_MODIFIED] = datetime.fromtimestamp(
int(dataset['last_modified_timestamp'] / 1000.0))
internal_dict[HEADER_DATASET_MODIFIED_BY_EMAIL] = dataset['last_modified_user_email']
internal_dict[HEADER_DATASET_DATA_TYPES] = dataset['data_types']
if return_json is False:
internal_dict[HEADER_DATASET_DATA_TYPES] = ",".join(dataset['data_types'])
internal_dict[HEADER_DATASET_PORTAL_URL] = app.config['DOI_REDIRECT_URL'].replace('<entity_type>', 'dataset').replace(
'<identifier>', dataset['uuid'])
if dataset['first_sample'] is not None:
first_sample_hubmap_id_list = []
first_sample_submission_id_list = []
first_sample_uuid_list = []
first_sample_type_list = []
first_sample_portal_url_list = []
for item in dataset['first_sample']:
first_sample_hubmap_id_list.append(item['hubmap_id'])
first_sample_submission_id_list.append(item['submission_id'])
first_sample_uuid_list.append(item['uuid'])
first_sample_type_list.append(item['specimen_type'])
first_sample_portal_url_list.append(
app.config['DOI_REDIRECT_URL'].replace('<entity_type>', 'sample').replace('<identifier>', item['uuid']))
internal_dict[HEADER_FIRST_SAMPLE_HUBMAP_ID] = first_sample_hubmap_id_list
internal_dict[HEADER_FIRST_SAMPLE_SUBMISSION_ID] = first_sample_submission_id_list
internal_dict[HEADER_FIRST_SAMPLE_UUID] = first_sample_uuid_list
internal_dict[HEADER_FIRST_SAMPLE_TYPE] = first_sample_type_list
internal_dict[HEADER_FIRST_SAMPLE_PORTAL_URL] = first_sample_portal_url_list
if return_json is False:
internal_dict[HEADER_FIRST_SAMPLE_HUBMAP_ID] = ",".join(first_sample_hubmap_id_list)
internal_dict[HEADER_FIRST_SAMPLE_SUBMISSION_ID] = ",".join(first_sample_submission_id_list)
internal_dict[HEADER_FIRST_SAMPLE_UUID] = ",".join(first_sample_uuid_list)
internal_dict[HEADER_FIRST_SAMPLE_TYPE] = ",".join(first_sample_type_list)
internal_dict[HEADER_FIRST_SAMPLE_PORTAL_URL] = ",".join(first_sample_portal_url_list)
if dataset['distinct_organ'] is not None:
distinct_organ_hubmap_id_list = []
distinct_organ_submission_id_list = []
distinct_organ_uuid_list = []
distinct_organ_type_list = []
for item in dataset['distinct_organ']:
distinct_organ_hubmap_id_list.append(item['hubmap_id'])
distinct_organ_submission_id_list.append(item['submission_id'])
distinct_organ_uuid_list.append(item['uuid'])
distinct_organ_type_list.append(item['organ'])
internal_dict[HEADER_ORGAN_HUBMAP_ID] = distinct_organ_hubmap_id_list
internal_dict[HEADER_ORGAN_SUBMISSION_ID] = distinct_organ_submission_id_list
internal_dict[HEADER_ORGAN_UUID] = distinct_organ_uuid_list
internal_dict[HEADER_ORGAN_TYPE] = distinct_organ_type_list
if return_json is False:
internal_dict[HEADER_ORGAN_HUBMAP_ID] = ",".join(distinct_organ_hubmap_id_list)
internal_dict[HEADER_ORGAN_SUBMISSION_ID] = ",".join(distinct_organ_submission_id_list)
internal_dict[HEADER_ORGAN_UUID] = ",".join(distinct_organ_uuid_list)
internal_dict[HEADER_ORGAN_TYPE] = ",".join(distinct_organ_type_list)
if dataset['distinct_donor'] is not None:
distinct_donor_hubmap_id_list = []
distinct_donor_submission_id_list = []
distinct_donor_uuid_list = []
distinct_donor_group_name_list = []
for item in dataset['distinct_donor']:
distinct_donor_hubmap_id_list.append(item['hubmap_id'])
distinct_donor_submission_id_list.append(item['submission_id'])
distinct_donor_uuid_list.append(item['uuid'])
distinct_donor_group_name_list.append(item['group_name'])
internal_dict[HEADER_DONOR_HUBMAP_ID] = distinct_donor_hubmap_id_list
internal_dict[HEADER_DONOR_SUBMISSION_ID] = distinct_donor_submission_id_list
internal_dict[HEADER_DONOR_UUID] = distinct_donor_uuid_list
internal_dict[HEADER_DONOR_GROUP_NAME] = distinct_donor_group_name_list
if return_json is False:
internal_dict[HEADER_DONOR_HUBMAP_ID] = ",".join(distinct_donor_hubmap_id_list)
internal_dict[HEADER_DONOR_SUBMISSION_ID] = ",".join(distinct_donor_submission_id_list)
internal_dict[HEADER_DONOR_UUID] = ",".join(distinct_donor_uuid_list)
internal_dict[HEADER_DONOR_GROUP_NAME] = ",".join(distinct_donor_group_name_list)
if dataset['distinct_rui_sample'] is not None:
rui_location_hubmap_id_list = []
rui_location_submission_id_list = []
rui_location_uuid_list = []
for item in dataset['distinct_rui_sample']:
rui_location_hubmap_id_list.append(item['hubmap_id'])
rui_location_submission_id_list.append(item['submission_id'])
rui_location_uuid_list.append(item['uuid'])
internal_dict[HEADER_RUI_LOCATION_HUBMAP_ID] = rui_location_hubmap_id_list
internal_dict[HEADER_RUI_LOCATION_SUBMISSION_ID] = rui_location_submission_id_list
internal_dict[HEADER_RUI_LOCATION_UUID] = rui_location_uuid_list
if return_json is False:
internal_dict[HEADER_RUI_LOCATION_HUBMAP_ID] = ",".join(rui_location_hubmap_id_list)
internal_dict[HEADER_RUI_LOCATION_SUBMISSION_ID] = ",".join(rui_location_submission_id_list)
internal_dict[HEADER_RUI_LOCATION_UUID] = ",".join(rui_location_uuid_list)
if dataset['distinct_metasample'] is not None:
metasample_hubmap_id_list = []
metasample_submission_id_list = []
metasample_uuid_list = []
for item in dataset['distinct_metasample']:
metasample_hubmap_id_list.append(item['hubmap_id'])
metasample_submission_id_list.append(item['submission_id'])
metasample_uuid_list.append(item['uuid'])
internal_dict[HEADER_SAMPLE_METADATA_HUBMAP_ID] = metasample_hubmap_id_list
internal_dict[HEADER_SAMPLE_METADATA_SUBMISSION_ID] = metasample_submission_id_list
internal_dict[HEADER_SAMPLE_METADATA_UUID] = metasample_uuid_list
if return_json is False:
internal_dict[HEADER_SAMPLE_METADATA_HUBMAP_ID] = ",".join(metasample_hubmap_id_list)
internal_dict[HEADER_SAMPLE_METADATA_SUBMISSION_ID] = ",".join(metasample_submission_id_list)
internal_dict[HEADER_SAMPLE_METADATA_UUID] = ",".join(metasample_uuid_list)
dataset_prov_list.append(internal_dict)
if return_json:
return jsonify(dataset_prov_list[0])
else:
new_tsv_file = StringIO()
writer = csv.DictWriter(new_tsv_file, fieldnames=headers, delimiter='\t')
writer.writeheader()
writer.writerows(dataset_prov_list)
new_tsv_file.seek(0)
output = Response(new_tsv_file, mimetype='text/tsv')
output.headers['Content-Disposition'] = 'attachment; filename=prov-info.tsv'
return output
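# Illustrative usage sketch:
#   GET /datasets/<id>/prov-info              -> TSV attachment named prov-info.tsv (default)
#   GET /datasets/<id>/prov-info?format=json  -> a single JSON object for this dataset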
####################################################################################################
## Internal Functions
####################################################################################################
"""
Throws error for 400 Bad Request with message
Parameters
----------
err_msg : str
The custom error message to return to end users
"""
def bad_request_error(err_msg):
abort(400, description = err_msg)
"""
Throws error for 401 Unauthorized with message
Parameters
----------
err_msg : str
The custom error message to return to end users
"""
def unauthorized_error(err_msg):
abort(401, description = err_msg)
"""
Throws error for 403 Forbidden with message
Parameters
----------
err_msg : str
The custom error message to return to end users
"""
def forbidden_error(err_msg):
abort(403, description = err_msg)
"""
Throws error for 404 Not Found with message
Parameters
----------
err_msg : str
The custom error message to return to end users
"""
def not_found_error(err_msg):
abort(404, description = err_msg)
"""
Throws error for 500 Internal Server Error with message
Parameters
----------
err_msg : str
The custom error message to return to end users
"""
def internal_server_error(err_msg):
abort(500, description = err_msg)
"""
Parse the token from the Authorization header
Parameters
----------
request : flask.request
The flask http request object
non_public_access_required : bool
If a non-public access token is required by the request, default to False
Returns
-------
str
The token string if valid
"""
def get_user_token(request, non_public_access_required = False):
# Get user token from Authorization header
# getAuthorizationTokens() also handles MAuthorization header but we are not using that here
try:
user_token = auth_helper_instance.getAuthorizationTokens(request.headers)
except Exception:
msg = "Failed to parse the Authorization token by calling commons.auth_helper.getAuthorizationTokens()"
# Log the full stack trace, prepend a line with our message
logger.exception(msg)
internal_server_error(msg)
# Further check the validity of the token if required non-public access
if non_public_access_required:
# When the token is a flask.Response instance,
# it MUST be a 401 error with message.
# That's how commons.auth_helper.getAuthorizationTokens() was designed
if isinstance(user_token, Response):
# We wrap the message in a json and send back to requester as 401 too
# The Response.data returns binary string, need to decode
unauthorized_error(user_token.get_data().decode())
# By now the token is already a valid token
# But we also need to ensure the user belongs to HuBMAP-Read group
# in order to access the non-public entity
# Return a 403 response if the user doesn't belong to HuBMAP-READ group
if not user_in_hubmap_read_group(request):
forbidden_error("Access not granted")
return user_token
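# Typical call patterns used by the endpoint handlers above (for reference):
#   token = get_user_token(request)                                    # token optional
#   token = get_user_token(request, non_public_access_required=True)   # enforces 401/403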
"""
Check if the user with token is in the HuBMAP-READ group
Parameters
----------
request : flask.request
The flask http request object containing the Authorization header
with a valid Globus nexus token for checking group information
Returns
-------
bool
True if the user belongs to HuBMAP-READ group, otherwise False
"""
def user_in_hubmap_read_group(request):
try:
# The property 'hmgroupids' is ALWAYS in the output when using schema_manager.get_user_info()
# when the token in request is a nexus_token
user_info = schema_manager.get_user_info(request)
hubmap_read_group_uuid = auth_helper_instance.groupNameToId('HuBMAP-READ')['uuid']
except Exception as e:
# Log the full stack trace, prepend a line with our message
logger.exception(e)
# If the token is not a nexus token, no group information available
# The commons.hm_auth.AuthCache would return a Response with 500 error message
# We treat such cases as the user not in the HuBMAP-READ group
return False
return (hubmap_read_group_uuid in user_info['hmgroupids'])
"""
Validate the provided token when Authorization header presents
Parameters
----------
request : flask.request object
The Flask http request object
"""
def validate_token_if_auth_header_exists(request):
# No matter if token is required or not, when an invalid token provided,
# we need to tell the client with a 401 error
# HTTP header names are case-insensitive
# request.headers.get('Authorization') returns None if the header doesn't exist
if request.headers.get('Authorization') is not None:
user_token = get_user_token(request)
# When the Authorization header is provided but the user_token is a flask.Response instance,
# it MUST be a 401 error with message.
# That's how commons.auth_helper.getAuthorizationTokens() was designed
if isinstance(user_token, Response):
# We wrap the message in a json and send back to requester as 401 too
# The Response.data returns binary string, need to decode
unauthorized_error(user_token.get_data().decode())
# Also check if the parsed token is invalid or expired
# Set the second parameter to False to skip the group check
user_info = auth_helper_instance.getUserInfo(user_token, False)
if isinstance(user_info, Response):
unauthorized_error(user_info.get_data().decode())
"""
Get the token for internal use only
Returns
-------
str
The token string
"""
def get_internal_token():
return auth_helper_instance.getProcessSecret()
"""
Return the complete collection dict for a given raw collection dict
Parameters
----------
collection_dict : dict
The raw collection dict returned by Neo4j
Returns
-------
dict
A dictionary of complete collection detail with all the generated 'on_read_trigger' data
The generated Collection.datasets contains only public datasets
if user/token doesn't have the right access permission
"""
def get_complete_public_collection_dict(collection_dict):
# Use internal token to query entity since
# no user token is required to access a public collection
token = get_internal_token()
# Collection.datasets is a transient property generated by the trigger method
# We'll need to return all the properties including those
# generated by `on_read_trigger` to have a complete result
complete_dict = schema_manager.get_complete_entity_result(token, collection_dict)
# Loop through Collection.datasets and only return the published/public datasets
public_datasets = []
for dataset in complete_dict['datasets']:
if dataset['status'].lower() == DATASET_STATUS_PUBLISHED:
public_datasets.append(dataset)
# Modify the result and only show the public datasets in this collection
complete_dict['datasets'] = public_datasets
return complete_dict
"""
Generate 'before_create_trigger' data and create the entity details in Neo4j
Parameters
----------
request : flask.Request object
The incoming request
normalized_entity_type : str
One of the normalized entity types: Dataset, Collection, Sample, Donor
user_token: str
The user's globus nexus token
json_data_dict: dict
The json request dict from user input
Returns
-------
dict
A dict of all the newly created entity details
"""
def create_entity_details(request, normalized_entity_type, user_token, json_data_dict):
# Get user info based on request
user_info_dict = schema_manager.get_user_info(request)
# Create new ids for the new entity
try:
new_ids_dict_list = schema_manager.create_hubmap_ids(normalized_entity_type, json_data_dict, user_token, user_info_dict)
new_ids_dict = new_ids_dict_list[0]
# When group_uuid is provided by user, it can be invalid
except schema_errors.NoDataProviderGroupException:
# Log the full stack trace, prepend a line with our message
if 'group_uuid' in json_data_dict:
msg = "Invalid 'group_uuid' value, can't create the entity"
else:
msg = "The user does not have the correct Globus group associated with, can't create the entity"
logger.exception(msg)
bad_request_error(msg)
except schema_errors.UnmatchedDataProviderGroupException:
msg = "The user does not belong to the given Globus group, can't create the entity"
logger.exception(msg)
forbidden_error(msg)
except schema_errors.MultipleDataProviderGroupException:
msg = "The user has mutiple Globus groups associated with, please specify one using 'group_uuid'"
logger.exception(msg)
bad_request_error(msg)
except KeyError as e:
logger.exception(e)
bad_request_error(e)
except requests.exceptions.RequestException as e:
msg = f"Failed to create new HuBMAP ids via the uuid-api service"
logger.exception(msg)
# Due to the use of response.raise_for_status() in schema_manager.create_hubmap_ids()
# we can access the status codes from the exception
status_code = e.response.status_code
if status_code == 400:
bad_request_error(e.response.text)
if status_code == 404:
not_found_error(e.response.text)
else:
internal_server_error(e.response.text)
# Merge all the above dictionaries and pass to the trigger methods
new_data_dict = {**json_data_dict, **user_info_dict, **new_ids_dict}
try:
# Use {} since no existing dict
generated_before_create_trigger_data_dict = schema_manager.generate_triggered_data('before_create_trigger', normalized_entity_type, user_token, {}, new_data_dict)
# If one of the before_create_trigger methods fails, we can't create the entity
except schema_errors.BeforeCreateTriggerException:
# Log the full stack trace, prepend a line with our message
msg = "Failed to execute one of the 'before_create_trigger' methods, can't create the entity"
logger.exception(msg)
internal_server_error(msg)
except schema_errors.NoDataProviderGroupException:
# Log the full stack trace, prepend a line with our message
if 'group_uuid' in json_data_dict:
msg = "Invalid 'group_uuid' value, can't create the entity"
else:
msg = "The user does not have the correct Globus group associated with, can't create the entity"
logger.exception(msg)
bad_request_error(msg)
except schema_errors.UnmatchedDataProviderGroupException:
# Log the full stack trace, prepend a line with our message
msg = "The user does not belong to the given Globus group, can't create the entity"
logger.exception(msg)
forbidden_error(msg)
except schema_errors.MultipleDataProviderGroupException:
# Log the full stack trace, prepend a line with our message
msg = "The user has mutiple Globus groups associated with, please specify one using 'group_uuid'"
logger.exception(msg)
bad_request_error(msg)
# If something wrong with file upload
except schema_errors.FileUploadException as e:
logger.exception(e)
internal_server_error(e)
except KeyError as e:
# Log the full stack trace, prepend a line with our message
logger.exception(e)
bad_request_error(e)
except Exception as e:
logger.exception(e)
internal_server_error(e)
# Merge the user json data and generated trigger data into one dictionary
merged_dict = {**json_data_dict, **generated_before_create_trigger_data_dict}
# Filter out the merged_dict by getting rid of the transient properties (not to be stored)
# and properties with None value
# Meaning the returned target property key is different from the original key
# in the trigger method, e.g., Donor.image_files_to_add
filtered_merged_dict = schema_manager.remove_transient_and_none_values(merged_dict, normalized_entity_type)
# Create new entity
try:
# Important: `entity_dict` is the resulting neo4j dict, Python list and dicts are stored
# as string expression literals in it. That's why properties like entity_dict['direct_ancestor_uuids']
# will need to use ast.literal_eval() in the schema_triggers.py
entity_dict = app_neo4j_queries.create_entity(neo4j_driver_instance, normalized_entity_type, filtered_merged_dict)
except TransactionError:
msg = "Failed to create the new " + normalized_entity_type
# Log the full stack trace, prepend a line with our message
logger.exception(msg)
# Terminate and let the users know
internal_server_error(msg)
# Important: use `entity_dict` instead of `filtered_merged_dict` to keep consistent with the stored
# string expression literals of Python list/dict being used with entity update, e.g., `image_files`
# Important: the same property keys in entity_dict will overwrite the same key in json_data_dict
# and this is what we wanted. Adding json_data_dict back is to include those `transient` properties
# provided in the JSON input but not stored in neo4j, and will be needed for after_create_trigger/after_update_trigger,
# e.g., `previous_revision_uuid`, `direct_ancestor_uuids`
# Add user_info_dict because it may be used by after_update_trigger methods
merged_final_dict = {**json_data_dict, **entity_dict, **user_info_dict}
# Note: return merged_final_dict instead of entity_dict because
# it contains all the user json data that the generated entity_dict may not have
return merged_final_dict
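# Illustrative call sketch (assumed caller pattern; the actual entity creation endpoints
# live earlier in this file):
#   merged_final_dict = create_entity_details(request, 'Sample', user_token, json_data_dict)
#   after_create('Sample', user_token, merged_final_dict)
# The returned dict mixes the stored neo4j properties with the transient input properties
# needed by the after_create_trigger methods.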
"""
Create multiple sample nodes and relationships with the source entity node
Parameters
----------
request : flask.Request object
The incoming request
normalized_entity_type : str
One of the normalized entity types: Dataset, Collection, Sample, Donor
user_token: str
The user's globus nexus token
json_data_dict: dict
The json request dict from user input
count : int
The number of samples to create
Returns
-------
list
A list of all the newly generated ids via uuid-api
"""
def create_multiple_samples_details(request, normalized_entity_type, user_token, json_data_dict, count):
# Get user info based on request
user_info_dict = schema_manager.get_user_info(request)
# Create new ids for the new entity
try:
new_ids_dict_list = schema_manager.create_hubmap_ids(normalized_entity_type, json_data_dict, user_token, user_info_dict, count)
# When group_uuid is provided by user, it can be invalid
except schema_errors.NoDataProviderGroupException:
# Log the full stack trace, prepend a line with our message
if 'group_uuid' in json_data_dict:
msg = "Invalid 'group_uuid' value, can't create the entity"
else:
msg = "The user does not have the correct Globus group associated with, can't create the entity"
logger.exception(msg)
bad_request_error(msg)
except schema_errors.UnmatchedDataProviderGroupException:
# Log the full stack trace, prepend a line with our message
msg = "The user does not belong to the given Globus group, can't create the entity"
logger.exception(msg)
forbidden_error(msg)
except schema_errors.MultipleDataProviderGroupException:
# Log the full stack trace, prepend a line with our message
msg = "The user has mutiple Globus groups associated with, please specify one using 'group_uuid'"
logger.exception(msg)
bad_request_error(msg)
except KeyError as e:
# Log the full stack trace, prepend a line with our message
logger.exception(e)
bad_request_error(e)
except requests.exceptions.RequestException as e:
msg = f"Failed to create new HuBMAP ids via the uuid-api service"
logger.exception(msg)
# Due to the use of response.raise_for_status() in schema_manager.create_hubmap_ids()
# we can access the status codes from the exception
status_code = e.response.status_code
if status_code == 400:
bad_request_error(e.response.text)
if status_code == 404:
not_found_error(e.response.text)
else:
internal_server_error(e.response.text)
# Use the same json_data_dict and user_info_dict for each sample
# Only difference is the `uuid` and `hubmap_id` that are generated
# Merge all the dictionaries and pass to the trigger methods
new_data_dict = {**json_data_dict, **user_info_dict, **new_ids_dict_list[0]}
# Instead of calling generate_triggered_data() for each sample, we'll just call it on the first sample
# since all other samples will share the same generated data except `uuid` and `hubmap_id`
# A bit performance improvement
try:
# Use {} since no existing dict
generated_before_create_trigger_data_dict = schema_manager.generate_triggered_data('before_create_trigger', normalized_entity_type, user_token, {}, new_data_dict)
# If one of the before_create_trigger methods fails, we can't create the entity
except schema_errors.BeforeCreateTriggerException:
# Log the full stack trace, prepend a line with our message
msg = "Failed to execute one of the 'before_create_trigger' methods, can't create the entity"
logger.exception(msg)
internal_server_error(msg)
except schema_errors.NoDataProviderGroupException:
# Log the full stack trace, prepend a line with our message
if 'group_uuid' in json_data_dict:
msg = "Invalid 'group_uuid' value, can't create the entity"
else:
msg = "The user does not have the correct Globus group associated with, can't create the entity"
logger.exception(msg)
bad_request_error(msg)
except schema_errors.UnmatchedDataProviderGroupException:
# Log the full stack trace, prepend a line with our message
msg = "The user does not belong to the given Globus group, can't create the entity"
logger.exception(msg)
forbidden_error(msg)
except schema_errors.MultipleDataProviderGroupException:
# Log the full stack trace, prepend a line with our message
msg = "The user has mutiple Globus groups associated with, please specify one using 'group_uuid'"
logger.exception(msg)
bad_request_error(msg)
except KeyError as e:
# Log the full stack trace, prepend a line with our message
logger.exception(e)
bad_request_error(e)
except Exception as e:
logger.exception(e)
internal_server_error(e)
# Merge the user json data and generated trigger data into one dictionary
merged_dict = {**json_data_dict, **generated_before_create_trigger_data_dict}
# Filter out the merged_dict by getting rid of the transient properties (not to be stored)
# and properties with None value
# Meaning the returned target property key is different from the original key
# in the trigger method, e.g., Donor.image_files_to_add
filtered_merged_dict = schema_manager.remove_transient_and_none_values(merged_dict, normalized_entity_type)
samples_dict_list = []
for new_ids_dict in new_ids_dict_list:
# Just overwrite the `uuid` and `hubmap_id` that are generated
# All other generated properties will stay the same across all samples
sample_dict = {**filtered_merged_dict, **new_ids_dict}
# Add to the list
samples_dict_list.append(sample_dict)
# Generate property values for the only one Activity node
activity_data_dict = schema_manager.generate_activity_data(normalized_entity_type, user_token, user_info_dict)
# Create new sample nodes and needed relationships as well as activity node in one transaction
try:
# No return value
app_neo4j_queries.create_multiple_samples(neo4j_driver_instance, samples_dict_list, activity_data_dict, json_data_dict['direct_ancestor_uuid'])
except TransactionError:
msg = "Failed to create multiple samples"
# Log the full stack trace, prepend a line with our message
logger.exception(msg)
# Terminate and let the users know
internal_server_error(msg)
# Return the generated ids for UI
return new_ids_dict_list
"""
Execute 'after_create_trigger' methods
Parameters
----------
normalized_entity_type : str
One of the normalized entity types: Dataset, Collection, Sample, Donor
user_token: str
The user's globus nexus token
merged_data_dict: dict
The merged dict that contains the entity dict newly created and
information from user request json that are not stored in Neo4j
"""
def after_create(normalized_entity_type, user_token, merged_data_dict):
try:
# 'after_create_trigger' and 'after_update_trigger' don't generate property values
# It just returns the empty dict, no need to assign value
# Use {} since no new dict
schema_manager.generate_triggered_data('after_create_trigger', normalized_entity_type, user_token, merged_data_dict, {})
except schema_errors.AfterCreateTriggerException:
# Log the full stack trace, prepend a line with our message
msg = "The entity has been created, but failed to execute one of the 'after_create_trigger' methods"
logger.exception(msg)
internal_server_error(msg)
except Exception as e:
logger.exception(e)
internal_server_error(e)
"""
Generate 'before_update_trigger' data and update the entity details in Neo4j
Parameters
----------
request : flask.Request object
The incoming request
normalized_entity_type : str
One of the normalized entity types: Dataset, Collection, Sample, Donor
user_token: str
The user's globus nexus token
json_data_dict: dict
The json request dict
existing_entity_dict: dict
Dict of the existing entity information
Returns
-------
dict
A dict of all the updated entity details
"""
def update_entity_details(request, normalized_entity_type, user_token, json_data_dict, existing_entity_dict):
# Get user info based on request
user_info_dict = schema_manager.get_user_info(request)
# Merge user_info_dict and the json_data_dict for passing to the trigger methods
new_data_dict = {**user_info_dict, **json_data_dict}
try:
generated_before_update_trigger_data_dict = schema_manager.generate_triggered_data('before_update_trigger', normalized_entity_type, user_token, existing_entity_dict, new_data_dict)
# If something wrong with file upload
except schema_errors.FileUploadException as e:
logger.exception(e)
internal_server_error(e)
# If one of the before_update_trigger methods fails, we can't update the entity
except schema_errors.BeforeUpdateTriggerException:
# Log the full stack trace, prepend a line with our message
msg = "Failed to execute one of the 'before_update_trigger' methods, can't update the entity"
logger.exception(msg)
internal_server_error(msg)
except Exception as e:
logger.exception(e)
internal_server_error(e)
# Merge dictionaries
merged_dict = {**json_data_dict, **generated_before_update_trigger_data_dict}
# Filter out the merged_dict by getting rid of the transient properties (not to be stored)
# and properties with None value
# Meaning the returned target property key is different from the original key
# in the trigger method, e.g., Donor.image_files_to_add
filtered_merged_dict = schema_manager.remove_transient_and_none_values(merged_dict, normalized_entity_type)
# By now the filtered_merged_dict contains all user updates and all triggered data to be added to the entity node
# Any properties in filtered_merged_dict that are not on the node will be added.
# Any properties not in filtered_merged_dict that are on the node will be left as is.
# Any properties that are in both filtered_merged_dict and the node will be replaced in the node. However, if any property in the map is null, it will be removed from the node.
# Update the existing entity
try:
updated_entity_dict = app_neo4j_queries.update_entity(neo4j_driver_instance, normalized_entity_type, filtered_merged_dict, existing_entity_dict['uuid'])
except TransactionError:
msg = "Failed to update the entity with id " + id
# Log the full stack trace, prepend a line with our message
logger.exception(msg)
# Terminate and let the users know
internal_server_error(msg)
# Important: use `updated_entity_dict` instead of `filtered_merged_dict` to keep consistent with the stored
# string expression literals of Python list/dict being used with entity update, e.g., `image_files`
# Important: the same property keys in entity_dict will overwrite the same key in json_data_dict
# and this is what we wanted. Adding json_data_dict back is to include those `transient` properties
# provided in the JSON input but not stored in neo4j, and will be needed for after_create_trigger/after_update_trigger,
# e.g., `previous_revision_uuid`, `direct_ancestor_uuids`
# Add user_info_dict because it may be used by after_update_trigger methods
merged_final_dict = {**json_data_dict, **updated_entity_dict, **user_info_dict}
# Use merged_final_dict instead of merged_dict because
# merged_dict only contains properties to be updated, not all properties
return merged_final_dict
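# Illustrative call sketch, mirroring how retract_dataset() above uses this helper:
#   merged_final_dict = update_entity_details(request, 'Dataset', token, json_data_dict, entity_dict)
# after_update() only needs to be called when the updated properties have
# 'after_update_trigger' methods defined in the schema.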
"""
Execute 'after_update_trigger' methods
Parameters
----------
normalized_entity_type : str
One of the normalized entity types: Dataset, Collection, Sample, Donor
user_token: str
The user's globus nexus token
entity_dict: dict
The entity dict newly updated
"""
def after_update(normalized_entity_type, user_token, entity_dict):
try:
# 'after_create_trigger' and 'after_update_trigger' don't generate property values
# It just returns the empty dict, no need to assign value
# Use {} since no new dict
schema_manager.generate_triggered_data('after_update_trigger', normalized_entity_type, user_token, entity_dict, {})
except schema_errors.AfterUpdateTriggerException:
# Log the full stack trace, prepend a line with our message
msg = "The entity information has been updated, but failed to execute one of the 'after_update_trigger' methods"
logger.exception(msg)
internal_server_error(msg)
except Exception as e:
logger.exception(e)
internal_server_error(e)
"""
Get target entity dict
Parameters
----------
id : str
The uuid or hubmap_id of target entity
user_token: str
The user's globus nexus token from the incoming request
Returns
-------
dict
A dictionary of entity details returned from neo4j
"""
def query_target_entity(id, user_token):
try:
"""
The dict returned by uuid-api that contains all the associated ids, e.g.:
{
"ancestor_id": "23c0ffa90648358e06b7ac0c5673ccd2",
"ancestor_ids":[
"23c0ffa90648358e06b7ac0c5673ccd2"
],
"email": "marda@ufl.edu",
"hm_uuid": "1785aae4f0fb8f13a56d79957d1cbedf",
"hubmap_id": "HBM966.VNKN.965",
"submission_id": "UFL0007",
"time_generated": "2020-10-19 15:52:02",
"type": "DONOR",
"user_id": "694c6f6a-1deb-41a6-880f-d1ad8af3705f"
}
"""
hubmap_ids = schema_manager.get_hubmap_ids(id, user_token)
# Get the target uuid if all good
uuid = hubmap_ids['hm_uuid']
entity_dict = app_neo4j_queries.get_entity(neo4j_driver_instance, uuid)
# The fact that the uuid exists in uuid-api doesn't mean it's also in Neo4j
if not entity_dict:
not_found_error(f"Entity of id: {id} not found in Neo4j")
return entity_dict
except requests.exceptions.RequestException as e:
# Due to the use of response.raise_for_status() in schema_manager.get_hubmap_ids()
# we can access the status codes from the exception
status_code = e.response.status_code
if status_code == 400:
bad_request_error(e.response.text)
if status_code == 404:
not_found_error(e.response.text)
else:
internal_server_error(e.response.text)
"""
Always expect a json body from the user request
Parameters
----------
request : Flask request object
The Flask request passed from the API endpoint
"""
def require_json(request):
if not request.is_json:
bad_request_error("A json body and appropriate Content-Type header are required")
"""
Make a call to search-api to reindex this entity node in elasticsearch
Parameters
----------
uuid : str
The uuid of the target entity
user_token: str
The user's globus nexus token
"""
def reindex_entity(uuid, user_token):
try:
logger.info(f"Making a call to search-api to reindex uuid: {uuid}")
headers = create_request_headers(user_token)
response = requests.put(app.config['SEARCH_API_URL'] + "/reindex/" + uuid, headers = headers)
# The reindex takes time, so 202 Accepted response status code indicates that
# the request has been accepted for processing, but the processing has not been completed
if response.status_code == 202:
logger.info(f"The search-api has accepted the reindex request for uuid: {uuid}")
else:
logger.error(f"The search-api failed to initialize the reindex for uuid: {uuid}")
except Exception:
msg = f"Failed to send the reindex request to search-api for entity with uuid: {uuid}"
# Log the full stack trace, prepend a line with our message
logger.exception(msg)
# Terminate and let the users know
internal_server_error(msg)
"""
Create a dict of HTTP Authorization header with Bearer token for making calls to uuid-api
Parameters
----------
user_token: str
The user's globus nexus token
Returns
-------
dict
The headers dict to be used by requests
"""
def create_request_headers(user_token):
auth_header_name = 'Authorization'
auth_scheme = 'Bearer'
headers_dict = {
# Don't forget the space between scheme and the token value
auth_header_name: auth_scheme + ' ' + user_token
}
return headers_dict
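# Illustrative sketch (token value invented): for user_token 'abc123' the helper
# above yields {'Authorization': 'Bearer abc123'}, which requests sends verbatim
# as the HTTP Authorization header.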
"""
Ensure the access level dir name has leading and trailing slashes
Parameters
----------
dir_name : str
The name of the sub directory corresponding to each access level
Returns
-------
str
One of the formatted dir path strings: /public/, /protected/, /consortium/
"""
def access_level_prefix_dir(dir_name):
if string_helper.isBlank(dir_name):
return ''
return hm_file_helper.ensureTrailingSlashURL(hm_file_helper.ensureBeginningSlashURL(dir_name))
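# Hedged examples of the helper above (directory names as documented):
# access_level_prefix_dir('public') -> '/public/'
# access_level_prefix_dir('') -> '' (blank input is returned unchanged)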
"""
Ensures that a given organ code matches what is found in the organ_types yaml document
Parameters
----------
organ_code : str
The two-character organ code to validate
Returns nothing. Raises bad_request_error if the organ code is not found in organ_types.yaml
"""
def validate_organ_code(organ_code):
ORGAN_YAML_URL = 'https://raw.githubusercontent.com/hubmapconsortium/search-api/test-release/src/search-schema/data/definitions/enums/organ_types.yaml'
with urllib.request.urlopen(ORGAN_YAML_URL) as organ_file:
organ_yaml = yaml.load(organ_file, Loader=yaml.FullLoader)
if organ_code.upper() not in organ_yaml:
bad_request_error(f"Invalid Organ. Organ must be 2 digit code, case-insensitive located at {ORGAN_YAML_URL}")
# For local development/testing
if __name__ == "__main__":
try:
app.run(host='0.0.0.0', port="5002")
except Exception as e:
print("Error during starting debug server.")
print(str(e))
logger.error(e, exc_info=True)
print("Error during startup check the log file for further information")
| 44.463915
| 188
| 0.714127
|
393c2a4ed7fd25d9ba2d903271aeec9f9e1494c9
| 25,117
|
py
|
Python
|
adafruit_character_lcd/character_lcd.py
|
jepler/Adafruit_CircuitPython_CharLCD
|
a9e0c61575e02fe6b5eea348553f1e92d1f55632
|
[
"MIT"
] | null | null | null |
adafruit_character_lcd/character_lcd.py
|
jepler/Adafruit_CircuitPython_CharLCD
|
a9e0c61575e02fe6b5eea348553f1e92d1f55632
|
[
"MIT"
] | null | null | null |
adafruit_character_lcd/character_lcd.py
|
jepler/Adafruit_CircuitPython_CharLCD
|
a9e0c61575e02fe6b5eea348553f1e92d1f55632
|
[
"MIT"
] | 1
|
2020-12-07T14:16:40.000Z
|
2020-12-07T14:16:40.000Z
|
# The MIT License (MIT)
#
# Copyright (c) 2017 Brent Rubell for Adafruit Industries
# Copyright (c) 2018 Kattni Rembor for Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_character_lcd.character_lcd`
====================================================
Module for interfacing with monochromatic character LCDs
* Author(s): Kattni Rembor, Brent Rubell, Asher Lieber,
Tony DiCola (original python charLCD library)
Implementation Notes
--------------------
**Hardware:**
"* `Adafruit Character LCDs <http://www.adafruit.com/category/63_96>`_"
**Software and Dependencies:**
* Adafruit CircuitPython firmware:
https://github.com/adafruit/circuitpython/releases
* Adafruit's Bus Device library (when using I2C/SPI):
https://github.com/adafruit/Adafruit_CircuitPython_BusDevice
"""
import time
import digitalio
from micropython import const
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_CharLCD.git"
# pylint: disable-msg=bad-whitespace
# Commands
_LCD_CLEARDISPLAY = const(0x01)
_LCD_RETURNHOME = const(0x02)
_LCD_ENTRYMODESET = const(0x04)
_LCD_DISPLAYCONTROL = const(0x08)
_LCD_CURSORSHIFT = const(0x10)
_LCD_FUNCTIONSET = const(0x20)
_LCD_SETCGRAMADDR = const(0x40)
_LCD_SETDDRAMADDR = const(0x80)
# Entry flags
_LCD_ENTRYLEFT = const(0x02)
_LCD_ENTRYSHIFTDECREMENT = const(0x00)
# Control flags
_LCD_DISPLAYON = const(0x04)
_LCD_CURSORON = const(0x02)
_LCD_CURSOROFF = const(0x00)
_LCD_BLINKON = const(0x01)
_LCD_BLINKOFF = const(0x00)
# Move flags
_LCD_DISPLAYMOVE = const(0x08)
_LCD_MOVERIGHT = const(0x04)
_LCD_MOVELEFT = const(0x00)
# Function set flags
_LCD_4BITMODE = const(0x00)
_LCD_2LINE = const(0x08)
_LCD_1LINE = const(0x00)
_LCD_5X8DOTS = const(0x00)
# Offset for up to 4 rows.
_LCD_ROW_OFFSETS = (0x00, 0x40, 0x14, 0x54)
# pylint: enable-msg=bad-whitespace
def _set_bit(byte_value, position, val):
# Given the specified byte_value set the bit at position to the provided
# boolean value val and return the modified byte.
ret = None
if val:
ret = byte_value | (1 << position)
else:
ret = byte_value & ~(1 << position)
return ret
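# Worked examples of _set_bit (values chosen purely for illustration):
# _set_bit(0b1000, 1, True)  -> 0b1010 (10), bit 1 switched on
# _set_bit(0b1010, 3, False) -> 0b0010 (2), bit 3 cleared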
def _map(xval, in_min, in_max, out_min, out_max):
# Affine transform/map with constrained output.
outrange = float(out_max - out_min)
inrange = float(in_max - in_min)
ret = (xval - in_min) * (outrange / inrange) + out_min
if out_max > out_min:
ret = max(min(ret, out_max), out_min)
else:
ret = max(min(ret, out_min), out_max)
return ret
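# Worked example (values illustrative): mapping a 0-100 colour level onto the
# inverted 16-bit PWM duty cycle used later by Character_LCD_RGB.color:
# _map(50, 0, 100, 65535, 0)  -> 32767.5 (roughly half duty cycle)
# _map(150, 0, 100, 65535, 0) -> 0.0 (out-of-range input is clamped)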
# pylint: disable-msg=too-many-instance-attributes
class Character_LCD:
"""Base class for character LCD.
:param ~digitalio.DigitalInOut rs: The reset data line
:param ~digitalio.DigitalInOut en: The enable data line
:param ~digitalio.DigitalInOut d4: The data line 4
:param ~digitalio.DigitalInOut d5: The data line 5
:param ~digitalio.DigitalInOut d6: The data line 6
:param ~digitalio.DigitalInOut d7: The data line 7
:param columns: The columns on the charLCD
:param lines: The lines on the charLCD
"""
LEFT_TO_RIGHT = const(0)
RIGHT_TO_LEFT = const(1)
# pylint: disable-msg=too-many-arguments
def __init__(self, rs, en, d4, d5, d6, d7, columns, lines):
self.columns = columns
self.lines = lines
# save pin numbers
self.reset = rs
self.enable = en
self.dl4 = d4
self.dl5 = d5
self.dl6 = d6
self.dl7 = d7
# set all pins as outputs
for pin in (rs, en, d4, d5, d6, d7):
pin.direction = digitalio.Direction.OUTPUT
# Initialise the display
self._write8(0x33)
self._write8(0x32)
# Initialise display control
self.displaycontrol = _LCD_DISPLAYON | _LCD_CURSOROFF | _LCD_BLINKOFF
# Initialise display function
self.displayfunction = _LCD_4BITMODE | _LCD_1LINE | _LCD_2LINE | _LCD_5X8DOTS
# Initialise display mode
self.displaymode = _LCD_ENTRYLEFT | _LCD_ENTRYSHIFTDECREMENT
# Write to displaycontrol
self._write8(_LCD_DISPLAYCONTROL | self.displaycontrol)
# Write to displayfunction
self._write8(_LCD_FUNCTIONSET | self.displayfunction)
# Set entry mode
self._write8(_LCD_ENTRYMODESET | self.displaymode)
self.clear()
self._message = None
self._enable = None
self._direction = None
# track row and column used in cursor_position
# initialize to 0,0
self.row = 0
self.column = 0
self._column_align = False
# pylint: enable-msg=too-many-arguments
def home(self):
"""Moves the cursor "home" to position (1, 1)."""
self._write8(_LCD_RETURNHOME)
time.sleep(0.003)
def clear(self):
"""Clears everything displayed on the LCD.
The following example displays, "Hello, world!", then clears the LCD.
.. code-block:: python
import time
import board
import busio
import adafruit_character_lcd.character_lcd_i2c as character_lcd
i2c = busio.I2C(board.SCL, board.SDA)
lcd = character_lcd.Character_LCD_I2C(i2c, 16, 2)
lcd.message = "Hello, world!"
time.sleep(5)
lcd.clear()
"""
self._write8(_LCD_CLEARDISPLAY)
time.sleep(0.003)
@property
def column_align(self):
"""If True, message text after '\\n' starts directly below start of first
character in message. If False, text after '\\n' starts at column zero.
"""
return self._column_align
@column_align.setter
def column_align(self, enable):
if isinstance(enable, bool):
self._column_align = enable
else:
raise ValueError("The column_align value must be either True or False")
@property
def cursor(self):
"""True if cursor is visible. False to stop displaying the cursor.
The following example shows the cursor after a displayed message:
.. code-block:: python
import time
import board
import busio
import adafruit_character_lcd.character_lcd_i2c as character_lcd
i2c = busio.I2C(board.SCL, board.SDA)
lcd = character_lcd.Character_LCD_I2C(i2c, 16, 2)
lcd.cursor = True
lcd.message = "Cursor! "
time.sleep(5)
"""
return self.displaycontrol & _LCD_CURSORON == _LCD_CURSORON
@cursor.setter
def cursor(self, show):
if show:
self.displaycontrol |= _LCD_CURSORON
else:
self.displaycontrol &= ~_LCD_CURSORON
self._write8(_LCD_DISPLAYCONTROL | self.displaycontrol)
def cursor_position(self, column, row):
"""Move the cursor to position ``column``, ``row`` for the next
message only. Displaying a message resets the cursor position to (0, 0).
:param column: column location
:param row: row location
"""
# Clamp row to the last row of the display
if row >= self.lines:
row = self.lines - 1
# Clamp to last column of display
if column >= self.columns:
column = self.columns - 1
# Set location
self._write8(_LCD_SETDDRAMADDR | (column + _LCD_ROW_OFFSETS[row]))
# Update self.row and self.column to match setter
self.row = row
self.column = column
@property
def blink(self):
"""
Blink the cursor. True to blink the cursor. False to stop blinking.
The following example shows a message followed by a blinking cursor for five seconds.
.. code-block:: python
import time
import board
import busio
import adafruit_character_lcd.character_lcd_i2c as character_lcd
i2c = busio.I2C(board.SCL, board.SDA)
lcd = character_lcd.Character_LCD_I2C(i2c, 16, 2)
lcd.blink = True
lcd.message = "Blinky cursor!"
time.sleep(5)
lcd.blink = False
"""
return self.displaycontrol & _LCD_BLINKON == _LCD_BLINKON
@blink.setter
def blink(self, blink):
if blink:
self.displaycontrol |= _LCD_BLINKON
else:
self.displaycontrol &= ~_LCD_BLINKON
self._write8(_LCD_DISPLAYCONTROL | self.displaycontrol)
@property
def display(self):
"""
Enable or disable the display. True to enable the display. False to disable the display.
The following example displays, "Hello, world!" on the LCD and then turns the display off.
.. code-block:: python
import time
import board
import busio
import adafruit_character_lcd.character_lcd_i2c as character_lcd
i2c = busio.I2C(board.SCL, board.SDA)
lcd = character_lcd.Character_LCD_I2C(i2c, 16, 2)
lcd.message = "Hello, world!"
time.sleep(5)
lcd.display = False
"""
return self.displaycontrol & _LCD_DISPLAYON == _LCD_DISPLAYON
@display.setter
def display(self, enable):
if enable:
self.displaycontrol |= _LCD_DISPLAYON
else:
self.displaycontrol &= ~_LCD_DISPLAYON
self._write8(_LCD_DISPLAYCONTROL | self.displaycontrol)
@property
def message(self):
"""Display a string of text on the character LCD.
Start position is (0,0) if cursor_position is not set.
If cursor_position is set, message starts at the set
position from the left for left to right text and from
the right for right to left text. Resets cursor column
and row to (0,0) after displaying the message.
The following example displays, "Hello, world!" on the LCD.
.. code-block:: python
import time
import board
import busio
import adafruit_character_lcd.character_lcd_i2c as character_lcd
i2c = busio.I2C(board.SCL, board.SDA)
lcd = character_lcd.Character_LCD_I2C(i2c, 16, 2)
lcd.message = "Hello, world!"
time.sleep(5)
"""
return self._message
@message.setter
def message(self, message):
self._message = message
# Set line to match self.row from cursor_position()
line = self.row
# Track times through iteration, to act on the initial character of the message
initial_character = 0
# iterate through each character
for character in message:
# If this is the first character in the string:
if initial_character == 0:
# Start at (0, 0) unless direction is set right to left, in which case start
# on the opposite side of the display if cursor_position not set or (0,0)
# If cursor_position is set then starts at the specified location for
# LEFT_TO_RIGHT. If RIGHT_TO_LEFT cursor_position is determined from right.
# allows for cursor_position to work in RIGHT_TO_LEFT mode
if self.displaymode & _LCD_ENTRYLEFT > 0:
col = self.column
else:
col = self.columns - 1 - self.column
self.cursor_position(col, line)
initial_character += 1
# If character is \n, go to next line
if character == "\n":
line += 1
# Start the second line at (0, 1) unless direction is set right to left in
# which case start on the opposite side of the display if cursor_position
# is (0,0) or not set. Start second line at same column as first line when
# cursor_position is set
if self.displaymode & _LCD_ENTRYLEFT > 0:
col = self.column * self._column_align
else:
if self._column_align:
col = self.column
else:
col = self.columns - 1
self.cursor_position(col, line)
# Write string to display
else:
self._write8(ord(character), True)
# reset column and row to (0,0) after message is displayed
self.column, self.row = 0, 0
def move_left(self):
"""Moves displayed text left one column.
The following example scrolls a message to the left off the screen.
.. code-block:: python
import time
import board
import busio
import adafruit_character_lcd.character_lcd_i2c as character_lcd
i2c = busio.I2C(board.SCL, board.SDA)
lcd = character_lcd.Character_LCD_I2C(i2c, 16, 2)
scroll_message = "<-- Scroll"
lcd.message = scroll_message
time.sleep(2)
for i in range(len(scroll_message)):
lcd.move_left()
time.sleep(0.5)
"""
self._write8(_LCD_CURSORSHIFT | _LCD_DISPLAYMOVE | _LCD_MOVELEFT)
def move_right(self):
"""Moves displayed text right one column.
The following example scrolls a message to the right off the screen.
.. code-block:: python
import time
import board
import busio
import adafruit_character_lcd.character_lcd_i2c as character_lcd
i2c = busio.I2C(board.SCL, board.SDA)
lcd = character_lcd.Character_LCD_I2C(i2c, 16, 2)
scroll_message = "Scroll -->"
lcd.message = scroll_message
time.sleep(2)
for i in range(len(scroll_message) + 16):
lcd.move_right()
time.sleep(0.5)
"""
self._write8(_LCD_CURSORSHIFT | _LCD_DISPLAYMOVE | _LCD_MOVERIGHT)
@property
def text_direction(self):
"""The direction the text is displayed. To display the text left to right beginning on the
left side of the LCD, set ``text_direction = LEFT_TO_RIGHT``. To display the text right
to left beginning on the right size of the LCD, set ``text_direction = RIGHT_TO_LEFT``.
Text defaults to displaying from left to right.
The following example displays "Hello, world!" from right to left.
.. code-block:: python
import time
import board
import busio
import adafruit_character_lcd.character_lcd_i2c as character_lcd
i2c = busio.I2C(board.SCL, board.SDA)
lcd = character_lcd.Character_LCD_I2C(i2c, 16, 2)
lcd.text_direction = lcd.RIGHT_TO_LEFT
lcd.message = "Hello, world!"
time.sleep(5)
"""
return self._direction
@text_direction.setter
def text_direction(self, direction):
self._direction = direction
if direction == self.LEFT_TO_RIGHT:
self._left_to_right()
elif direction == self.RIGHT_TO_LEFT:
self._right_to_left()
def _left_to_right(self):
# Displays text from left to right on the LCD.
self.displaymode |= _LCD_ENTRYLEFT
self._write8(_LCD_ENTRYMODESET | self.displaymode)
def _right_to_left(self):
# Displays text from right to left on the LCD.
self.displaymode &= ~_LCD_ENTRYLEFT
self._write8(_LCD_ENTRYMODESET | self.displaymode)
def create_char(self, location, pattern):
"""
Fill one of the first 8 CGRAM locations with custom characters.
The location parameter should be between 0 and 7 and pattern should
provide an array of 8 bytes containing the pattern. E.g. you can easily
design your custom character at http://www.quinapalus.com/hd44780udg.html
To show your custom character use, for example, ``lcd.message = "\x01"``
:param location: integer in range(8) to store the created character
:param ~bytes pattern: len(8) describes created character
"""
# only position 0..7 are allowed
location &= 0x7
self._write8(_LCD_SETCGRAMADDR | (location << 3))
for i in range(8):
self._write8(pattern[i], char_mode=True)
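# Hypothetical usage sketch (the pattern bytes are an example, not from this file):
# checkmark = bytes([0x0, 0x0, 0x1, 0x3, 0x16, 0x1C, 0x8, 0x0])
# lcd.create_char(0, checkmark)
# lcd.message = "\x00"  # shows the custom character stored in CGRAM slot 0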
def _write8(self, value, char_mode=False):
# Sends 8b ``value`` in ``char_mode``.
# :param value: bytes
# :param char_mode: character/data mode selector. False (default) for
# data only, True for character bits.
# one ms delay to prevent writing too quickly.
time.sleep(0.001)
# set character/data bit. (charmode = False)
self.reset.value = char_mode
# WRITE upper 4 bits
self.dl4.value = ((value >> 4) & 1) > 0
self.dl5.value = ((value >> 5) & 1) > 0
self.dl6.value = ((value >> 6) & 1) > 0
self.dl7.value = ((value >> 7) & 1) > 0
# send command
self._pulse_enable()
# WRITE lower 4 bits
self.dl4.value = (value & 1) > 0
self.dl5.value = ((value >> 1) & 1) > 0
self.dl6.value = ((value >> 2) & 1) > 0
self.dl7.value = ((value >> 3) & 1) > 0
self._pulse_enable()
def _pulse_enable(self):
# Pulses (lo->hi->lo) to send commands.
self.enable.value = False
# short pause (~0.1 microsecond requested) for the enable pulse
time.sleep(0.0000001)
self.enable.value = True
time.sleep(0.0000001)
self.enable.value = False
time.sleep(0.0000001)
# pylint: enable-msg=too-many-instance-attributes
# pylint: disable-msg=too-many-instance-attributes
class Character_LCD_Mono(Character_LCD):
"""Interfaces with monochromatic character LCDs.
:param ~digitalio.DigitalInOut rs: The reset data line
:param ~digitalio.DigitalInOut en: The enable data line
:param ~digitalio.DigitalInOut d4: The data line 4
:param ~digitalio.DigitalInOut d5: The data line 5
:param ~digitalio.DigitalInOut d6: The data line 6
:param ~digitalio.DigitalInOut d7: The data line 7
:param columns: The columns on the charLCD
:param lines: The lines on the charLCD
:param ~digitalio.DigitalInOut backlight_pin: The backlight pin
:param bool backlight_inverted: ``False`` if LCD is not inverted, i.e. backlight pin is
connected to common anode. ``True`` if LCD is inverted i.e. backlight pin is connected
to common cathode.
"""
# pylint: disable-msg=too-many-arguments
def __init__(
self,
rs,
en,
db4,
db5,
db6,
db7,
columns,
lines,
backlight_pin=None,
backlight_inverted=False,
):
# Backlight pin and inversion
self.backlight_pin = backlight_pin
self.backlight_inverted = backlight_inverted
# Setup backlight
if backlight_pin is not None:
self.backlight_pin.direction = digitalio.Direction.OUTPUT
self.backlight = True
super().__init__(rs, en, db4, db5, db6, db7, columns, lines)
# pylint: enable-msg=too-many-arguments
@property
def backlight(self):
"""Enable or disable backlight. True if backlight is on. False if backlight is off.
The following example turns the backlight off, then displays, "Hello, world?", then turns
the backlight on and displays, "Hello, world!"
.. code-block:: python
import time
import board
import busio
import adafruit_character_lcd.character_lcd_i2c as character_lcd
i2c = busio.I2C(board.SCL, board.SDA)
lcd = character_lcd.Character_LCD_I2C(i2c, 16, 2)
lcd.backlight = False
lcd.message = "Hello, world?"
time.sleep(5)
lcd.backlight = True
lcd.message = "Hello, world!"
time.sleep(5)
"""
return self._enable
@backlight.setter
def backlight(self, enable):
self._enable = enable
if enable:
self.backlight_pin.value = not self.backlight_inverted
else:
self.backlight_pin.value = self.backlight_inverted
class Character_LCD_RGB(Character_LCD):
"""Interfaces with RGB character LCDs.
:param ~digitalio.DigitalInOut rs: The reset data line
:param ~digitalio.DigitalInOut en: The enable data line
:param ~digitalio.DigitalInOut db4: The data line 4
:param ~digitalio.DigitalInOut db5: The data line 5
:param ~digitalio.DigitalInOut db6: The data line 6
:param ~digitalio.DigitalInOut db7: The data line 7
:param columns: The columns on the charLCD
:param lines: The lines on the charLCD
:param ~pulseio.PWMOut, ~digitalio.DigitalInOut red: Red RGB Anode
:param ~pulseio.PWMOut, ~digitalio.DigitalInOut green: Green RGB Anode
:param ~pulseio.PWMOut, ~digitalio.DigitalInOut blue: Blue RGB Anode
:param ~digitalio.DigitalInOut read_write: The rw pin. Determines whether to read to or
write from the display. Not necessary if only writing to the display. Used on shield.
"""
# pylint: disable-msg=too-many-arguments
def __init__(
self,
rs,
en,
db4,
db5,
db6,
db7,
columns,
lines,
red,
green,
blue,
read_write=None,
):
# Define read_write (rw) pin
self.read_write = read_write
# Setup rw pin if used
if read_write is not None:
self.read_write.direction = digitalio.Direction.OUTPUT
# define color params
self.rgb_led = [red, green, blue]
for pin in self.rgb_led:
if hasattr(pin, "direction"):
# Assume a digitalio.DigitalInOut or compatible interface:
pin.direction = digitalio.Direction.OUTPUT
elif not hasattr(pin, "duty_cycle"):
raise TypeError(
"RGB LED objects must be instances of digitalio.DigitalInOut"
" or pulseio.PWMOut, or provide a compatible interface."
)
self._color = [0, 0, 0]
super().__init__(rs, en, db4, db5, db6, db7, columns, lines)
@property
def color(self):
"""
The color of the display. Provide a list of three integers ranging 0 - 100, ``[R, G, B]``.
``0`` is no color, or "off". ``100`` is maximum color. For example, the brightest red would
be ``[100, 0, 0]``, and a half-bright purple would be, ``[50, 0, 50]``.
If PWM is unavailable, ``0`` is off, and non-zero is on. For example, ``[1, 0, 0]`` would
be red.
The following example turns the LCD red and displays, "Hello, world!".
.. code-block:: python
import time
import board
import busio
import adafruit_character_lcd.character_lcd_rgb_i2c as character_lcd
i2c = busio.I2C(board.SCL, board.SDA)
lcd = character_lcd.Character_LCD_RGB_I2C(i2c, 16, 2)
lcd.color = [100, 0, 0]
lcd.message = "Hello, world!"
time.sleep(5)
"""
return self._color
@color.setter
def color(self, color):
self._color = color
for number, pin in enumerate(self.rgb_led):
if hasattr(pin, "duty_cycle"):
# Assume a pulseio.PWMOut or compatible interface and set duty cycle:
pin.duty_cycle = int(_map(color[number], 0, 100, 65535, 0))
elif hasattr(pin, "value"):
# If we don't have a PWM interface, all we can do is turn each color
# on / off. Assume a DigitalInOut (or compatible interface) and write
# 0 (on) to pin for any value greater than 0, or 1 (off) for 0:
pin.value = not color[number] > 0
| 34.312842
| 99
| 0.617192
|
0d59e1aeb6baa674260e9341886a5b627483b576
| 401
|
py
|
Python
|
api/database.py
|
groupwildman/todolist
|
7ce990b91e208fdb5ca757e3508a2f0764c16798
|
[
"Apache-2.0"
] | null | null | null |
api/database.py
|
groupwildman/todolist
|
7ce990b91e208fdb5ca757e3508a2f0764c16798
|
[
"Apache-2.0"
] | null | null | null |
api/database.py
|
groupwildman/todolist
|
7ce990b91e208fdb5ca757e3508a2f0764c16798
|
[
"Apache-2.0"
] | null | null | null |
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
SQLALCHEMY_DATABASE_URI = 'sqlite:///todolist.sqlite'
_engine = create_engine(
SQLALCHEMY_DATABASE_URI, connect_args={"check_same_thread": False}
)
_session_local = sessionmaker(autocommit=False, autoflush=False, bind=_engine)
_base = declarative_base()
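# Minimal usage sketch (assumed pattern, not part of the original file): model
# classes elsewhere would subclass _base; a caller could then do roughly:
# _base.metadata.create_all(bind=_engine)
# session = _session_local()
# try:
#     ...  # query / add / commit through `session`
# finally:
#     session.close()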
| 26.733333
| 78
| 0.810474
|
d5cb786c4586dd9fd365217785fbebf472d1451d
| 404
|
py
|
Python
|
store/migrations/0004_shippingaddress_country.py
|
ketanv75/Django-training
|
ca7ad629ce92be370bbbf6e7f8a14cf170fd9174
|
[
"bzip2-1.0.6"
] | null | null | null |
store/migrations/0004_shippingaddress_country.py
|
ketanv75/Django-training
|
ca7ad629ce92be370bbbf6e7f8a14cf170fd9174
|
[
"bzip2-1.0.6"
] | null | null | null |
store/migrations/0004_shippingaddress_country.py
|
ketanv75/Django-training
|
ca7ad629ce92be370bbbf6e7f8a14cf170fd9174
|
[
"bzip2-1.0.6"
] | null | null | null |
# Generated by Django 3.0.6 on 2021-05-19 19:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('store', '0003_auto_20210519_2101'),
]
operations = [
migrations.AddField(
model_name='shippingaddress',
name='country',
field=models.CharField(max_length=200, null=True),
),
]
| 21.263158
| 62
| 0.608911
|
ec3cbb7462b8d3d54d94695b2935b0283d415d4e
| 3,811
|
py
|
Python
|
resources/libraries/python/MLRsearch/ReceiveRateInterval.py
|
preym17/csit
|
3151c98618c78e3782e48bbe4d9c8f906c126f69
|
[
"Apache-2.0"
] | null | null | null |
resources/libraries/python/MLRsearch/ReceiveRateInterval.py
|
preym17/csit
|
3151c98618c78e3782e48bbe4d9c8f906c126f69
|
[
"Apache-2.0"
] | null | null | null |
resources/libraries/python/MLRsearch/ReceiveRateInterval.py
|
preym17/csit
|
3151c98618c78e3782e48bbe4d9c8f906c126f69
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module defining ReceiveRateInterval class."""
import math
from resources.libraries.python.MLRsearch.ReceiveRateMeasurement \
import ReceiveRateMeasurement
class ReceiveRateInterval(object):
"""Structure defining two Rr measurements, and their relation."""
def __init__(self, measured_low, measured_high):
"""Store the bound measurements after checking argument types.
:param measured_low: Measurement for the lower bound.
:param measured_high: Measurement for the upper bound.
:type measured_low: ReceiveRateMeasurement.ReceiveRateMeasurement
:type measured_high: ReceiveRateMeasurement.ReceiveRateMeasurement
"""
# TODO: Type checking is not very pythonic,
# perhaps users can fix wrong usage without it?
if not isinstance(measured_low, ReceiveRateMeasurement):
raise TypeError("measured_low is not a ReceiveRateMeasurement: "
"{low!r}".format(low=measured_low))
if not isinstance(measured_high, ReceiveRateMeasurement):
raise TypeError("measured_high is not a ReceiveRateMeasurement: "
"{high!r}".format(high=measured_high))
self.measured_low = measured_low
self.measured_high = measured_high
# Declare secondary quantities to appease pylint.
self.abs_tr_width = None
"""Absolute width of target transmit rate. Upper minus lower."""
self.rel_tr_width = None
"""Relative width of target transmit rate. Absolute divided by upper."""
self.sort()
def sort(self):
"""Sort bounds by target Tr, compute secondary quantities."""
if self.measured_low.target_tr > self.measured_high.target_tr:
self.measured_low, self.measured_high = (
self.measured_high, self.measured_low)
self.abs_tr_width = (
self.measured_high.target_tr - self.measured_low.target_tr)
self.rel_tr_width = self.abs_tr_width / self.measured_high.target_tr
def width_in_goals(self, relative_width_goal):
"""Return float value.
Relative width goal is some (negative) value on logarithmic scale.
Current relative width is another logarithmic value.
Return the latter divided by the former.
This is useful when investigating how surprising widths came to be.
:param relative_width_goal: Upper bound times this is the goal
difference between upper bound and lower bound.
:type relative_width_goal: float
:returns: Current width as logarithmic multiple of goal width [1].
:rtype: float
"""
return math.log(1.0 - self.rel_tr_width) / math.log(
1.0 - relative_width_goal)
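# Worked example (numbers illustrative): with a current relative width of 1%
# and a relative width goal of 0.5%, the method returns
# log(1.0 - 0.01) / log(1.0 - 0.005) ~= -0.010050 / -0.005013 ~= 2.005,
# i.e. the interval is roughly two goal-widths wide on the logarithmic scale.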
def __str__(self):
"""Return string as half-open interval."""
return "[{low!s};{high!s})".format(
low=self.measured_low, high=self.measured_high)
def __repr__(self):
"""Return string evaluable as a constructor call."""
return ("ReceiveRateInterval(measured_low={low!r}"
",measured_high={high!r})".format(
low=self.measured_low, high=self.measured_high))
| 44.313953
| 80
| 0.683023
|
25b938894b14e87a7a848a5a001a3333f5325d8d
| 537
|
py
|
Python
|
projdir/app/migrations/0031_auto_20160624_1717.py
|
NITKOSG/InfoGami
|
f5ce567d5558a768e58886b3419e378cabb7049d
|
[
"MIT"
] | 2
|
2017-02-01T09:57:40.000Z
|
2017-06-03T15:26:55.000Z
|
projdir/app/migrations/0031_auto_20160624_1717.py
|
NITKOSG/InfoGami
|
f5ce567d5558a768e58886b3419e378cabb7049d
|
[
"MIT"
] | 2
|
2018-05-11T20:10:23.000Z
|
2019-05-01T21:13:07.000Z
|
projdir/app/migrations/0031_auto_20160624_1717.py
|
NITKOSG/InfoGami
|
f5ce567d5558a768e58886b3419e378cabb7049d
|
[
"MIT"
] | 3
|
2017-02-11T13:19:28.000Z
|
2018-08-31T18:51:18.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-06-24 11:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0030_messsagemodel_created'),
]
operations = [
migrations.AlterField(
model_name='userprofilemodel',
name='user_profile_pic',
field=models.FileField(blank=True, default='profile_pics/avatars/default.png', upload_to='profile_pics/'),
),
]
| 25.571429
| 118
| 0.651769
|
c61c45ef810906f6ad15797b1bd2dfdbc935cb92
| 1,595
|
py
|
Python
|
USACO Prep 2/marathon.py
|
PythonCoderAS/Algorithims-Assignments
|
1f33b6187d5ca36198cee8c7086f07952a1d6cbb
|
[
"MIT"
] | null | null | null |
USACO Prep 2/marathon.py
|
PythonCoderAS/Algorithims-Assignments
|
1f33b6187d5ca36198cee8c7086f07952a1d6cbb
|
[
"MIT"
] | 1
|
2021-12-31T02:14:55.000Z
|
2021-12-31T02:14:55.000Z
|
USACO Prep 2/marathon.py
|
PythonCoderAS/Algorithims-Assignments
|
1f33b6187d5ca36198cee8c7086f07952a1d6cbb
|
[
"MIT"
] | null | null | null |
def get_distance(coords1, coords2):
"""Gets the distance between 2 pairs of coordinates
Args:
coords1 ([int, int]): The first set of coordinates
coords2 ([int, int]): The second set of coordinates
"""
return abs(coords1[0] - coords2[0]) + abs(coords1[1] - coords2[1])
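# Illustrative example: get_distance((0, 0), (3, 4)) == 7, i.e. the Manhattan
# (taxicab) distance |0-3| + |0-4|, not the Euclidean distance of 5.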
coordinates = []
with open("marathon.in", "r") as file:
n = int(file.readline().strip())
for line in file:
x, y = line.strip().split(" ")
coordinates.append((int(x), int(y)))
distance_pairs = {}
for i in range(n - 1):
start = coordinates[i]
finish = coordinates[i + 1]
distance = get_distance(start, finish)
distance_pairs[start, finish] = distance
distance_pairs_list = list(distance_pairs.items())
normal_max = sum([distance for _, distance in distance_pairs_list])
possible_values = distance_pairs_list[:-1]
possible_sums = []
for combo in range(len(possible_values)):
current_sum = normal_max
actual_index = combo
removed_coordinate_set = distance_pairs_list[actual_index]
modified_coordinate_set = distance_pairs_list[actual_index + 1]
current_sum -= (removed_coordinate_set[1] + modified_coordinate_set[1])
# removed_coordinate_set[0][1] is the coordinate pair that was removed, and is equal
# to modified_coordinate_set[0][0]. We need to add the distance between the new
# coordinate pairs.
current_sum += get_distance(removed_coordinate_set[0][0], modified_coordinate_set[0][1])
possible_sums.append(current_sum)
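# Illustrative example of the skip logic above (coordinates invented): for
# consecutive checkpoints A=(0, 0), B=(0, 3), C=(5, 0) the legs A->B and B->C
# cost 3 + 8 = 11; skipping B substitutes the direct A->C distance of 5, so
# this candidate's current_sum is 6 lower than normal_max.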
with open("marathon.out", "w") as file:
file.write(str(min(possible_sums)) + "\n")
| 36.25
| 92
| 0.697806
|
6fe9cd52136ed350e075521cd78e2588453ce6b9
| 603
|
py
|
Python
|
buildgrid/_version.py
|
antmicro/buildgrid
|
eccb35e3670e52957e189616fbf73dc82e8f83d4
|
[
"Apache-2.0"
] | null | null | null |
buildgrid/_version.py
|
antmicro/buildgrid
|
eccb35e3670e52957e189616fbf73dc82e8f83d4
|
[
"Apache-2.0"
] | null | null | null |
buildgrid/_version.py
|
antmicro/buildgrid
|
eccb35e3670e52957e189616fbf73dc82e8f83d4
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2018 Bloomberg LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# <http://www.apache.org/licenses/LICENSE-2.0>
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = '0.0.2'
| 35.470588
| 74
| 0.754561
|
a04542158e4f8e06c8341e8fc4d8ac71a5730cdb
| 1,806
|
py
|
Python
|
src/streamlink/plugins/nrk.py
|
sn4kebite/streamlink
|
054b760ce7e9f43451eed08e9f39de440c3e5add
|
[
"BSD-2-Clause"
] | 5
|
2017-03-21T19:43:17.000Z
|
2018-10-03T14:04:29.000Z
|
src/streamlink/plugins/nrk.py
|
sn4kebite/streamlink
|
054b760ce7e9f43451eed08e9f39de440c3e5add
|
[
"BSD-2-Clause"
] | 7
|
2016-10-13T23:29:31.000Z
|
2018-06-28T14:04:32.000Z
|
src/streamlink/plugins/nrk.py
|
sn4kebite/streamlink
|
054b760ce7e9f43451eed08e9f39de440c3e5add
|
[
"BSD-2-Clause"
] | 2
|
2016-11-24T18:37:33.000Z
|
2017-03-21T19:43:49.000Z
|
import re
from urllib.parse import urljoin
from streamlink.plugin import Plugin
from streamlink.plugin.api import validate
from streamlink.stream import HLSStream
COOKIE_PARAMS = (
"devicetype=desktop&"
"preferred-player-odm=hlslink&"
"preferred-player-live=hlslink"
)
_id_re = re.compile(r"/(?:program|direkte|serie/[^/]+)/([^/]+)")
_url_re = re.compile(r"https?://(tv|radio).nrk.no/")
_api_baseurl_re = re.compile(r'''apiBaseUrl:\s*["'](?P<baseurl>[^"']+)["']''')
_schema = validate.Schema(
validate.transform(_api_baseurl_re.search),
validate.any(
None,
validate.all(
validate.get("baseurl"),
validate.url(
scheme="http"
)
)
)
)
_mediaelement_schema = validate.Schema({
"mediaUrl": validate.url(
scheme="http",
path=validate.endswith(".m3u8")
)
})
class NRK(Plugin):
@classmethod
def can_handle_url(self, url):
return _url_re.match(url)
def _get_streams(self):
# Get the stream type from the url (tv/radio).
stream_type = _url_re.match(self.url).group(1).upper()
cookie = {
"NRK_PLAYER_SETTINGS_{0}".format(stream_type): COOKIE_PARAMS
}
# Construct API URL for this program.
baseurl = self.session.http.get(self.url, cookies=cookie, schema=_schema)
program_id = _id_re.search(self.url).group(1)
# Extract media URL.
json_url = urljoin(baseurl, "mediaelement/{0}".format(program_id))
res = self.session.http.get(json_url, cookies=cookie)
media_element = self.session.http.json(res, schema=_mediaelement_schema)
media_url = media_element["mediaUrl"]
return HLSStream.parse_variant_playlist(self.session, media_url)
__plugin__ = NRK
| 27.784615
| 81
| 0.640089
|
7a5cd2d678246bcc019c0c62e193f6ab86fecda4
| 4,445
|
py
|
Python
|
src/diamond/utils/config.py
|
harrisonfeng/Diamond
|
f2bece462577a7c557be8a9f90f6b9340c3db571
|
[
"MIT"
] | 2
|
2016-10-24T02:51:32.000Z
|
2021-01-09T20:49:44.000Z
|
src/diamond/utils/config.py
|
harrisonfeng/Diamond
|
f2bece462577a7c557be8a9f90f6b9340c3db571
|
[
"MIT"
] | 62
|
2016-09-30T14:04:52.000Z
|
2021-04-22T21:22:28.000Z
|
src/diamond/utils/config.py
|
harrisonfeng/Diamond
|
f2bece462577a7c557be8a9f90f6b9340c3db571
|
[
"MIT"
] | 4
|
2017-01-24T14:44:56.000Z
|
2021-03-03T17:14:19.000Z
|
# coding=utf-8
import configobj
import os
def str_to_bool(value):
"""
Converts truthy/falsey strings to a bool.
Empty strings are false.
"""
if isinstance(value, basestring):
value = value.strip().lower()
if value in ['true', 't', 'yes', 'y']:
return True
elif value in ['false', 'f', 'no', 'n', '']:
return False
else:
raise NotImplementedError("Unknown bool %s" % value)
return value
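# Hedged examples of the helper above (note it relies on Python 2's basestring):
# str_to_bool(' Yes ') -> True, str_to_bool('') -> False,
# str_to_bool(True) -> True (non-string values pass through unchanged)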
def load_config(configfile):
"""
Load the full config / merge split configs if configured
"""
configfile = os.path.abspath(configfile)
config = configobj.ConfigObj(configfile)
config_extension = '.conf'
#########################################################################
# Load up other config files
#########################################################################
if 'configs' in config:
config_extension = config['configs'].get('extension', config_extension)
# Load other configs
if 'path' in config['configs']:
for cfgfile in os.listdir(config['configs']['path']):
cfgfile = os.path.join(config['configs']['path'],
cfgfile)
cfgfile = os.path.abspath(cfgfile)
if not cfgfile.endswith(config_extension):
continue
newconfig = configobj.ConfigObj(cfgfile)
config.merge(newconfig)
#########################################################################
if 'server' not in config:
raise Exception('Failed to load config file %s!' % configfile)
#########################################################################
# Load up handler specific configs
#########################################################################
if 'handlers' not in config:
config['handlers'] = configobj.ConfigObj()
if 'handlers_config_path' in config['server']:
handlers_config_path = config['server']['handlers_config_path']
if os.path.exists(handlers_config_path):
for cfgfile in os.listdir(handlers_config_path):
cfgfile = os.path.join(handlers_config_path, cfgfile)
cfgfile = os.path.abspath(cfgfile)
if not cfgfile.endswith(config_extension):
continue
filename = os.path.basename(cfgfile)
handler = os.path.splitext(filename)[0]
if handler not in config['handlers']:
config['handlers'][handler] = configobj.ConfigObj()
newconfig = configobj.ConfigObj(cfgfile)
config['handlers'][handler].merge(newconfig)
#########################################################################
# Load up Collector specific configs
#########################################################################
if 'collectors' not in config:
config['collectors'] = configobj.ConfigObj()
if 'collectors_config_path' in config['server']:
collectors_config_path = config['server']['collectors_config_path']
if os.path.exists(collectors_config_path):
for cfgfile in os.listdir(collectors_config_path):
cfgfile = os.path.join(collectors_config_path, cfgfile)
cfgfile = os.path.abspath(cfgfile)
if not cfgfile.endswith(config_extension):
continue
filename = os.path.basename(cfgfile)
collector = os.path.splitext(filename)[0]
if collector not in config['collectors']:
config['collectors'][collector] = configobj.ConfigObj()
try:
newconfig = configobj.ConfigObj(cfgfile)
except Exception, e:
raise Exception("Failed to load config file %s due to %s" %
(cfgfile, e))
config['collectors'][collector].merge(newconfig)
# Convert enabled to a bool
for collector in config['collectors']:
if 'enabled' in config['collectors'][collector]:
config['collectors'][collector]['enabled'] = str_to_bool(
config['collectors'][collector]['enabled']
)
#########################################################################
return config
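# Hypothetical layout sketch (paths and file names invented) showing how the
# merge logic above is typically driven from the main config file:
#
#   /etc/diamond/diamond.conf            # file passed to load_config()
#     [server]
#     handlers_config_path = /etc/diamond/handlers/
#     collectors_config_path = /etc/diamond/collectors/
#     [configs]
#     path = /etc/diamond/configs/
#     extension = .conf
#
#   /etc/diamond/collectors/CPUCollector.conf   # merged into config['collectors']['CPUCollector']
#     enabled = True                            # later normalized by str_to_bool()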
| 37.041667
| 79
| 0.499663
|
af73504fdc25539fc41a7e67f4da3bd72f8a5cc1
| 11,645
|
py
|
Python
|
tools/tf_test_with_log_mgpu.py
|
luzai/InsightFace_Pytorch
|
2f3d865aa5fa14896df27fe9b43a5c4ceb02c7dd
|
[
"MIT"
] | 4
|
2019-01-24T03:43:36.000Z
|
2020-10-24T08:36:28.000Z
|
tools/tf_test_with_log_mgpu.py
|
luzai/InsightFace_Pytorch
|
2f3d865aa5fa14896df27fe9b43a5c4ceb02c7dd
|
[
"MIT"
] | null | null | null |
tools/tf_test_with_log_mgpu.py
|
luzai/InsightFace_Pytorch
|
2f3d865aa5fa14896df27fe9b43a5c4ceb02c7dd
|
[
"MIT"
] | null | null | null |
import os, sys
from ctypes import *
import numpy as np
import re
import functools
from scipy import spatial
import re
import multiprocessing as mp
import sklearn.preprocessing
try:
import cPickle as pickle
except:
import pickle
import argparse
import tensorflow as tf
import pdb
def generate_test_pair(jk_list, zj_list):
file_paths = [jk_list, zj_list]
jk_dict = {}
zj_dict = {}
jk_zj_dict_list = [jk_dict, zj_dict]
for path, x_dict in zip(file_paths, jk_zj_dict_list):
with open(path,'r') as fr:
for line in fr:
label = line.strip().split(' ')[1]
tmp = x_dict.get(label,[])
tmp.append(line.strip())
x_dict[label] = tmp
jk2zj_pairs = []
zj2jk_pairs = []
for key in jk_dict:
jk_file_list = jk_dict[key]
zj_file_list = zj_dict[key]
# for jk_file in jk_file_list:
# for zj_file in zj_file_list:
# jk2zj_pairs.append([jk_file, zj_file])
# zj2jk_pairs.append([zj_file, jk_file])
for zj_file in zj_file_list:
jk_list_tmp = []
for jk_file in jk_file_list:
jk_list_tmp.append(jk_file)
jk2zj_pairs.append([jk_file, [zj_file]])
zj2jk_pairs.append([zj_file, jk_list_tmp])
return jk2zj_pairs, zj2jk_pairs
def get_test_set_dict(test_cache_root, jk_list, zj_list):
ret_dict = {}
with open(zj_list) as label_fr:
with open(os.path.join(test_cache_root,'zj_list.bin'),'rb') as feature_fr:
for line in label_fr:
key = line.strip()
feature = np.frombuffer(feature_fr.read(feature_len*4), dtype=np.float32)
ret_dict[key] = feature
with open(jk_list) as label_fr:
with open(os.path.join(test_cache_root,'jk_all_list.bin'),'rb') as feature_fr:
for line in label_fr:
key = line.strip()
feature = np.frombuffer(feature_fr.read(feature_len*4), dtype=np.float32)
ret_dict[key] = feature
return ret_dict
def tf_build_graph(sess):
ret_list = []
dis_features_list = np.array_split(dis_features, gpu_used)
idx_start = [0]+[f.shape[0] for f in dis_features_list]
idx_start = [sum(idx_start[:i]) for i in range(1, len(idx_start))]
feed_dict = {}
for device_id in range(gpu_used):
with tf.device('/gpu:%s' % device_id):
dis_feature = tf.placeholder(tf.float32, shape=dis_features_list[device_id].shape)
disv_feature = tf.Variable(dis_feature)
feed_dict[dis_feature] = dis_features_list[device_id]
query_feature = tf.placeholder(tf.float32, shape=(None, feature_len))
similarity = tf.matmul(query_feature, tf.transpose(disv_feature))
similarity = tf.squeeze(similarity)
print((similarity.get_shape()))
query_results = tf.nn.top_k(similarity, k=100)
ret_list.append((query_results, query_feature))
sess.run(tf.global_variables_initializer(), feed_dict=feed_dict)
return ret_list, idx_start
def tf_build_graph_merge_topk(gpu_nums):
# return topk, ids of the merge result
sim_feed_keys, ids_feed_keys = [],[]
sim_tensor_list, id_tensor_list = [], []
with tf.device('/cpu'):
for i in range(gpu_nums):
v = tf.placeholder(tf.float32)
sim_feed_keys.append(v)
sim_tensor_list.append(v)
v = tf.placeholder(tf.int32)
ids_feed_keys.append(v)
v = v+idx_start[i]
id_tensor_list.append(v)
axis = -1
total = tf.concat(sim_tensor_list, axis=axis)
total_ids = tf.concat(id_tensor_list, axis=axis)
topk = tf.nn.top_k(total, k=100)
return topk, total_ids, sim_feed_keys, ids_feed_keys
def tf_query(sess, search_feature, ret_pairs):
'''
:param sess: active tf.Session
:param search_feature: query feature array of shape (n, feature_len) or (feature_len,)
:param ret_pairs: per-GPU (top_k op, query placeholder) pairs built by tf_build_graph
Also relies on the module-level globals gpu_used and topk_items, where
topk_items is the tuple (topk result, total_ids result, sim_feed_keys, id_feed_keys).
:return: (ids, similarities) of the merged top-100 results
'''
ids_list, similarities_list = [], []
for device_id in range(gpu_used):
with tf.device('/gpu:%s' % device_id):
query_results, query_feature = ret_pairs[device_id]
if len(search_feature.shape) < 2:
search_feature = search_feature[np.newaxis, :]
similarities, ids = sess.run(query_results, feed_dict={query_feature: search_feature})
ids_list.append(ids)
similarities_list.append(similarities)
with tf.device('/gpu:0'):
topk, total_ids, sim_feed_keys, id_feed_keys = topk_items
assert len(ids_list) == len(id_feed_keys)
assert len(similarities_list) == len(id_feed_keys)
feed_dict = {}
for ids, sims, id_feed_key, sim_feed_key in zip(ids_list, similarities_list, id_feed_keys, sim_feed_keys):
feed_dict[id_feed_key] = ids
feed_dict[sim_feed_key] = sims
topk_result, total_ids_result = sess.run([topk, total_ids], feed_dict=feed_dict)
similarities, ids = topk_result
# pdb.set_trace()
# TODO(HZF) ugly codes
if len(total_ids_result.shape) > 1:
ids_ret = []
for t_ids, id_idx in zip(total_ids_result, ids):
ids_ret.append(t_ids[id_idx])
ids = np.array(ids_ret, dtype=np.int32)
else:
ids = total_ids_result[ids]
return ids, similarities
def result_generator(pairs, test_set_dict, sess, query_results, label_list, q):
def write_result(ids, similarities, pair):
ret_str = ''
ret_str += 'GroupId=%s;'%pair[0].split(' ')[1]
ret_str += 'eType=%d;Src=%s,%s'%(idx, pair[0].split(' ')[0], pair[1].split(' ')[0])
if pair[0] not in test_set_dict or pair[1] not in test_set_dict:
ret_str+=',0;'
for i in [1,5,10,50,100]:
ret_str += 'top%d=0,'%i
q.append(ret_str)
return
search_feature = test_set_dict[pair[0]]
# print( search_feature.shape)
searched_feature = test_set_dict[pair[1]]
# print( searched_feature.shape)
similarity = np.dot(search_feature, searched_feature)
ret_str += ',%.3g;'%(similarity)
flag = False
for i in [1,5,10,50,100]:
tk = 0
if flag or similarity > similarities[i-1]:
flag = True
tk = 1
ret_str += 'top%d=%d,'%(i, tk)
if label_list is not None:
ret_str = ret_str[:-1]+';'+'dis_top='
for i in [1,2,3]:
# try:
ret_str += '%.3g,%s,'%(similarities[i-1], label_list[ids[i-1]].split(' ')[0])
# except Exception as e:
# from IPython import embed; embed()
q.append(ret_str)
idx = 0
count = 0
#zj2jk
for opair in pairs:
search_feature = test_set_dict[opair[0]]
ids, similarities = tf_query(sess, search_feature, query_results)
for item in opair[1]:
pair = [opair[0], item]
write_result(ids, similarities, pair)
count += 1
if count % 1000 == 0:
print( 'process: %d'%count, len(opair[1]))
idx = 2
count = 0
#jk2zj
for opair in pairs:
search_features = []
for item in opair[1]:
search_features.append(test_set_dict[item])
search_features = np.array(search_features)
# print( search_features.shape)
# pdb.set_trace()
idss, similaritiess = tf_query(sess, search_features, query_results)
# print( idss.shape, similaritiess.shape)
assert len(idss) == len(opair[1]) and len(idss)==len(similaritiess)
for ids, similarities, item in zip(idss, similaritiess, opair[1]):
pair = [item, opair[0]]
write_result(ids, similarities, pair)
count += 1
if count % 1000 == 0:
print( 'process: %d'%count, len(idss))
def comsumer(q, fw):
for result in q:
fw.write('%s\n'%result[:-1])
def get_final_result(test_cache_root, jk_list, zj_list, dist_list_path, result_file_path):
jk2zj_pairs, zj2jk_pairs = generate_test_pair(jk_list, zj_list)
dist_list = []
with open(dist_list_path,'r') as fr:
for line in fr:
dist_list.append(line.strip().split(' ')[0])
test_set_dict = get_test_set_dict(test_cache_root, jk_list, zj_list)
result_pattern = 'GroupId=%s;eType=%d;Src=%s,%s;top%d=%d,top%d=%d,top%d=%d,top%d=%d,top%d=%d'
fw = open(result_file_path,'w')
q = []
# from IPython import embed; embed()
result_generator(zj2jk_pairs, test_set_dict, sess, query_results_pairs, dist_list, q)
print( len(q))
comsumer(q,fw)
fw.close()
feature_len = 512
query_process_num = len(os.environ['CUDA_VISIBLE_DEVICES'].strip(',').split(','))
gpu_used = query_process_num
# os.environ['CUDA_VISIBLE_DEVICES'] = '3,4'
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('model_name', type=str, help='model names')
parser.add_argument('--prefix', type=str, )
return parser.parse_args()
args = parse_arguments()
# The names of the files and tests.
media = 'MediaCapturedImagesStd_02_en_sim'
hy = 'hy'
facereg = 'facereg1N_Select30_sim'
total_test = 'testsets_mtcnn_align112x112'
dist_list = 'dis_list.txt'
small_pix = '0509_Small_Pix_Select_clear'
big_angle = 'Big_Angle_all'
media_shade = 'MediaCapturedImagesStd_02_shade'
zj = '/zj_list.txt'
jk = '/jk_all_list.txt'
model_name = args.model_name
if 'pca' in model_name or '256' in model_name or 'beta' in model_name:
feature_len=256
assert feature_len==512
# Paths to the images lists, distractors dumps, images from lists dumps.
lists = args.prefix +'/lists/'
PREFIX= '/home/zl/prj/'
dist =PREFIX +'/output/'+model_name+'/dis/'
zjjk =PREFIX +'/output/'+model_name+'/'
dis_feature_len = 0
for im_name in os.listdir(dist):
if 'bin' in im_name:
st = os.stat(os.path.join(dist, im_name))
dis_feature_len += st.st_size/feature_len/4
dis_feature_len=int(dis_feature_len)
dis_features = np.zeros((dis_feature_len, feature_len))
idx = 0
print( 'loading dis set: %d'%dis_feature_len)
for im_name in os.listdir(dist):
if 'bin' in im_name:
with open(os.path.join(dist, im_name),'rb') as fr:
while True:
feature = fr.read(feature_len*4)
if len(feature) == 0:
break
feature = np.frombuffer(feature, dtype=np.float32)
dis_features[idx,:] = feature
idx+=1
assert idx == dis_feature_len
print( 'total dis feature length:%d'%idx)
# from IPython import embed; embed()
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth=True
sess = tf.Session(config=config)
query_results_pairs, idx_start = tf_build_graph(sess)
topk_items = tf_build_graph_merge_topk(gpu_used)
#get_final_result(distractor_index, zjjk+total_test, test_lists+jk, test_lists+zj, dist+dist_list, zjjk+total_test+'.log')
get_final_result(zjjk+facereg, lists+facereg+jk, lists+facereg+zj, dist+dist_list, zjjk+facereg+'.log')
get_final_result(zjjk+media, lists+media+jk, lists+media+zj, dist+dist_list, zjjk+media+'.log')
#split test @deprecated
#get_final_result(distractor_index, zjjk+hy, lists+hy+jk, lists+hy+zj, lists+dist_list, zjjk+hy+'.log')
| 37.323718
| 122
| 0.623186
|
f751216398cfa36014558e25485b351239c660c9
| 3,911
|
py
|
Python
|
witio/splitwise.py
|
dizzy54/munimji
|
2d6616b4924e12d465ec352119a1621c925adc81
|
[
"MIT"
] | null | null | null |
witio/splitwise.py
|
dizzy54/munimji
|
2d6616b4924e12d465ec352119a1621c925adc81
|
[
"MIT"
] | null | null | null |
witio/splitwise.py
|
dizzy54/munimji
|
2d6616b4924e12d465ec352119a1621c925adc81
|
[
"MIT"
] | null | null | null |
from requests_oauthlib import OAuth1Session
from django.conf import settings
client_key = settings.SPLITWISE_CLIENT_KEY
client_secret = settings.SPLITWISE_CLIENT_SECRET
def get_request_token():
""" obtain generic resource owner key and secret from splitwise
"""
request_token_url = 'https://secure.splitwise.com/api/v3.0/get_request_token'
oauth = OAuth1Session(client_key, client_secret=client_secret)
fetch_response = oauth.fetch_request_token(request_token_url)
resource_owner_key = fetch_response.get('oauth_token')
resource_owner_secret = fetch_response.get('oauth_token_secret')
return oauth, resource_owner_key, resource_owner_secret
def get_splitwise_response(access_token, access_token_secret, protected_uri, *args, **kwargs):
oauth = OAuth1Session(
client_key,
client_secret=client_secret,
resource_owner_key=access_token,
resource_owner_secret=access_token_secret
)
response = oauth.get(protected_uri)
return response
def post_splitwise_request(access_token, access_token_secret, protected_uri, params_dict, *args, **kwargs):
oauth = OAuth1Session(
client_key,
client_secret=client_secret,
resource_owner_key=access_token,
resource_owner_secret=access_token_secret
)
response = oauth.post(protected_uri, data=params_dict)
return response
def get_user_by_auth(access_token, access_token_secret):
""" returns splitwise response json for user with input ouath access token
"""
protected_uri = 'https://secure.splitwise.com/api/v3.0/get_current_user'
response = get_splitwise_response(access_token, access_token_secret, protected_uri)
return response.json()
def create_equal_expense(access_token, access_token_secret, participant_list, total_amount, description):
"""
"""
protected_uri = 'https://secure.splitwise.com/api/v3.0/create_expense'
n_payers = 0
n_payees = 0
for participant in participant_list:
if participant['payer']:
n_payers += 1
if participant['payee']:
n_payees += 1
amount_paid = total_amount / n_payers
amount_split = total_amount / n_payees
params_dict = {
'payment': False,
'cost': total_amount,
'description': description,
}
'''
participant_list = []
for payer in payers:
participant_list.append({'participant': payer, 'payer': True, 'payee': False})
for payee in payees:
if payee in payers:
for participant in participant_list:
if participant['participant'] == payee:
participant['payee'] = True
else:
participant_list.append({'participant': payee, 'payer': False, 'payee': True})
'''
i = 0
for entry in participant_list:
participant = entry['participant']
payer_dict = {
'user_id': participant['id'],
'email': participant['email'],
'paid_share': amount_paid if entry['payer'] else 0,
'owed_share': amount_split if entry['payee'] else 0,
}
for param in ('user_id', 'email', 'paid_share', 'owed_share'):
k = 'users__%d__%s' % (i, param)
params_dict[k] = payer_dict[param]
i += 1
response = post_splitwise_request(access_token, access_token_secret, protected_uri, params_dict)
return response.json()
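# Illustrative sketch (ids and emails invented): for two participants who each
# pay and owe on a 100.0 expense, the flattened params_dict built above would
# contain, besides payment/cost/description, keys such as
# users__0__user_id=1, users__0__email='a@x.com', users__0__paid_share=50.0, users__0__owed_share=50.0
# users__1__user_id=2, users__1__email='b@x.com', users__1__paid_share=50.0, users__1__owed_share=50.0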
def get_expenses(access_token, access_token_secret):
protected_uri = 'https://secure.splitwise.com/api/v3.0/get_expenses'
response = get_splitwise_response(access_token, access_token_secret, protected_uri)
return response.json()
def get_friends(access_token, access_token_secret):
protected_uri = 'https://secure.splitwise.com/api/v3.0/get_friends'
response = get_splitwise_response(access_token, access_token_secret, protected_uri)
return response.json()
| 35.554545
| 107
| 0.695474
|
e924fdbbbde54b5197d9da525b7e0da57f4c6840
| 644
|
py
|
Python
|
experiments/rfcn/rfcn_train_test.py
|
fourmi1995/IronExperiment-DCN
|
5292539764588e0168016c7e7b4df038358e9f38
|
[
"MIT"
] | 2
|
2020-11-10T07:37:09.000Z
|
2021-02-09T06:26:25.000Z
|
experiments/rfcn/rfcn_train_test.py
|
fourmi1995/IronExperiment-DCN
|
5292539764588e0168016c7e7b4df038358e9f38
|
[
"MIT"
] | null | null | null |
experiments/rfcn/rfcn_train_test.py
|
fourmi1995/IronExperiment-DCN
|
5292539764588e0168016c7e7b4df038358e9f38
|
[
"MIT"
] | 1
|
2019-08-07T02:35:16.000Z
|
2019-08-07T02:35:16.000Z
|
# --------------------------------------------------------
# Deformable Convolutional Networks
# Copyright (c) 2017 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Yuwen Xiong
# --------------------------------------------------------
import os
import sys
os.environ['PYTHONUNBUFFERED'] = '1'
os.environ['MXNET_CUDNN_AUTOTUNE_DEFAULT'] = '0'
os.environ['MXNET_ENABLE_GPU_P2P'] = '0'
this_dir = os.path.dirname(__file__)
sys.path.insert(0, os.path.join(this_dir, '..', '..', 'rfcn'))
import train_rfcn
import test
if __name__ == "__main__":
train_rfcn.main()
test.main()
| 24.769231
| 63
| 0.560559
|
179332543970010e9cab0e87f0de91dbf9f6ebf4
| 242
|
py
|
Python
|
electrum_gui/common/explorer/data/exceptions.py
|
Umiiii/electrum
|
9e822640680c20c69a69695c033e97605aafcdce
|
[
"MIT"
] | null | null | null |
electrum_gui/common/explorer/data/exceptions.py
|
Umiiii/electrum
|
9e822640680c20c69a69695c033e97605aafcdce
|
[
"MIT"
] | null | null | null |
electrum_gui/common/explorer/data/exceptions.py
|
Umiiii/electrum
|
9e822640680c20c69a69695c033e97605aafcdce
|
[
"MIT"
] | null | null | null |
class ExplorerException(Exception):
pass
class TransactionNotFound(ExplorerException):
def __init__(self, txid: str):
super(TransactionNotFound, self).__init__(f"Transaction {repr(txid)} not found")
self.txid = txid
| 26.888889
| 88
| 0.719008
|
83dacac362385dd24cb9647ad16f70134abd86bb
| 28,296
|
py
|
Python
|
haystack/backends/solr_backend.py
|
kezabelle/django-haystack
|
58ca4cdef58e0158a26275590fc6837fbd55096b
|
[
"BSD-3-Clause"
] | 1
|
2017-10-12T14:25:06.000Z
|
2017-10-12T14:25:06.000Z
|
filenv/lib/python2.7/site-packages/haystack/backends/solr_backend.py
|
betoesquivel/fil2014
|
4c2e9188769096391bb206b76ed1ab8bd2ff66a1
|
[
"MIT"
] | null | null | null |
filenv/lib/python2.7/site-packages/haystack/backends/solr_backend.py
|
betoesquivel/fil2014
|
4c2e9188769096391bb206b76ed1ab8bd2ff66a1
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
import warnings
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.models.loading import get_model
from django.utils import six
from haystack.backends import BaseEngine, BaseSearchBackend, BaseSearchQuery, log_query, EmptyResults
from haystack.constants import ID, DJANGO_CT, DJANGO_ID
from haystack.exceptions import MissingDependency, MoreLikeThisError
from haystack.inputs import PythonData, Clean, Exact, Raw
from haystack.models import SearchResult
from haystack.utils import get_identifier
from haystack.utils import log as logging
try:
from pysolr import Solr, SolrError
except ImportError:
raise MissingDependency("The 'solr' backend requires the installation of 'pysolr'. Please refer to the documentation.")
class SolrSearchBackend(BaseSearchBackend):
    # Words reserved by Solr for special use.
RESERVED_WORDS = (
'AND',
'NOT',
'OR',
'TO',
)
# Characters reserved by Solr for special use.
# The '\\' must come first, so as not to overwrite the other slash replacements.
RESERVED_CHARACTERS = (
'\\', '+', '-', '&&', '||', '!', '(', ')', '{', '}',
'[', ']', '^', '"', '~', '*', '?', ':', '/',
)
def __init__(self, connection_alias, **connection_options):
super(SolrSearchBackend, self).__init__(connection_alias, **connection_options)
        if 'URL' not in connection_options:
raise ImproperlyConfigured("You must specify a 'URL' in your settings for connection '%s'." % connection_alias)
self.conn = Solr(connection_options['URL'], timeout=self.timeout)
self.log = logging.getLogger('haystack')
def update(self, index, iterable, commit=True):
docs = []
for obj in iterable:
try:
docs.append(index.full_prepare(obj))
except UnicodeDecodeError:
if not self.silently_fail:
raise
# We'll log the object identifier but won't include the actual object
# to avoid the possibility of that generating encoding errors while
# processing the log message:
self.log.error(u"UnicodeDecodeError while preparing object for update", exc_info=True, extra={
"data": {
"index": index,
"object": get_identifier(obj)
}
})
if len(docs) > 0:
try:
self.conn.add(docs, commit=commit, boost=index.get_field_weights())
except (IOError, SolrError) as e:
if not self.silently_fail:
raise
self.log.error("Failed to add documents to Solr: %s", e)
def remove(self, obj_or_string, commit=True):
solr_id = get_identifier(obj_or_string)
try:
kwargs = {
'commit': commit,
ID: solr_id
}
self.conn.delete(**kwargs)
except (IOError, SolrError) as e:
if not self.silently_fail:
raise
self.log.error("Failed to remove document '%s' from Solr: %s", solr_id, e)
def clear(self, models=[], commit=True):
try:
if not models:
# *:* matches all docs in Solr
self.conn.delete(q='*:*', commit=commit)
else:
models_to_delete = []
for model in models:
models_to_delete.append("%s:%s.%s" % (DJANGO_CT, model._meta.app_label, model._meta.module_name))
self.conn.delete(q=" OR ".join(models_to_delete), commit=commit)
# Run an optimize post-clear. http://wiki.apache.org/solr/FAQ#head-9aafb5d8dff5308e8ea4fcf4b71f19f029c4bb99
self.conn.optimize()
except (IOError, SolrError) as e:
if not self.silently_fail:
raise
if len(models):
self.log.error("Failed to clear Solr index of models '%s': %s", ','.join(models_to_delete), e)
else:
self.log.error("Failed to clear Solr index: %s", e)
@log_query
def search(self, query_string, **kwargs):
if len(query_string) == 0:
return {
'results': [],
'hits': 0,
}
search_kwargs = self.build_search_kwargs(query_string, **kwargs)
try:
raw_results = self.conn.search(query_string, **search_kwargs)
except (IOError, SolrError) as e:
if not self.silently_fail:
raise
self.log.error("Failed to query Solr using '%s': %s", query_string, e)
raw_results = EmptyResults()
return self._process_results(raw_results, highlight=kwargs.get('highlight'), result_class=kwargs.get('result_class', SearchResult), distance_point=kwargs.get('distance_point'))
def build_search_kwargs(self, query_string, sort_by=None, start_offset=0, end_offset=None,
fields='', highlight=False, facets=None,
date_facets=None, query_facets=None,
narrow_queries=None, spelling_query=None,
within=None, dwithin=None, distance_point=None,
models=None, limit_to_registered_models=None,
result_class=None, stats=None):
kwargs = {'fl': '* score'}
if fields:
if isinstance(fields, (list, set)):
fields = " ".join(fields)
kwargs['fl'] = fields
if sort_by is not None:
if sort_by in ['distance asc', 'distance desc'] and distance_point:
# Do the geo-enabled sort.
lng, lat = distance_point['point'].get_coords()
kwargs['sfield'] = distance_point['field']
kwargs['pt'] = '%s,%s' % (lat, lng)
if sort_by == 'distance asc':
kwargs['sort'] = 'geodist() asc'
else:
kwargs['sort'] = 'geodist() desc'
else:
if sort_by.startswith('distance '):
warnings.warn("In order to sort by distance, you must call the '.distance(...)' method.")
# Regular sorting.
kwargs['sort'] = sort_by
if start_offset is not None:
kwargs['start'] = start_offset
if end_offset is not None:
kwargs['rows'] = end_offset - start_offset
if highlight is True:
kwargs['hl'] = 'true'
kwargs['hl.fragsize'] = '200'
if self.include_spelling is True:
kwargs['spellcheck'] = 'true'
kwargs['spellcheck.collate'] = 'true'
kwargs['spellcheck.count'] = 1
if spelling_query:
kwargs['spellcheck.q'] = spelling_query
if facets is not None:
kwargs['facet'] = 'on'
kwargs['facet.field'] = facets.keys()
for facet_field, options in facets.items():
for key, value in options.items():
kwargs['f.%s.facet.%s' % (facet_field, key)] = self.conn._from_python(value)
if date_facets is not None:
kwargs['facet'] = 'on'
kwargs['facet.date'] = date_facets.keys()
kwargs['facet.date.other'] = 'none'
for key, value in date_facets.items():
kwargs["f.%s.facet.date.start" % key] = self.conn._from_python(value.get('start_date'))
kwargs["f.%s.facet.date.end" % key] = self.conn._from_python(value.get('end_date'))
gap_by_string = value.get('gap_by').upper()
gap_string = "%d%s" % (value.get('gap_amount'), gap_by_string)
if value.get('gap_amount') != 1:
gap_string += "S"
kwargs["f.%s.facet.date.gap" % key] = '+%s/%s' % (gap_string, gap_by_string)
if query_facets is not None:
kwargs['facet'] = 'on'
kwargs['facet.query'] = ["%s:%s" % (field, value) for field, value in query_facets]
if limit_to_registered_models is None:
limit_to_registered_models = getattr(settings, 'HAYSTACK_LIMIT_TO_REGISTERED_MODELS', True)
if models and len(models):
model_choices = sorted(['%s.%s' % (model._meta.app_label, model._meta.module_name) for model in models])
elif limit_to_registered_models:
# Using narrow queries, limit the results to only models handled
# with the current routers.
model_choices = self.build_models_list()
else:
model_choices = []
if len(model_choices) > 0:
if narrow_queries is None:
narrow_queries = set()
narrow_queries.add('%s:(%s)' % (DJANGO_CT, ' OR '.join(model_choices)))
if narrow_queries is not None:
kwargs['fq'] = list(narrow_queries)
if stats:
kwargs['stats'] = "true"
for k in stats.keys():
kwargs['stats.field'] = k
for facet in stats[k]:
kwargs['f.%s.stats.facet' % k] = facet
if within is not None:
from haystack.utils.geo import generate_bounding_box
kwargs.setdefault('fq', [])
((min_lat, min_lng), (max_lat, max_lng)) = generate_bounding_box(within['point_1'], within['point_2'])
# Bounding boxes are min, min TO max, max. Solr's wiki was *NOT*
# very clear on this.
bbox = '%s:[%s,%s TO %s,%s]' % (within['field'], min_lat, min_lng, max_lat, max_lng)
kwargs['fq'].append(bbox)
if dwithin is not None:
kwargs.setdefault('fq', [])
lng, lat = dwithin['point'].get_coords()
geofilt = '{!geofilt pt=%s,%s sfield=%s d=%s}' % (lat, lng, dwithin['field'], dwithin['distance'].km)
kwargs['fq'].append(geofilt)
# Check to see if the backend should try to include distances
# (Solr 4.X+) in the results.
if self.distance_available and distance_point:
# In early testing, you can't just hand Solr 4.X a proper bounding box
# & request distances. To enable native distance would take calculating
# a center point & a radius off the user-provided box, which kinda
# sucks. We'll avoid it for now, since Solr 4.x's release will be some
# time yet.
# kwargs['fl'] += ' _dist_:geodist()'
pass
return kwargs
def more_like_this(self, model_instance, additional_query_string=None,
start_offset=0, end_offset=None, models=None,
limit_to_registered_models=None, result_class=None, **kwargs):
from haystack import connections
# Deferred models will have a different class ("RealClass_Deferred_fieldname")
# which won't be in our registry:
model_klass = model_instance._meta.concrete_model
index = connections[self.connection_alias].get_unified_index().get_index(model_klass)
field_name = index.get_content_field()
params = {
'fl': '*,score',
}
if start_offset is not None:
params['start'] = start_offset
if end_offset is not None:
params['rows'] = end_offset
narrow_queries = set()
if limit_to_registered_models is None:
limit_to_registered_models = getattr(settings, 'HAYSTACK_LIMIT_TO_REGISTERED_MODELS', True)
if models and len(models):
model_choices = sorted(['%s.%s' % (model._meta.app_label, model._meta.module_name) for model in models])
elif limit_to_registered_models:
# Using narrow queries, limit the results to only models handled
# with the current routers.
model_choices = self.build_models_list()
else:
model_choices = []
if len(model_choices) > 0:
if narrow_queries is None:
narrow_queries = set()
narrow_queries.add('%s:(%s)' % (DJANGO_CT, ' OR '.join(model_choices)))
if additional_query_string:
narrow_queries.add(additional_query_string)
if narrow_queries:
params['fq'] = list(narrow_queries)
query = "%s:%s" % (ID, get_identifier(model_instance))
try:
raw_results = self.conn.more_like_this(query, field_name, **params)
except (IOError, SolrError) as e:
if not self.silently_fail:
raise
self.log.error("Failed to fetch More Like This from Solr for document '%s': %s", query, e)
raw_results = EmptyResults()
return self._process_results(raw_results, result_class=result_class)
def _process_results(self, raw_results, highlight=False, result_class=None, distance_point=None):
from haystack import connections
results = []
hits = raw_results.hits
facets = {}
stats = {}
spelling_suggestion = None
if result_class is None:
result_class = SearchResult
        if hasattr(raw_results, 'stats'):
            stats = raw_results.stats.get('stats_fields', {})
if hasattr(raw_results, 'facets'):
facets = {
'fields': raw_results.facets.get('facet_fields', {}),
'dates': raw_results.facets.get('facet_dates', {}),
'queries': raw_results.facets.get('facet_queries', {}),
}
for key in ['fields']:
for facet_field in facets[key]:
# Convert to a two-tuple, as Solr's json format returns a list of
# pairs.
facets[key][facet_field] = list(zip(facets[key][facet_field][::2], facets[key][facet_field][1::2]))
if self.include_spelling is True:
if hasattr(raw_results, 'spellcheck'):
if len(raw_results.spellcheck.get('suggestions', [])):
# For some reason, it's an array of pairs. Pull off the
# collated result from the end.
spelling_suggestion = raw_results.spellcheck.get('suggestions')[-1]
unified_index = connections[self.connection_alias].get_unified_index()
indexed_models = unified_index.get_indexed_models()
for raw_result in raw_results.docs:
app_label, model_name = raw_result[DJANGO_CT].split('.')
additional_fields = {}
model = get_model(app_label, model_name)
if model and model in indexed_models:
for key, value in raw_result.items():
index = unified_index.get_index(model)
string_key = str(key)
if string_key in index.fields and hasattr(index.fields[string_key], 'convert'):
additional_fields[string_key] = index.fields[string_key].convert(value)
else:
additional_fields[string_key] = self.conn._to_python(value)
del(additional_fields[DJANGO_CT])
del(additional_fields[DJANGO_ID])
del(additional_fields['score'])
if raw_result[ID] in getattr(raw_results, 'highlighting', {}):
additional_fields['highlighted'] = raw_results.highlighting[raw_result[ID]]
if distance_point:
additional_fields['_point_of_origin'] = distance_point
if raw_result.get('__dist__'):
from haystack.utils.geo import Distance
additional_fields['_distance'] = Distance(km=float(raw_result['__dist__']))
else:
additional_fields['_distance'] = None
result = result_class(app_label, model_name, raw_result[DJANGO_ID], raw_result['score'], **additional_fields)
results.append(result)
else:
hits -= 1
return {
'results': results,
'hits': hits,
'stats': stats,
'facets': facets,
'spelling_suggestion': spelling_suggestion,
}
def build_schema(self, fields):
content_field_name = ''
schema_fields = []
for field_name, field_class in fields.items():
field_data = {
'field_name': field_class.index_fieldname,
'type': 'text_en',
'indexed': 'true',
'stored': 'true',
'multi_valued': 'false',
}
if field_class.document is True:
content_field_name = field_class.index_fieldname
# DRL_FIXME: Perhaps move to something where, if none of these
# checks succeed, call a custom method on the form that
# returns, per-backend, the right type of storage?
if field_class.field_type in ['date', 'datetime']:
field_data['type'] = 'date'
elif field_class.field_type == 'integer':
field_data['type'] = 'long'
elif field_class.field_type == 'float':
field_data['type'] = 'float'
elif field_class.field_type == 'boolean':
field_data['type'] = 'boolean'
elif field_class.field_type == 'ngram':
field_data['type'] = 'ngram'
elif field_class.field_type == 'edge_ngram':
field_data['type'] = 'edge_ngram'
elif field_class.field_type == 'location':
field_data['type'] = 'location'
if field_class.is_multivalued:
field_data['multi_valued'] = 'true'
if field_class.stored is False:
field_data['stored'] = 'false'
# Do this last to override `text` fields.
if field_class.indexed is False:
field_data['indexed'] = 'false'
# If it's text and not being indexed, we probably don't want
# to do the normal lowercase/tokenize/stemming/etc. dance.
if field_data['type'] == 'text_en':
field_data['type'] = 'string'
# If it's a ``FacetField``, make sure we don't postprocess it.
if hasattr(field_class, 'facet_for'):
# If it's text, it ought to be a string.
if field_data['type'] == 'text_en':
field_data['type'] = 'string'
schema_fields.append(field_data)
return (content_field_name, schema_fields)
def extract_file_contents(self, file_obj):
"""Extract text and metadata from a structured file (PDF, MS Word, etc.)
Uses the Solr ExtractingRequestHandler, which is based on Apache Tika.
See the Solr wiki for details:
http://wiki.apache.org/solr/ExtractingRequestHandler
Due to the way the ExtractingRequestHandler is implemented it completely
replaces the normal Haystack indexing process with several unfortunate
restrictions: only one file per request, the extracted data is added to
the index with no ability to modify it, etc. To simplify the process and
allow for more advanced use we'll run using the extract-only mode to
return the extracted data without adding it to the index so we can then
use it within Haystack's normal templating process.
Returns None if metadata cannot be extracted; otherwise returns a
dictionary containing at least two keys:
:contents:
Extracted full-text content, if applicable
:metadata:
key:value pairs of text strings
"""
try:
return self.conn.extract(file_obj)
except Exception as e:
self.log.warning(u"Unable to extract file contents: %s", e,
exc_info=True, extra={"data": {"file": file_obj}})
return None
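# A minimal usage sketch, not part of the upstream backend: the helper below is
# hypothetical and only illustrates how the extract-only output returned above
# might be folded into an index's prepared text. It assumes an already-configured
# backend instance and a path to a local file.
def _example_extract_usage(backend, file_path):
    # Hand the raw file to Solr's ExtractingRequestHandler via the backend.
    with open(file_path, 'rb') as file_obj:
        extracted = backend.extract_file_contents(file_obj)
    if extracted is None:
        # Extraction failed; fall back to an empty document body.
        return u''
    # 'contents' holds the extracted full text; 'metadata' maps field names to values.
    return extracted.get('contents') or u''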
class SolrSearchQuery(BaseSearchQuery):
def matching_all_fragment(self):
return '*:*'
def add_spatial(self, lat, lon, sfield, distance, filter='bbox'):
"""Adds spatial query parameters to search query"""
kwargs = {
'lat': lat,
            'long': lon,
'sfield': sfield,
'distance': distance,
}
self.spatial_query.update(kwargs)
def add_order_by_distance(self, lat, long, sfield):
"""Orders the search result by distance from point."""
kwargs = {
'lat': lat,
'long': long,
'sfield': sfield,
}
self.order_by_distance.update(kwargs)
def build_query_fragment(self, field, filter_type, value):
from haystack import connections
query_frag = ''
if not hasattr(value, 'input_type_name'):
# Handle when we've got a ``ValuesListQuerySet``...
if hasattr(value, 'values_list'):
value = list(value)
if isinstance(value, six.string_types):
# It's not an ``InputType``. Assume ``Clean``.
value = Clean(value)
else:
value = PythonData(value)
# Prepare the query using the InputType.
prepared_value = value.prepare(self)
if not isinstance(prepared_value, (set, list, tuple)):
# Then convert whatever we get back to what pysolr wants if needed.
prepared_value = self.backend.conn._from_python(prepared_value)
# 'content' is a special reserved word, much like 'pk' in
# Django's ORM layer. It indicates 'no special field'.
if field == 'content':
index_fieldname = ''
else:
index_fieldname = u'%s:' % connections[self._using].get_unified_index().get_index_fieldname(field)
filter_types = {
'contains': u'%s',
'startswith': u'%s*',
'exact': u'%s',
'gt': u'{%s TO *}',
'gte': u'[%s TO *]',
'lt': u'{* TO %s}',
'lte': u'[* TO %s]',
}
if value.post_process is False:
query_frag = prepared_value
else:
if filter_type in ['contains', 'startswith']:
if value.input_type_name == 'exact':
query_frag = prepared_value
else:
                        # Iterate over terms & incorporate the converted form of each into the query.
terms = []
for possible_value in prepared_value.split(' '):
terms.append(filter_types[filter_type] % self.backend.conn._from_python(possible_value))
if len(terms) == 1:
query_frag = terms[0]
else:
query_frag = u"(%s)" % " AND ".join(terms)
elif filter_type == 'in':
in_options = []
for possible_value in prepared_value:
in_options.append(u'"%s"' % self.backend.conn._from_python(possible_value))
query_frag = u"(%s)" % " OR ".join(in_options)
elif filter_type == 'range':
start = self.backend.conn._from_python(prepared_value[0])
end = self.backend.conn._from_python(prepared_value[1])
query_frag = u'["%s" TO "%s"]' % (start, end)
elif filter_type == 'exact':
if value.input_type_name == 'exact':
query_frag = prepared_value
else:
prepared_value = Exact(prepared_value).prepare(self)
query_frag = filter_types[filter_type] % prepared_value
else:
if value.input_type_name != 'exact':
prepared_value = Exact(prepared_value).prepare(self)
query_frag = filter_types[filter_type] % prepared_value
if len(query_frag) and not isinstance(value, Raw):
if not query_frag.startswith('(') and not query_frag.endswith(')'):
query_frag = "(%s)" % query_frag
return u"%s%s" % (index_fieldname, query_frag)
def build_alt_parser_query(self, parser_name, query_string='', **kwargs):
if query_string:
query_string = Clean(query_string).prepare(self)
kwarg_bits = []
for key in sorted(kwargs.keys()):
if isinstance(kwargs[key], six.string_types) and ' ' in kwargs[key]:
kwarg_bits.append(u"%s='%s'" % (key, kwargs[key]))
else:
kwarg_bits.append(u"%s=%s" % (key, kwargs[key]))
return u'_query_:"{!%s %s}%s"' % (parser_name, Clean(' '.join(kwarg_bits)), query_string)
def build_params(self, spelling_query=None, **kwargs):
search_kwargs = {
'start_offset': self.start_offset,
'result_class': self.result_class
}
order_by_list = None
if self.order_by:
if order_by_list is None:
order_by_list = []
for order_by in self.order_by:
if order_by.startswith('-'):
order_by_list.append('%s desc' % order_by[1:])
else:
order_by_list.append('%s asc' % order_by)
search_kwargs['sort_by'] = ", ".join(order_by_list)
if self.date_facets:
search_kwargs['date_facets'] = self.date_facets
if self.distance_point:
search_kwargs['distance_point'] = self.distance_point
if self.dwithin:
search_kwargs['dwithin'] = self.dwithin
if self.end_offset is not None:
search_kwargs['end_offset'] = self.end_offset
if self.facets:
search_kwargs['facets'] = self.facets
if self.fields:
search_kwargs['fields'] = self.fields
if self.highlight:
search_kwargs['highlight'] = self.highlight
if self.models:
search_kwargs['models'] = self.models
if self.narrow_queries:
search_kwargs['narrow_queries'] = self.narrow_queries
if self.query_facets:
search_kwargs['query_facets'] = self.query_facets
if self.within:
search_kwargs['within'] = self.within
if spelling_query:
search_kwargs['spelling_query'] = spelling_query
if self.stats:
search_kwargs['stats'] = self.stats
return search_kwargs
def run(self, spelling_query=None, **kwargs):
"""Builds and executes the query. Returns a list of search results."""
final_query = self.build_query()
search_kwargs = self.build_params(spelling_query, **kwargs)
results = self.backend.search(final_query, **search_kwargs)
self._results = results.get('results', [])
self._hit_count = results.get('hits', 0)
self._facet_counts = self.post_process_facets(results)
        self._stats = results.get('stats', {})
self._spelling_suggestion = results.get('spelling_suggestion', None)
def run_mlt(self, **kwargs):
"""Builds and executes the query. Returns a list of search results."""
if self._more_like_this is False or self._mlt_instance is None:
raise MoreLikeThisError("No instance was provided to determine 'More Like This' results.")
additional_query_string = self.build_query()
search_kwargs = {
'start_offset': self.start_offset,
'result_class': self.result_class,
'models': self.models
}
if self.end_offset is not None:
search_kwargs['end_offset'] = self.end_offset - self.start_offset
results = self.backend.more_like_this(self._mlt_instance, additional_query_string, **search_kwargs)
self._results = results.get('results', [])
self._hit_count = results.get('hits', 0)
class SolrEngine(BaseEngine):
backend = SolrSearchBackend
query = SolrSearchQuery
| 39.136929
| 184
| 0.569763
|
996df28ac810dc93e3bec17dff9fa94b80a2641e
| 9,074
|
py
|
Python
|
tests/integration/test_storage_hdfs/test.py
|
Rahul171201/ClickHouse
|
b0600c3751371f0667b15494731f2fbb073605e6
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/test_storage_hdfs/test.py
|
Rahul171201/ClickHouse
|
b0600c3751371f0667b15494731f2fbb073605e6
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/test_storage_hdfs/test.py
|
Rahul171201/ClickHouse
|
b0600c3751371f0667b15494731f2fbb073605e6
|
[
"Apache-2.0"
] | null | null | null |
import os
import pytest
from helpers.cluster import ClickHouseCluster
from helpers.hdfs_api import HDFSApi
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1', with_hdfs=True, user_configs=[], main_configs=['configs/log_conf.xml'])
@pytest.fixture(scope="module")
def started_cluster():
try:
cluster.start()
yield cluster
except Exception as ex:
print(ex)
raise ex
finally:
cluster.shutdown()
def test_read_write_storage(started_cluster):
hdfs_api = HDFSApi("root")
node1.query(
"create table SimpleHDFSStorage (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs1:9000/simple_storage', 'TSV')")
node1.query("insert into SimpleHDFSStorage values (1, 'Mark', 72.53)")
assert hdfs_api.read_data("/simple_storage") == "1\tMark\t72.53\n"
assert node1.query("select * from SimpleHDFSStorage") == "1\tMark\t72.53\n"
def test_read_write_storage_with_globs(started_cluster):
hdfs_api = HDFSApi("root")
node1.query(
"create table HDFSStorageWithRange (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs1:9000/storage{1..5}', 'TSV')")
node1.query(
"create table HDFSStorageWithEnum (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs1:9000/storage{1,2,3,4,5}', 'TSV')")
node1.query(
"create table HDFSStorageWithQuestionMark (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs1:9000/storage?', 'TSV')")
node1.query(
"create table HDFSStorageWithAsterisk (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs1:9000/storage*', 'TSV')")
for i in ["1", "2", "3"]:
hdfs_api.write_data("/storage" + i, i + "\tMark\t72.53\n")
assert hdfs_api.read_data("/storage" + i) == i + "\tMark\t72.53\n"
assert node1.query("select count(*) from HDFSStorageWithRange") == "3\n"
assert node1.query("select count(*) from HDFSStorageWithEnum") == "3\n"
assert node1.query("select count(*) from HDFSStorageWithQuestionMark") == "3\n"
assert node1.query("select count(*) from HDFSStorageWithAsterisk") == "3\n"
try:
node1.query("insert into HDFSStorageWithEnum values (1, 'NEW', 4.2)")
assert False, "Exception have to be thrown"
except Exception as ex:
print(ex)
assert "in readonly mode" in str(ex)
try:
node1.query("insert into HDFSStorageWithQuestionMark values (1, 'NEW', 4.2)")
assert False, "Exception have to be thrown"
except Exception as ex:
print(ex)
assert "in readonly mode" in str(ex)
try:
node1.query("insert into HDFSStorageWithAsterisk values (1, 'NEW', 4.2)")
assert False, "Exception have to be thrown"
except Exception as ex:
print(ex)
assert "in readonly mode" in str(ex)
def test_read_write_table(started_cluster):
hdfs_api = HDFSApi("root")
data = "1\tSerialize\t555.222\n2\tData\t777.333\n"
hdfs_api.write_data("/simple_table_function", data)
assert hdfs_api.read_data("/simple_table_function") == data
assert node1.query(
"select * from hdfs('hdfs://hdfs1:9000/simple_table_function', 'TSV', 'id UInt64, text String, number Float64')") == data
def test_write_table(started_cluster):
hdfs_api = HDFSApi("root")
node1.query(
"create table OtherHDFSStorage (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs1:9000/other_storage', 'TSV')")
node1.query("insert into OtherHDFSStorage values (10, 'tomas', 55.55), (11, 'jack', 32.54)")
result = "10\ttomas\t55.55\n11\tjack\t32.54\n"
assert hdfs_api.read_data("/other_storage") == result
assert node1.query("select * from OtherHDFSStorage order by id") == result
def test_bad_hdfs_uri(started_cluster):
try:
node1.query(
"create table BadStorage1 (id UInt32, name String, weight Float64) ENGINE = HDFS('hads:hgsdfs100500:9000/other_storage', 'TSV')")
except Exception as ex:
print(ex)
assert "Illegal HDFS URI" in str(ex)
try:
node1.query(
"create table BadStorage2 (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs100500:9000/other_storage', 'TSV')")
except Exception as ex:
print(ex)
assert "Unable to create builder to connect to HDFS" in str(ex)
try:
node1.query(
"create table BadStorage3 (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs1:9000/<>', 'TSV')")
except Exception as ex:
print(ex)
assert "Unable to open HDFS file" in str(ex)
def test_globs_in_read_table(started_cluster):
hdfs_api = HDFSApi("root")
some_data = "1\tSerialize\t555.222\n2\tData\t777.333\n"
globs_dir = "/dir_for_test_with_globs/"
files = ["dir1/dir_dir/file1", "dir2/file2", "simple_table_function", "dir/file", "some_dir/dir1/file",
"some_dir/dir2/file", "some_dir/file", "table1_function", "table2_function", "table3_function"]
for filename in files:
hdfs_api.write_data(globs_dir + filename, some_data)
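    # Each tuple below is (glob pattern, number of matching HDFS paths, number of
    # distinct file names among the matches), checked via the virtual _path and
    # _file columns in the assertions that follow.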
test_requests = [("dir{1..5}/dir_dir/file1", 1, 1),
("*_table_functio?", 1, 1),
("dir/fil?", 1, 1),
("table{3..8}_function", 1, 1),
("table{2..8}_function", 2, 2),
("dir/*", 1, 1),
("dir/*?*?*?*?*", 1, 1),
("dir/*?*?*?*?*?*", 0, 0),
("some_dir/*/file", 2, 1),
("some_dir/dir?/*", 2, 1),
("*/*/*", 3, 2),
("?", 0, 0)]
for pattern, paths_amount, files_amount in test_requests:
inside_table_func = "'hdfs://hdfs1:9000" + globs_dir + pattern + "', 'TSV', 'id UInt64, text String, number Float64'"
assert node1.query("select * from hdfs(" + inside_table_func + ")") == paths_amount * some_data
assert node1.query("select count(distinct _path) from hdfs(" + inside_table_func + ")").rstrip() == str(
paths_amount)
assert node1.query("select count(distinct _file) from hdfs(" + inside_table_func + ")").rstrip() == str(
files_amount)
def test_read_write_gzip_table(started_cluster):
hdfs_api = HDFSApi("root")
data = "1\tHello Jessica\t555.222\n2\tI rolled a joint\t777.333\n"
hdfs_api.write_gzip_data("/simple_table_function.gz", data)
assert hdfs_api.read_gzip_data("/simple_table_function.gz") == data
assert node1.query(
"select * from hdfs('hdfs://hdfs1:9000/simple_table_function.gz', 'TSV', 'id UInt64, text String, number Float64')") == data
def test_read_write_gzip_table_with_parameter_gzip(started_cluster):
hdfs_api = HDFSApi("root")
data = "1\tHello Jessica\t555.222\n2\tI rolled a joint\t777.333\n"
hdfs_api.write_gzip_data("/simple_table_function", data)
assert hdfs_api.read_gzip_data("/simple_table_function") == data
assert node1.query(
"select * from hdfs('hdfs://hdfs1:9000/simple_table_function', 'TSV', 'id UInt64, text String, number Float64', 'gzip')") == data
def test_read_write_table_with_parameter_none(started_cluster):
hdfs_api = HDFSApi("root")
data = "1\tHello Jessica\t555.222\n2\tI rolled a joint\t777.333\n"
hdfs_api.write_data("/simple_table_function.gz", data)
assert hdfs_api.read_data("/simple_table_function.gz") == data
assert node1.query(
"select * from hdfs('hdfs://hdfs1:9000/simple_table_function.gz', 'TSV', 'id UInt64, text String, number Float64', 'none')") == data
def test_read_write_gzip_table_with_parameter_auto_gz(started_cluster):
hdfs_api = HDFSApi("root")
data = "1\tHello Jessica\t555.222\n2\tI rolled a joint\t777.333\n"
hdfs_api.write_gzip_data("/simple_table_function.gz", data)
assert hdfs_api.read_gzip_data("/simple_table_function.gz") == data
assert node1.query(
"select * from hdfs('hdfs://hdfs1:9000/simple_table_function.gz', 'TSV', 'id UInt64, text String, number Float64', 'auto')") == data
def test_write_gz_storage(started_cluster):
hdfs_api = HDFSApi("root")
node1.query(
"create table GZHDFSStorage (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs1:9000/storage.gz', 'TSV')")
node1.query("insert into GZHDFSStorage values (1, 'Mark', 72.53)")
assert hdfs_api.read_gzip_data("/storage.gz") == "1\tMark\t72.53\n"
assert node1.query("select * from GZHDFSStorage") == "1\tMark\t72.53\n"
def test_write_gzip_storage(started_cluster):
hdfs_api = HDFSApi("root")
node1.query(
"create table GZIPHDFSStorage (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs1:9000/gzip_storage', 'TSV', 'gzip')")
node1.query("insert into GZIPHDFSStorage values (1, 'Mark', 72.53)")
assert hdfs_api.read_gzip_data("/gzip_storage") == "1\tMark\t72.53\n"
assert node1.query("select * from GZIPHDFSStorage") == "1\tMark\t72.53\n"
| 41.815668
| 145
| 0.65616
|
cc00ca69531811639b039ba179df425915c03894
| 3,199
|
py
|
Python
|
test/petab/test_amici_objective.py
|
stephanmg/pyPESTO
|
72488fbb3eaa91dd163f88bac71a1a165a0da70f
|
[
"BSD-3-Clause"
] | null | null | null |
test/petab/test_amici_objective.py
|
stephanmg/pyPESTO
|
72488fbb3eaa91dd163f88bac71a1a165a0da70f
|
[
"BSD-3-Clause"
] | null | null | null |
test/petab/test_amici_objective.py
|
stephanmg/pyPESTO
|
72488fbb3eaa91dd163f88bac71a1a165a0da70f
|
[
"BSD-3-Clause"
] | null | null | null |
"""
This is for testing the pypesto.Objective.
"""
from pypesto.objective.amici_util import add_sim_grad_to_opt_grad
import petab
import pypesto
import pypesto.petab
import pypesto.optimize
import pypesto.objective.constants
import pytest
import numpy as np
from .petab_util import folder_base
ATOL = 1e-1
RTOL = 1e-0
def test_add_sim_grad_to_opt_grad():
"""
Test gradient mapping/summation works as expected.
17 = 1 + 2*5 + 2*3
"""
par_opt_ids = ['opt_par_1',
'opt_par_2',
'opt_par_3']
mapping_par_opt_to_par_sim = {
'sim_par_1': 'opt_par_1',
'sim_par_2': 'opt_par_3',
'sim_par_3': 'opt_par_3'
}
par_sim_ids = ['sim_par_1', 'sim_par_2', 'sim_par_3']
sim_grad = np.asarray([1.0, 3.0, 5.0])
opt_grad = np.asarray([1.0, 1.0, 1.0])
expected = np.asarray([3.0, 1.0, 17.0])
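    # With coefficient=2.0: opt_par_1 <- 1 + 2*1 = 3, opt_par_2 is untouched (1),
    # opt_par_3 <- 1 + 2*3 + 2*5 = 17 (both sim_par_2 and sim_par_3 map onto it).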
add_sim_grad_to_opt_grad(
par_opt_ids,
par_sim_ids,
mapping_par_opt_to_par_sim,
sim_grad,
opt_grad,
coefficient=2.0)
assert np.allclose(expected, opt_grad)
def test_error_leastsquares_with_ssigma():
petab_problem = petab.Problem.from_yaml(
folder_base + "Zheng_PNAS2012/Zheng_PNAS2012.yaml")
petab_problem.model_name = "Zheng_PNAS2012"
importer = pypesto.petab.PetabImporter(petab_problem)
obj = importer.create_objective()
problem = importer.create_problem(obj)
optimizer = pypesto.optimize.ScipyOptimizer(
'ls_trf', options={'max_nfev': 50})
with pytest.raises(RuntimeError):
pypesto.optimize.minimize(
problem=problem, optimizer=optimizer, n_starts=1,
options=pypesto.optimize.OptimizeOptions(allow_failed_starts=False)
)
def test_preeq_guesses():
"""
Test whether optimization with preequilibration guesses works, asserts
that steadystate guesses are written and checks that gradient is still
correct with guesses set.
"""
petab_problem = petab.Problem.from_yaml(
folder_base + "Zheng_PNAS2012/Zheng_PNAS2012.yaml")
petab_problem.model_name = "Zheng_PNAS2012"
importer = pypesto.petab.PetabImporter(petab_problem)
obj = importer.create_objective()
problem = importer.create_problem(obj)
# assert that initial guess is uninformative
assert problem.objective.steadystate_guesses['fval'] == np.inf
optimizer = pypesto.optimize.ScipyOptimizer(
'L-BFGS-B', options={'maxiter': 50})
result = pypesto.optimize.minimize(
problem=problem, optimizer=optimizer, n_starts=1,
)
assert problem.objective.steadystate_guesses['fval'] < np.inf
assert len(obj.steadystate_guesses['data']) == 1
df = obj.check_grad(
result.optimize_result.list[0]['x'],
eps=1e-3,
verbosity=0,
mode=pypesto.objective.constants.MODE_FUN
)
print("relative errors MODE_FUN: ", df.rel_err.values)
print("absolute errors MODE_FUN: ", df.abs_err.values)
assert np.all((df.rel_err.values < RTOL) | (df.abs_err.values < ATOL))
# assert that resetting works
problem.objective.initialize()
assert problem.objective.steadystate_guesses['fval'] == np.inf
| 30.466667
| 79
| 0.683651
|
a932a823434a703b05d9e90b790882604e184307
| 1,466
|
py
|
Python
|
tools/fcos/test_dior_r_quad.py
|
chisyliu/RotationDetection
|
6f2bd55a51a6de0bcd0959a85977682511fd440d
|
[
"Apache-2.0"
] | 2
|
2022-03-05T09:55:49.000Z
|
2022-03-05T10:12:51.000Z
|
tools/fcos/test_dior_r_quad.py
|
junhai0428/RotationDetection
|
4249720ea4dacdd60e696901df8034e5cd0a1843
|
[
"Apache-2.0"
] | null | null | null |
tools/fcos/test_dior_r_quad.py
|
junhai0428/RotationDetection
|
4249720ea4dacdd60e696901df8034e5cd0a1843
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
sys.path.append("../../")
from alpharotate.libs.models.detectors.fcos import build_whole_network_batch_quad
from tools.test_dior_r_base_q import TestDIORR
from configs import cfgs
from alpharotate.libs.val_libs.voc_eval_r import EVAL
class TestDIORRFCOS(TestDIORR):
def eval(self):
fcos = build_whole_network_batch_quad.DetectionNetworkFCOS(cfgs=self.cfgs,
is_training=False)
all_boxes_r = self.eval_with_plac(img_dir=self.args.img_dir, det_net=fcos,
image_ext=self.args.image_ext)
# with open(cfgs.VERSION + '_detections_r.pkl', 'rb') as f2:
# all_boxes_r = pickle.load(f2)
#
# print(len(all_boxes_r))
imgs = os.listdir(self.args.img_dir)
real_test_imgname_list = [i.split(self.args.image_ext)[0] for i in imgs]
print(10 * "**")
print('rotation eval:')
evaler = EVAL(self.cfgs)
evaler.voc_evaluate_detections(all_boxes=all_boxes_r,
test_imgid_list=real_test_imgname_list,
test_annotation_path=self.args.test_annotation_path)
if __name__ == '__main__':
tester = TestDIORRFCOS(cfgs)
tester.eval()
| 31.191489
| 91
| 0.629604
|
e51f209728bb72d4a80d0d3ef48d37faa4033b44
| 3,149
|
py
|
Python
|
9_GenericCBV/GenericCBV/settings.py
|
LIkelion-at-KOREATECH/LikeLion_Django_Study_Summary
|
c788182af5bcfd16bdd4b57235a48659758e494b
|
[
"MIT"
] | 28
|
2019-10-15T13:15:26.000Z
|
2021-11-08T08:23:45.000Z
|
9_GenericCBV/GenericCBV/settings.py
|
jhleed/LikeLion_Django_Study_Summary
|
c788182af5bcfd16bdd4b57235a48659758e494b
|
[
"MIT"
] | null | null | null |
9_GenericCBV/GenericCBV/settings.py
|
jhleed/LikeLion_Django_Study_Summary
|
c788182af5bcfd16bdd4b57235a48659758e494b
|
[
"MIT"
] | 17
|
2019-09-09T00:15:36.000Z
|
2021-01-28T13:08:51.000Z
|
"""
Django settings for GenericCBV project.
Generated by 'django-admin startproject' using Django 2.1.8.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'm!+aswd=4507---1g8=!37+cs_vl#r&!d1p#v%+55(^6v-8&cf'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'post.apps.PostConfig'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'GenericCBV.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'GenericCBV.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| 25.601626
| 91
| 0.694189
|