hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7b4f80cffeab03970d45e2719bf3cfd815720f37 | 7,916 | py | Python | handsomeware.py | lord-aceldama/HandsomeWare | 6b9bc79752c4c93267466347dc54f5f7c21f5f1c | [
"MIT"
] | 1 | 2020-11-11T05:50:27.000Z | 2020-11-11T05:50:27.000Z | handsomeware.py | lord-aceldama/HandsomeWare | 6b9bc79752c4c93267466347dc54f5f7c21f5f1c | [
"MIT"
] | null | null | null | handsomeware.py | lord-aceldama/HandsomeWare | 6b9bc79752c4c93267466347dc54f5f7c21f5f1c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import random
try:
import pyAesCrypt
except ImportError:
print("Error importing pyAesCrypt. Please run 'pip install pyAesCrypt'.")
sys.exit(-1)
VERSION = "0.9.2"  # Version string shown in the splash banner and by --version.
BUFFER_SIZE = 64 * 1024  # Chunk size (bytes) passed to pyAesCrypt encrypt/decrypt calls.
DEFAULT_PASSWORD = None  # if you want you can use a string like "Password123" here.
# ---------------------------------------------------------------------
def print_splash():
    """Print the ASCII-art splash banner with the current script version."""
    # NOTE: the backslashes in the banner are doubled because this is an
    # f-string; only {VERSION} is interpolated. Do not "fix" the escaping.
    print(f"""
                                      )
 ( /(         (                       (
 )\\())    )   )\\ )    )           (   )\\))(  '   )    (
((_)\\  ( /(  ( (()/(( (   (    ( ))\\((_)()\\ )  ( /(   )(   ))\\
_((_))(_)) )\\ ) ((_))\\ )\\  )\\ '/((_)(())\\_)())(_)|()\\ /((_)
| || ((_)_ _(_/( _| ((_)((_)_((_))(_))  \\ \\((_)/ ((_)_ ((_|_))
| __ / _` | ' \\)) _` (_-< _ \\ ' \\() -_)  \\ \\/\\/ // _` | '_/ -_)
|_||_\\__,_|_||_|\\__,_/__|___/_|_|_|\\___|  \\_/\\_/ \\__,_|_|  \\___|
                      ( v{VERSION} )
""")
def print_help():
    """Print the CLI usage text, substituting the actual script name."""
    # Use the invoked file name so the examples are copy-pasteable.
    script = os.path.basename(sys.argv[0])
    print(f"""
    PROJECT HOME: https://github.com/lord-aceldama/HandsomeWare
    GENERAL:
        ./{script} --help     : Show this help message.
        ./{script} --version  : Prints the current version of the script.
    DECRYPTION:
        ./{script} --decrypt <inputfile> <outputfile>
    ENCRYPTION
        ./{script} [flags] <path>
        Flags:
          --shred [passes] : Shred files after encryption. Use passes to specify
                             how many times to overwrite files. (Default: 1)
          --ssd            : Use this flag if the file/directory is on a drive
                             that does rotational writes. If you are unsure,
                             check [ https://unix.stackexchange.com/a/65602 ].
          --x <ext>[,<ext>] : Only encrypt files with extension <ext>. Multiple
                             extensions can be given by separating them with ",".
          --rnd [len]      : Use random password of length [len]. (Default: 20)
    """)
def get_random_string(length):
    """Return a pseudo-random string of *length* lowercase-alphanumeric chars.

    Note: backed by ``random``, so the output is NOT cryptographically secure.
    """
    alphabet = 'abcdefghijklmnopqrstuvwxyz1234567890'
    return ''.join(random.choice(alphabet) for _ in range(length))
def create_file(path):
    """Create an empty, randomly-named temporary file inside *path*.

    Returns:
        tuple: ``(filename, free_bytes)`` on success, where *free_bytes* is the
        free space (in bytes) of the filesystem containing *path*;
        ``(None, 0)`` if anything fails (the error is printed, not raised).
    """
    result = None, 0
    try:
        # Seed the loop with '.', which always exists, so at least one
        # random candidate name is generated before the loop exits.
        filename = '.'
        while os.path.exists(filename):
            filename = os.path.join(path, get_random_string(32))
        # Create the empty file; the context manager guarantees it is closed
        # even if a later statement raises (the original leaked on error).
        with open(filename, "wb"):
            pass
        # Free space = filesystem fragment size * blocks available to
        # unprivileged processes (POSIX-only: os.statvfs).
        statvfs = os.statvfs(path)
        free_bytes = statvfs.f_frsize * statvfs.f_bavail
        result = filename, free_bytes
    except Exception as e:
        # Deliberately best-effort: report the problem and fall through to
        # the (None, 0) sentinel so callers can stop gracefully.
        print(f"ERROR: {e}")
    return result
def lock_free_space(path):
    """Creates a file (or files, if df > 1GB) filling all free disk space.
    disk free: https://stackoverflow.com/a/39799743
    fast write: https://stackoverflow.com/a/8816144

    Returns the list of lock-file names so the caller can delete them later.
    Intended use: pin free space while shredding on SSDs so the freed blocks
    cannot be reallocated mid-shred.
    """
    result = []
    filename, free_bytes = create_file(path)
    # Keep creating up-to-1GiB files until create_file reports no free space
    # (or fails and returns the (None, 0) sentinel).
    while (filename is not None) and (free_bytes > 0):
        result.append(filename)
        f = open(filename, "wb")
        # NOTE(review): seek()+single-byte write typically produces a SPARSE
        # file on most filesystems, which would NOT actually reserve the
        # space this function intends to lock -- confirm on target fs.
        f.seek(min(2 ** 30, free_bytes) - 1)
        f.write(b"\0")
        f.close()
        filename, free_bytes = create_file(path)
    return result
def secure_delete(file_path, passes=1, ssd=False):
    """Fast file shredder.

    Overwrites *file_path* with random bytes *passes* times, then removes it.
    When *ssd* is True, all remaining free space is locked first (see
    lock_free_space) and released again afterwards.
    """
    lock_files = lock_free_space(os.path.dirname(file_path)) if ssd else []
    with open(file_path, "ba+") as del_file:
        # NOTE(review): relies on append mode positioning the stream at EOF
        # so tell() yields the file length -- confirm on target platform.
        length = del_file.tell()
        for i in range(passes):
            # Rewind and overwrite the whole file with fresh random data.
            del_file.seek(0)
            del_file.write(os.urandom(length))
    os.remove(file_path)
    # Release the free-space locks created above (no-op when ssd is False).
    for lock_file in lock_files:
        os.remove(lock_file)
def get_filename(file_path, filename):
    """Return an anonymised output name that does not yet exist in *file_path*.

    Candidates are "(unknown).aes", "(unknown)_1.aes", "(unknown)_2.aes", ...

    Args:
        file_path: Directory the encrypted file will be written into.
        filename: Original file name; currently unused, kept so the call
            signature stays compatible with existing callers.
    """
    # FIX: the original used f-strings with no placeholders (flake8 F541);
    # only the numbered fallback actually needs interpolation.
    result = "(unknown).aes"
    i = 0
    while os.path.exists(os.path.join(file_path, result)):
        i += 1
        result = f"(unknown)_{i}.aes"
    return result
def get_password():
    """Interactively prompt for a password twice until both entries match."""
    def prompt_pair():
        """Read the password and its confirmation from stdin."""
        return input("Enter password: "), input("Re-enter password: ")

    while True:
        candidate, confirmation = prompt_pair()
        if candidate == confirmation:
            return candidate
        print("Password mismatch, please try again.")
def do_encrypt():
    """Encrypt files under the path given as the last CLI argument.

    Flags read from sys.argv:
        --x <ext>[,<ext>]  only encrypt files with one of these extensions
        --shred [passes]   shred originals after encryption
        --rnd [len]        generate a random password of the given length
        --ssd              lock free space while shredding (SSD-safe mode)

    Encrypted output goes to anonymised "(unknown)*.aes" names next to the
    originals. Falls back to an interactive password prompt when neither
    DEFAULT_PASSWORD nor --rnd supplies one.
    """
    password = DEFAULT_PASSWORD
    root_dir = sys.argv[-1]
    if os.path.exists(root_dir):
        # Get command line flags
        ext = None
        if "--x" in sys.argv:
            ext = sys.argv[sys.argv.index("--x") + 1].split(",")
            print(f" * Encrypting all files with extension(s): {', '.join(ext)}")
        shred_originals = "--shred" in sys.argv
        shred_passes = 1
        if shred_originals:
            try:
                shred_passes = int(sys.argv[sys.argv.index("--shred") + 1])
            except (IndexError, ValueError):
                # Flag given without a numeric argument: keep the default.
                shred_passes = 1
            print(f" * Shredding originals after encryption. ({shred_passes} passes)")
        if "--rnd" in sys.argv:
            try:
                # BUGFIX: this previously looked up index("--x"), so the value
                # following --x (not --rnd) was parsed as the password length.
                password = get_random_string(int(sys.argv[sys.argv.index("--rnd") + 1]))
            except (IndexError, ValueError):
                print(" * Attempted to get password length but failed. Defaulting to 20 chars.")
                password = get_random_string(20)
            print(f" * Random password generated: {password}")
        # Get password, if not set
        if password is None:
            password = get_password()
        # Set SSD flag
        is_ssd = "--ssd" in sys.argv
        # Walk the tree and encrypt every matching file.
        for path, _, files in os.walk(root_dir):
            print(f"[{path}]")
            for infile in files:
                f_ext = os.path.splitext(infile)[1]
                # Encrypt when no filter was given, the file has no extension,
                # or its extension (sans dot) is in the filter list.
                if (ext is None) or (f_ext == "") or (f_ext[1:] in ext):
                    outfile = os.path.join(path, get_filename(path, infile))
                    print(f" + {infile} -> {outfile}")
                    try:
                        pyAesCrypt.encryptFile(os.path.join(path, infile), outfile, password, BUFFER_SIZE)
                        if shred_originals:
                            print(f" >> Shredding...")
                            secure_delete(os.path.join(path, infile), shred_passes, is_ssd)
                    except Exception as e:
                        # Best-effort: report and continue with the next file.
                        print(f" >> Error: {e}")
    else:
        print("Error: The path could not be found!")
def do_decrypt():
    """Decrypt sys.argv[2] (input file) to sys.argv[3] (output file).

    Validates the arguments and prints a specific error message for each
    failure mode; on success prompts for the password (unless
    DEFAULT_PASSWORD is set) and runs pyAesCrypt.decryptFile.
    """
    if len(sys.argv) == 4:
        file_in = sys.argv[2]
        file_out = sys.argv[3]
        if not os.path.exists(file_in):
            # FIX: moved the sentence-ending period outside the quoted name.
            print(f"Error: Could not find file '{file_in}'.")
        elif os.path.isdir(file_in):
            print(f"Error: File expected but '{file_in}' is a directory.")
        elif os.path.exists(file_out):
            # BUGFIX: this message previously named the INPUT file and had a
            # stray trailing quote; it is the output file that already exists.
            print(f"Error: File '{file_out}' already exists.")
        else:
            password = input("Password: ") if DEFAULT_PASSWORD is None else DEFAULT_PASSWORD
            print("Decrypting...")
            pyAesCrypt.decryptFile(file_in, file_out, password, BUFFER_SIZE)
    else:
        print(f"Error: Expected 3 command line args but got {len(sys.argv) - 1}.")
        print_help()
# ---------------------------------------------------------------------
# Command-line entry point (this module is meant to be run as a script).
print_splash()
if "--version" in sys.argv:
    # NOTE(review): --version only prints; it does not exit, so the
    # help/decrypt/encrypt branch below still runs -- confirm intended.
    print(VERSION)
if (len(sys.argv) == 1) or ("--help" in sys.argv):
    print_help()
else:
    if "--decrypt" in sys.argv:
        do_decrypt()
    else:
        # Default mode: encrypt the path given as the last argument.
        do_encrypt()
print("\nAll tasks failed successfully. Have a nice day :)\n\n")
| 32.846473 | 106 | 0.52615 |
99182095224a64253fe6880da6072387781fa38e | 3,447 | py | Python | benchmarks/ltl_infinite_state/bounded_counter/f3/constant_bound.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | 3 | 2021-04-23T23:29:26.000Z | 2022-03-23T10:00:30.000Z | benchmarks/ltl_infinite_state/bounded_counter/f3/constant_bound.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | null | null | null | benchmarks/ltl_infinite_state/bounded_counter/f3/constant_bound.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | 1 | 2021-11-17T22:02:56.000Z | 2021-11-17T22:02:56.000Z | from collections import Iterable
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_integer_type, msat_get_rational_type
from mathsat import msat_make_and, msat_make_not, msat_make_or
from mathsat import msat_make_leq, msat_make_equal
from mathsat import msat_make_number, msat_make_plus
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Build the term ``arg0 < arg1`` as ``not (arg0 >= arg1)``."""
    return msat_make_not(menv, msat_make_geq(menv, arg0, arg1))
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Build the term ``arg0 >= arg1`` by flipping the operands of ``<=``."""
    flipped = msat_make_leq(menv, arg1, arg0)
    return flipped
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Build the term ``arg0 > arg1`` as ``not (arg0 <= arg1)``."""
    return msat_make_not(menv, msat_make_leq(menv, arg0, arg1))
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Build the implication ``arg0 -> arg1`` as ``(not arg0) or arg1``."""
    negated_premise = msat_make_not(menv, arg0)
    return msat_make_or(menv, negated_premise, arg1)
def check_ltl(menv: msat_env, enc: LTLEncoder) -> (Iterable, msat_term,
                                                   msat_term, msat_term):
    """Build the bounded-counter transition system and its LTL property.

    Returns (curr2next map, init formula, trans formula, ltl formula) for a
    system with rational vars i (counter), r (constant bound) and m_x_i
    (monitor of next i).  The counter counts 0..10 and wraps; the property is
    (G F m_x_i > i) -> !(G F r > i).
    """
    assert menv
    assert isinstance(menv, msat_env)
    assert enc
    assert isinstance(enc, LTLEncoder)
    real_type = msat_get_rational_type(menv)
    # Declare each state variable and its primed (next-state) counterpart.
    i = msat_declare_function(menv, "i", real_type)
    i = msat_make_constant(menv, i)
    r = msat_declare_function(menv, "r", real_type)
    r = msat_make_constant(menv, r)
    m_x_i = msat_declare_function(menv, "m_x_i", real_type)
    m_x_i = msat_make_constant(menv, m_x_i)
    x_m_x_i = msat_declare_function(menv, name_next("m_x_i"), real_type)
    x_m_x_i = msat_make_constant(menv, x_m_x_i)
    x_i = msat_declare_function(menv, name_next("i"), real_type)
    x_i = msat_make_constant(menv, x_i)
    x_r = msat_declare_function(menv, name_next("r"), real_type)
    x_r = msat_make_constant(menv, x_r)
    curr2next = {i: x_i, r: x_r, m_x_i: x_m_x_i}
    zero = msat_make_number(menv, "0")
    one = msat_make_number(menv, "1")
    l = msat_make_number(menv, "10")  # constant upper bound of the counter
    # init: 0 < r < 10 and i >= 0
    r_gt_0 = msat_make_gt(menv, r, zero)
    r_lt_l = msat_make_lt(menv, r, l)
    i_geq_0 = msat_make_geq(menv, i, zero)
    init = msat_make_and(menv, r_gt_0, r_lt_l)
    init = msat_make_and(menv, init, i_geq_0)
    # r' = r
    trans = msat_make_equal(menv, x_r, r)
    # i < l -> (i' = i + 1 | i' = i)
    i_lt_l = msat_make_lt(menv, i, l)
    x_i_eq_i_p_1 = msat_make_equal(menv, x_i,
                                   msat_make_plus(menv, i, one))
    x_i_eq_i = msat_make_equal(menv, x_i, i)
    x_i_eq_i_p_1_or_i = msat_make_or(menv, x_i_eq_i_p_1, x_i_eq_i)
    trans = msat_make_and(menv, trans,
                          msat_make_impl(menv, i_lt_l, x_i_eq_i_p_1_or_i))
    # i >= l -> i' = 0  (the counter wraps around at the bound)
    i_geq_l = msat_make_geq(menv, i, l)
    x_i_eq_0 = msat_make_equal(menv, x_i, zero)
    trans = msat_make_and(menv, trans,
                          msat_make_impl(menv, i_geq_l, x_i_eq_0))
    # i' = m_x_i  (the monitor variable mirrors next-state i)
    trans = msat_make_and(menv, trans,
                          msat_make_equal(menv, x_i, m_x_i))
    # (G F m_x_i > i) -> ! G F r > i
    x_i_gt_i = msat_make_gt(menv, m_x_i, i)
    G_F_x_i_gt_i = enc.make_G(enc.make_F(x_i_gt_i))
    r_gt_i = msat_make_gt(menv, r, i)
    n_G_F_r_gt_i = msat_make_not(menv, enc.make_G(enc.make_F(r_gt_i)))
    ltl = msat_make_impl(menv, G_F_x_i_gt_i, n_G_F_r_gt_i)
    return TermMap(curr2next), init, trans, ltl
| 38.3 | 74 | 0.682042 |
d501ff0661361dd656b12388dd1e0d47e3aae354 | 1,365 | py | Python | congruencial_multiplicativo.py | NicolasImplant/Pseudoaleaorios_Python | 87ff8a020bcf6776b968f020facae00ee37f779e | [
"MIT"
] | null | null | null | congruencial_multiplicativo.py | NicolasImplant/Pseudoaleaorios_Python | 87ff8a020bcf6776b968f020facae00ee37f779e | [
"MIT"
] | null | null | null | congruencial_multiplicativo.py | NicolasImplant/Pseudoaleaorios_Python | 87ff8a020bcf6776b968f020facae00ee37f779e | [
"MIT"
] | null | null | null | from functions import multiplicativo
# Función que ejecuta de manera cíclica y recursiva el algoritmo congruencial multiplicativo, los parámetros k y g se manipularon
# para garantizar el ciclo de vida máximo del generador
# Function that cyclically and recursively executes the multiplicative congruential algorithm, the parameters k and g were manipulated
# to ensure maximum generator life cycle
def pseudoRandNumbers(seed:int, k:int, g:int, N:int) -> list:
    """Generate a list of N pseudo-random numbers with the multiplicative
    congruential method, using a = 5 + 8k and modulus m = 2**g.

    Only N-1 iterations are performed, so the final slot remains 0 (the
    caller compensates by requesting one extra element).
    """
    multiplier = 5 + 8 * k
    modulus = 2 ** g
    states = [0] * N
    random_numbers = [0] * N
    states[0] = seed
    for idx in range(N - 1):
        states[idx + 1], random_numbers[idx] = multiplicativo(multiplier, states[idx], modulus)
    return random_numbers
if __name__ == '__main__':
    # Entry area of the model parameters.
    try:
        seed = int(input('Digita el valor de la semilla: '))
        k = int(input('Digita el valor de la constante de k: '))
        g = int(input('Digita el valor de la constante de g: '))
        # Values below 10 are rejected to favour a maximal generator period.
        if seed < 10 or k < 10 or g < 10:
            raise ValueError
        # +1 because pseudoRandNumbers only fills N-1 slots (the last is 0).
        N = int(input('Digita la cantidad de números aleatorios requerida: ')) + 1
        print(pseudoRandNumbers(seed,k,g,N))
    except ValueError:
print('Para un ciclo de vida máximo, los valores de las constantes y la semilla deben ser mayores a 10') | 36.891892 | 134 | 0.668132 |
b1bba88d24e60a3b511875c6ac56c4bf8d3ce6a3 | 2,371 | py | Python | leetcode_python/Depth-First-Search/increasing_subsequences.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | [
"Unlicense"
] | 18 | 2019-08-01T07:45:02.000Z | 2022-03-31T18:05:44.000Z | leetcode_python/Depth-First-Search/increasing_subsequences.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | [
"Unlicense"
] | null | null | null | leetcode_python/Depth-First-Search/increasing_subsequences.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | [
"Unlicense"
] | 15 | 2019-12-29T08:46:20.000Z | 2022-03-08T14:14:05.000Z | # V0
# IDEA : DFS
class Solution(object):
    def findSubsequences(self, nums):
        """
        Return all non-decreasing subsequences of *nums* with length >= 2.

        :type nums: List[int]
        :rtype: List[List[int]]
        """
        res = set()
        self.dfs(nums, 0, res, [])
        # BUGFIX for Python 3: `map` yields a lazy single-use iterator, not
        # the List[List[int]] the docstring promises; materialise it.
        return list(map(list, res))

    def dfs(self, nums, index, res, path):
        """DFS helper: extend *path* with candidates from nums[index:]."""
        if len(path) >= 2:
            # Tuples are hashable, so the set deduplicates repeated values.
            res.add(tuple(path))
        for i in range(index, len(nums)):
            # Only extend while the subsequence stays non-decreasing.
            if not path or nums[i] >= path[-1]:
                self.dfs(nums, i + 1, res, path + [nums[i]])
# V1
# https://blog.csdn.net/fuxuemingzhu/article/details/79827505
# IDEA : DFS
class Solution(object):
    def findSubsequences(self, nums):
        """
        Collect every non-decreasing subsequence of length >= 2.

        :type nums: List[int]
        :rtype: iterator of List[int]
        """
        found = set()
        self.dfs(nums, 0, found, [])
        return map(list, found)

    def dfs(self, nums, index, res, path):
        """Recursively grow *path*, recording any prefix of length >= 2."""
        if len(path) > 1:
            res.add(tuple(path))
        for pos in range(index, len(nums)):
            candidate = nums[pos]
            # Skip candidates that would break the non-decreasing order.
            if path and candidate < path[-1]:
                continue
            self.dfs(nums, pos + 1, res, path + [candidate])
# V1'
# https://blog.csdn.net/fuxuemingzhu/article/details/79827505
# IDEA : DP
class Solution(object):
    def findSubsequences(self, nums):
        """
        Build all non-decreasing subsequences (length >= 2) bottom-up.

        Dynamic programming over a set of tuples: each new number extends
        every compatible prefix already seen.

        :type nums: List[int]
        :rtype: List[Tuple[int]]
        """
        seen = set()
        for value in nums:
            # Extend every prefix whose last element permits *value*.
            extensions = {prefix + (value,) for prefix in seen if value >= prefix[-1]}
            seen |= extensions
            seen.add((value,))
        return [candidate for candidate in seen if len(candidate) > 1]
# V1''
# https://www.jiuzhang.com/solution/increasing-subsequences/#tag-highlight-lang-python
class Solution(object):
    def findSubsequences(self, nums):
        """
        Enumerate all non-decreasing subsequences (length >= 2), no duplicates.

        :type nums: List[int]
        :rtype: List[List[int]]
        """
        collected = []
        self.subsets(nums, 0, [], collected)
        return collected

    def subsets(self, nums, index, temp, res):
        """Backtracking helper; a per-level `used` map skips duplicate values."""
        if len(temp) >= 2 and len(nums) >= index:
            res.append(list(temp))
        used = {}
        for pos in range(index, len(nums)):
            value = nums[pos]
            # Reject values that break ordering or repeat at this level.
            if temp and temp[-1] > value:
                continue
            if value in used:
                continue
            used[value] = True
            temp.append(value)
            self.subsets(nums, pos + 1, temp, res)
            temp.pop()
# V2
| 28.22619 | 86 | 0.486293 |
705f11c61375d9e525605f2ed6f4b810b17cfbd0 | 4,998 | py | Python | vrpy/greedy.py | samuelefiorini/vrpy | ad3232b9e9ee9276c9c799d16b4a4a8c2b41eef1 | [
"MIT"
] | 83 | 2020-04-01T06:39:11.000Z | 2022-03-25T01:33:19.000Z | vrpy/greedy.py | samuelefiorini/vrpy | ad3232b9e9ee9276c9c799d16b4a4a8c2b41eef1 | [
"MIT"
] | 111 | 2020-04-01T17:11:30.000Z | 2022-03-31T02:22:53.000Z | vrpy/greedy.py | samuelefiorini/vrpy | ad3232b9e9ee9276c9c799d16b4a4a8c2b41eef1 | [
"MIT"
] | 37 | 2020-04-01T08:59:38.000Z | 2022-03-30T03:04:39.000Z | import logging
# Module-level logger; handlers/levels are configured by the host application.
logger = logging.getLogger(__name__)
class _Greedy:
    """
    Greedy algorithm. Iteratively adds closest feasible node to current path.

    Routes are built one at a time from "Source" to "Sink", always picking
    the cheapest feasible, not-yet-routed successor.  Infeasible forced
    endings incur a large (1e10) penalty on the objective.

    Args:
        G (DiGraph): Graph on which algorithm is run.
        load_capacity (int, optional) : Maximum load per route. Defaults to None.
        num_stops (int, optional) : Maximum stops per route. Defaults to None.
    """

    def __init__(self, G, load_capacity=None, num_stops=None, duration=None):
        # Work on a copy so cost normalisation does not mutate the caller's graph.
        self.G = G.copy()
        self._format_cost()
        self._best_routes = []
        self._unprocessed_nodes = [
            v for v in self.G.nodes() if v not in ["Source", "Sink"]
        ]
        if isinstance(load_capacity, list):
            # capacity of vehicle type 1 is used!
            self.load_capacity = load_capacity[0]
        else:
            self.load_capacity = load_capacity
        self.num_stops = num_stops
        self.duration = duration
        self._best_value = 0

    @property
    def best_value(self):
        # Accumulated cost of all routes built so far (includes penalties).
        return self._best_value

    @property
    def best_routes(self):
        # List of routes, each a list of node names from "Source" to "Sink".
        return self._best_routes

    def run(self):
        """The forward search is run.

        Builds routes until every customer node is routed, or until a route
        degenerates to ["Source", "Sink"] (no node could be added).
        """
        while self._unprocessed_nodes != []:
            # Per-route accumulators, reset before each forward pass.
            self._load = 0
            self._stops = 0
            self._time = 0
            self._run_forward()
            self._update_routes()
            if self._current_path == ["Source", "Sink"]:
                break

    def _run_forward(self):
        """
        A path starting from Source is greedily extended
        until Sink is reached.
        The procedure aborts if path becomes infeasible.
        """
        self._current_path = ["Source"]
        while True:
            self._get_next_node()
            self._update()
            if self._new_node == "Sink":
                break

    def _get_next_node(self):
        """Pick the cheapest feasible unrouted successor; Sink if none exists."""
        self._last_node = self._current_path[-1]
        out_going_costs = {}
        # Store the successors cost that meet constraints
        for v in self.G.successors(self._last_node):
            if self._constraints_met(v) and v in self._unprocessed_nodes:
                out_going_costs[v] = self.G.edges[self._last_node, v]["cost"]
        if out_going_costs == {}:
            logger.debug("path cannot be extended")
            self._new_node = "Sink"
        else:
            # Select best successor
            self._new_node = sorted(out_going_costs, key=out_going_costs.get)[0]

    def _constraints_met(self, v):
        """Checks if constraints are respected."""
        if v in self._current_path or self._check_source_sink(v):
            return False
        elif self.load_capacity and not self._check_capacity(v):
            return False
        elif self.duration and not self._check_duration(v):
            return False
        else:
            return True

    def _update(self):
        """Updates path, path load, unprocessed nodes.

        NOTE(review): the demand lookup below also runs when the new node is
        "Sink"/"Source", so those nodes are expected to carry a "demand"
        attribute -- confirm against the graph builder.
        """
        self._load += self.G.nodes[self._new_node]["demand"]
        last_node = self._current_path[-1]
        self._current_path.append(self._new_node)
        if self._new_node not in ["Source", "Sink"]:
            self._unprocessed_nodes.remove(self._new_node)
            self._stops += 1
            self._best_value += self.G.edges[last_node, self._new_node]["cost"]
            # Travel time to the new node plus its service time.
            self._time += (
                self.G.edges[last_node, self._new_node]["time"]
                + self.G.nodes[self._new_node]["service_time"]
            )
        if self._stops == self.num_stops and self._new_node != "Sink":
            # End path
            self._current_path.append("Sink")
            if self._new_node in self.G.predecessors("Sink"):
                self._best_value += self.G.edges[self._new_node, "Sink"]["cost"]
                self._new_node = "Sink"
            else:
                # No edge back to Sink: discard the route and add a large
                # penalty so this dead end is reflected in the objective.
                self._best_value += 1e10
                self._current_path = None

    def _update_routes(self):
        """Stores best routes as list of nodes."""
        if self._current_path:
            self._best_routes.append(self._current_path)

    def _check_source_sink(self, v):
        """Checks if edge Source Sink."""
        return self._last_node == "Source" and v == "Sink"

    def _check_capacity(self, v):
        """Checks capacity constraint."""
        return self._load + self.G.nodes[v]["demand"] <= self.load_capacity

    def _check_duration(self, v):
        """Checks duration constraint.

        Includes the return leg to Sink so the route can always terminate
        within the duration limit after visiting *v*.
        """
        u = self._current_path[-1]
        return_time = self.G.edges[v, "Sink"]["time"] if v != "Sink" else 0
        return (
            self._time
            + self.G.nodes[v]["service_time"]
            + self.G.edges[u, v]["time"]
            + return_time
            <= self.duration
        )

    def _format_cost(self):
        """If list of costs is given, first item of list is considered."""
        for (i, j) in self.G.edges():
            if isinstance(self.G.edges[i, j]["cost"], list):
                self.G.edges[i, j]["cost"] = self.G.edges[i, j]["cost"][0]
| 34.708333 | 81 | 0.577431 |
3b15cdc0205d0ad3e33f8ec356e34290e28d3889 | 11,826 | py | Python | st2actions/tests/unit/test_pythonrunner.py | totalkyos/stack-storm | b89bc648d53dae03c7484d22abd771edfe45bbb8 | [
"Apache-2.0"
] | null | null | null | st2actions/tests/unit/test_pythonrunner.py | totalkyos/stack-storm | b89bc648d53dae03c7484d22abd771edfe45bbb8 | [
"Apache-2.0"
] | null | null | null | st2actions/tests/unit/test_pythonrunner.py | totalkyos/stack-storm | b89bc648d53dae03c7484d22abd771edfe45bbb8 | [
"Apache-2.0"
] | null | null | null | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import mock
from st2actions.runners import pythonrunner
from st2actions.runners.python_action_wrapper import PythonActionWrapper
from st2actions.runners.pythonrunner import Action
from st2actions.container import service
from st2actions.runners.utils import get_action_class_instance
from st2common.services import config as config_service
from st2common.constants.action import ACTION_OUTPUT_RESULT_DELIMITER
from st2common.constants.action import LIVEACTION_STATUS_SUCCEEDED, LIVEACTION_STATUS_FAILED
from st2common.constants.pack import SYSTEM_PACK_NAME
from base import RunnerTestCase
from st2tests.base import CleanDbTestCase
import st2tests.base as tests_base
# Path to the pascal_row.py fixture action used by most tests below.
PACAL_ROW_ACTION_PATH = os.path.join(tests_base.get_resources_path(), 'packs',
                                     'pythonactions/actions/pascal_row.py')
# Note: the runner inherits the parent's argv, which doesn't work with tests since the
# test runner passes additional unrecognized args -- patch in a mock sys with empty argv.
mock_sys = mock.Mock()
mock_sys.argv = []
@mock.patch('st2actions.runners.pythonrunner.sys', mock_sys)
class PythonRunnerTestCase(RunnerTestCase, CleanDbTestCase):
    """Unit tests for the Python action runner (st2actions.runners.pythonrunner)."""
    # Register fixture packs + their configs into the clean test DB.
    register_packs = True
    register_pack_configs = True
    def test_runner_creation(self):
        """The factory returns a PythonRunner instance."""
        runner = pythonrunner.get_runner()
        self.assertTrue(runner is not None, 'Creation failed. No instance.')
        self.assertEqual(type(runner), pythonrunner.PythonRunner, 'Creation failed. No instance.')
    def test_simple_action(self):
        """A valid action run succeeds and returns the expected result payload."""
        runner = pythonrunner.get_runner()
        runner.action = self._get_mock_action_obj()
        runner.runner_parameters = {}
        runner.entry_point = PACAL_ROW_ACTION_PATH
        runner.container_service = service.RunnerContainerService()
        runner.pre_run()
        (status, result, _) = runner.run({'row_index': 4})
        self.assertEqual(status, LIVEACTION_STATUS_SUCCEEDED)
        self.assertTrue(result is not None)
        self.assertEqual(result['result'], [1, 4, 6, 4, 1])
    def test_simple_action_config_value_provided_overriden_in_datastore(self):
        """Datastore-scoped config values override the static pack config."""
        wrapper = PythonActionWrapper(pack='dummy_pack_5', file_path=PACAL_ROW_ACTION_PATH,
                                      user='joe')
        # No values provided in the datastore
        instance = wrapper._get_action_instance()
        self.assertEqual(instance.config['api_key'], 'some_api_key')  # static value
        self.assertEqual(instance.config['regions'], ['us-west-1'])  # static value
        self.assertEqual(instance.config['api_secret'], None)
        self.assertEqual(instance.config['private_key_path'], None)
        # api_secret overriden in the datastore (user scoped value)
        config_service.set_datastore_value_for_config_key(pack_name='dummy_pack_5',
                                                          key_name='api_secret',
                                                          user='joe',
                                                          value='foosecret',
                                                          secret=True)
        # private_key_path overriden in the datastore (global / non-user scoped value)
        config_service.set_datastore_value_for_config_key(pack_name='dummy_pack_5',
                                                          key_name='private_key_path',
                                                          value='foopath')
        instance = wrapper._get_action_instance()
        self.assertEqual(instance.config['api_key'], 'some_api_key')  # static value
        self.assertEqual(instance.config['regions'], ['us-west-1'])  # static value
        self.assertEqual(instance.config['api_secret'], 'foosecret')
        self.assertEqual(instance.config['private_key_path'], 'foopath')
    def test_simple_action_fail(self):
        """A bad parameter type ('4' as string) makes the action fail."""
        runner = pythonrunner.get_runner()
        runner.action = self._get_mock_action_obj()
        runner.runner_parameters = {}
        runner.entry_point = PACAL_ROW_ACTION_PATH
        runner.container_service = service.RunnerContainerService()
        runner.pre_run()
        (status, result, _) = runner.run({'row_index': '4'})
        self.assertTrue(result is not None)
        self.assertEqual(status, LIVEACTION_STATUS_FAILED)
    def test_simple_action_no_file(self):
        """A missing entry-point file yields a failed status, not an exception."""
        runner = pythonrunner.get_runner()
        runner.action = self._get_mock_action_obj()
        runner.runner_parameters = {}
        runner.entry_point = 'foo.py'
        runner.container_service = service.RunnerContainerService()
        runner.pre_run()
        (status, result, _) = runner.run({})
        self.assertTrue(result is not None)
        self.assertEqual(status, LIVEACTION_STATUS_FAILED)
    def test_simple_action_no_entry_point(self):
        """An empty entry_point raises with a descriptive message."""
        runner = pythonrunner.get_runner()
        runner.action = self._get_mock_action_obj()
        runner.runner_parameters = {}
        runner.entry_point = ''
        runner.container_service = service.RunnerContainerService()
        expected_msg = 'Action .*? is missing entry_point attribute'
        self.assertRaisesRegexp(Exception, expected_msg, runner.run, {})
    @mock.patch('st2common.util.green.shell.subprocess.Popen')
    def test_action_with_user_supplied_env_vars(self, mock_popen):
        """User env vars are forwarded to the subprocess except blacklisted ones."""
        env_vars = {'key1': 'val1', 'key2': 'val2', 'PYTHONPATH': 'foobar'}
        mock_process = mock.Mock()
        mock_process.communicate.return_value = ('', '')
        mock_popen.return_value = mock_process
        runner = pythonrunner.get_runner()
        runner.action = self._get_mock_action_obj()
        runner.runner_parameters = {'env': env_vars}
        runner.entry_point = PACAL_ROW_ACTION_PATH
        runner.container_service = service.RunnerContainerService()
        runner.pre_run()
        (_, _, _) = runner.run({'row_index': 4})
        _, call_kwargs = mock_popen.call_args
        actual_env = call_kwargs['env']
        for key, value in env_vars.items():
            # Verify that a blacklisted PYTHONPATH has been filtered out
            if key == 'PYTHONPATH':
                self.assertTrue(actual_env[key] != value)
            else:
                self.assertEqual(actual_env[key], value)
    @mock.patch('st2common.util.green.shell.subprocess.Popen')
    def test_stdout_interception_and_parsing(self, mock_popen):
        """The runner splits subprocess stdout into stdout/result via the delimiter."""
        values = {'delimiter': ACTION_OUTPUT_RESULT_DELIMITER}
        # No output to stdout and no result (implicit None)
        mock_stdout = '%(delimiter)sNone%(delimiter)s' % values
        mock_stderr = 'foo stderr'
        mock_process = mock.Mock()
        mock_process.communicate.return_value = (mock_stdout, mock_stderr)
        mock_process.returncode = 0
        mock_popen.return_value = mock_process
        runner = pythonrunner.get_runner()
        runner.action = self._get_mock_action_obj()
        runner.runner_parameters = {}
        runner.entry_point = PACAL_ROW_ACTION_PATH
        runner.container_service = service.RunnerContainerService()
        runner.pre_run()
        (_, output, _) = runner.run({'row_index': 4})
        self.assertEqual(output['stdout'], '')
        self.assertEqual(output['stderr'], mock_stderr)
        self.assertEqual(output['result'], 'None')
        self.assertEqual(output['exit_code'], 0)
        # Output to stdout and no result (implicit None)
        mock_stdout = 'pre result%(delimiter)sNone%(delimiter)spost result' % values
        mock_stderr = 'foo stderr'
        mock_process = mock.Mock()
        mock_process.communicate.return_value = (mock_stdout, mock_stderr)
        mock_process.returncode = 0
        mock_popen.return_value = mock_process
        runner = pythonrunner.get_runner()
        runner.action = self._get_mock_action_obj()
        runner.runner_parameters = {}
        runner.entry_point = PACAL_ROW_ACTION_PATH
        runner.container_service = service.RunnerContainerService()
        runner.pre_run()
        (_, output, _) = runner.run({'row_index': 4})
        self.assertEqual(output['stdout'], 'pre resultpost result')
        self.assertEqual(output['stderr'], mock_stderr)
        self.assertEqual(output['result'], 'None')
        self.assertEqual(output['exit_code'], 0)
    @mock.patch('st2common.util.green.shell.subprocess.Popen')
    def test_common_st2_env_vars_are_available_to_the_action(self, mock_popen):
        """Standard ST2_* environment variables reach the action subprocess."""
        mock_process = mock.Mock()
        mock_process.communicate.return_value = ('', '')
        mock_popen.return_value = mock_process
        runner = pythonrunner.get_runner()
        runner.auth_token = mock.Mock()
        runner.auth_token.token = 'ponies'
        runner.action = self._get_mock_action_obj()
        runner.runner_parameters = {}
        runner.entry_point = PACAL_ROW_ACTION_PATH
        runner.container_service = service.RunnerContainerService()
        runner.pre_run()
        (_, _, _) = runner.run({'row_index': 4})
        _, call_kwargs = mock_popen.call_args
        actual_env = call_kwargs['env']
        self.assertCommonSt2EnvVarsAvailableInEnv(env=actual_env)
    def test_action_class_instantiation_action_service_argument(self):
        """get_action_class_instance works whether or not the Action subclass
        constructor accepts an action_service argument."""
        class Action1(Action):
            # Constructor not overriden so no issue here
            pass
            def run(self):
                pass
        class Action2(Action):
            # Constructor overriden, but takes action_service argument
            def __init__(self, config, action_service=None):
                super(Action2, self).__init__(config=config,
                                              action_service=action_service)
            def run(self):
                pass
        class Action3(Action):
            # Constructor overriden, but doesn't take to action service
            def __init__(self, config):
                super(Action3, self).__init__(config=config)
            def run(self):
                pass
        config = {'a': 1, 'b': 2}
        action_service = 'ActionService!'
        action1 = get_action_class_instance(action_cls=Action1, config=config,
                                            action_service=action_service)
        self.assertEqual(action1.config, config)
        self.assertEqual(action1.action_service, action_service)
        action2 = get_action_class_instance(action_cls=Action2, config=config,
                                            action_service=action_service)
        self.assertEqual(action2.config, config)
        self.assertEqual(action2.action_service, action_service)
        action3 = get_action_class_instance(action_cls=Action3, config=config,
                                            action_service=action_service)
        self.assertEqual(action3.config, config)
        self.assertEqual(action3.action_service, action_service)
    def _get_mock_action_obj(self):
        """
        Return mock action object.
        Pack gets set to the system pack so the action doesn't require a separate virtualenv.
        """
        action = mock.Mock()
        action.pack = SYSTEM_PACK_NAME
        action.entry_point = 'foo.py'
        return action
| 43.638376 | 98 | 0.660917 |
3f48b6d775632a099bf722c13f9ead1d522dc270 | 452 | py | Python | ex063.py | rafaelclemes81/Python | 0e685b4e528a29bb23ecf11c9ccdbae8730b3ac3 | [
"MIT"
] | null | null | null | ex063.py | rafaelclemes81/Python | 0e685b4e528a29bb23ecf11c9ccdbae8730b3ac3 | [
"MIT"
] | null | null | null | ex063.py | rafaelclemes81/Python | 0e685b4e528a29bb23ecf11c9ccdbae8730b3ac3 | [
"MIT"
] | null | null | null | '''ESCREVA UM PROGRAMA QUE LEIA UM NÚMERO INTEIRO E MOSTRE NA TELA A QUANTIDADE DE
ELEMENTO DA SEQUENCIA FEBONACCI QUE O USUÁRIO SOLICITAR'''
from time import sleep
# Read how many Fibonacci terms the user wants to display.
e = int(input('Quantos elementos da série Febonacci você quer exibir? '))
# The first three terms (0, 1, 1) are printed unconditionally below, so the
# loop counter starts at the 4th element.
# NOTE(review): for e < 3 this still prints three terms -- confirm intended.
i = 4
a = 0
b = 1
c = a + b
print(a, end=' ')
sleep(0.2)
print(b, end=' ')
sleep(0.2)
print(c, end=' ')
sleep(0.2)
while i <= e:
    # Slide the (a, b) window forward and emit the next term, pausing
    # 0.2s between prints for a typewriter effect.
    a = b
    b = c
    c = a + b
    print((c), end=' ')
    sleep(0.2)
| 20.545455 | 82 | 0.610619 |
bd30ed9b7f2e9132c4a409b2e0f00c098d1a1e8e | 2,300 | py | Python | aliyun-python-sdk-cdn/aliyunsdkcdn/request/v20180510/DescribeTagResourcesRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-cdn/aliyunsdkcdn/request/v20180510/DescribeTagResourcesRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-cdn/aliyunsdkcdn/request/v20180510/DescribeTagResourcesRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcdn.endpoint import endpoint_data
class DescribeTagResourcesRequest(RpcRequest):
	"""RPC request wrapper for the CDN ``DescribeTagResources`` API (2018-05-10)."""

	def __init__(self):
		RpcRequest.__init__(self, 'Cdn', '2018-05-10', 'DescribeTagResources')
		self.set_method('POST')
		# Wire in the endpoint tables when this SDK build exposes them.
		if hasattr(self, "endpoint_map"):
			setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
		if hasattr(self, "endpoint_regional"):
			setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

	def get_Tags(self):
		return self.get_query_params().get('Tag')

	def set_Tags(self, Tags):
		# Tags are flattened into 1-based Tag.N.Key / Tag.N.Value query params.
		for position, tag in enumerate(Tags, start=1):
			if tag.get('Key') is not None:
				self.add_query_param('Tag.' + str(position) + '.Key', tag.get('Key'))
			if tag.get('Value') is not None:
				self.add_query_param('Tag.' + str(position) + '.Value', tag.get('Value'))

	def get_ResourceIds(self):
		return self.get_query_params().get('ResourceId')

	def set_ResourceIds(self, ResourceIds):
		# Resource ids are flattened into 1-based ResourceId.N query params.
		for position, resource_id in enumerate(ResourceIds, start=1):
			if resource_id is not None:
				self.add_query_param('ResourceId.' + str(position), resource_id)

	def get_OwnerId(self):
		return self.get_query_params().get('OwnerId')

	def set_OwnerId(self, OwnerId):
		self.add_query_param('OwnerId', OwnerId)

	def get_ResourceType(self):
		return self.get_query_params().get('ResourceType')

	def set_ResourceType(self, ResourceType):
		self.add_query_param('ResourceType', ResourceType)
eb09cc29aae600ced76ac9ebf7acc380c1f43c1f | 3,272 | py | Python | contrib/RemoteSensing/predict_demo.py | tianlanshidai/PaddleSeg | f506494b20b93de74b21237f70b76097755da786 | [
"Apache-2.0"
] | 1 | 2022-02-08T02:14:54.000Z | 2022-02-08T02:14:54.000Z | contrib/RemoteSensing/predict_demo.py | tianlanshidai/PaddleSeg | f506494b20b93de74b21237f70b76097755da786 | [
"Apache-2.0"
] | null | null | null | contrib/RemoteSensing/predict_demo.py | tianlanshidai/PaddleSeg | f506494b20b93de74b21237f70b76097755da786 | [
"Apache-2.0"
] | 2 | 2021-07-07T02:23:16.000Z | 2022-02-08T02:14:58.000Z | # coding: utf8
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import os.path as osp
import sys
import numpy as np
from PIL import Image as Image
import argparse
from models import load_model
def parse_args():
    """Parse command line arguments for the RemoteSensing predict script.

    Prints help and exits with status 1 when no arguments were given.
    """
    parser = argparse.ArgumentParser(description='RemoteSensing predict')
    # (dest, help text, default) for every string option of the script.
    options = [
        ('single_img', 'single image path to predict', None),
        ('data_dir', 'dataset directory', None),
        ('file_list', 'file name of predict file list', None),
        ('load_model_dir', 'model load directory', None),
        ('save_img_dir', 'save directory name of predict results',
         'predict_results'),
    ]
    for dest, help_text, default in options:
        parser.add_argument(
            '--' + dest,
            dest=dest,
            help=help_text,
            default=default,
            type=str)
    # No arguments at all: show usage instead of failing later.
    if len(sys.argv) < 2:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
args = parse_args()
data_dir = args.data_dir
file_list = args.file_list
single_img = args.single_img
load_model_dir = args.load_model_dir
save_img_dir = args.save_img_dir

if not osp.exists(save_img_dir):
    os.makedirs(save_img_dir)

# predict
model = load_model(load_model_dir)
# Palette for the paletted PNG: class 0 -> black, class 1 -> green.
color_map = [0, 0, 0, 0, 255, 0]


def save_pseudo_color(pred, img_name):
    """Save a prediction's label map as a pseudo-color (paletted) PNG."""
    # osp.splitext replaces the original rstrip('npy'), which stripped any
    # trailing 'n'/'p'/'y' characters instead of just the extension.
    pred_name = osp.splitext(osp.basename(img_name))[0] + '.png'
    pred_path = osp.join(save_img_dir, pred_name)
    pred_mask = Image.fromarray(pred['label_map'].astype(np.uint8), mode='P')
    pred_mask.putpalette(color_map)
    pred_mask.save(pred_path)


if single_img is not None:
    save_pseudo_color(model.predict(single_img), single_img)
elif (file_list is not None) and (data_dir is not None):
    with open(osp.join(data_dir, file_list)) as f:
        for line in f:
            # Each line is "<image path> [<label path>]"; strip() also drops
            # the trailing newline on lines without a label column.
            img_path = line.strip().split(' ')[0]
            print('Predicting {}'.format(img_path))
            save_pseudo_color(model.predict(osp.join(data_dir, img_path)), img_path)
else:
    raise Exception(
        'You should either set the parameter single_img, or set the parameters data_dir, file_list.'
    )
| 31.161905 | 100 | 0.659535 |
6484075cbd86178b17c7dcb20424eab0f939915b | 831 | py | Python | Scraping/Locators/Xpath-locator.py | koshchii/WebAppTesting-Selenium | 9cd8673fdd16e2ffe176ccf1c0df3ad49c19e46b | [
"MIT"
] | null | null | null | Scraping/Locators/Xpath-locator.py | koshchii/WebAppTesting-Selenium | 9cd8673fdd16e2ffe176ccf1c0df3ad49c19e46b | [
"MIT"
] | null | null | null | Scraping/Locators/Xpath-locator.py | koshchii/WebAppTesting-Selenium | 9cd8673fdd16e2ffe176ccf1c0df3ad49c19e46b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 1 13:35:09 2021
@author: alexk
Relative Xpath basic structure:
//tag[@attribute="value"]
"""
import time
from selenium import webdriver

PRACTICE_URL = "http://automationpractice.com/index.php"

# --- Relative XPath: //tag[@attribute="value"] ---
browser = webdriver.Chrome()
browser.get(PRACTICE_URL)
browser.maximize_window()

cart_relative_xpath = '//a[@title="View my shopping cart"]'
browser.find_element_by_xpath(cart_relative_xpath).click()
time.sleep(2)
browser.close()
time.sleep(2)

# --- Absolute XPath: full path from the document root ---
browser = webdriver.Chrome()
browser.get(PRACTICE_URL)

cart_absolute_xpath = '/html/body/div/div[1]/header/div[3]/div/div/div[3]/div/a'
browser.find_element_by_xpath(cart_absolute_xpath).click()
time.sleep(2)
28a0f4692e9f9bc29d42941b6274b5cba999dfbf | 20,571 | py | Python | selfdrive/car/hyundai/carstate.py | sky84ky/neokii_EN | 219526b585719ca5503c3a9accbd673ed63343f8 | [
"MIT"
] | 1 | 2020-12-26T07:03:55.000Z | 2020-12-26T07:03:55.000Z | selfdrive/car/hyundai/carstate.py | choDeaJang/neokii_KR | 18394262c09091a90ef6eed067eee2760cca3f75 | [
"MIT"
] | null | null | null | selfdrive/car/hyundai/carstate.py | choDeaJang/neokii_KR | 18394262c09091a90ef6eed067eee2760cca3f75 | [
"MIT"
] | 9 | 2021-08-15T13:11:02.000Z | 2021-09-19T09:43:57.000Z | from cereal import car
from selfdrive.car.hyundai.values import DBC, STEER_THRESHOLD, FEATURES, CAR, HYBRID_CAR, EV_CAR
from selfdrive.car.interfaces import CarStateBase
from opendbc.can.parser import CANParser
from opendbc.can.can_define import CANDefine
from selfdrive.config import Conversions as CV
from common.params import Params
GearShifter = car.CarState.GearShifter
class CarState(CarStateBase):
  """Parses Hyundai/Kia CAN traffic into a cereal CarState.

  Which CAN bus carries the MDPS (steering), SAS (angle sensor) and SCC
  (cruise radar) messages varies per car; the assignment detected at
  fingerprint time is taken from CarParams and cached here.
  """

  def __init__(self, CP):
    super().__init__(CP)
    can_define = CANDefine(DBC[CP.carFingerprint]["pt"])

    # The signal carrying the current gear differs per platform.
    if self.CP.carFingerprint in FEATURES["use_cluster_gears"]:
      self.shifter_values = can_define.dv["CLU15"]["CF_Clu_Gear"]
    elif self.CP.carFingerprint in FEATURES["use_tcu_gears"]:
      self.shifter_values = can_define.dv["TCU12"]["CUR_GR"]
    else:  # preferred and elect gear methods use same definition
      self.shifter_values = can_define.dv["LVR12"]["CF_Lvr_Gear"]

    # Auto-detected bus/feature setup (from fingerprinting).
    self.no_radar = CP.sccBus == -1
    self.mdps_bus = CP.mdpsBus
    self.sas_bus = CP.sasBus
    self.scc_bus = CP.sccBus
    self.has_scc13 = CP.hasScc13 or CP.carFingerprint in FEATURES["has_scc13"]
    self.has_scc14 = CP.hasScc14 or CP.carFingerprint in FEATURES["has_scc14"]

    # Last-frame values used for edge detection in update().
    self.leftBlinker = False
    self.rightBlinker = False
    self.lkas_button_on = True
    self.cruise_main_button = 0
    self.mdps_error_cnt = 0
    self.cruise_unavail_cnt = 0

    self.apply_steer = 0.

    # scc smoother state
    self.acc_mode = False
    self.cruise_gap = 1
    self.brake_pressed = False
    self.gas_pressed = False
    self.standstill = False
    self.cruiseState_enabled = False
    self.cruiseState_speed = 0

    # User-facing toggles read from persistent params.
    self.use_cluster_speed = Params().get_bool('UseClusterSpeed')
    self.long_control_enabled = Params().get_bool('LongControlEnabled')
  def update(self, cp, cp2, cp_cam):
    """Parse one frame of CAN data into a cereal CarState message.

    cp, cp2 and cp_cam are the parsers for bus 0, bus 1 and the camera bus;
    which one carries MDPS/SAS/SCC traffic was detected in __init__.
    """
    # Route each subsystem to the bus it actually lives on for this car.
    cp_mdps = cp2 if self.mdps_bus else cp
    cp_sas = cp2 if self.sas_bus else cp
    cp_scc = cp2 if self.scc_bus == 1 else cp_cam if self.scc_bus == 2 else cp

    # Keep last-frame values so button/blinker edges can be detected.
    self.prev_cruise_buttons = self.cruise_buttons
    self.prev_cruise_main_button = self.cruise_main_button
    self.prev_left_blinker = self.leftBlinker
    self.prev_right_blinker = self.rightBlinker
    self.prev_lkas_button = self.lkas_button_on

    ret = car.CarState.new_message()

    ret.doorOpen = any([cp.vl["CGW1"]['CF_Gway_DrvDrSw'], cp.vl["CGW1"]['CF_Gway_AstDrSw'],
                        cp.vl["CGW2"]['CF_Gway_RLDrSw'], cp.vl["CGW2"]['CF_Gway_RRDrSw']])

    ret.seatbeltUnlatched = cp.vl["CGW1"]['CF_Gway_DrvSeatBeltSw'] == 0

    # Cluster speed unit decides the conversion factor for set speeds.
    self.is_set_speed_in_mph = bool(cp.vl["CLU11"]["CF_Clu_SPEED_UNIT"])
    self.speed_conv_to_ms = CV.MPH_TO_MS if self.is_set_speed_in_mph else CV.KPH_TO_MS

    if not self.use_cluster_speed or self.long_control_enabled:
      # Average the four wheel speeds for the ego speed.
      ret.wheelSpeeds.fl = cp.vl["WHL_SPD11"]['WHL_SPD_FL'] * CV.KPH_TO_MS
      ret.wheelSpeeds.fr = cp.vl["WHL_SPD11"]['WHL_SPD_FR'] * CV.KPH_TO_MS
      ret.wheelSpeeds.rl = cp.vl["WHL_SPD11"]['WHL_SPD_RL'] * CV.KPH_TO_MS
      ret.wheelSpeeds.rr = cp.vl["WHL_SPD11"]['WHL_SPD_RR'] * CV.KPH_TO_MS
      ret.vEgoRaw = (ret.wheelSpeeds.fl + ret.wheelSpeeds.fr + ret.wheelSpeeds.rl + ret.wheelSpeeds.rr) / 4.
    else:
      # Cluster speed, re-adding the decimal part when it is meaningful.
      ret.vEgoRaw = cp.vl["CLU11"]["CF_Clu_Vanz"]
      decimal = cp.vl["CLU11"]["CF_Clu_VanzDecimal"]
      if 0. < decimal < 0.5:
        ret.vEgoRaw += decimal

      ret.vEgoRaw *= self.speed_conv_to_ms

    ret.vEgo, ret.aEgo = self.update_speed_kf(ret.vEgoRaw)
    ret.standstill = ret.vEgoRaw < 0.1

    ret.steeringAngleDeg = cp_sas.vl["SAS11"]['SAS_Angle']
    ret.steeringRateDeg = cp_sas.vl["SAS11"]['SAS_Speed']
    ret.yawRate = cp.vl["ESP12"]['YAW_RATE']
    ret.leftBlinker, ret.rightBlinker = self.update_blinker_from_lamp(50, cp.vl["CGW1"]['CF_Gway_TurnSigLh'],
                                                            cp.vl["CGW1"]['CF_Gway_TurnSigRh'])
    ret.steeringTorque = cp_mdps.vl["MDPS12"]['CR_Mdps_StrColTq']
    ret.steeringTorqueEps = cp_mdps.vl["MDPS12"]['CR_Mdps_OutTq']
    ret.steeringPressed = abs(ret.steeringTorque) > STEER_THRESHOLD

    # Debounce transient MDPS faults: only warn after ~1 s of errors.
    if cp_mdps.vl["MDPS12"]['CF_Mdps_ToiUnavail'] != 0:
      self.mdps_error_cnt += 1
    else:
      self.mdps_error_cnt = 0

    ret.steerWarning = self.mdps_error_cnt > 100

    if self.CP.enableAutoHold:
      ret.autoHold = cp.vl["ESP11"]['AVH_STAT']

    # cruise state
    ret.cruiseState.enabled = (cp_scc.vl["SCC12"]['ACCMode'] != 0) if not self.no_radar else \
                                      cp.vl["LVR12"]['CF_Lvr_CruiseSet'] != 0
    ret.cruiseState.available = (cp_scc.vl["SCC11"]["MainMode_ACC"] != 0) if not self.no_radar else \
                                      cp.vl['EMS16']['CRUISE_LAMP_M'] != 0
    ret.cruiseState.standstill = cp_scc.vl["SCC11"]['SCCInfoDisplay'] == 4. if not self.no_radar else False

    if ret.cruiseState.enabled:
      ret.cruiseState.speed = cp_scc.vl["SCC11"]['VSetDis'] * self.speed_conv_to_ms if not self.no_radar else \
                                         cp.vl["LVR12"]["CF_Lvr_CruiseSet"] * self.speed_conv_to_ms
    else:
      ret.cruiseState.speed = 0

    self.cruise_main_button = cp.vl["CLU11"]["CF_Clu_CruiseSwMain"]
    self.cruise_buttons = cp.vl["CLU11"]["CF_Clu_CruiseSwState"]

    # TODO: Find brake pressure
    ret.brake = 0
    ret.brakePressed = cp.vl["TCS13"]['DriverBraking'] != 0

    # TODO: Check this
    ret.brakeLights = bool(cp.vl["TCS13"]['BrakeLight'] or ret.brakePressed)

    ret.gasPressed = cp.vl["TCS13"]["DriverOverride"] == 1

    # Gas pedal position: hybrids/EVs report it on E_EMS11, ICE cars on EMS12.
    if self.CP.carFingerprint in (HYBRID_CAR | EV_CAR):
      if self.CP.carFingerprint in HYBRID_CAR:
        ret.gas = cp.vl["E_EMS11"]["CR_Vcu_AccPedDep_Pos"] / 254.
      else:
        ret.gas = cp.vl["E_EMS11"]["Accel_Pedal_Pos"] / 254.

    if self.CP.hasEms:
      ret.gas = cp.vl["EMS12"]['PV_AV_CAN'] / 100.
      ret.gasPressed = bool(cp.vl["EMS16"]["CF_Ems_AclAct"])

    # TODO: refactor gear parsing in function
    # Gear Selection via Cluster - For those Kia/Hyundai which are not fully discovered, we can use the Cluster Indicator for Gear Selection,
    # as this seems to be standard over all cars, but is not the preferred method.
    if self.CP.carFingerprint in FEATURES["use_cluster_gears"]:
      gear = cp.vl["CLU15"]["CF_Clu_Gear"]
    elif self.CP.carFingerprint in FEATURES["use_tcu_gears"]:
      gear = cp.vl["TCU12"]["CUR_GR"]
    elif self.CP.carFingerprint in FEATURES["use_elect_gears"]:
      gear = cp.vl["ELECT_GEAR"]["Elect_Gear_Shifter"]
    else:
      gear = cp.vl["LVR12"]["CF_Lvr_Gear"]

    ret.gearShifter = self.parse_gear_shifter(self.shifter_values.get(gear))

    # Stock AEB/FCW status comes from FCA11 on cars with a separate FCA module.
    if self.CP.carFingerprint in FEATURES["use_fca"]:
      ret.stockAeb = cp.vl["FCA11"]["FCA_CmdAct"] != 0
      ret.stockFcw = cp.vl["FCA11"]["CF_VSM_Warn"] == 2
    else:
      ret.stockAeb = cp.vl["SCC12"]["AEB_CmdAct"] != 0
      ret.stockFcw = cp.vl["SCC12"]["CF_VSM_Warn"] == 2

    # Blind Spot Detection and Lane Change Assist signals
    if self.CP.enableBsm:
      ret.leftBlindspot = cp.vl["LCA11"]["CF_Lca_IndLeft"] != 0
      ret.rightBlindspot = cp.vl["LCA11"]["CF_Lca_IndRight"] != 0
    else:
      ret.leftBlindspot = False
      ret.rightBlindspot = False

    # save the entire LKAS11, CLU11, SCC12 and MDPS12 (re-sent by carcontroller)
    self.lkas11 = cp_cam.vl["LKAS11"]
    self.clu11 = cp.vl["CLU11"]
    self.scc11 = cp_scc.vl["SCC11"]
    self.scc12 = cp_scc.vl["SCC12"]
    self.mdps12 = cp_mdps.vl["MDPS12"]
    self.park_brake = cp.vl["CGW1"]['CF_Gway_ParkBrakeSw']
    self.steer_state = cp_mdps.vl["MDPS12"]['CF_Mdps_ToiActive']  # 0 NOT ACTIVE, 1 ACTIVE
    # Counts consecutive frames where ACC is enabled but VSM is unavailable.
    self.cruise_unavail_cnt += 1 if cp.vl["TCS13"]['CF_VSM_Avail'] != 1 and cp.vl["TCS13"]['ACCEnable'] != 0 else -self.cruise_unavail_cnt
    self.cruise_unavail = self.cruise_unavail_cnt > 100

    self.lead_distance = cp_scc.vl["SCC11"]['ACC_ObjDist'] if not self.no_radar else 0
    if self.has_scc13:
      self.scc13 = cp_scc.vl["SCC13"]
    if self.has_scc14:
      self.scc14 = cp_scc.vl["SCC14"]

    self.lkas_error = cp_cam.vl["LKAS11"]["CF_Lkas_LdwsSysState"] == 7
    # NOTE(review): self.car_fingerprint is presumably set by CarStateBase —
    # it is never assigned in this class; confirm against the base class.
    if not self.lkas_error and self.car_fingerprint not in [CAR.SONATA,CAR.PALISADE,
      CAR.SONATA_HEV, CAR.SONATA21_HEV, CAR.SANTA_FE, CAR.KONA_EV, CAR.NIRO_EV, CAR.KONA]:
      self.lkas_button_on = bool(cp_cam.vl["LKAS11"]["CF_Lkas_LdwsSysState"])

    # scc smoother: combined pedal/standstill state consumed elsewhere.
    driver_override = cp.vl["TCS13"]["DriverOverride"]
    self.acc_mode = cp_scc.vl["SCC12"]['ACCMode'] != 0
    self.cruise_gap = cp_scc.vl["SCC11"]['TauGapSet'] if not self.no_radar else 1
    self.gas_pressed = ret.gasPressed or driver_override == 1
    self.brake_pressed = ret.brakePressed or driver_override == 2
    self.standstill = ret.standstill or ret.cruiseState.standstill
    self.cruiseState_enabled = ret.cruiseState.enabled
    self.cruiseState_speed = ret.cruiseState.speed
    ret.cruiseGap = self.cruise_gap

    return ret
  @staticmethod
  def get_can_parser(CP):
    """Build the CANParser for bus 0.

    The base signal/check lists cover every platform; platform-specific
    signals (gear source, hybrid/EV pedal, FCA, BSM, autohold) and the
    MDPS/SAS/SCC messages are appended only when they live on bus 0.
    """
    signals = [
      # sig_name, sig_address, default
      ("WHL_SPD_FL", "WHL_SPD11", 0),
      ("WHL_SPD_FR", "WHL_SPD11", 0),
      ("WHL_SPD_RL", "WHL_SPD11", 0),
      ("WHL_SPD_RR", "WHL_SPD11", 0),

      ("YAW_RATE", "ESP12", 0),

      ("CF_Gway_DrvSeatBeltInd", "CGW4", 1),

      ("CF_Gway_DrvSeatBeltSw", "CGW1", 0),
      ("CF_Gway_DrvDrSw", "CGW1", 0),       # Driver Door
      ("CF_Gway_AstDrSw", "CGW1", 0),       # Passenger door
      ("CF_Gway_RLDrSw", "CGW2", 0),        # Rear reft door
      ("CF_Gway_RRDrSw", "CGW2", 0),        # Rear right door
      ("CF_Gway_TurnSigLh", "CGW1", 0),
      ("CF_Gway_TurnSigRh", "CGW1", 0),
      ("CF_Gway_ParkBrakeSw", "CGW1", 0),   # Parking Brake

      ("CYL_PRES", "ESP12", 0),

      ("CF_Clu_CruiseSwState", "CLU11", 0),
      ("CF_Clu_CruiseSwMain", "CLU11", 0),
      ("CF_Clu_SldMainSW", "CLU11", 0),
      ("CF_Clu_ParityBit1", "CLU11", 0),
      ("CF_Clu_VanzDecimal" , "CLU11", 0),
      ("CF_Clu_Vanz", "CLU11", 0),
      ("CF_Clu_SPEED_UNIT", "CLU11", 0),
      ("CF_Clu_DetentOut", "CLU11", 0),
      ("CF_Clu_RheostatLevel", "CLU11", 0),
      ("CF_Clu_CluInfo", "CLU11", 0),
      ("CF_Clu_AmpInfo", "CLU11", 0),
      ("CF_Clu_AliveCnt1", "CLU11", 0),

      ("ACCEnable", "TCS13", 0),
      ("BrakeLight", "TCS13", 0),
      ("DriverBraking", "TCS13", 0),
      ("DriverOverride", "TCS13", 0),  # scc smoother
      ("CF_VSM_Avail", "TCS13", 0),

      ("ESC_Off_Step", "TCS15", 0),

      #("CF_Lvr_GearInf", "LVR11", 0),        # Transmission Gear (0 = N or P, 1-8 = Fwd, 14 = Rev)

      ("MainMode_ACC", "SCC11", 1),
      ("SCCInfoDisplay", "SCC11", 0),
      ("AliveCounterACC", "SCC11", 0),
      ("VSetDis", "SCC11", 30),
      ("ObjValid", "SCC11", 0),
      ("DriverAlertDisplay", "SCC11", 0),
      ("TauGapSet", "SCC11", 4),
      ("ACC_ObjStatus", "SCC11", 0),
      ("ACC_ObjLatPos", "SCC11", 0),
      ("ACC_ObjDist", "SCC11", 150),  #TK211X value is 204.6
      ("ACC_ObjRelSpd", "SCC11", 0),
      ("Navi_SCC_Curve_Status", "SCC11", 0),
      ("Navi_SCC_Curve_Act", "SCC11", 0),
      ("Navi_SCC_Camera_Act", "SCC11", 0),
      ("Navi_SCC_Camera_Status", "SCC11", 2),

      ("ACCMode", "SCC12", 0),
      ("CF_VSM_Prefill", "SCC12", 0),
      ("CF_VSM_DecCmdAct", "SCC12", 0),
      ("CF_VSM_HBACmd", "SCC12", 0),
      ("CF_VSM_Warn", "SCC12", 0),
      ("CF_VSM_Stat", "SCC12", 0),
      ("CF_VSM_BeltCmd", "SCC12", 0),
      ("ACCFailInfo", "SCC12", 0),
      ("StopReq", "SCC12", 0),
      ("CR_VSM_DecCmd", "SCC12", 0),
      ("aReqRaw", "SCC12", 0), #aReqMax
      ("TakeOverReq", "SCC12", 0),
      ("PreFill", "SCC12", 0),
      ("aReqValue", "SCC12", 0), #aReqMin
      ("CF_VSM_ConfMode", "SCC12", 1),
      ("AEB_Failinfo", "SCC12", 0),
      ("AEB_Status", "SCC12", 2),
      ("AEB_CmdAct", "SCC12", 0),
      ("AEB_StopReq", "SCC12", 0),
      ("CR_VSM_Alive", "SCC12", 0),
      ("CR_VSM_ChkSum", "SCC12", 0),

      ("SCCDrvModeRValue", "SCC13", 2),
      ("SCC_Equip", "SCC13", 1),
      ("AebDrvSetStatus", "SCC13", 0),

      ("JerkUpperLimit", "SCC14", 0),
      ("JerkLowerLimit", "SCC14", 0),
      ("SCCMode2", "SCC14", 0),
      ("ComfortBandUpper", "SCC14", 0),
      ("ComfortBandLower", "SCC14", 0),
    ]
    checks = [
      # address, frequency
      ("TCS13", 50),
      ("TCS15", 10),
      ("CLU11", 50),
      ("ESP12", 100),
      ("CGW1", 10),
      ("CGW2", 5),
      ("CGW4", 5),
      ("WHL_SPD11", 50),
    ]
    # SCC messages are only checked on bus 0 when the stock cruise is used.
    if CP.sccBus == 0 and CP.pcmCruise:
      checks += [
        ("SCC11", 50),
        ("SCC12", 50),
      ]
    if CP.mdpsBus == 0:
      signals += [
        ("CR_Mdps_StrColTq", "MDPS12", 0),
        ("CF_Mdps_Def", "MDPS12", 0),
        ("CF_Mdps_ToiActive", "MDPS12", 0),
        ("CF_Mdps_ToiUnavail", "MDPS12", 0),
        ("CF_Mdps_ToiFlt", "MDPS12", 0),
        ("CF_Mdps_MsgCount2", "MDPS12", 0),
        ("CF_Mdps_Chksum2", "MDPS12", 0),
        ("CF_Mdps_SErr", "MDPS12", 0),
        ("CR_Mdps_StrTq", "MDPS12", 0),
        ("CF_Mdps_FailStat", "MDPS12", 0),
        ("CR_Mdps_OutTq", "MDPS12", 0)
      ]
      checks += [
        ("MDPS12", 50)
      ]
    if CP.sasBus == 0:
      signals += [
        ("SAS_Angle", "SAS11", 0),
        ("SAS_Speed", "SAS11", 0),
      ]
      checks += [
        ("SAS11", 100)
      ]
    # No radar: cruise set speed comes from the engine/lever messages instead.
    if CP.sccBus == -1:
      signals += [
        ("CRUISE_LAMP_M", "EMS16", 0),
        ("CF_Lvr_CruiseSet", "LVR12", 0),
      ]
    if CP.carFingerprint in FEATURES["use_cluster_gears"]:
      signals += [
        ("CF_Clu_Gear", "CLU15", 0),
      ]
    elif CP.carFingerprint in FEATURES["use_tcu_gears"]:
      signals += [
        ("CUR_GR", "TCU12",0),
      ]
    elif CP.carFingerprint in FEATURES["use_elect_gears"]:
      signals += [
        ("Elect_Gear_Shifter", "ELECT_GEAR", 0),
      ]
    else:
      signals += [
        ("CF_Lvr_Gear","LVR12",0),
      ]
    # Gas pedal source: E_EMS11 for hybrids/EVs, EMS12/EMS16 for ICE cars.
    if CP.carFingerprint in (HYBRID_CAR | EV_CAR):
      if CP.carFingerprint in HYBRID_CAR:
        signals += [
          ("CR_Vcu_AccPedDep_Pos", "E_EMS11", 0)
        ]
      else:
        signals += [
          ("Accel_Pedal_Pos", "E_EMS11", 0),
        ]
      checks += [
        ("E_EMS11", 50),
      ]
    else:
      signals += [
        ("PV_AV_CAN", "EMS12", 0),
        ("CF_Ems_AclAct", "EMS16", 0),
      ]
      checks += [
        ("EMS12", 100),
        ("EMS16", 100),
      ]
    if CP.carFingerprint in FEATURES["use_fca"]:
      signals += [
        ("FCA_CmdAct", "FCA11", 0),
        ("CF_VSM_Warn", "FCA11", 0),
      ]
      if not CP.openpilotLongitudinalControl:
        checks += [("FCA11", 50)]

    if CP.carFingerprint in [CAR.SANTA_FE]:
      checks.remove(("TCS13", 50))

    if CP.enableBsm:
      signals += [
        ("CF_Lca_IndLeft", "LCA11", 0),
        ("CF_Lca_IndRight", "LCA11", 0),
      ]
      checks += [("LCA11", 50)]

    if CP.enableAutoHold:
      signals += [
        ("AVH_STAT", "ESP11", 0),
        ("LDM_STAT", "ESP11", 0),
      ]
      checks += [("ESP11", 50)]

    return CANParser(DBC[CP.carFingerprint]['pt'], signals, checks, 0, enforce_checks=False)
  @staticmethod
  def get_can2_parser(CP):
    """Build the CANParser for bus 1.

    Only used when MDPS, SAS or SCC traffic was detected on the second bus;
    otherwise the returned parser has empty signal/check lists.
    """
    signals = []
    checks = []
    if CP.mdpsBus == 1:
      signals += [
        ("CR_Mdps_StrColTq", "MDPS12", 0),
        ("CF_Mdps_Def", "MDPS12", 0),
        ("CF_Mdps_ToiActive", "MDPS12", 0),
        ("CF_Mdps_ToiUnavail", "MDPS12", 0),
        ("CF_Mdps_ToiFlt", "MDPS12", 0),
        ("CF_Mdps_MsgCount2", "MDPS12", 0),
        ("CF_Mdps_Chksum2", "MDPS12", 0),
        ("CF_Mdps_SErr", "MDPS12", 0),
        ("CR_Mdps_StrTq", "MDPS12", 0),
        ("CF_Mdps_FailStat", "MDPS12", 0),
        ("CR_Mdps_OutTq", "MDPS12", 0)
      ]
      checks += [
        ("MDPS12", 50)
      ]
    if CP.sasBus == 1:
      signals += [
        ("SAS_Angle", "SAS11", 0),
        ("SAS_Speed", "SAS11", 0),
      ]
      checks += [
        ("SAS11", 100)
      ]
    if CP.sccBus == 1:
      signals += [
        ("MainMode_ACC", "SCC11", 1),
        ("SCCInfoDisplay", "SCC11", 0),
        ("AliveCounterACC", "SCC11", 0),
        ("VSetDis", "SCC11", 30),
        ("ObjValid", "SCC11", 0),
        ("DriverAlertDisplay", "SCC11", 0),
        ("TauGapSet", "SCC11", 4),
        ("ACC_ObjStatus", "SCC11", 0),
        ("ACC_ObjLatPos", "SCC11", 0),
        ("ACC_ObjDist", "SCC11", 150.),
        ("ACC_ObjRelSpd", "SCC11", 0),
        ("Navi_SCC_Curve_Status", "SCC11", 0),
        ("Navi_SCC_Curve_Act", "SCC11", 0),
        ("Navi_SCC_Camera_Act", "SCC11", 0),
        ("Navi_SCC_Camera_Status", "SCC11", 2),

        ("ACCMode", "SCC12", 0),
        ("CF_VSM_Prefill", "SCC12", 0),
        ("CF_VSM_DecCmdAct", "SCC12", 0),
        ("CF_VSM_HBACmd", "SCC12", 0),
        ("CF_VSM_Warn", "SCC12", 0),
        ("CF_VSM_Stat", "SCC12", 0),
        ("CF_VSM_BeltCmd", "SCC12", 0),
        ("ACCFailInfo", "SCC12", 0),
        ("StopReq", "SCC12", 0),
        ("CR_VSM_DecCmd", "SCC12", 0),
        ("aReqRaw", "SCC12", 0), #aReqMax
        ("TakeOverReq", "SCC12", 0),
        ("PreFill", "SCC12", 0),
        ("aReqValue", "SCC12", 0), #aReqMin
        ("CF_VSM_ConfMode", "SCC12", 1),
        ("AEB_Failinfo", "SCC12", 0),
        ("AEB_Status", "SCC12", 2),
        ("AEB_CmdAct", "SCC12", 0),
        ("AEB_StopReq", "SCC12", 0),
        ("CR_VSM_Alive", "SCC12", 0),
        ("CR_VSM_ChkSum", "SCC12", 0),

        ("SCCDrvModeRValue", "SCC13", 2),
        ("SCC_Equip", "SCC13", 1),
        ("AebDrvSetStatus", "SCC13", 0),

        ("JerkUpperLimit", "SCC14", 0),
        ("JerkLowerLimit", "SCC14", 0),
        ("SCCMode2", "SCC14", 0),
        ("ComfortBandUpper", "SCC14", 0),
        ("ComfortBandLower", "SCC14", 0),
      ]
      checks += [
        ("SCC11", 50),
        ("SCC12", 50),
      ]
    return CANParser(DBC[CP.carFingerprint]['pt'], signals, checks, 1, enforce_checks=False)
  @staticmethod
  def get_cam_can_parser(CP):
    """Build the CANParser for the camera bus (bus 2).

    Always listens to LKAS11 from the camera; SCC messages are added when
    the radar forwards them on the camera bus (sccBus == 2).
    """
    signals = [
      # sig_name, sig_address, default
      ("CF_Lkas_LdwsActivemode", "LKAS11", 0),
      ("CF_Lkas_LdwsSysState", "LKAS11", 0),
      ("CF_Lkas_SysWarning", "LKAS11", 0),
      ("CF_Lkas_LdwsLHWarning", "LKAS11", 0),
      ("CF_Lkas_LdwsRHWarning", "LKAS11", 0),
      ("CF_Lkas_HbaLamp", "LKAS11", 0),
      ("CF_Lkas_FcwBasReq", "LKAS11", 0),
      ("CF_Lkas_ToiFlt", "LKAS11", 0),
      ("CF_Lkas_HbaSysState", "LKAS11", 0),
      ("CF_Lkas_FcwOpt", "LKAS11", 0),
      ("CF_Lkas_HbaOpt", "LKAS11", 0),
      ("CF_Lkas_FcwSysState", "LKAS11", 0),
      ("CF_Lkas_FcwCollisionWarning", "LKAS11", 0),
      ("CF_Lkas_MsgCount", "LKAS11", 0),
      ("CF_Lkas_FusionState", "LKAS11", 0),
      ("CF_Lkas_FcwOpt_USM", "LKAS11", 0),
      ("CF_Lkas_LdwsOpt_USM", "LKAS11", 0)
    ]
    checks = [
      ("LKAS11", 100)
    ]
    if CP.sccBus == 2:
      signals += [
        ("MainMode_ACC", "SCC11", 1),
        ("SCCInfoDisplay", "SCC11", 0),
        ("AliveCounterACC", "SCC11", 0),
        ("VSetDis", "SCC11", 30),
        ("ObjValid", "SCC11", 0),
        ("DriverAlertDisplay", "SCC11", 0),
        ("TauGapSet", "SCC11", 4),
        ("ACC_ObjStatus", "SCC11", 0),
        ("ACC_ObjLatPos", "SCC11", 0),
        ("ACC_ObjDist", "SCC11", 150.),
        ("ACC_ObjRelSpd", "SCC11", 0),
        ("Navi_SCC_Curve_Status", "SCC11", 0),
        ("Navi_SCC_Curve_Act", "SCC11", 0),
        ("Navi_SCC_Camera_Act", "SCC11", 0),
        ("Navi_SCC_Camera_Status", "SCC11", 2),

        ("ACCMode", "SCC12", 0),
        ("CF_VSM_Prefill", "SCC12", 0),
        ("CF_VSM_DecCmdAct", "SCC12", 0),
        ("CF_VSM_HBACmd", "SCC12", 0),
        ("CF_VSM_Warn", "SCC12", 0),
        ("CF_VSM_Stat", "SCC12", 0),
        ("CF_VSM_BeltCmd", "SCC12", 0),
        ("ACCFailInfo", "SCC12", 0),
        ("StopReq", "SCC12", 0),
        ("CR_VSM_DecCmd", "SCC12", 0),
        ("aReqRaw", "SCC12", 0), #aReqMax
        ("TakeOverReq", "SCC12", 0),
        ("PreFill", "SCC12", 0),
        ("aReqValue", "SCC12", 0), #aReqMin
        ("CF_VSM_ConfMode", "SCC12", 1),
        ("AEB_Failinfo", "SCC12", 0),
        ("AEB_Status", "SCC12", 2),
        ("AEB_CmdAct", "SCC12", 0),
        ("AEB_StopReq", "SCC12", 0),
        ("CR_VSM_Alive", "SCC12", 0),
        ("CR_VSM_ChkSum", "SCC12", 0),

        ("SCCDrvModeRValue", "SCC13", 2),
        ("SCC_Equip", "SCC13", 1),
        ("AebDrvSetStatus", "SCC13", 0),

        ("JerkUpperLimit", "SCC14", 0),
        ("JerkLowerLimit", "SCC14", 0),
        ("SCCMode2", "SCC14", 0),
        ("ComfortBandUpper", "SCC14", 0),
        ("ComfortBandLower", "SCC14", 0),
      ]
      checks += [
        ("SCC11", 50),
        ("SCC12", 50),
      ]

    return CANParser(DBC[CP.carFingerprint]['pt'], signals, checks, 2, enforce_checks=False)
| 34.631313 | 141 | 0.576588 |
580517a0bd6b87cfac9286c860e745c7b8446167 | 6,411 | py | Python | dataio.py | mfs6174/Deep6174 | 92e2ceb48134e0cf003f130aef8d838a7a16c27d | [
"Apache-2.0"
] | null | null | null | dataio.py | mfs6174/Deep6174 | 92e2ceb48134e0cf003f130aef8d838a7a16c27d | [
"Apache-2.0"
] | null | null | null | dataio.py | mfs6174/Deep6174 | 92e2ceb48134e0cf003f130aef8d838a7a16c27d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# File: dataio.py
# Date: Thu Sep 18 10:23:38 2014 -0700
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
# Author: Xi-Jin Zhang <zhangxijin91@gmail.com>
import gzip
import cPickle as pickle
import operator
#from IPython.core.debugger import Tracer
from itertools import izip, count
import glob
import numpy as np
import os, sys
import scipy.io as sio
import cv2
import theano as tn
from lib.utils import memorized
def _read_data_fallback(dataset):
def read(name):
pat = '{0}/{1}-*.pkl.gz'.format(dataset, name)
all_imgs = []
all_labels = []
for f in sorted(glob.glob(pat)):
fin = gzip.open(f, 'rb')
imgs, labels = pickle.load(fin)
if not len(all_imgs):
all_imgs = np.vstack([imgs])
all_labels = labels
else:
all_imgs = np.vstack([all_imgs, imgs])
all_labels = np.concatenate((all_labels, labels))
fin.close()
return (all_imgs, all_labels)
return (read('train'), read('valid'), read('test'))
@memorized
def read_data(dataset):
    """Load a dataset and return (train, valid, test).

    `dataset` is either a single .pkl.gz file or a directory of sliced
    archives written by _save_data_fallback. Results are memoized.
    """
    print('Loading data from {0} ...'.format(dataset))
    if os.path.isfile(dataset):
        f = gzip.open(dataset, 'rb')
        try:
            train, valid, test = pickle.load(f)
        finally:
            # Close the handle even if unpickling fails.
            f.close()
        print('Data loaded.')
        return (train, valid, test)
    if os.path.isdir(dataset):
        data = _read_data_fallback(dataset)
        # Bug fix: this message used to sit after both returns, so it only
        # ever printed right before the assert below.
        print('Data loaded.')
        return data
    assert False, "Invalid Dataset Filename"
def _save_data_fallback(data, basename):
dirname = basename + ".pkl.gz"
try:
os.mkdir(dirname)
except:
pass
def save(dataset, name):
size = reduce(operator.mul, dataset[0].shape)
nslice = np.ceil(size / (2.5 * (10 ** 8)))
print nslice
imgs = np.array_split(dataset[0], nslice)
labels = np.array_split(dataset[1], nslice)
for idx, img_slice, label_slice in izip(count(), imgs, labels):
to_save = (img_slice, label_slice)
fname = "{0}-{1}.pkl.gz".format(name, idx)
fout = gzip.open(os.path.join(dirname, fname), 'wb')
pickle.dump(to_save, fout, -1)
fout.close()
for idx, name in enumerate(['train', 'valid', 'test']):
dataset = data[idx]
save(dataset, name)
def save_data(data, basename):
    """Persist (train, valid, test) to '<basename>.pkl.gz'.

    Falls back to sliced storage (_save_data_fallback) when pickling the
    whole dataset in one file fails (e.g. objects too large).
    Raises AssertionError if the output path already exists.
    """
    print('Writing data to {0}'.format(basename))
    output = basename + '.pkl.gz'
    assert not os.path.exists(output), "Path exists! " + str(output)
    try:
        fout = gzip.open(output, 'wb')
        try:
            pickle.dump(data, fout, -1)
        finally:
            # Close the handle even when dump fails (was leaked before).
            fout.close()
    except Exception:
        # Was a bare `except:`; narrowed so KeyboardInterrupt etc. propagate.
        print('Pickle failed ! Split the data!')
        if os.path.exists(output):
            os.remove(output)
        _save_data_fallback(data, basename)
def get_dataset_imgsize(dataset, transform=True):
    """Return the image shape of the training set of `dataset`.

    When `transform` is true and images are stored flattened (1-D), the
    flat length is converted back into a square (side, side) tuple.
    """
    train_images = read_data(dataset)[0][0]
    shape = train_images[0].shape
    if not transform or len(shape) != 1:
        return shape
    side = int(np.sqrt(shape[0]))
    return (side, side)
def sample_dataset(imgs, labels, cnt):
    """Sample `cnt` (image, label) pairs from the dataset (imgs, labels).

    Assumes imgs/labels are numpy arrays (fancy indexing is used).
    Raises AssertionError when cnt >= len(imgs) or lengths differ.
    """
    # stdlib; `random` was never imported at module level, so this function
    # previously raised NameError at runtime.
    import random
    assert cnt < len(imgs)
    assert len(imgs) == len(labels)
    idxs = random.sample(range(len(imgs)), cnt)
    return (imgs[idxs], labels[idxs])
@memorized
def read_raw_image_label(ipath, image, label=None, multi=0):
    """Read an image and its label image; return (image, label) in CHW layout.

    :param ipath: dataset root directory (with trailing slash).
    :param image: image file name relative to `ipath`.
    :param label: label path; defaults to '<ipath>label/<image>'.
    :param multi: cv2.imread flag for the label (0 = grayscale).
    """
    if label is None:
        label = ipath + "label/" + image
    image = ipath + image
    if os.path.isfile(image) and os.path.isfile(label):
        # cv2.imread returns None on failure; check BEFORE using the result
        # (the old code called .astype first and raised AttributeError).
        im = cv2.imread(image)
        assert im is not None, "invalid image"
        im = im.astype(tn.config.floatX) / 255.0
        lb = cv2.imread(label, multi)
        assert lb is not None, "invalid label"
        # Convert HWC -> CHW; a single-channel image gets a leading axis.
        if len(im.shape) == 2:
            newim = im.reshape((1, im.shape[0], im.shape[1]))
        elif len(im.shape) == 3:
            newim = np.ndarray((im.shape[2], im.shape[0], im.shape[1]))
            for i in range(0, im.shape[2]):
                newim[i, :, :] = im[:, :, i]
        else:
            assert False, "invalid image shape"
        if len(lb.shape) == 2:
            newlb = lb
        elif len(lb.shape) == 3:
            newlb = np.ndarray((lb.shape[2], lb.shape[0], lb.shape[1]))
            for i in range(0, lb.shape[2]):
                newlb[i, :, :] = lb[:, :, i]
        else:
            assert False, "invalid label shape"
        return (newim, newlb)
    assert False, "Invalid Dataset Filename"
@memorized
def read_raw_image_only(image):
    """Read an image file and return it as a CHW float array scaled to [0, 1].

    Raises AssertionError if the path does not exist or cannot be decoded.
    """
    if os.path.isfile(image):
        # cv2.imread returns None on failure; check BEFORE using the result
        # (the old code called .astype first and raised AttributeError).
        im = cv2.imread(image)
        assert im is not None, "invalid image"
        im = im.astype(tn.config.floatX) / 255.0
        # Convert HWC -> CHW; a single-channel image gets a leading axis.
        if len(im.shape) == 2:
            newim = im.reshape((1, im.shape[0], im.shape[1]))
        elif len(im.shape) == 3:
            newim = np.ndarray((im.shape[2], im.shape[0], im.shape[1]))
            for i in range(0, im.shape[2]):
                newim[i, :, :] = im[:, :, i]
        else:
            assert False, "invalid image shape"
        return newim
    assert False, "Invalid image Filename"
# File extensions treated as images by list_images().
image_type = [".tif", ".jpg", ".png", ".bmp", ".pgm"]


def list_images(dir):
    """Return the file names in *dir* whose extension is a known image type."""
    return [f for f in os.listdir(dir)
            if os.path.splitext(f)[1] in image_type]
def get_image_list(dir):
    """Return image listings for the first three directories in *dir*.

    *dir* is an indexable of at least three directory paths (train/valid/test).
    """
    return [list_images(dir[idx]) for idx in (0, 1, 2)]
@memorized
def read_image_label(dataset):
    """ return (image, label) (not flattened)"""
    # Loads a single pickled (image, label) pair from a .pkl.gz file.
    #print 'Loading data from {0} ...'.format(dataset)
    if os.path.isfile(dataset):
        f = gzip.open(dataset, 'rb')
        im,lb = pickle.load(f)
        f.close()
        return (im,lb)
    # Directory fallback intentionally disabled (kept for reference):
    #if os.path.isdir(dataset):
    #    return _read_data_fallback(dataset)
    #print 'Data loaded.'
    assert False, "Invalid Dataset Filename"
if __name__ == '__main__':
    # Smoke test: load a dataset (default MNIST) and print the split sizes.
    if len(sys.argv) == 2:
        dataset = sys.argv[1]
    else:
        dataset = './mnist.pkl.gz'
    t, v, ts = read_data(dataset)
    print len(t[0]), len(v[0]), len(ts[0])
    # Round-trip check, kept disabled:
    #print "Saving..."
    #_save_data_fallback((t, v, ts), 'testdir')
    #tt, vv, ttss = _read_data_fallback('testdir')
    #print tt[1] == t[1]
| 30.822115 | 71 | 0.5854 |
0cd267b2f165ee7d5e689be3ef728a8f778d1a28 | 1,769 | py | Python | src/recipes/index.py | brysontyrrell/MacAdminsChickenWings | 7263e431c1f28006935d7dc6e851642c6926b5cb | [
"MIT"
] | null | null | null | src/recipes/index.py | brysontyrrell/MacAdminsChickenWings | 7263e431c1f28006935d7dc6e851642c6926b5cb | [
"MIT"
] | null | null | null | src/recipes/index.py | brysontyrrell/MacAdminsChickenWings | 7263e431c1f28006935d7dc6e851642c6926b5cb | [
"MIT"
] | null | null | null | import decimal
import json
import os
import random
import boto3
from boto3.dynamodb.conditions import Key
dynamodb_table = boto3.resource("dynamodb").Table(os.getenv("TABLE"))
def lambda_handler(event, context):
    """Lambda entry point: build a random recipe (method, sauce, dip) as JSON.

    Query params `no_sauce` / `no_dip` suppress the corresponding component.
    """
    # API Gateway sends queryStringParameters as null (None) when the request
    # has no query string, so `.get(..., {})` alone is not enough — `in`
    # against None would raise TypeError.
    params = event.get("queryStringParameters") or {}

    response = {"method": random_component_by_key("cooking")}

    if "no_sauce" not in params and response["method"]["use_sauce"]:
        response["sauce"] = random_component_by_key("sauce")

    if "no_dip" not in params:
        response["dip"] = random_component_by_key("dip")

    return json.dumps(response, cls=DecimalEncoder)
def random_component_by_key(component):
    """Return the data of a uniformly random item of the given component kind.

    Queries DynamoDB for all sort keys under partition 'C#<component>',
    picks one at random, then fetches that item's `data` attribute.
    """
    partition = f"C#{component}"
    candidates = dynamodb_table.query(
        KeyConditionExpression=Key("pk").eq(partition),
        ProjectionExpression="sk",
    )["Items"]
    chosen = random.choice(candidates)
    item = dynamodb_table.get_item(
        Key={"pk": partition, "sk": chosen["sk"]},
        ProjectionExpression="#d",
        # "data" is a DynamoDB reserved word, hence the #d alias.
        ExpressionAttributeNames={"#d": "data"},
    )
    return item["Item"]["data"]
def random_component_by_value(component):
    """Return the data of a pseudo-random item by probing the sort-key space.

    Generates a random 8-hex-char probe key and takes the first item whose
    sort key is above it, wrapping to the lower side when none exists.
    """
    char = "".join(random.sample("abcdef1234567890", 8))

    def _result(sk_condition):
        return dynamodb_table.query(
            KeyConditionExpression=Key("pk").eq(f"C#{component}") & sk_condition,
            ProjectionExpression="#d",
            ExpressionAttributeNames={"#d": "data"},
            Limit=1,
        )["Items"][0]["data"]

    try:
        return _result(Key("sk").gt(f"ID#{char}"))
    except IndexError:
        # No item above the probe key (was a bare `except:`, which also hid
        # real client errors); wrap around to the lower side.
        return _result(Key("sk").lte(f"ID#{char}"))
class DecimalEncoder(json.JSONEncoder):
    """JSON encoder that renders DynamoDB Decimal values as int or float."""

    def default(self, obj):
        if not isinstance(obj, decimal.Decimal):
            return json.JSONEncoder.default(self, obj)
        # Whole numbers serialize as ints, everything else as floats.
        return float(obj) if obj % 1 else int(obj)
| 29 | 88 | 0.64952 |
c4addb15f53c8f5bd55d72a78eaa3f71e2b57b08 | 5,182 | py | Python | rmnd_lca/export.py | xiaoshir/rmnd-lca | 9831d59361ec7afbb6ff69e318eb225fd7ae7aaf | [
"BSD-3-Clause"
] | 2 | 2021-02-02T13:17:56.000Z | 2021-02-03T13:20:49.000Z | rmnd_lca/export.py | xiaoshir/rmnd-lca | 9831d59361ec7afbb6ff69e318eb225fd7ae7aaf | [
"BSD-3-Clause"
] | null | null | null | rmnd_lca/export.py | xiaoshir/rmnd-lca | 9831d59361ec7afbb6ff69e318eb225fd7ae7aaf | [
"BSD-3-Clause"
] | null | null | null | import os
from . import DATA_DIR
import csv
FILEPATH_BIOSPHERE_FLOWS = (DATA_DIR / "flows_biosphere.csv")
class Export:
    """
    Exports the transformed database as sparse A and B matrices.

    * A matrix: contains product exchanges
    * B matrix: contains exchanges between activities and the biosphere

    Both matrices are written as csv files in a sparse representation (only
    non-zero values are listed), one exchange per row:
    - index of row, index of column, value of exchange

    Dictionaries mapping row numbers to activity/product identifiers (A) and
    biosphere flow identifiers (B) are exported alongside the matrices.

    :ivar db: transformed database
    :vartype db: dict
    :ivar scenario: name of a Remind scenario
    :vartype scenario: str
    :ivar year: year of a Remind scenario
    :vartype year: int
    """

    def __init__(self, db, scenario, year):
        self.db = db
        self.scenario = scenario
        self.year = year

    def export_db_to_matrices(self):
        """Write the A and B matrices plus their index maps under DATA_DIR/matrices."""
        index_A = self.create_index_of_A_matrix()

        filepath = DATA_DIR / "matrices"
        if not os.path.exists(filepath):
            os.makedirs(filepath)

        # Export A matrix. Technosphere inputs are written sign-flipped,
        # production exchanges keep their sign.
        with open(filepath / 'A_matrix.csv', 'w') as f:
            writer = csv.writer(f, delimiter=';', lineterminator='\n',)
            writer.writerow(['index of activity', 'index of product', 'value'])
            for ds in self.db:
                ds_key = (ds['name'], ds['reference product'], ds['unit'], ds['location'])
                for exc in ds['exchanges']:
                    if exc['type'] == 'production':
                        writer.writerow([
                            index_A[ds_key],
                            index_A[(exc['name'], exc['product'], exc['unit'], exc['location'])],
                            exc['amount'],
                        ])
                    elif exc['type'] == 'technosphere':
                        writer.writerow([
                            index_A[ds_key],
                            index_A[(exc['name'], exc['product'], exc['unit'], exc['location'])],
                            exc['amount'] * -1,
                        ])

        # Export A index: (activity name, product, unit, location) -> index.
        with open(filepath / 'A_matrix_index.csv', 'w') as f:
            writer = csv.writer(f, delimiter=';', lineterminator='\n',)
            for key, index in index_A.items():
                writer.writerow([key, index])

        index_B = self.create_index_of_B_matrix()
        rev_index_B = self.create_rev_index_of_B_matrix()

        # Export B matrix (biosphere exchanges, sign-flipped).
        with open(filepath / 'B_matrix.csv', 'w') as f:
            writer = csv.writer(f, delimiter=';', lineterminator='\n',)
            writer.writerow(['index of activity', 'index of biosphere flow', 'value'])
            for ds in self.db:
                ds_key = (ds['name'], ds['reference product'], ds['unit'], ds['location'])
                for exc in ds['exchanges']:
                    if exc['type'] == 'biosphere':
                        try:
                            row = [
                                index_A[ds_key],
                                index_B[rev_index_B[exc['input'][1]]],
                                exc['amount'] * -1,
                            ]
                        except KeyError:
                            # Unknown biosphere flow: report and skip it. The
                            # previous implementation fell through and wrote
                            # the last successful row again (or crashed with
                            # UnboundLocalError on the very first exchange).
                            print(exc)
                            continue
                        writer.writerow(row)

        # Export B index: flow identifier -> index.
        with open(filepath / 'B_matrix_index.csv', 'w') as f:
            writer = csv.writer(f, delimiter=';', lineterminator='\n',)
            for key, index in index_B.items():
                writer.writerow([key, index])

        print("Matrices saved in {}.".format(filepath))

    def create_index_of_A_matrix(self):
        """
        Create a dictionary with row/column indices of the A matrix as values
        and (activity name, reference product, unit, location) tuples as keys.

        :return: a dictionary to map activities to indices
        :rtype: dict
        """
        return {
            (ds['name'], ds['reference product'], ds['unit'], ds['location']): i
            for i, ds in enumerate(self.db)
        }

    def create_index_of_B_matrix(self):
        """
        Create a dictionary mapping the identifier in the second column of
        flows_biosphere.csv to its row index in the B matrix.

        :return: a dictionary to map biosphere flows to indices
        :rtype: dict
        :raises FileNotFoundError: if flows_biosphere.csv is missing
        """
        if not FILEPATH_BIOSPHERE_FLOWS.is_file():
            raise FileNotFoundError(
                "The dictionary of biosphere flows could not be found."
            )

        with open(FILEPATH_BIOSPHERE_FLOWS) as f:
            return {row[1]: i for i, row in enumerate(csv.reader(f, delimiter=";"))}

    def create_rev_index_of_B_matrix(self):
        """
        Create a dictionary mapping the first column of flows_biosphere.csv
        to the second (the identifier used as key by create_index_of_B_matrix).

        :return: a dictionary mapping flow identifiers
        :rtype: dict
        :raises FileNotFoundError: if flows_biosphere.csv is missing
        """
        if not FILEPATH_BIOSPHERE_FLOWS.is_file():
            raise FileNotFoundError(
                "The dictionary of biosphere flows could not be found."
            )

        with open(FILEPATH_BIOSPHERE_FLOWS) as f:
            return {row[0]: row[1] for row in csv.reader(f, delimiter=";")}
| 38.671642 | 121 | 0.533771 |
e3bf49c21133fa2b1adc31a5f5ab0c39ddad9eb8 | 1,446 | py | Python | xlsxwriter/test/comparison/test_chart_axis20.py | hugovk/XlsxWriter | e97cc66637d9895480ee32cfb5e561d652d3787b | [
"BSD-2-Clause"
] | null | null | null | xlsxwriter/test/comparison/test_chart_axis20.py | hugovk/XlsxWriter | e97cc66637d9895480ee32cfb5e561d652d3787b | [
"BSD-2-Clause"
] | null | null | null | xlsxwriter/test/comparison/test_chart_axis20.py | hugovk/XlsxWriter | e97cc66637d9895480ee32cfb5e561d652d3787b | [
"BSD-2-Clause"
] | null | null | null | ###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.

    """

    def setUp(self):
        self.set_filename('chart_axis20.xlsx')

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        chart = workbook.add_chart({'type': 'column'})
        chart.axis_ids = [43572224, 43812352]

        # Three data series written down columns A, B and C.
        data = [
            [1, 2, 3, 4, 5],
            [2, 4, 6, 8, 10],
            [3, 6, 9, 12, 15],
        ]
        for anchor_cell, column_data in zip(('A1', 'B1', 'C1'), data):
            worksheet.write_column(anchor_cell, column_data)

        for col in ('A', 'B', 'C'):
            chart.add_series({'values': '=Sheet1!$%s$1:$%s$5' % (col, col)})

        chart.set_x_axis({'label_position': 'next_to'})
        chart.set_y_axis({'label_position': 'none'})

        worksheet.insert_chart('E9', chart)

        workbook.close()

        self.assertExcelEqual()
| 25.821429 | 79 | 0.570539 |
0ece52f4b017eea88f1d7e5090852bb153576624 | 2,332 | py | Python | python/leetcode/easy/ex0053.py | vilisimo/ads | cba2b04db6fd2755e32d0e3f2e4480fd808155f5 | [
"MIT"
] | null | null | null | python/leetcode/easy/ex0053.py | vilisimo/ads | cba2b04db6fd2755e32d0e3f2e4480fd808155f5 | [
"MIT"
] | null | null | null | python/leetcode/easy/ex0053.py | vilisimo/ads | cba2b04db6fd2755e32d0e3f2e4480fd808155f5 | [
"MIT"
] | null | null | null | # Given an integer array nums, find the contiguous subarray (containing at least one number)
# which has the largest sum and return its sum.
# A subarray is a contiguous part of an array.
# Example 1:
# Input: nums = [-2,1,-3,4,-1,2,1,-5,4]
# Output: 6
# Explanation: [4,-1,2,1] has the largest sum = 6.
# Example 2:
# Input: nums = [1]
# Output: 1
# Example 3:
# Input: nums = [5,4,-1,7,8]
# Output: 23
# Constraints
# 1 <= nums.length <= 10^5
# -10^4 <= nums[i] <= 10^4
# Follow up: If you have figured out the O(n) solution, try coding another solution
# using the divide and conquer approach, which is more subtle.
from typing import List
class O2nSolution:
    """Linear scan over running prefix sums, O(n) time and O(1) space.

    Unlike the previous version, this does not overwrite the caller's list
    with intermediate running sums; the input is left untouched.
    """

    def maxSubArray(self, nums: List[int]) -> int:
        """Return the largest sum of any non-empty contiguous subarray."""
        best = running = nums[0]
        for value in nums[1:]:
            # Extend the previous run only while it contributes positively;
            # otherwise start a fresh run at the current element.
            running = value + (running if running > 0 else 0)
            best = max(best, running)
        return best
class KadanesSolution:
    """Classic Kadane's algorithm: O(n) time, O(1) extra space."""

    def maxSubArray(self, nums: List[int]) -> int:
        """Return the largest sum over all contiguous subarrays of nums."""
        best = float('-inf')
        running = 0
        for value in nums:
            # A negative running sum can only hurt, so restart the window
            # before absorbing the current value.
            running = max(running, 0) + value
            best = max(best, running)
        return best
class DivideAndConquerSolution:
    """Divide and conquer: O(n log n) time, O(log n) recursion depth."""

    def maxSubArray(self, nums: List[int]) -> int:
        """Return the largest contiguous-subarray sum of nums."""
        return self.max_sub(nums, start=0, end=len(nums) - 1)

    def max_sub(self, nums: List[int], start: int, end: int) -> int:
        """Best subarray sum within nums[start..end] (bounds inclusive)."""
        if start == end:
            return nums[start]
        middle = (start + end) // 2
        # The optimum is entirely in the left half, entirely in the right
        # half, or straddles the split point.
        best_left = self.max_sub(nums=nums, start=start, end=middle)
        best_right = self.max_sub(nums=nums, start=middle + 1, end=end)
        best_cross = self.max_center(nums=nums, left=start, center=middle, right=end)
        return max(best_left, best_right, best_cross)

    def max_center(self, nums: List[int], left: int, center: int, right: int) -> int:
        """Best sum of a subarray touching the center/center+1 boundary."""
        best_before = float('-inf')
        total = 0
        for i in range(center, left - 1, -1):
            total += nums[i]
            best_before = max(best_before, total)
        best_after = float('-inf')
        total = 0
        for i in range(center + 1, right + 1):
            total += nums[i]
            best_after = max(best_after, total)
        # A boundary subarray may use only the left part, only the right
        # part, or both joined across the split.
        return max(best_before, best_before + best_after, best_after)
| 27.761905 | 92 | 0.583619 |
da6615b53ab23e1d7a9e92f35c52d1b335367b2f | 959 | py | Python | schemas/orders.py | rimtzg/supertpv-branch-server | c8ebf8b1d43c90d964a3f26c195b7078a573adb7 | [
"MIT"
] | null | null | null | schemas/orders.py | rimtzg/supertpv-branch-server | c8ebf8b1d43c90d964a3f26c195b7078a573adb7 | [
"MIT"
] | 1 | 2022-02-26T17:28:54.000Z | 2022-02-26T17:28:54.000Z | schemas/orders.py | rimtzg/supertpv-branch-server | c8ebf8b1d43c90d964a3f26c195b7078a573adb7 | [
"MIT"
] | null | null | null | from schema import Schema, Optional, Use, Or
from datetime import datetime
from bson import ObjectId
# from order_templates import schema as order_template
# Validation/coercion schema for an "order" document as stored in MongoDB.
# `Use(...)` coerces incoming values (e.g. hex strings -> ObjectId,
# ISO-8601 strings -> datetime, names -> lower case) rather than merely
# type-checking them; unknown keys are silently ignored.
schema = Schema({
    '_id' : Use(ObjectId),
    'client_id' : Use(ObjectId),
    'client_name' : Use(str.lower),
    # Accepts any ISO-8601 date/time string.
    'date' : Use(datetime.fromisoformat),
    'number' : Use(int),
    # Orders default to active when the flag is absent.
    Optional('active', default=True) : Use(bool),
    'categories' : [
        {
            '_id' : Use(ObjectId),
            'name' : Use(str.lower),
            'products' : [
                {
                    '_id' : Use(ObjectId),
                    'code' : Use(str.lower),
                    'name' : Use(str.lower),
                    # Quantity may be fractional (int or float accepted).
                    'amount' : Or(Use(int), Use(float)),
                    'label' : Use(str.lower),
                    # Price accepted as int or float.
                    'price' : Or(Use(int), Use(float))
                }
            ]
        }
    ]
}, ignore_extra_keys=True)
14870081e93874284d534b3eff7241fd341f862c | 335 | py | Python | info_screen/urls.py | OPpuolitaival/django-info-screen | 63f65b08d4dcb463b6f17f840ca39979d2784685 | [
"MIT"
] | 1 | 2017-01-13T19:31:26.000Z | 2017-01-13T19:31:26.000Z | info_screen/urls.py | OPpuolitaival/django-info-screen | 63f65b08d4dcb463b6f17f840ca39979d2784685 | [
"MIT"
] | null | null | null | info_screen/urls.py | OPpuolitaival/django-info-screen | 63f65b08d4dcb463b6f17f840ca39979d2784685 | [
"MIT"
] | null | null | null | # coding: utf-8
from django.conf.urls import url
from . import views
# Namespace used when reversing these URLs (e.g. "info_screen:api").
app_name = 'info_screen'
urlpatterns = [
    # JSON document describing the screen, consumed by the front-end poller.
    url(r'^api/', views.ScreenJsonView.as_view(), name='api'),
    # Image for a single page, addressed by the page's UUID.
    url(r'^image/(?P<page_uuid>.*)', views.ImageView.as_view(), name='image'),
    # Catch-all: renders the screen identified by its UUID.
    url(r'^(?P<screen_uuid>.*)/', views.ScreenView.as_view(), name='screen'),
]
| 25.769231 | 78 | 0.653731 |
185dfe17f7670bdeaa1027d96a75f762fdba37ab | 615 | py | Python | docker/optimization/pyOpt/tags/v1.2.0/pyOpt/pyCONMIN/setup.py | liujiamingustc/phd | 4f815a738abad43531d02ac66f5bd0d9a1def52a | [
"Apache-2.0"
] | 3 | 2021-01-06T03:01:18.000Z | 2022-03-21T03:02:55.000Z | docker/optimization/pyOpt/tags/v1.2.0/pyOpt/pyCONMIN/setup.py | liujiamingustc/phd | 4f815a738abad43531d02ac66f5bd0d9a1def52a | [
"Apache-2.0"
] | null | null | null | docker/optimization/pyOpt/tags/v1.2.0/pyOpt/pyCONMIN/setup.py | liujiamingustc/phd | 4f815a738abad43531d02ac66f5bd0d9a1def52a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import os,sys
def configuration(parent_package='', top_path=None):
    """Assemble the numpy.distutils configuration for the pyCONMIN package.

    Builds the CONMIN Fortran sources as a static library, wraps them with
    the f2py-generated extension module, and ships the license/readme files.
    """
    from numpy.distutils.misc_util import Configuration

    cfg = Configuration('pyCONMIN', parent_package, top_path)
    # Static Fortran library containing the CONMIN optimizer itself.
    cfg.add_library('conmin', sources=[os.path.join('source', '*.f')])
    # Python extension generated by f2py, linked against that library.
    cfg.add_extension('conmin',
                      sources=['source/f2py/conmin.pyf'],
                      libraries=['conmin'])
    cfg.add_data_files('LICENSE', 'README')
    return cfg
if __name__ == '__main__':
    # Standalone build entry point: python setup.py build
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| 23.653846 | 62 | 0.656911 |
66e2a742e4812a6c81a0def32deddbd512ac23d0 | 4,541 | py | Python | naff/api/http/http_requests/reactions.py | Kigstn/Dis-Snek | efbf5a39ac550e5f04d0e11edf733ce312ccc0bf | [
"MIT"
] | 64 | 2021-10-12T15:31:36.000Z | 2022-03-29T18:25:47.000Z | naff/api/http/http_requests/reactions.py | Kigstn/Dis-Snek | efbf5a39ac550e5f04d0e11edf733ce312ccc0bf | [
"MIT"
] | 166 | 2021-10-10T16:27:52.000Z | 2022-03-30T09:04:54.000Z | naff/api/http/http_requests/reactions.py | Kigstn/Dis-Snek | efbf5a39ac550e5f04d0e11edf733ce312ccc0bf | [
"MIT"
] | 34 | 2021-10-10T13:26:41.000Z | 2022-03-23T13:59:35.000Z | from typing import TYPE_CHECKING, Any, List
import discord_typings
from naff.client.const import MISSING, Absent
from ..route import Route
__all__ = ("ReactionRequests",)
if TYPE_CHECKING:
from naff.models.discord.snowflake import Snowflake_Type
class ReactionRequests:
    # Supplied by the HTTP client this mixin is composed into.
    request: Any

    async def create_reaction(self, channel_id: "Snowflake_Type", message_id: "Snowflake_Type", emoji: str) -> None:
        """
        Create a reaction for a message.

        Args:
            channel_id: The channel this is taking place in
            message_id: The message to create a reaction on
            emoji: The emoji to use (format: `name:id`)

        """
        return await self.request(
            Route(
                "PUT",
                "/channels/{channel_id}/messages/{message_id}/reactions/{emoji}/@me",
                channel_id=channel_id,
                message_id=message_id,
                emoji=emoji,
            )
        )

    async def remove_self_reaction(
        self, channel_id: "Snowflake_Type", message_id: "Snowflake_Type", emoji: str
    ) -> None:
        """
        Remove client's reaction from a message.

        Args:
            channel_id: The channel this is taking place in.
            message_id: The message to remove the reaction on.
            emoji: The emoji to remove. (format: `name:id`)

        """
        return await self.request(
            Route(
                "DELETE",
                "/channels/{channel_id}/messages/{message_id}/reactions/{emoji}/@me",
                channel_id=channel_id,
                message_id=message_id,
                emoji=emoji,
            )
        )

    async def remove_user_reaction(
        self, channel_id: "Snowflake_Type", message_id: "Snowflake_Type", emoji: str, user_id: "Snowflake_Type"
    ) -> None:
        """
        Remove user's reaction from a message.

        Args:
            channel_id: The channel this is taking place in
            message_id: The message to remove the reaction on.
            emoji: The emoji to remove. (format: `name:id`)
            user_id: The user to remove reaction of.

        """
        return await self.request(
            Route(
                "DELETE",
                "/channels/{channel_id}/messages/{message_id}/reactions/{emoji}/{user_id}",
                channel_id=channel_id,
                message_id=message_id,
                emoji=emoji,
                user_id=user_id,
            )
        )

    async def clear_reaction(self, channel_id: "Snowflake_Type", message_id: "Snowflake_Type", emoji: str) -> None:
        """
        Remove specific reaction from a message.

        Args:
            channel_id: The channel this is taking place in.
            message_id: The message to remove the reaction on.
            emoji: The emoji to remove. (format: `name:id`)

        """
        return await self.request(
            Route(
                "DELETE",
                "/channels/{channel_id}/messages/{message_id}/reactions/{emoji}",
                channel_id=channel_id,
                message_id=message_id,
                emoji=emoji,
            )
        )

    async def clear_reactions(self, channel_id: "Snowflake_Type", message_id: "Snowflake_Type") -> None:
        """
        Remove all reactions from a message.

        Args:
            channel_id: The channel this is taking place in.
            message_id: The message to clear reactions from.

        """
        # Consistency fix: every other method hands Route the path template
        # plus the ids as kwargs; this one previously baked the ids into the
        # path with an f-string, bypassing whatever processing Route applies
        # to its parameters (e.g. rate-limit bucketing by major parameter).
        return await self.request(
            Route(
                "DELETE",
                "/channels/{channel_id}/messages/{message_id}/reactions",
                channel_id=channel_id,
                message_id=message_id,
            )
        )

    async def get_reactions(
        self,
        channel_id: "Snowflake_Type",
        message_id: "Snowflake_Type",
        emoji: str,
        limit: Absent[int] = MISSING,
        after: "Snowflake_Type" = MISSING,
    ) -> List[discord_typings.UserData]:
        """
        Gets specific reaction from a message.

        Args:
            channel_id: The channel this is taking place in.
            message_id: The message to get the reaction.
            emoji: The emoji to get. (format: `name:id`)
            limit: Maximum number of users to return.
            after: Only return users with an id greater than this id.

        Returns:
            List of users who reacted with the emoji.

        """
        return await self.request(
            Route(
                "GET",
                "/channels/{channel_id}/messages/{message_id}/reactions/{emoji}",
                channel_id=channel_id,
                message_id=message_id,
                emoji=emoji,
            ),
            params={"limit": limit, "after": after},
        )
| 31.317241 | 116 | 0.560669 |
02189c6c5dda9396f32a238074fc4c4d240ef7e6 | 8,538 | py | Python | netmiko/huawei/huawei.py | dBitech/netmiko | 72dae46bef4727e288b6a7d6b0119b25d44e1471 | [
"MIT"
] | null | null | null | netmiko/huawei/huawei.py | dBitech/netmiko | 72dae46bef4727e288b6a7d6b0119b25d44e1471 | [
"MIT"
] | null | null | null | netmiko/huawei/huawei.py | dBitech/netmiko | 72dae46bef4727e288b6a7d6b0119b25d44e1471 | [
"MIT"
] | null | null | null | from typing import Optional, Any
import time
import re
import warnings
from netmiko.no_enable import NoEnable
from netmiko.base_connection import DELAY_FACTOR_DEPR_SIMPLE_MSG
from netmiko.cisco_base_connection import CiscoBaseConnection
from netmiko.exceptions import NetmikoAuthenticationException
from netmiko import log
class HuaweiBase(NoEnable, CiscoBaseConnection):
    """Behaviour shared by the Huawei SSH and Telnet drivers."""

    def session_preparation(self) -> None:
        """Prepare the session after the connection has been established."""
        self.ansi_escape_codes = True
        # The _test_channel_read happens in special_login_handler()
        self.set_base_prompt()
        self.disable_paging(command="screen-length 0 temporary")

    def strip_ansi_escape_codes(self, string_buffer: str) -> str:
        """
        Huawei does a strange thing where they add a space and then add ESC[1D
        to move the cursor to the left one.

        The extra space is problematic.
        """
        # ESC[<n>D moves the cursor n columns left; remove it together with
        # the space Huawei emits just before it, then defer to the generic
        # ANSI stripping in the base class.
        code_cursor_left = chr(27) + r"\[\d+D"
        output = string_buffer
        pattern = rf" {code_cursor_left}"
        output = re.sub(pattern, "", output)
        return super().strip_ansi_escape_codes(output)

    def config_mode(
        self,
        config_command: str = "system-view",
        pattern: str = "",
        re_flags: int = 0,
    ) -> str:
        """Enter configuration mode ("system-view" on Huawei VRP)."""
        return super().config_mode(
            config_command=config_command, pattern=pattern, re_flags=re_flags
        )

    def exit_config_mode(self, exit_config: str = "return", pattern: str = r">") -> str:
        """Exit configuration mode."""
        return super().exit_config_mode(exit_config=exit_config, pattern=pattern)

    def check_config_mode(self, check_string: str = "]", pattern: str = "") -> bool:
        """Checks whether in configuration mode. Returns a boolean."""
        # In system-view the prompt is wrapped in [ ] instead of < >.
        return super().check_config_mode(check_string=check_string)

    def set_base_prompt(
        self,
        pri_prompt_terminator: str = ">",
        alt_prompt_terminator: str = "]",
        delay_factor: float = 1.0,
        pattern: Optional[str] = None,
    ) -> str:
        """
        Sets self.base_prompt

        Used as delimiter for stripping of trailing prompt in output.

        Should be set to something that is general and applies in multiple contexts.
        For Huawei this will be the router prompt with < > or [ ] stripped off.

        This will be set on logging in, but not when entering system-view
        """
        prompt = super().set_base_prompt(
            pri_prompt_terminator=pri_prompt_terminator,
            alt_prompt_terminator=alt_prompt_terminator,
            delay_factor=delay_factor,
            pattern=pattern,
        )

        # Strip off any leading HRP_. characters for USGv5 HA
        prompt = re.sub(r"^HRP_.", "", prompt, flags=re.M)

        # Strip off leading terminator (the opening "<" or "[")
        prompt = prompt[1:]
        prompt = prompt.strip()
        self.base_prompt = prompt
        log.debug(f"prompt: {self.base_prompt}")
        return self.base_prompt

    def save_config(
        self, cmd: str = "save", confirm: bool = True, confirm_response: str = "y"
    ) -> str:
        """Save Config for HuaweiSSH"""
        # Huawei asks for confirmation after "save"; answer "y" by default.
        return super().save_config(
            cmd=cmd, confirm=confirm, confirm_response=confirm_response
        )

    def cleanup(self, command: str = "quit") -> None:
        """Gracefully close the session with "quit"."""
        return super().cleanup(command=command)
class HuaweiSSH(HuaweiBase):
    """Huawei SSH driver."""

    def special_login_handler(self, delay_factor: float = 1.0) -> None:
        """Handle Huawei's optional password-change prompt during login."""
        # Huawei prompts for password change before displaying the initial base prompt.
        # Search for that password change prompt or for base prompt.
        password_change_prompt = r"(Change now|Please choose)"
        prompt_or_password_change = r"(?:Change now|Please choose|[>\]])"
        data = self.read_until_pattern(pattern=prompt_or_password_change)
        if re.search(password_change_prompt, data):
            # Decline the password change and wait for the real prompt.
            self.write_channel("N" + self.RETURN)
            self.read_until_pattern(pattern=r"[>\]]")
class HuaweiTelnet(HuaweiBase):
    """Huawei Telnet driver."""

    def telnet_login(
        self,
        pri_prompt_terminator: str = r"]\s*$",
        alt_prompt_terminator: str = r">\s*$",
        username_pattern: str = r"(?:user:|username|login|user name)",
        pwd_pattern: str = r"assword",
        delay_factor: float = 1.0,
        max_loops: int = 20,
    ) -> str:
        """Telnet login for Huawei Devices"""
        delay_factor = self.select_delay_factor(delay_factor)
        password_change_prompt = r"(Change now|Please choose 'YES' or 'NO').+"
        # Success prompt (] or >) OR the password-change question.
        combined_pattern = r"({}|{}|{})".format(
            pri_prompt_terminator, alt_prompt_terminator, password_change_prompt
        )

        output = ""
        return_msg = ""
        i = 1
        while i <= max_loops:
            try:
                # Search for username pattern / send username
                output = self.read_until_pattern(
                    pattern=username_pattern, re_flags=re.I
                )
                return_msg += output
                self.write_channel(self.username + self.TELNET_RETURN)

                # Search for password pattern / send password
                output = self.read_until_pattern(pattern=pwd_pattern, re_flags=re.I)
                return_msg += output
                assert self.password is not None
                self.write_channel(self.password + self.TELNET_RETURN)

                # Waiting for combined output
                output = self.read_until_pattern(pattern=combined_pattern)
                return_msg += output

                # Search for password change prompt, send "N" to decline it
                if re.search(password_change_prompt, output):
                    self.write_channel("N" + self.TELNET_RETURN)
                    output = self.read_until_pattern(pattern=combined_pattern)
                    return_msg += output

                # Check if proper data received (a real device prompt)
                if re.search(pri_prompt_terminator, output, flags=re.M) or re.search(
                    alt_prompt_terminator, output, flags=re.M
                ):
                    return return_msg

                # No prompt yet: nudge the device and retry.
                self.write_channel(self.TELNET_RETURN)
                time.sleep(0.5 * delay_factor)
                i += 1
            except EOFError:
                # Remote side closed the connection mid-login.
                assert self.remote_conn is not None
                self.remote_conn.close()
                msg = f"Login failed: {self.host}"
                raise NetmikoAuthenticationException(msg)

        # Last try to see if we already logged in
        self.write_channel(self.TELNET_RETURN)
        time.sleep(0.5 * delay_factor)
        output = self.read_channel()
        return_msg += output
        if re.search(pri_prompt_terminator, output, flags=re.M) or re.search(
            alt_prompt_terminator, output, flags=re.M
        ):
            return return_msg

        assert self.remote_conn is not None
        self.remote_conn.close()
        msg = f"Login failed: {self.host}"
        raise NetmikoAuthenticationException(msg)
class HuaweiVrpv8SSH(HuaweiSSH):
    """Huawei VRPv8 SSH driver with candidate-configuration commit support."""

    def commit(
        self,
        comment: str = "",
        read_timeout: float = 120.0,
        delay_factor: Optional[float] = None,
    ) -> str:
        """
        Commit the candidate configuration.

        Commit the entered configuration. Raise an error and return the failure
        if the commit fails.

        default:
           command_string = commit
        comment:
           command_string = commit comment <comment>

        delay_factor: Deprecated in Netmiko 4.x. Will be eliminated in Netmiko 5.
        """
        if delay_factor is not None:
            warnings.warn(DELAY_FACTOR_DEPR_SIMPLE_MSG, DeprecationWarning)

        # Marker string the device prints when the commit is rejected.
        error_marker = "Failed to generate committed config"
        command_string = "commit"

        if comment:
            command_string += f' comment "{comment}"'

        # Commit must be issued from config mode; collect all output so the
        # caller (and the error path) can see the device's full response.
        output = self.config_mode()
        output += self._send_command_str(
            command_string,
            strip_prompt=False,
            strip_command=False,
            read_timeout=read_timeout,
            expect_string=r"]",
        )
        output += self.exit_config_mode()

        if error_marker in output:
            raise ValueError(f"Commit failed with following errors:\n\n{output}")
        return output

    def save_config(self, *args: Any, **kwargs: Any) -> str:
        """Not Implemented"""
        # VRPv8 uses commit-based configuration; "save" does not apply.
        raise NotImplementedError
| 35.427386 | 88 | 0.613024 |
397f999a7513bf4407dd5181b9b32797dbf26b0d | 2,754 | py | Python | test/functional/mempool_limit.py | RitoProject/Ritocoin | 6950104b40ca6bec36ec98ea2046ea2fdcf4e92a | [
"MIT"
] | 18 | 2018-11-30T19:07:06.000Z | 2021-05-17T11:06:12.000Z | test/functional/mempool_limit.py | RitoProject/Ravencoin | 6950104b40ca6bec36ec98ea2046ea2fdcf4e92a | [
"MIT"
] | 1 | 2018-12-08T19:41:43.000Z | 2018-12-08T19:41:43.000Z | test/functional/mempool_limit.py | RitoProject/Ravencoin | 6950104b40ca6bec36ec98ea2046ea2fdcf4e92a | [
"MIT"
] | 17 | 2018-11-30T17:16:21.000Z | 2021-10-30T17:33:14.000Z | #!/usr/bin/env python3
# Copyright (c) 2017-2019 The Bitcoin Core developers
# Copyright (c) 2017 The Raven Core developers
# Copyright (c) 2018 The Rito Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool limiting together/eviction with the wallet."""
from test_framework.test_framework import RitoTestFramework
from test_framework.util import *
class MempoolLimitTest(RitoTestFramework):
    """Fill a size-capped mempool and verify a low-fee tx gets evicted."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
        # Small mempool cap so eviction triggers quickly; the wallet must not
        # spend unconfirmed change or it could respend the evicted tx's inputs.
        self.extra_args = [["-maxmempool=9", "-spendzeroconfchange=0"]]
        self.thirtyTransactions = 9  # tx are created in groups of 30. Value here will be multiplied by thirty for the number of tx.

    def run_test(self):
        txouts = gen_return_txouts()
        relayfee = self.nodes[0].getnetworkinfo()['relayfee']

        txids = []
        utxos = create_confirmed_utxos(relayfee, self.nodes[0], self.thirtyTransactions*30)

        # create a mempool tx that will be evicted (it pays the lowest fee)
        us0 = utxos.pop()
        inputs = [{ "txid" : us0["txid"], "vout" : us0["vout"]}]
        outputs = {self.nodes[0].getnewaddress() : 0.0001}
        tx = self.nodes[0].createrawtransaction(inputs, outputs)

        # Any fee calc method should work as longs as base_fee is set proportionally...
        #1
        txF = self.nodes[0].fundrawtransaction(tx)
        base_fee = satoshi_round(0.00075*100) # DEFAULT_FALLBACK_FEE (settxfee(0) is default and falls through to this)
        #2
        # self.nodes[0].settxfee(relayfee) # specifically fund this tx with low fee (this is too low and will be bumped to MINFEE)
        # txF = self.nodes[0].fundrawtransaction(tx)
        # base_fee = satoshi_round(0.0005*100) # DEFAULT_TRANSACTION_MINFEE
        # self.nodes[0].settxfee(0) # return to automatic fee selection
        #3
        # txF = self.nodes[0].fundrawtransaction(tx, {"feeRate": relayfee})
        # relayfee = self.nodes[0].getnetworkinfo()['relayfee']
        # base_fee = relayfee*100

        txFS = self.nodes[0].signrawtransaction(txF['hex'])
        txid = self.nodes[0].sendrawtransaction(txFS['hex'])

        # Flood the mempool with big transactions at increasing fee rates so
        # the low-fee tx above sorts last and gets evicted once the cap hits.
        for i in range (self.thirtyTransactions):
            txids.append([])
            txids[i] = create_lots_of_big_transactions(self.nodes[0], txouts, utxos[30*i:30*i+30], 30, (i+1)*base_fee)

        # by now, the tx should be evicted, check confirmation state
        assert(txid not in self.nodes[0].getrawmempool())
        txdata = self.nodes[0].gettransaction(txid)
        assert(txdata['confirmations'] == 0)  # confirmation should still be 0
if __name__ == '__main__':
    # Entry point when run directly by the functional test runner.
    MempoolLimitTest().main()
| 43.714286 | 132 | 0.669572 |
cd3239dd479f60b0123273982321f2edbc4a3720 | 1,059 | py | Python | nf_common_source/code/services/input_output_service/sql_server/sql_server_metadata_recogniser.py | boro-alpha/nf_common | 66d6844d9ae9a86a3e5b461f92e1ba0ec15e85ef | [
"MIT"
] | null | null | null | nf_common_source/code/services/input_output_service/sql_server/sql_server_metadata_recogniser.py | boro-alpha/nf_common | 66d6844d9ae9a86a3e5b461f92e1ba0ec15e85ef | [
"MIT"
] | null | null | null | nf_common_source/code/services/input_output_service/sql_server/sql_server_metadata_recogniser.py | boro-alpha/nf_common | 66d6844d9ae9a86a3e5b461f92e1ba0ec15e85ef | [
"MIT"
] | null | null | null | import re
# Matches "TABLE <name> (<columns> CONSTRAINT <constraint>)" (callers compile
# with re.DOTALL, so the groups may span lines); group 1 = table name,
# group 2 = the column definitions.
table_pattern = \
    r'TABLE (.+) \((.+) CONSTRAINT (.+)\)'

# Matches one "[<column name>] [<type>]" pair from a SQL Server schema
# script. The brackets must be escaped: the previous pattern
# r'[(.+)] [(.+)]' used character classes, so it never matched bracketed
# identifiers and defined none of the capture groups that callers read
# with .group(1)/.group(2). Non-greedy bodies keep each match to one pair.
column_pattern = \
    r'\[(.+?)\] \[(.+?)\]'
def find_all_tables_from_schema_via_regex(
        schema_file_path: str) -> dict:
    """Extract table and column metadata from a SQL Server schema script.

    Reads the schema file, finds every table declaration matching
    ``table_pattern`` and, inside each declaration's column section, every
    ``[name] [type]`` pair matching ``column_pattern``.

    Returns a mapping of table name to a list of (column name, column type)
    tuples. The previous implementation computed these values but discarded
    them and always returned None; returning the collected data is backward
    compatible because existing callers ignored the result.
    """
    schema = \
        __read_schema(
            schema_file_path)

    table_compiled_pattern = re.compile(table_pattern, re.DOTALL)
    col_compiled_pattern = re.compile(column_pattern, re.DOTALL)

    tables = {}
    for table_pattern_match in re.finditer(table_compiled_pattern, schema):
        table_name = table_pattern_match.group(1)
        col_data = table_pattern_match.group(2)
        tables[table_name] = [
            (col_pattern_match.group(1), col_pattern_match.group(2))
            for col_pattern_match in re.finditer(col_compiled_pattern, col_data)
        ]
    return tables
def __read_schema(
        schema_file_path: str) -> str:
    """Return the full text of the schema file at *schema_file_path*.

    Uses a context manager so the handle is closed promptly; the previous
    implementation opened the file and never closed it.
    """
    with open(schema_file_path, "r") as schema_file:
        return schema_file.read()
dcc749a995d8d81e9c8117b05c83fb3458dde7a8 | 1,812 | py | Python | pybtex/style/__init__.py | chfritz/pybtex | 1649bb985602ceb800c4fcb7d9a9ed0191f34504 | [
"MIT"
] | 1 | 2020-06-11T20:23:45.000Z | 2020-06-11T20:23:45.000Z | pybtex/style/__init__.py | chfritz/pybtex | 1649bb985602ceb800c4fcb7d9a9ed0191f34504 | [
"MIT"
] | null | null | null | pybtex/style/__init__.py | chfritz/pybtex | 1649bb985602ceb800c4fcb7d9a9ed0191f34504 | [
"MIT"
] | null | null | null | # Copyright (c) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Andrey Golovizin
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
class FormattedEntry(object):
    """A single formatted bibliography entry.

    Attributes:
        key: used for sorting entries.
        label: appears next to the entry in the resulting bibliography.
        text: the formatted entry body (usually RichText).
    """
    def __init__(self, key, text, label=None):
        self.label = label
        self.text = text
        self.key = key
class FormattedBibliography(object):
    """An ordered collection of formatted entries plus the style that produced them."""
    def __init__(self, entries, style):
        self.style = style
        # Materialize the iterable so the bibliography can be iterated
        # more than once.
        self.entries = list(entries)

    def __iter__(self):
        return iter(self.entries)

    def get_longest_label(self):
        # The style's label formatter knows how labels are rendered, so it
        # decides which one is longest.
        return self.style.label_style.get_longest_label(self.entries)
| 39.391304 | 74 | 0.730684 |
04eacfc94159cd312cd4976e4fb22ca2752cd35e | 503 | py | Python | inhandpy/setup.py | psodhi/tactile-in-hand | 0bde0889e331bca0159a0254e38626da21d620c1 | [
"BSD-3-Clause"
] | 7 | 2021-11-16T21:55:01.000Z | 2022-02-09T10:26:04.000Z | inhandpy/setup.py | psodhi/tactile-in-hand | 0bde0889e331bca0159a0254e38626da21d620c1 | [
"BSD-3-Clause"
] | null | null | null | inhandpy/setup.py | psodhi/tactile-in-hand | 0bde0889e331bca0159a0254e38626da21d620c1 | [
"BSD-3-Clause"
] | 2 | 2022-02-09T02:07:18.000Z | 2022-03-09T16:24:32.000Z | from setuptools import setup, find_packages
# Runtime dependencies, one requirement per line in the pinned file.
# NOTE(review): the file handle opened here is never explicitly closed;
# harmless for a one-shot setup script, but worth tidying.
install_requires = [line.rstrip() for line in open("requirements/requirements.txt", "r")]

setup(
    name="inhandpy",
    version="0.0.1",
    description="PatchGraph: In-hand tactile tracking with learned surface normals",
    url="",
    author="Paloma Sodhi",
    author_email="psodhi@cs.cmu.edu",
    license="LICENSE",
    # src-layout: package code lives under src/.
    packages=find_packages("src"),
    package_dir={"": "src"},
    install_requires=install_requires,
    python_requires=">=3.6",
)
d51462f8003439e709bcd154bf4d62cb34ac621d | 1,186 | py | Python | run_all_tests.py | RAbraham/logica | 9bb24de465a5363b2a045fd985198a6beb4f8ec2 | [
"Apache-2.0"
] | 1,434 | 2020-10-10T19:28:17.000Z | 2022-03-31T08:23:05.000Z | run_all_tests.py | RAbraham/logica | 9bb24de465a5363b2a045fd985198a6beb4f8ec2 | [
"Apache-2.0"
] | 55 | 2020-10-22T12:33:50.000Z | 2022-03-14T18:57:15.000Z | run_all_tests.py | RAbraham/logica | 9bb24de465a5363b2a045fd985198a6beb4f8ec2 | [
"Apache-2.0"
] | 79 | 2021-04-13T10:43:34.000Z | 2022-03-28T21:39:11.000Z | #!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Running all integration tests."""
import sys
from common import logica_test
from integration_tests import run_tests as integration_tests
from integration_tests.import_tests import run_tests as import_tests
# Translate command-line flags into TestManager configuration, then run
# both test suites.
if 'golden_run' in sys.argv:
    logica_test.TestManager.SetGoldenRun(True)

if 'announce_tests' in sys.argv:
    logica_test.TestManager.SetAnnounceTests(True)

# A flag of the form test_only=name1,name2 restricts the run to those tests.
for argument in sys.argv:
    if not argument.startswith('test_only='):
        continue
    selected_tests = argument.split('=')[1].split(',')
    logica_test.TestManager.SetRunOnlyTests(selected_tests)

logica_test.PrintHeader()
integration_tests.RunAll()
import_tests.RunAll()
| 29.65 | 74 | 0.771501 |
de1e22da84df9ac0ab287e30a697e7496fcb30a9 | 8,956 | py | Python | docs/tutorials/action_recognition/dive_deep_tsn_ucf101.py | Kentwhf/gluon-cv | bab5c029793d4da20b5c14846a0db58cfbe21d6d | [
"Apache-2.0"
] | null | null | null | docs/tutorials/action_recognition/dive_deep_tsn_ucf101.py | Kentwhf/gluon-cv | bab5c029793d4da20b5c14846a0db58cfbe21d6d | [
"Apache-2.0"
] | null | null | null | docs/tutorials/action_recognition/dive_deep_tsn_ucf101.py | Kentwhf/gluon-cv | bab5c029793d4da20b5c14846a0db58cfbe21d6d | [
"Apache-2.0"
] | null | null | null | """2. Dive Deep into Training TSN mdoels on UCF101
==================================================
This is a video action recognition tutorial using Gluon CV toolkit, a step-by-step example.
The readers should have basic knowledge of deep learning and should be familiar with Gluon API.
New users may first go through `A 60-minute Gluon Crash Course <http://gluon-crash-course.mxnet.io/>`_.
You can `Start Training Now`_ or `Dive into Deep`_.
Start Training Now
~~~~~~~~~~~~~~~~~~
.. note::
Feel free to skip the tutorial because the training script is self-complete and ready to launch.
:download:`Download Full Python Script: train_recognizer.py<../../../scripts/action-recognition/train_recognizer.py>`
Example training command::
# Finetune a pretrained VGG16 model without using temporal segment network.
python train_recognizer.py --model vgg16_ucf101 --num-classes 101 --num-gpus 8 --lr-mode step --lr 0.001 --lr-decay 0.1 --lr-decay-epoch 30,60,80 --num-epochs 80
# Finetune a pretrained VGG16 model using temporal segment network.
python train_recognizer.py --model vgg16_ucf101 --num-classes 101 --num-gpus 8 --num-segments 3 --lr-mode step --lr 0.001 --lr-decay 0.1 --lr-decay-epoch 30,60,80 --num-epochs 80
For more training command options, please run ``python train_recognizer.py -h``
Please checkout the `model_zoo <../model_zoo/index.html#action_recognition>`_ for training commands of reproducing the pretrained model.
Network Structure
-----------------
First, let's import the necessary libraries into python.
"""
from __future__ import division
import argparse, time, logging, os, sys, math
import numpy as np
import mxnet as mx
import gluoncv as gcv
from mxnet import gluon, nd, init, context
from mxnet import autograd as ag
from mxnet.gluon import nn
from mxnet.gluon.data.vision import transforms
from gluoncv.data.transforms import video
from gluoncv.data import UCF101
from gluoncv.model_zoo import get_model
from gluoncv.utils import makedirs, LRSequential, LRScheduler, split_and_load, TrainingHistory
################################################################
#
# Video action recognition is a classification problem.
# Here we pick a simple yet well-performing structure, ``vgg16_ucf101``, for the
# tutorial. In addition, we use the the idea of temporal segments (TSN) [Wang16]_
# to wrap the backbone VGG16 network for adaptation to video domain.
#
# `TSN <https://arxiv.org/abs/1608.00859>`_ is a widely adopted video
# classification method. It is proposed to incorporate temporal information from an entire video.
# The idea is straightforward: we can evenly divide the video into several segments,
# process each segment individually, obtain segmental consensus from each segment, and perform
# final prediction. TSN is more like a general algorithm, rather than a specific network architecture.
# It can work with both 2D and 3D neural networks.
# number of GPUs to use
num_gpus = 1
ctx = [mx.cpu(i) for i in range(num_gpus)]
# Get the model vgg16_ucf101 with temporal segment network, with 101 output classes, without pre-trained weights
net = get_model(name='vgg16_ucf101', nclass=101, num_segments=3)
net.collect_params().reset_ctx(ctx)
# print(net)
################################################################
# Data Augmentation and Data Loader
# ---------------------------------
#
# Data augmentation for video is different from image. For example, if you
# want to randomly crop a video sequence, you need to make sure all the video
# frames in this sequence undergo the same cropping process. We provide a
# new set of transformation functions, working with multiple images.
# Please checkout the `video.py <../../../gluoncv/data/transforms/video.py>`_ for more details.
# Most video data augmentation strategies used here are introduced in [Wang15]_.
transform_train = transforms.Compose([
# Fix the input video frames size as 256×340 and randomly sample the cropping width and height from
# {256,224,192,168}. After that, resize the cropped regions to 224 × 224.
video.VideoMultiScaleCrop(size=(224, 224), scale_ratios=[1.0, 0.875, 0.75, 0.66]),
# Randomly flip the video frames horizontally
video.VideoRandomHorizontalFlip(),
# Transpose the video frames from height*width*num_channels to num_channels*height*width
# and map values from [0, 255] to [0,1]
video.VideoToTensor(),
# Normalize the video frames with mean and standard deviation calculated across all images
video.VideoNormalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
##################################################################
# With the transform functions, we can define data loaders for our
# training datasets.
# Batch Size for Each GPU
per_device_batch_size = 16
# Number of data loader workers
num_workers = 8
# Calculate effective total batch size
batch_size = per_device_batch_size * num_gpus
# Set train=True for training the model. Here we set num_segments to 3 to enable TSN training.
train_dataset = UCF101(train=True, num_segments=3, transform=transform_train)
print('Load %d training samples.' % len(train_dataset))
train_data = gluon.data.DataLoader(train_dataset, batch_size=batch_size,
shuffle=True)
################################################################
# Optimizer, Loss and Metric
# --------------------------
# Learning rate decay factor
lr_decay = 0.1
# Epochs where learning rate decays
lr_decay_epoch = [30, 60, np.inf]
# Stochastic gradient descent
optimizer = 'sgd'
# Set parameters
optimizer_params = {'learning_rate': 0.001, 'wd': 0.0001, 'momentum': 0.9}
# Define our trainer for net
trainer = gluon.Trainer(net.collect_params(), optimizer, optimizer_params)
################################################################
# In order to optimize our model, we need a loss function.
# For classification tasks, we usually use softmax cross entropy as the
# loss function.
loss_fn = gluon.loss.SoftmaxCrossEntropyLoss()
################################################################
# For simplicity, we use accuracy as the metric to monitor our training
# process. Besides, we record metric values, and will print them at the
# end of training.
train_metric = mx.metric.Accuracy()
train_history = TrainingHistory(['training-acc'])
################################################################
# Training
# --------
#
# After all the preparations, we can finally start training!
# Following is the script.
#
# .. note::
# In order to finish the tutorial quickly, we only train for 3 epochs, and 100 iterations per epoch.
# In your experiments, we recommend setting ``epochs=80`` for the full UCF101 dataset.
epochs = 10
lr_decay_count = 0  # index into lr_decay_epoch; advanced each time the LR is decayed

for epoch in range(epochs):
    tic = time.time()
    train_metric.reset()
    train_loss = 0

    # Learning rate decay. lr_decay_epoch ends with np.inf (see its
    # definition above), so lr_decay_count can never index past the last
    # scheduled decay epoch.
    if epoch == lr_decay_epoch[lr_decay_count]:
        trainer.set_learning_rate(trainer.learning_rate*lr_decay)
        lr_decay_count += 1

    # Loop through each batch of training data
    for i, batch in enumerate(train_data):
        # Extract data and label, splitting the batch across the configured
        # compute contexts (here a single CPU context).
        data = split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
        label = split_and_load(batch[1], ctx_list=ctx, batch_axis=0)

        # Record the forward pass so autograd can compute gradients.
        with ag.record():
            output = []
            for _, X in enumerate(data):
                # Collapse the leading dims into a single batch axis before
                # the 2D network forward pass -- presumably folding the TSN
                # segment dimension into the batch; confirm against the
                # UCF101 loader's output shape.
                X = X.reshape((-1,) + X.shape[2:])
                pred = net(X)
                output.append(pred)
            loss = [loss_fn(yhat, y) for yhat, y in zip(output, label)]

        # Backpropagation
        for l in loss:
            l.backward()

        # Optimize: one gradient step, normalized by the full batch size.
        trainer.step(batch_size)

        # Update running loss and accuracy metrics.
        train_loss += sum([l.mean().asscalar() for l in loss])
        train_metric.update(label, output)
        if i == 100:
            # Cap each epoch at 101 batches so the tutorial finishes quickly
            # (see the note preceding this loop in the original tutorial text).
            break

    name, acc = train_metric.get()
    # Update history and print metrics.
    # NOTE(review): the final print reuses loop variable `i`, which assumes
    # train_data yields at least one batch -- confirm for empty datasets.
    train_history.update([acc])
    print('[Epoch %d] train=%f loss=%f time: %f' %
        (epoch, acc, train_loss / (i+1), time.time()-tic))

# We can plot the metric scores with:
train_history.plot()
##############################################################################
# You can `Start Training Now`_.
#
# If you would like to use a bigger 3D model (e.g., I3D) on a larger dataset (e.g., Kinetics400),
# feel free to read the next `tutorial on Kinetics400 <demo_i3d_kinetics400.html>`__.
#
# References
# ----------
#
# .. [Wang15] Limin Wang, Yuanjun Xiong, Zhe Wang, and Yu Qiao. \
# "Towards Good Practices for Very Deep Two-Stream ConvNets." \
# arXiv preprint arXiv:1507.02159 (2015).
#
# .. [Wang16] Limin Wang, Yuanjun Xiong, Zhe Wang, Yu Qiao, Dahua Lin, Xiaoou Tang and Luc Van Gool. \
# "Temporal Segment Networks: Towards Good Practices for Deep Action Recognition." \
# In European Conference on Computer Vision (ECCV). 2016.
| 39.10917 | 186 | 0.663801 |
40bde87c5a8c55517f3d008efc863a25c71c78c6 | 2,526 | py | Python | venv/lib/python3.7/site-packages/logging2/handlers/streaming.py | QinshanSun/timezoneSpider | 4ce0e5f84cf34d2b4fe7be3ec068209ec1c0333f | [
"MIT"
] | null | null | null | venv/lib/python3.7/site-packages/logging2/handlers/streaming.py | QinshanSun/timezoneSpider | 4ce0e5f84cf34d2b4fe7be3ec068209ec1c0333f | [
"MIT"
] | null | null | null | venv/lib/python3.7/site-packages/logging2/handlers/streaming.py | QinshanSun/timezoneSpider | 4ce0e5f84cf34d2b4fe7be3ec068209ec1c0333f | [
"MIT"
] | null | null | null | from io import TextIOWrapper
from sys import stderr, stdout
from typing import Optional
from logging2.handlers.abc import Handler
from logging2.levels import LogLevel
class StreamingHandler(Handler):
    """A generic ``Handler`` that emits log entries to an arbitrary text
    stream, e.g. STDOUT or STDERR.
    """

    def __init__(
        self,
        stream: TextIOWrapper,
        name: Optional[str]=None,
        level: Optional[LogLevel]=None
    ):
        """Initializes a new ``StreamingHandler``

        :param stream: the output stream object
        :param name: the name of the handler
        :param level: the minimum level of verbosity/priority of the messages this will log
        """
        self.stream: TextIOWrapper = stream
        super().__init__(name=name, level=level)

    def write(self, message: str, level: LogLevel) -> None:
        """Writes a fully formatted log entry to the configured stream.

        Entries whose priority is below the handler's minimum level are
        silently discarded.

        :param message: the entire message to be written, fully formatted
        :param level: the priority level of the message
        """
        should_emit = level >= self.min_level
        if should_emit:
            self.stream.write(message)

    def _create_name(self) -> str:
        """Derives a default handler name; called from ``__init__`` when no
        explicit name is supplied.

        :returns: the class name of the wrapped stream
        """
        return type(self.stream).__name__
class StdOutHandler(StreamingHandler):
    """A ``StreamingHandler`` specialization bound to ``sys.stdout`` and
    named ``stdout`` by default.
    """

    def __init__(
        self,
        name: Optional[str]='stdout',
        level: Optional[LogLevel]=None
    ):
        """Initializes a new ``StdOutHandler``

        :param name: the name of the handler
        :param level: the minimum level of verbosity/priority of the messages this will log
        """
        super().__init__(stream=stdout, name=name, level=level)
class StdErrHandler(StreamingHandler):
    """A ``StreamingHandler`` specialization bound to ``sys.stderr`` and
    named ``stderr`` by default.
    """

    def __init__(
        self,
        name: Optional[str]='stderr',
        level: Optional[LogLevel]=None
    ):
        """Initializes a new ``StdErrHandler``

        :param name: the name of the handler
        :param level: the minimum level of verbosity/priority of the messages this will log
        """
        super().__init__(stream=stderr, name=name, level=level)
| 31.974684 | 104 | 0.634996 |
f5e859ada6fc0f55528d53245baaf324106b2e5f | 5,127 | py | Python | doc/source/conf.py | awesome-archive/HarvestText | 0d537f190a1f4b00b1ea06ef2d51e9efac4249a0 | [
"MIT"
] | 14 | 2019-10-29T03:24:18.000Z | 2022-03-21T06:19:13.000Z | doc/source/conf.py | BoChen-Daniel/HarvestText | bc32dfed5b6d5123a9f4b065fb37089d21c979da | [
"MIT"
] | null | null | null | doc/source/conf.py | BoChen-Daniel/HarvestText | bc32dfed5b6d5123a9f4b065fb37089d21c979da | [
"MIT"
] | 6 | 2019-10-04T10:25:58.000Z | 2020-10-23T09:55:06.000Z | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'harvesttext'
copyright = '2019, blmoistawinde'
author = 'blmoistawinde'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = 'V0.5'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'zh_CN'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'harvesttextdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'harvesttext.tex', 'harvesttext Documentation',
'blmoistawinde', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'harvesttext', 'harvesttext Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'harvesttext', 'harvesttext Documentation',
author, 'harvesttext', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True | 30.517857 | 79 | 0.646187 |
009bd2b1d52343f97a348e3772bbed98b43048f9 | 114 | py | Python | test_contest/solutions/facts.py | tbuzzelli/Veris | b2e9bd5f944a60365de8c18f17e041fa65f9e74a | [
"Apache-2.0"
] | 7 | 2018-09-26T17:17:01.000Z | 2020-12-20T17:23:33.000Z | test_contest/solutions/facts.py | tbuzzelli/Veris | b2e9bd5f944a60365de8c18f17e041fa65f9e74a | [
"Apache-2.0"
] | 4 | 2018-09-26T17:49:24.000Z | 2020-12-20T17:15:37.000Z | test_contest/solutions/facts.py | tbuzzelli/Veris | b2e9bd5f944a60365de8c18f17e041fa65f9e74a | [
"Apache-2.0"
] | 1 | 2021-12-03T17:49:50.000Z | 2021-12-03T17:49:50.000Z |
# Read the number of test cases, then echo the negation of each answer.
case_count = int(input())
for _ in range(case_count):
    answer = int(input())
    print(-answer)
| 12.666667 | 26 | 0.517544 |
346a15ac44941142a84961bab0bd3c314190955b | 6,035 | py | Python | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2021_06_01/operations/_private_link_resources_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 1 | 2022-03-09T08:59:13.000Z | 2022-03-09T08:59:13.000Z | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2021_06_01/operations/_private_link_resources_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | null | null | null | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2021_06_01/operations/_private_link_resources_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 1 | 2022-03-04T06:21:56.000Z | 2022-03-04T06:21:56.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_by_storage_account_request(
    resource_group_name: str,
    account_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request that lists a storage account's private link
    resources.

    NOTE: this module is generated by AutoRest; manual edits may be lost on
    regeneration (see the file header).

    :param resource_group_name: name of the resource group (serializer enforces
        1-90 chars and a restricted character set).
    :param account_name: name of the storage account (3-24 chars).
    :param subscription_id: the Azure subscription id.
    :keyword template_url: optional override for the URL template.
    :keyword params: extra query parameters merged with ``api-version``.
    :keyword headers: extra headers merged with ``Accept``.
    :return: an ``HttpRequest`` ready to be run through the client pipeline.
    """
    # API version pinned to the version this generated client targets.
    api_version = "2021-06-01"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/privateLinkResources')
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        "accountName": _SERIALIZER.url("account_name", account_name, 'str', max_length=24, min_length=3),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    # Remaining kwargs are forwarded verbatim to the HttpRequest constructor.
    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
class PrivateLinkResourcesOperations(object):
    """PrivateLinkResourcesOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.storage.v2021_06_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    @distributed_trace
    def list_by_storage_account(
        self,
        resource_group_name: str,
        account_name: str,
        **kwargs: Any
    ) -> "_models.PrivateLinkResourceListResult":
        """Gets the private link resources that need to be created for a storage account.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only.
        :type account_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PrivateLinkResourceListResult, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2021_06_01.models.PrivateLinkResourceListResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Optional caller-supplied response transform (azure-core convention).
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateLinkResourceListResult"]
        # Map auth/not-found/conflict status codes to specific exception types;
        # callers may extend the mapping via the error_map kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        # Build the request from the generated helper, then resolve the URL
        # against the client's base endpoint.
        request = build_list_by_storage_account_request(
            resource_group_name=resource_group_name,
            account_name=account_name,
            subscription_id=self._config.subscription_id,
            template_url=self.list_by_storage_account.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Only 200 is a success for this operation; anything else raises.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('PrivateLinkResourceListResult', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    # URL template consumed by the request builder above.
    list_by_storage_account.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/privateLinkResources'}  # type: ignore
1ba11c52a4b6c44296bf489c8c3a020349fa6de4 | 5,184 | py | Python | lib/python3.8/site-packages/ansible_collections/community/network/tests/unit/plugins/modules/network/slxos/test_slxos_interface.py | cjsteel/python3-venv-ansible-2.10.5 | c95395c4cae844dc66fddde9b4343966f4b2ecd5 | [
"Apache-1.1"
] | null | null | null | lib/python3.8/site-packages/ansible_collections/community/network/tests/unit/plugins/modules/network/slxos/test_slxos_interface.py | cjsteel/python3-venv-ansible-2.10.5 | c95395c4cae844dc66fddde9b4343966f4b2ecd5 | [
"Apache-1.1"
] | null | null | null | lib/python3.8/site-packages/ansible_collections/community/network/tests/unit/plugins/modules/network/slxos/test_slxos_interface.py | cjsteel/python3-venv-ansible-2.10.5 | c95395c4cae844dc66fddde9b4343966f4b2ecd5 | [
"Apache-1.1"
] | null | null | null | #
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from ansible_collections.community.network.tests.unit.compat.mock import patch
from ansible_collections.community.network.tests.unit.plugins.modules.utils import set_module_args
from ansible_collections.community.network.plugins.modules.network.slxos import slxos_interface
from .slxos_module import TestSlxosModule, load_fixture
class TestSlxosInterfaceModule(TestSlxosModule):
    """Unit tests for the ``slxos_interface`` Ansible module.

    Each test primes the module arguments, executes the module against a
    canned configuration fixture (no real device interaction -- the device
    I/O functions are patched out in ``setUp``), and checks the generated
    command list or the failure message.
    """

    module = slxos_interface

    def setUp(self):
        super(TestSlxosInterfaceModule, self).setUp()
        # Patch the module's device-facing functions so no connection is made.
        self._patch_get_config = patch(
            'ansible_collections.community.network.plugins.modules.network.slxos.slxos_interface.get_config'
        )
        self._patch_load_config = patch(
            'ansible_collections.community.network.plugins.modules.network.slxos.slxos_interface.load_config'
        )
        self._patch_exec_command = patch(
            'ansible_collections.community.network.plugins.modules.network.slxos.slxos_interface.exec_command'
        )
        self._get_config = self._patch_get_config.start()
        self._load_config = self._patch_load_config.start()
        self._exec_command = self._patch_exec_command.start()

    def tearDown(self):
        super(TestSlxosInterfaceModule, self).tearDown()
        self._patch_get_config.stop()
        self._patch_load_config.stop()
        self._patch_exec_command.stop()

    def load_fixtures(self, commands=None):
        # Serve a static running-config; applying changes is a no-op.
        config_file = 'slxos_config_config.cfg'
        self._get_config.return_value = load_fixture(config_file)
        self._load_config.return_value = None

    def _execute(self, changed=False, failed=False, **module_args):
        """Run the module with *module_args* and return the result dict.

        Factors out the ``set_module_args``/``execute_module`` boilerplate
        that was previously repeated in every test.

        :param changed: expected value of the result's ``changed`` flag.
        :param failed: whether the module run is expected to fail.
        """
        set_module_args(module_args)
        return self.execute_module(changed=changed, failed=failed)

    def test_slxos_interface_description(self, *args, **kwargs):
        result = self._execute(
            changed=True, name='Ethernet 0/2', description='show version')
        self.assertEqual(
            result,
            {
                'commands': [
                    'interface Ethernet 0/2',
                    'description show version'
                ],
                'changed': True
            }
        )

    def test_slxos_interface_speed(self, *args, **kwargs):
        result = self._execute(changed=True, name='Ethernet 0/2', speed=1000)
        self.assertEqual(
            result,
            {
                'commands': [
                    'interface Ethernet 0/2',
                    'speed 1000'
                ],
                'changed': True
            }
        )

    def test_slxos_interface_mtu(self, *args, **kwargs):
        result = self._execute(changed=True, name='Ethernet 0/2', mtu=1548)
        self.assertEqual(
            result,
            {
                'commands': [
                    'interface Ethernet 0/2',
                    'mtu 1548'
                ],
                'changed': True
            }
        )

    def test_slxos_interface_mtu_out_of_range(self, *args, **kwargs):
        # 15000 is above the 9216 upper bound enforced by the module.
        result = self._execute(failed=True, name='Ethernet 0/2', mtu=15000)
        self.assertEqual(
            result,
            {
                'msg': 'mtu must be between 1548 and 9216',
                'failed': True
            }
        )

    def test_slxos_interface_enabled(self, *args, **kwargs):
        result = self._execute(changed=True, name='Ethernet 0/1', enabled=True)
        self.assertEqual(
            result,
            {
                'commands': [
                    'interface Ethernet 0/1',
                    'no shutdown'
                ],
                'changed': True
            }
        )

    def test_slxos_interface_invalid_argument(self, *args, **kwargs):
        # Unknown parameters must be rejected by AnsibleModule argument parsing.
        result = self._execute(failed=True, name='Ethernet 0/1',
                               shawshank='Redemption')
        self.assertEqual(result['failed'], True)
        self.assertTrue(re.match(
            r'Unsupported parameters for \((basic.py|basic.pyc)\) module: '
            'shawshank Supported parameters include: aggregate, '
            'delay, description, enabled, mtu, name, neighbors, '
            'rx_rate, speed, state, tx_rate',
            result['msg']
        ))
| 33.230769 | 110 | 0.595679 |
fe240d5869bdc012e5080a75df9b239069a5fbf3 | 1,797 | py | Python | demo/memory_tree/xml_amazoncat_13K_script.py | Ark-kun/vowpal_wabbit | d811c93fa6adbb513729698202984e3662a3d8df | [
"BSD-3-Clause"
] | 4,332 | 2015-01-01T10:26:51.000Z | 2018-10-01T14:05:43.000Z | demo/memory_tree/xml_amazoncat_13K_script.py | chrinide/vowpal_wabbit | 40e1fef676ca6a461d71cf0631ab5c63d1af5d8a | [
"BSD-3-Clause"
] | 1,004 | 2015-01-01T12:00:54.000Z | 2018-09-30T22:13:42.000Z | demo/memory_tree/xml_amazoncat_13K_script.py | chrinide/vowpal_wabbit | 40e1fef676ca6a461d71cf0631ab5c63d1af5d8a | [
"BSD-3-Clause"
] | 1,182 | 2015-01-02T20:38:55.000Z | 2018-09-26T02:47:37.000Z | import os
import time
import numpy as np
# from IPython import embed
print("perform experiments on amazoncat 13K (multilabel)")
leaf_example_multiplier = 2
lr = 1
bits = 30
alpha = 0.1 # 0.3
passes = 4
learn_at_leaf = True
use_oas = True
# num_queries = 1 #does not really use
dream_at_update = 1
# hal_version = 1 #does not really use
loss = "squared"
dream_repeats = 3
# Precision_at_K = 5
num_examples = 1186239
max_num_labels = 13330
tree_node = int(
num_examples / (np.log(num_examples) / np.log(2) * leaf_example_multiplier)
)
train_data = "amazoncat_train.mat.mult_label.vw.txt"
test_data = "amazoncat_test.mat.mult_label.vw.txt"
if os.path.exists(train_data) is not True:
os.system("wget http://kalman.ml.cmu.edu/wen_datasets/{}".format(train_data))
if os.path.exists(test_data) is not True:
os.system("wget http://kalman.ml.cmu.edu/wen_datasets/{}".format(test_data))
saved_model = "{}.vw".format(train_data)
print("## Training...")
start = time.time()
# train_data = 'tmp_rcv1x.vw.txt'
command_line = f"../../build/vowpalwabbit/vw -d {train_data} --memory_tree {tree_node} {'--learn_at_leaf' if learn_at_leaf else ''} --dream_at_update {dream_at_update}\
--max_number_of_labels {max_num_labels} --dream_repeats {dream_repeats} {'--oas' if use_oas else ''} \
--leaf_example_multiplier {leaf_example_multiplier} --alpha {alpha} -l {lr} -b {bits} -c --passes {passes} --loss_function {loss} --holdout_off -f {saved_model}"
os.system(command_line)
train_time = time.time() - start
print("## Testing...")
start = time.time()
os.system(
"../../build/vowpalwabbit/vw {} --oas {} -i {}".format(
test_data, use_oas, saved_model
)
)
test_time = time.time() - start
print("## train time {}, and test time {}".format(train_time, test_time))
| 32.089286 | 171 | 0.703395 |
5f238a849f9b853516f555f676663299345c2f96 | 17,089 | py | Python | tests/testenv/__init__.py | konradxyz/dev_fileserver | 2c57520c447fc4bfda78668df575431be5c39276 | [
"Apache-2.0"
] | null | null | null | tests/testenv/__init__.py | konradxyz/dev_fileserver | 2c57520c447fc4bfda78668df575431be5c39276 | [
"Apache-2.0"
] | null | null | null | tests/testenv/__init__.py | konradxyz/dev_fileserver | 2c57520c447fc4bfda78668df575431be5c39276 | [
"Apache-2.0"
] | null | null | null | ########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import StringIO
import shutil
import logging
import os
import sys
import threading
import time
import traceback
import unittest
import json
import pika
import yaml
from os.path import dirname
from os import path
from cloudify.utils import setup_logger
from cloudify.logs import create_event_message_prefix
import mock_plugins
from testenv.constants import MANAGER_REST_PORT
from testenv.constants import RABBITMQ_VERBOSE_MESSAGES_ENABLED
from testenv.constants import RABBITMQ_POLLING_ENABLED
from testenv.constants import FILE_SERVER_RESOURCES_URI
from testenv.constants import FILE_SERVER_UPLOADED_BLUEPRINTS_FOLDER
from testenv.constants import FILE_SERVER_BLUEPRINTS_FOLDER
from testenv.processes.elastic import ElasticSearchProcess
from testenv.processes.manager_rest import ManagerRestProcess
from testenv.processes.riemann import RiemannProcess
from testenv.processes.celery import CeleryWorkerProcess
from testenv import utils
# Module-wide logger for the test environment; quiet the REST client logger.
logger = setup_logger('TESTENV')
setup_logger('cloudify.rest_client', logging.INFO)
# Shared singleton assigned by whoever bootstraps the suite; TestCase and the
# TestEnvironment static helpers below all dereference it.
testenv_instance = None
class TestCase(unittest.TestCase):
    """
    A test case for cloudify integration tests.

    setUp() wires the test into the module-level ``testenv_instance`` and
    starts the celery management worker; tearDown() stops workers and resets
    elasticsearch so consecutive tests stay isolated.
    """
    def setUp(self):
        # Per-test logger named after the running test method.
        self.logger = setup_logger(self._testMethodName,
                                   logging.INFO)
        self.client = utils.create_rest_client()
        utils.restore_provider_context()
        TestEnvironment.start_celery_management_worker()
        # Each test writes its events/logs to its own file under the shared
        # events-and-logs directory.
        self.test_logs_file = path.join(testenv_instance.events_and_logs_dir,
                                        '{0}.log'.format(self.id()))
        # Redirect the environment's log hook to this test's file.
        testenv_instance.handle_logs = \
            self._write_test_events_and_logs_to_file
    def tearDown(self):
        # Stop workers first, then wipe persisted state.
        TestEnvironment.stop_celery_management_worker()
        TestEnvironment.stop_all_celery_processes()
        TestEnvironment.reset_elasticsearch_data()
    def _write_test_events_and_logs_to_file(self, output, event):
        # Append mode so every event of the test accumulates in one file.
        with open(self.test_logs_file, 'a') as f:
            f.write('{0}\n'.format(output))
    def get_plugin_data(self,
                        plugin_name,
                        deployment_id):
        """
        Retrieve the plugin state for a certain deployment.
        :param deployment_id: the deployment id in question.
        :param plugin_name: the plugin in question.
        :return: plugin data relevant for the deployment.
        :rtype dict
        """
        return self._get_plugin_data(
            plugin_name=plugin_name,
            deployment_id=deployment_id
        )
    def clear_plugin_data(self, plugin_name):
        """
        Clears plugin state.
        :param plugin_name: the plugin in question.
        """
        return self._clear_plugin_data(
            plugin_name=plugin_name
        )
    def _get_plugin_data(self,
                         plugin_name,
                         deployment_id):
        # Plugin state is persisted as one JSON file per plugin, keyed by
        # deployment id inside the file.
        storage_file_path = os.path.join(
            testenv_instance.plugins_storage_dir,
            '{0}.json'.format(plugin_name)
        )
        if not os.path.exists(storage_file_path):
            return {}
        with open(storage_file_path, 'r') as f:
            data = json.load(f)
            if deployment_id not in data:
                data[deployment_id] = {}
            return data.get(deployment_id)
    def _clear_plugin_data(self,
                           plugin_name):
        # Removing the backing JSON file resets state for all deployments.
        storage_file_path = os.path.join(
            testenv_instance.plugins_storage_dir,
            '{0}.json'.format(plugin_name)
        )
        if os.path.exists(storage_file_path):
            os.remove(storage_file_path)
    @staticmethod
    def do_assertions(assertions_func, timeout=10, **kwargs):
        """Retry ``assertions_func`` until it passes or ``timeout`` elapses."""
        return utils.do_retries(assertions_func,
                                timeout,
                                AssertionError,
                                **kwargs)
    @property
    def riemann_workdir(self):
        # Convenience alias to the environment-level riemann working dir.
        return TestEnvironment.riemann_workdir()
    def publish_riemann_event(self,
                              deployment_id,
                              node_name,
                              node_id='',
                              host='localhost',
                              service='service',
                              state='',
                              metric=0,
                              ttl=60):
        """Publish a synthetic riemann event on the deployment's queue."""
        event = {
            'host': host,
            'service': service,
            'state': state,
            'metric': metric,
            'time': int(time.time()),
            'node_name': node_name,
            'node_id': node_id,
            'ttl': ttl
        }
        # Riemann queues are named per deployment.
        queue = '{0}-riemann'.format(deployment_id)
        routing_key = deployment_id
        utils.publish_event(queue,
                            routing_key,
                            event)
class ProcessModeTestCase(TestCase):
    """A TestCase that toggles the PROCESS_MODE environment switch."""
    _ENV_KEY = 'PROCESS_MODE'
    def _set_process_mode(self, value):
        # Any non-empty string enables process mode; '' disables it.
        os.environ[self._ENV_KEY] = value
    def setUp(self):
        self._set_process_mode('True')
        super(ProcessModeTestCase, self).setUp()
    def tearDown(self):
        self._set_process_mode('')
        super(ProcessModeTestCase, self).tearDown()
class TestEnvironment(object):
    """Owns all external processes the integration tests depend on
    (elasticsearch, riemann, file server, manager REST, celery worker)
    and their on-disk working directory.
    """
    # Process handles; populated lazily by the start_* methods.
    manager_rest_process = None
    elasticsearch_process = None
    riemann_process = None
    file_server_process = None
    celery_management_worker_process = None
    def __init__(self, test_working_dir):
        """Lay out the working directory; does not start any process."""
        super(TestEnvironment, self).__init__()
        self.test_working_dir = test_working_dir
        self.plugins_storage_dir = os.path.join(
            self.test_working_dir,
            'plugins-storage'
        )
        os.makedirs(self.plugins_storage_dir)
        self.fileserver_dir = path.join(self.test_working_dir, 'fileserver')
        self.rest_service_log_level = 'DEBUG'
        self.rest_service_log_path = path.join(
            self.test_working_dir, 'cloudify-rest-service.log')
        self.rest_service_log_file_size_MB = 100
        self.rest_service_log_files_backup_count = 20
        self.securest_log_level = 'DEBUG'
        self.securest_log_file = path.join(
            self.test_working_dir, 'rest-security-audit.log')
        self.securest_log_file_size_MB = 100
        self.securest_log_files_backup_count = 20
        self.events_and_logs_dir = \
            path.join(self.test_working_dir, 'tests-events-and-logs')
        os.mkdir(self.events_and_logs_dir)
    def create(self):
        """Start every managed process; tear everything down on failure."""
        try:
            logger.info('Setting up test environment... workdir=[{0}]'
                        .format(self.test_working_dir))
            # events/logs polling
            start_events_and_logs_polling(
                logs_handler_retriever=self._logs_handler_retriever)
            self.start_elasticsearch()
            self.start_riemann()
            self.start_fileserver()
            self.start_manager_rest()
            self.create_management_worker()
        except BaseException as error:
            # BaseException so KeyboardInterrupt etc. also trigger cleanup;
            # the original exception is re-raised after destroy().
            s_traceback = StringIO.StringIO()
            traceback.print_exc(file=s_traceback)
            logger.error("Error in test environment setup: %s", error)
            logger.error(s_traceback.getvalue())
            self.destroy()
            raise
    def create_management_worker(self):
        """Configure the celery management worker and copy mock plugins in."""
        mock_plugins_path = os.path.dirname(mock_plugins.__file__)
        os.environ['MOCK_PLUGINS_PATH'] = mock_plugins_path
        self.celery_management_worker_process = CeleryWorkerProcess(
            queues=['cloudify.management'],
            test_working_dir=self.test_working_dir,
            # these plugins are already installed.
            # so we just need to append to the includes.
            # note that these are not mocks, but the actual production
            # code plugins.
            additional_includes=[
                'riemann_controller.tasks',
                'cloudify_system_workflows.deployment_environment',
                'cloudify.plugins.workflows',
                'diamond_agent.tasks',
                'script_runner.tasks'
            ],
            # we need higher concurrency since
            # 'deployment_environment.create' calls
            # 'plugin_installer.install' as a sub-task
            # and they are both executed inside
            # this worker
            concurrency=2
        )
        # copy plugins to worker env
        mock_plugins_path = os.path.dirname(mock_plugins.__file__)
        shutil.copytree(
            src=mock_plugins_path,
            dst=self.celery_management_worker_process.envdir,
            ignore=shutil.ignore_patterns('*.pyc')
        )
    def start_riemann(self):
        """Start the riemann process with the manager config and libs."""
        riemann_config_path = self._get_riemann_config()
        libs_path = self._get_libs_path()
        self.riemann_process = RiemannProcess(riemann_config_path,
                                              libs_path)
        self.riemann_process.start()
    def start_manager_rest(self):
        """Start the manager REST service pointed at the local file server."""
        # Imported here because start_fileserver() must have put the
        # rest-service package on sys.path first.
        from manager_rest.file_server import PORT as FS_PORT
        file_server_base_uri = 'http://localhost:{0}'.format(FS_PORT)
        self.manager_rest_process = ManagerRestProcess(
            MANAGER_REST_PORT,
            self.fileserver_dir,
            file_server_base_uri,
            FILE_SERVER_BLUEPRINTS_FOLDER,
            FILE_SERVER_UPLOADED_BLUEPRINTS_FOLDER,
            FILE_SERVER_RESOURCES_URI,
            self.rest_service_log_level,
            self.rest_service_log_path,
            self.rest_service_log_file_size_MB,
            self.rest_service_log_files_backup_count,
            self.securest_log_level,
            self.securest_log_file,
            self.securest_log_file_size_MB,
            self.securest_log_files_backup_count,
            self.test_working_dir)
        self.manager_rest_process.start()
    def start_elasticsearch(self):
        # elasticsearch
        self.elasticsearch_process = ElasticSearchProcess()
        self.elasticsearch_process.start()
    def start_fileserver(self):
        """Start the file server and seed it with the repo's resources."""
        # workaround to update path
        manager_rest_path = \
            path.dirname(path.dirname(path.dirname(__file__)))
        manager_rest_path = path.join(manager_rest_path, 'rest-service')
        sys.path.append(manager_rest_path)
        os.mkdir(self.fileserver_dir)
        from manager_rest.file_server import FileServer
        from manager_rest.utils import copy_resources
        self.file_server_process = FileServer(self.fileserver_dir)
        self.file_server_process.start()
        # copy resources (base yaml etc)
        resources_path = path.abspath(__file__)
        resources_path = path.dirname(resources_path)
        resources_path = path.dirname(resources_path)
        resources_path = path.dirname(resources_path)
        resources_path = path.join(resources_path, 'resources')
        copy_resources(self.fileserver_dir, resources_path)
        self.patch_source_urls(self.fileserver_dir)
    def destroy(self):
        """Stop whichever processes were started and delete the workdir."""
        logger.info('Destroying test environment...')
        if self.riemann_process:
            self.riemann_process.close()
        if self.elasticsearch_process:
            self.elasticsearch_process.close()
        if self.manager_rest_process:
            self.manager_rest_process.close()
        if self.file_server_process:
            self.file_server_process.stop()
        self.delete_working_directory()
    def delete_working_directory(self):
        if os.path.exists(self.test_working_dir):
            logger.info('Deleting test environment from: %s',
                        self.test_working_dir)
            shutil.rmtree(self.test_working_dir, ignore_errors=True)
    def handle_logs(self, output, event):
        # Default no-op hook; individual tests rebind this (see TestCase.setUp).
        pass
    def _logs_handler_retriever(self):
        # Returned as a callable so the polling thread always sees the
        # currently-bound handle_logs, not a stale reference.
        return self.handle_logs
    @classmethod
    def _get_riemann_config(cls):
        """Locate riemann's manager.config inside the repo tree."""
        manager_dir = cls._get_manager_root()
        plugins_dir = os.path.join(manager_dir, 'plugins')
        riemann_dir = os.path.join(plugins_dir, 'riemann-controller')
        package_dir = os.path.join(riemann_dir, 'riemann_controller')
        resources_dir = os.path.join(package_dir, 'resources')
        manager_config = os.path.join(resources_dir, 'manager.config')
        return manager_config
    @classmethod
    def _get_libs_path(cls):
        return path.join(cls._get_manager_root(), '.libs')
    @staticmethod
    def reset_elasticsearch_data():
        global testenv_instance
        testenv_instance.elasticsearch_process.reset_data()
    @staticmethod
    def stop_celery_management_worker():
        global testenv_instance
        testenv_instance.celery_management_worker_process.stop()
    @staticmethod
    def read_celery_management_logs():
        global testenv_instance
        process = testenv_instance.celery_management_worker_process
        return process.try_read_logfile()
    @classmethod
    def stop_all_celery_processes(cls):
        # Blunt instrument: kill any lingering celery worker on the machine.
        logger.info('Shutting down all celery processes')
        os.system("pkill -9 -f 'celery worker'")
    @staticmethod
    def start_celery_management_worker():
        global testenv_instance
        testenv_instance.celery_management_worker_process.start()
    @staticmethod
    def riemann_cleanup():
        """Wipe riemann's workdir and restart the process."""
        global testenv_instance
        shutil.rmtree(TestEnvironment.riemann_workdir())
        os.mkdir(TestEnvironment.riemann_workdir())
        testenv_instance.riemann_process.restart()
    @staticmethod
    def riemann_workdir():
        global testenv_instance
        return testenv_instance.\
            celery_management_worker_process.\
            riemann_config_dir
    @staticmethod
    def _get_manager_root():
        """Return the repository root (two levels above this package)."""
        init_file = __file__
        testenv_dir = dirname(init_file)
        tests_dir = dirname(testenv_dir)
        manager_dir = dirname(tests_dir)
        return manager_dir
    @staticmethod
    def patch_source_urls(resources):
        """Rewrite absolute policy/trigger source URLs in types.yaml to
        paths relative to the local file server root.
        """
        with open(path.join(resources,
                            'cloudify', 'types', 'types.yaml')) as f:
            types_yaml = yaml.safe_load(f.read())
        for policy_type in types_yaml.get('policy_types', {}).values():
            in_path = '/cloudify/policies/'
            source = policy_type['source']
            if in_path in source:
                # Drop everything up to (and including) the leading slash.
                source = source[source.index(in_path) + 1:]
            policy_type['source'] = source
        for policy_trigger in types_yaml.get('policy_triggers', {}).values():
            in_path = '/cloudify/triggers/'
            source = policy_trigger['source']
            if in_path in source:
                source = source[source.index(in_path) + 1:]
            policy_trigger['source'] = source
        with open(path.join(resources,
                            'cloudify', 'types', 'types.yaml'), 'w') as f:
            f.write(yaml.safe_dump(types_yaml))
def start_events_and_logs_polling(logs_handler_retriever=None):
    """
    Fetches events and logs from RabbitMQ.

    Consumes the cloudify-events/cloudify-logs queues on a daemon thread,
    logging every message and forwarding it to the handler returned by
    ``logs_handler_retriever`` (re-fetched per message so rebinding works).
    """
    if not RABBITMQ_POLLING_ENABLED:
        return
    setup_logger('pika', logging.INFO)
    connection = pika.BlockingConnection(
        pika.ConnectionParameters(host='localhost'))
    channel = connection.channel()
    queues = ['cloudify-events', 'cloudify-logs']
    for q in queues:
        channel.queue_declare(queue=q, auto_delete=True, durable=True,
                              exclusive=False)
    def callback(ch, method, properties, body):
        # Runs on the consumer thread for every delivered message.
        try:
            event = json.loads(body)
            if RABBITMQ_VERBOSE_MESSAGES_ENABLED:
                output = '\n{0}'.format(json.dumps(event, indent=4))
            else:
                output = create_event_message_prefix(event)
            logger.info(output)
            if logs_handler_retriever:
                # Call the retriever each time so a freshly-bound handler
                # (e.g. per-test) is used.
                logs_handler_retriever()(output, event)
        except Exception as e:
            logger.error('event/log format error - output: {0} [message={1}]'
                         .format(body, e.message))
            s_traceback = StringIO.StringIO()
            traceback.print_exc(file=s_traceback)
            logger.error(s_traceback.getvalue())
    def consume():
        # no_ack: messages are dropped once delivered; fine for test logging.
        channel.basic_consume(callback, queue=queues[0], no_ack=True)
        channel.basic_consume(callback, queue=queues[1], no_ack=True)
        channel.start_consuming()
    logger.info("Starting RabbitMQ events/logs polling - queues={0}".format(
        queues))
    # Daemon thread so it never blocks interpreter shutdown.
    polling_thread = threading.Thread(target=consume)
    polling_thread.daemon = True
    polling_thread.start()
| 35.162551 | 79 | 0.636374 |
b52de0945309f71cb0ebe333bee64345934f7a35 | 10,422 | py | Python | src/cryptography/hazmat/backends/openssl/ciphers.py | andrewjroth/cryptography | 2dbc6bbf19274f768f03ea82dcf59c371951ad2c | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/cryptography/hazmat/backends/openssl/ciphers.py | andrewjroth/cryptography | 2dbc6bbf19274f768f03ea82dcf59c371951ad2c | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2022-01-19T01:52:49.000Z | 2022-01-20T14:14:46.000Z | src/cryptography/hazmat/backends/openssl/ciphers.py | messense/cryptography | 44f9703906d118cf689ce964cd310445e44b8853 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
import typing
from cryptography import utils
from cryptography.exceptions import InvalidTag, UnsupportedAlgorithm, _Reasons
from cryptography.hazmat.primitives import ciphers
from cryptography.hazmat.primitives.ciphers import modes
@utils.register_interface(ciphers.CipherContext)
@utils.register_interface(ciphers.AEADCipherContext)
@utils.register_interface(ciphers.AEADEncryptionContext)
@utils.register_interface(ciphers.AEADDecryptionContext)
class _CipherContext(object):
    """OpenSSL EVP-backed symmetric cipher context.

    One instance handles either encryption or decryption (chosen by the
    ``operation`` constructor argument) and implements both the plain and
    the AEAD cipher-context interfaces.
    """
    _ENCRYPT = 1
    _DECRYPT = 0
    # Largest chunk handed to EVP_CipherUpdate per call; OpenSSL takes an
    # int length, so stay safely below 2**31 - 1.
    _MAX_CHUNK_SIZE = 2 ** 30 - 1
    def __init__(self, backend, cipher, mode, operation):
        self._backend = backend
        self._cipher = cipher
        self._mode = mode
        self._operation = operation
        self._tag = None
        if isinstance(self._cipher, ciphers.BlockCipherAlgorithm):
            self._block_size_bytes = self._cipher.block_size // 8
        else:
            # Stream ciphers have no block structure; treat as one byte.
            self._block_size_bytes = 1
        ctx = self._backend._lib.EVP_CIPHER_CTX_new()
        ctx = self._backend._ffi.gc(
            ctx, self._backend._lib.EVP_CIPHER_CTX_free
        )
        registry = self._backend._cipher_registry
        try:
            adapter = registry[type(cipher), type(mode)]
        except KeyError:
            raise UnsupportedAlgorithm(
                "cipher {} in {} mode is not supported "
                "by this backend.".format(
                    cipher.name, mode.name if mode else mode
                ),
                _Reasons.UNSUPPORTED_CIPHER,
            )
        evp_cipher = adapter(self._backend, cipher, mode)
        if evp_cipher == self._backend._ffi.NULL:
            msg = "cipher {0.name} ".format(cipher)
            if mode is not None:
                msg += "in {0.name} mode ".format(mode)
            msg += (
                "is not supported by this backend (Your version of OpenSSL "
                "may be too old. Current version: {}.)"
            ).format(self._backend.openssl_version_text())
            raise UnsupportedAlgorithm(msg, _Reasons.UNSUPPORTED_CIPHER)
        # Resolve the IV/nonce/tweak from whichever interface the mode (or,
        # as a fallback, the cipher itself) implements.
        if isinstance(mode, modes.ModeWithInitializationVector):
            iv_nonce = self._backend._ffi.from_buffer(
                mode.initialization_vector
            )
        elif isinstance(mode, modes.ModeWithTweak):
            iv_nonce = self._backend._ffi.from_buffer(mode.tweak)
        elif isinstance(mode, modes.ModeWithNonce):
            iv_nonce = self._backend._ffi.from_buffer(mode.nonce)
        elif isinstance(cipher, modes.ModeWithNonce):
            iv_nonce = self._backend._ffi.from_buffer(cipher.nonce)
        else:
            iv_nonce = self._backend._ffi.NULL
        # begin init with cipher and operation type
        res = self._backend._lib.EVP_CipherInit_ex(
            ctx,
            evp_cipher,
            self._backend._ffi.NULL,
            self._backend._ffi.NULL,
            self._backend._ffi.NULL,
            operation,
        )
        self._backend.openssl_assert(res != 0)
        # set the key length to handle variable key ciphers
        res = self._backend._lib.EVP_CIPHER_CTX_set_key_length(
            ctx, len(cipher.key)
        )
        self._backend.openssl_assert(res != 0)
        if isinstance(mode, modes.GCM):
            # GCM needs the IV length set explicitly; a tag is only present
            # up front when decrypting via Cipher(..., GCM(iv, tag)).
            res = self._backend._lib.EVP_CIPHER_CTX_ctrl(
                ctx,
                self._backend._lib.EVP_CTRL_AEAD_SET_IVLEN,
                len(iv_nonce),
                self._backend._ffi.NULL,
            )
            self._backend.openssl_assert(res != 0)
            if mode.tag is not None:
                res = self._backend._lib.EVP_CIPHER_CTX_ctrl(
                    ctx,
                    self._backend._lib.EVP_CTRL_AEAD_SET_TAG,
                    len(mode.tag),
                    mode.tag,
                )
                self._backend.openssl_assert(res != 0)
                self._tag = mode.tag
        # pass key/iv
        res = self._backend._lib.EVP_CipherInit_ex(
            ctx,
            self._backend._ffi.NULL,
            self._backend._ffi.NULL,
            self._backend._ffi.from_buffer(cipher.key),
            iv_nonce,
            operation,
        )
        # Check for XTS mode duplicate keys error
        errors = self._backend._consume_errors()
        lib = self._backend._lib
        if res == 0 and (
            (
                lib.CRYPTOGRAPHY_OPENSSL_111D_OR_GREATER
                and errors[0]._lib_reason_match(
                    lib.ERR_LIB_EVP, lib.EVP_R_XTS_DUPLICATED_KEYS
                )
            )
            or (
                lib.Cryptography_HAS_PROVIDERS
                and errors[0]._lib_reason_match(
                    lib.ERR_LIB_PROV, lib.PROV_R_XTS_DUPLICATED_KEYS
                )
            )
        ):
            raise ValueError("In XTS mode duplicated keys are not allowed")
        self._backend.openssl_assert(res != 0, errors=errors)
        # We purposely disable padding here as it's handled higher up in the
        # API.
        self._backend._lib.EVP_CIPHER_CTX_set_padding(ctx, 0)
        self._ctx = ctx
    def update(self, data: bytes) -> bytes:
        """Process ``data``; return the bytes produced so far."""
        # Worst case EVP_CipherUpdate emits len(data) + block_size - 1 bytes.
        buf = bytearray(len(data) + self._block_size_bytes - 1)
        n = self.update_into(data, buf)
        return bytes(buf[:n])
    def update_into(self, data: bytes, buf) -> int:
        """Process ``data`` into caller-provided ``buf``; return bytes written."""
        total_data_len = len(data)
        if len(buf) < (total_data_len + self._block_size_bytes - 1):
            raise ValueError(
                "buffer must be at least {} bytes for this "
                "payload".format(len(data) + self._block_size_bytes - 1)
            )
        data_processed = 0
        total_out = 0
        outlen = self._backend._ffi.new("int *")
        baseoutbuf = self._backend._ffi.from_buffer(buf)
        baseinbuf = self._backend._ffi.from_buffer(data)
        # Feed OpenSSL in chunks no larger than _MAX_CHUNK_SIZE.
        while data_processed != total_data_len:
            outbuf = baseoutbuf + total_out
            inbuf = baseinbuf + data_processed
            inlen = min(self._MAX_CHUNK_SIZE, total_data_len - data_processed)
            res = self._backend._lib.EVP_CipherUpdate(
                self._ctx, outbuf, outlen, inbuf, inlen
            )
            if res == 0 and isinstance(self._mode, modes.XTS):
                self._backend._consume_errors()
                raise ValueError(
                    "In XTS mode you must supply at least a full block in the "
                    "first update call. For AES this is 16 bytes."
                )
            else:
                self._backend.openssl_assert(res != 0)
            data_processed += inlen
            total_out += outlen[0]
        return total_out
    def finalize(self) -> bytes:
        """Finish the operation; return any remaining bytes.

        Raises InvalidTag on GCM tag mismatch and ValueError when the total
        input length is not a multiple of the block length.
        """
        if (
            self._operation == self._DECRYPT
            and isinstance(self._mode, modes.ModeWithAuthenticationTag)
            and self.tag is None
        ):
            raise ValueError(
                "Authentication tag must be provided when decrypting."
            )
        buf = self._backend._ffi.new("unsigned char[]", self._block_size_bytes)
        outlen = self._backend._ffi.new("int *")
        res = self._backend._lib.EVP_CipherFinal_ex(self._ctx, buf, outlen)
        if res == 0:
            errors = self._backend._consume_errors()
            # A GCM failure with an empty error queue means tag mismatch.
            if not errors and isinstance(self._mode, modes.GCM):
                raise InvalidTag
            lib = self._backend._lib
            self._backend.openssl_assert(
                errors[0]._lib_reason_match(
                    lib.ERR_LIB_EVP,
                    lib.EVP_R_DATA_NOT_MULTIPLE_OF_BLOCK_LENGTH,
                )
                or (
                    lib.Cryptography_HAS_PROVIDERS
                    and errors[0]._lib_reason_match(
                        lib.ERR_LIB_PROV,
                        lib.PROV_R_WRONG_FINAL_BLOCK_LENGTH,
                    )
                )
                or (
                    lib.CRYPTOGRAPHY_IS_BORINGSSL
                    and errors[0].reason
                    == lib.CIPHER_R_DATA_NOT_MULTIPLE_OF_BLOCK_LENGTH
                ),
                errors=errors,
            )
            raise ValueError(
                "The length of the provided data is not a multiple of "
                "the block length."
            )
        if (
            isinstance(self._mode, modes.GCM)
            and self._operation == self._ENCRYPT
        ):
            # Capture the computed authentication tag after encryption.
            tag_buf = self._backend._ffi.new(
                "unsigned char[]", self._block_size_bytes
            )
            res = self._backend._lib.EVP_CIPHER_CTX_ctrl(
                self._ctx,
                self._backend._lib.EVP_CTRL_AEAD_GET_TAG,
                self._block_size_bytes,
                tag_buf,
            )
            self._backend.openssl_assert(res != 0)
            self._tag = self._backend._ffi.buffer(tag_buf)[:]
        res = self._backend._lib.EVP_CIPHER_CTX_reset(self._ctx)
        self._backend.openssl_assert(res == 1)
        return self._backend._ffi.buffer(buf)[: outlen[0]]
    def finalize_with_tag(self, tag: bytes) -> bytes:
        """Set the expected AEAD tag, then finalize (decryption path)."""
        tag_len = len(tag)
        if tag_len < self._mode._min_tag_length:
            raise ValueError(
                "Authentication tag must be {} bytes or longer.".format(
                    self._mode._min_tag_length
                )
            )
        elif tag_len > self._block_size_bytes:
            raise ValueError(
                "Authentication tag cannot be more than {} bytes.".format(
                    self._block_size_bytes
                )
            )
        res = self._backend._lib.EVP_CIPHER_CTX_ctrl(
            self._ctx, self._backend._lib.EVP_CTRL_AEAD_SET_TAG, len(tag), tag
        )
        self._backend.openssl_assert(res != 0)
        self._tag = tag
        return self.finalize()
    def authenticate_additional_data(self, data: bytes) -> None:
        """Feed AAD to the AEAD cipher (NULL output buffer => AAD pass)."""
        outlen = self._backend._ffi.new("int *")
        res = self._backend._lib.EVP_CipherUpdate(
            self._ctx,
            self._backend._ffi.NULL,
            outlen,
            self._backend._ffi.from_buffer(data),
            len(data),
        )
        self._backend.openssl_assert(res != 0)
    @property
    def tag(self) -> typing.Optional[bytes]:
        # Set after finalize() when encrypting, or up front when decrypting.
        return self._tag
| 36.957447 | 79 | 0.567645 |
5d86bbb7d45b07807b8d2a8bfb782d7715928191 | 31,428 | py | Python | espnet2/train/trainer.py | lahiruts/espnet | 0afa20fa1d5105374f8998fba596b01b04dea6c3 | [
"Apache-2.0"
] | 1 | 2021-09-29T03:14:14.000Z | 2021-09-29T03:14:14.000Z | espnet2/train/trainer.py | xbsdsongnan/espnet | 1dc734839d34e2f2dd13cfa375713aecf232ae25 | [
"Apache-2.0"
] | 83 | 2020-09-14T15:28:59.000Z | 2021-04-19T23:07:45.000Z | espnet2/train/trainer.py | xbsdsongnan/espnet | 1dc734839d34e2f2dd13cfa375713aecf232ae25 | [
"Apache-2.0"
] | 1 | 2021-11-27T03:10:00.000Z | 2021-11-27T03:10:00.000Z | import argparse
from contextlib import contextmanager
import dataclasses
from dataclasses import is_dataclass
from distutils.version import LooseVersion
import logging
from pathlib import Path
import time
from typing import Dict
from typing import Iterable
from typing import List
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import Union
import humanfriendly
import numpy as np
import torch
import torch.nn
import torch.optim
from typeguard import check_argument_types
from espnet2.iterators.abs_iter_factory import AbsIterFactory
from espnet2.main_funcs.average_nbest_models import average_nbest_models
from espnet2.main_funcs.calculate_all_attentions import calculate_all_attentions
from espnet2.schedulers.abs_scheduler import AbsBatchStepScheduler
from espnet2.schedulers.abs_scheduler import AbsEpochStepScheduler
from espnet2.schedulers.abs_scheduler import AbsScheduler
from espnet2.schedulers.abs_scheduler import AbsValEpochStepScheduler
from espnet2.torch_utils.add_gradient_noise import add_gradient_noise
from espnet2.torch_utils.device_funcs import to_device
from espnet2.torch_utils.recursive_op import recursive_average
from espnet2.torch_utils.set_all_random_seed import set_all_random_seed
from espnet2.train.abs_espnet_model import AbsESPnetModel
from espnet2.train.distributed_utils import DistributedOption
from espnet2.train.reporter import Reporter
from espnet2.train.reporter import SubReporter
from espnet2.utils.build_dataclass import build_dataclass
# TensorBoard writer: prefer torch's native implementation when available,
# otherwise fall back to the standalone tensorboardX package.
if LooseVersion(torch.__version__) >= LooseVersion("1.1.0"):
    from torch.utils.tensorboard import SummaryWriter
else:
    from tensorboardX import SummaryWriter
# torch.distributed renamed reduce_op to ReduceOp after 1.0.1; expose one name.
if torch.distributed.is_available():
    if LooseVersion(torch.__version__) > LooseVersion("1.0.1"):
        from torch.distributed import ReduceOp
    else:
        from torch.distributed import reduce_op as ReduceOp
else:
    ReduceOp = None
# AMP (autocast/GradScaler) exists only in torch>=1.6.0; otherwise provide a
# no-op autocast so the rest of the module can use it unconditionally.
if LooseVersion(torch.__version__) >= LooseVersion("1.6.0"):
    from torch.cuda.amp import autocast
    from torch.cuda.amp import GradScaler
else:
    # Nothing to do if torch<1.6.0
    @contextmanager
    def autocast(enabled=True):
        yield
    GradScaler = None
# fairscale (sharded DDP / OSS optimizer) is an optional dependency.
try:
    import fairscale
except ImportError:
    fairscale = None
@dataclasses.dataclass
class TrainerOptions:
    """Flat bag of options consumed by Trainer.run() and its helpers."""
    # Number of GPUs used by this process; 0 means CPU.
    ngpu: int
    # Resume from <output_dir>/checkpoint.pth when it exists.
    resume: bool
    # Enable Automatic Mixed Precision (requires torch>=1.6.0).
    use_amp: bool
    train_dtype: str
    # Add gradient noise before the optimizer step.
    grad_noise: bool
    # Number of forward/backward passes accumulated per optimizer step.
    accum_grad: int
    grad_clip: float
    grad_clip_type: float
    # Steps between progress log lines; None derives a default from the epoch.
    log_interval: Optional[int]
    # Skip forward computation entirely (debugging/profiling of the loader).
    no_forward_run: bool
    use_tensorboard: bool
    use_wandb: bool
    output_dir: Union[Path, str]
    max_epoch: int
    seed: int
    # Use fairscale sharded DDP instead of torch DistributedDataParallel.
    sharded_ddp: bool
    patience: Optional[int]
    # int, or list of ints (the max is used for pruning old checkpoints).
    keep_nbest_models: Union[int, List[int]]
    early_stopping_criterion: Sequence[str]
    # Sequences of (phase, metric, min|max) used to pick best checkpoints.
    best_model_criterion: Sequence[Sequence[str]]
    val_scheduler_criterion: Sequence[str]
    # Passed to DistributedDataParallel(find_unused_parameters=...).
    unused_parameters: bool
class Trainer:
"""Trainer having a optimizer.
If you'd like to use multiple optimizers, then inherit this class
and override the methods if necessary - at least "train_one_epoch()"
>>> class TwoOptimizerTrainer(Trainer):
... @classmethod
... def add_arguments(cls, parser):
... ...
...
... @classmethod
... def train_one_epoch(cls, model, optimizers, ...):
... loss1 = model.model1(...)
... loss1.backward()
... optimizers[0].step()
...
... loss2 = model.model2(...)
... loss2.backward()
... optimizers[1].step()
"""
def __init__(self):
raise RuntimeError("This class can't be instantiated.")
    @classmethod
    def build_options(cls, args: argparse.Namespace) -> TrainerOptions:
        """Build options consumed by train(), eval(), and plot_attention()"""
        assert check_argument_types()
        # build_dataclass() selects TrainerOptions fields by name from args.
        return build_dataclass(TrainerOptions, args)
    @classmethod
    def add_arguments(cls, parser: argparse.ArgumentParser):
        """Reserved for future development of another Trainer"""
        # Intentionally a no-op: subclasses may register extra CLI arguments.
        pass
    @staticmethod
    def resume(
        checkpoint: Union[str, Path],
        model: torch.nn.Module,
        reporter: Reporter,
        optimizers: Sequence[torch.optim.Optimizer],
        schedulers: Sequence[Optional[AbsScheduler]],
        scaler: Optional[GradScaler],
        ngpu: int = 0,
    ):
        """Restore model/reporter/optimizer/scheduler/scaler state in place.

        The checkpoint is mapped onto the current CUDA device when ngpu > 0,
        otherwise onto CPU.
        """
        states = torch.load(
            checkpoint,
            map_location=f"cuda:{torch.cuda.current_device()}" if ngpu > 0 else "cpu",
        )
        model.load_state_dict(states["model"])
        reporter.load_state_dict(states["reporter"])
        # Optimizers/schedulers are saved as parallel lists; restore pairwise.
        for optimizer, state in zip(optimizers, states["optimizers"]):
            optimizer.load_state_dict(state)
        for scheduler, state in zip(schedulers, states["schedulers"]):
            if scheduler is not None:
                scheduler.load_state_dict(state)
        if scaler is not None:
            # Older checkpoints (saved without AMP) may lack scaler state.
            if states["scaler"] is None:
                logging.warning("scaler state is not found")
            else:
                scaler.load_state_dict(states["scaler"])
        logging.info(f"The training was resumed using {checkpoint}")
    @classmethod
    def run(
        cls,
        model: AbsESPnetModel,
        optimizers: Sequence[torch.optim.Optimizer],
        schedulers: Sequence[Optional[AbsScheduler]],
        train_iter_factory: AbsIterFactory,
        valid_iter_factory: AbsIterFactory,
        plot_attention_iter_factory: Optional[AbsIterFactory],
        trainer_options,
        distributed_option: DistributedOption,
    ) -> None:
        """Perform training. This method performs the main process of training.

        Runs the epoch loop: train + validate (+ attention plots on rank 0),
        steps schedulers, saves/prunes checkpoints, handles early stopping,
        and finally writes the n-best averaged model.
        """
        assert check_argument_types()
        # NOTE(kamo): Don't check the type more strictly as far trainer_options
        assert is_dataclass(trainer_options), type(trainer_options)
        assert len(optimizers) == len(schedulers), (len(optimizers), len(schedulers))
        # keep_nbest_models may be an int or a list; pruning uses the max.
        if isinstance(trainer_options.keep_nbest_models, int):
            keep_nbest_models = trainer_options.keep_nbest_models
        else:
            if len(trainer_options.keep_nbest_models) == 0:
                logging.warning("No keep_nbest_models is given. Change to [1]")
                trainer_options.keep_nbest_models = [1]
            keep_nbest_models = max(trainer_options.keep_nbest_models)
        output_dir = Path(trainer_options.output_dir)
        reporter = Reporter()
        if trainer_options.use_amp:
            if LooseVersion(torch.__version__) < LooseVersion("1.6.0"):
                raise RuntimeError(
                    "Require torch>=1.6.0 for Automatic Mixed Precision"
                )
            if trainer_options.sharded_ddp:
                if fairscale is None:
                    raise RuntimeError(
                        "Requiring fairscale. Do 'pip install fairscale'"
                    )
                scaler = fairscale.optim.grad_scaler.ShardedGradScaler()
            else:
                scaler = GradScaler()
        else:
            scaler = None
        if trainer_options.resume and (output_dir / "checkpoint.pth").exists():
            cls.resume(
                checkpoint=output_dir / "checkpoint.pth",
                model=model,
                optimizers=optimizers,
                schedulers=schedulers,
                reporter=reporter,
                scaler=scaler,
                ngpu=trainer_options.ngpu,
            )
        # Resuming restores the reporter's epoch counter; continue after it.
        start_epoch = reporter.get_epoch() + 1
        if start_epoch == trainer_options.max_epoch + 1:
            logging.warning(
                f"The training has already reached at max_epoch: {start_epoch}"
            )
        if distributed_option.distributed:
            if trainer_options.sharded_ddp:
                dp_model = fairscale.nn.data_parallel.ShardedDataParallel(
                    module=model,
                    sharded_optimizer=optimizers,
                )
            else:
                dp_model = torch.nn.parallel.DistributedDataParallel(
                    model,
                    device_ids=(
                        # Perform multi-Process with multi-GPUs
                        [torch.cuda.current_device()]
                        if distributed_option.ngpu == 1
                        # Perform single-Process with multi-GPUs
                        else None
                    ),
                    output_device=(
                        torch.cuda.current_device()
                        if distributed_option.ngpu == 1
                        else None
                    ),
                    find_unused_parameters=trainer_options.unused_parameters,
                )
        elif distributed_option.ngpu > 1:
            dp_model = torch.nn.parallel.DataParallel(
                model,
                device_ids=list(range(distributed_option.ngpu)),
            )
        else:
            # NOTE(kamo): DataParallel also should work with ngpu=1,
            # but for debuggability it's better to keep this block.
            dp_model = model
        # TensorBoard writing happens only on the (distributed) rank-0 process.
        if trainer_options.use_tensorboard and (
            not distributed_option.distributed or distributed_option.dist_rank == 0
        ):
            summary_writer = SummaryWriter(str(output_dir / "tensorboard"))
        else:
            summary_writer = None
        start_time = time.perf_counter()
        for iepoch in range(start_epoch, trainer_options.max_epoch + 1):
            if iepoch != start_epoch:
                # ETA from average wall-clock time of completed epochs.
                logging.info(
                    "{}/{}epoch started. Estimated time to finish: {}".format(
                        iepoch,
                        trainer_options.max_epoch,
                        humanfriendly.format_timespan(
                            (time.perf_counter() - start_time)
                            / (iepoch - start_epoch)
                            * (trainer_options.max_epoch - iepoch + 1)
                        ),
                    )
                )
            else:
                logging.info(f"{iepoch}/{trainer_options.max_epoch}epoch started")
            # Re-seed per epoch so shuffling etc. are reproducible per epoch.
            set_all_random_seed(trainer_options.seed + iepoch)
            reporter.set_epoch(iepoch)
            # 1. Train and validation for one-epoch
            with reporter.observe("train") as sub_reporter:
                all_steps_are_invalid = cls.train_one_epoch(
                    model=dp_model,
                    optimizers=optimizers,
                    schedulers=schedulers,
                    iterator=train_iter_factory.build_iter(iepoch),
                    reporter=sub_reporter,
                    scaler=scaler,
                    summary_writer=summary_writer,
                    options=trainer_options,
                    distributed_option=distributed_option,
                )
            with reporter.observe("valid") as sub_reporter:
                cls.validate_one_epoch(
                    model=dp_model,
                    iterator=valid_iter_factory.build_iter(iepoch),
                    reporter=sub_reporter,
                    options=trainer_options,
                    distributed_option=distributed_option,
                )
            if not distributed_option.distributed or distributed_option.dist_rank == 0:
                # att_plot doesn't support distributed
                if plot_attention_iter_factory is not None:
                    with reporter.observe("att_plot") as sub_reporter:
                        cls.plot_attention(
                            model=model,
                            output_dir=output_dir / "att_ws",
                            summary_writer=summary_writer,
                            iterator=plot_attention_iter_factory.build_iter(iepoch),
                            reporter=sub_reporter,
                            options=trainer_options,
                        )
            # 2. LR Scheduler step
            for scheduler in schedulers:
                if isinstance(scheduler, AbsValEpochStepScheduler):
                    # Validation-metric-driven schedulers (e.g. ReduceLROnPlateau).
                    scheduler.step(
                        reporter.get_value(*trainer_options.val_scheduler_criterion)
                    )
                elif isinstance(scheduler, AbsEpochStepScheduler):
                    scheduler.step()
            if trainer_options.sharded_ddp:
                # OSS shards optimizer state; gather it before checkpointing.
                for optimizer in optimizers:
                    if isinstance(optimizer, fairscale.optim.oss.OSS):
                        optimizer.consolidate_state_dict()
            if not distributed_option.distributed or distributed_option.dist_rank == 0:
                # 3. Report the results
                logging.info(reporter.log_message())
                reporter.matplotlib_plot(output_dir / "images")
                if summary_writer is not None:
                    reporter.tensorboard_add_scalar(summary_writer)
                if trainer_options.use_wandb:
                    reporter.wandb_log()
                # 4. Save/Update the checkpoint
                torch.save(
                    {
                        "model": model.state_dict(),
                        "reporter": reporter.state_dict(),
                        "optimizers": [o.state_dict() for o in optimizers],
                        "schedulers": [
                            s.state_dict() if s is not None else None
                            for s in schedulers
                        ],
                        "scaler": scaler.state_dict() if scaler is not None else None,
                    },
                    output_dir / "checkpoint.pth",
                )
                # 5. Save the model and update the link to the best model
                torch.save(model.state_dict(), output_dir / f"{iepoch}epoch.pth")
                # Creates a sym link latest.pth -> {iepoch}epoch.pth
                p = output_dir / "latest.pth"
                if p.is_symlink() or p.exists():
                    p.unlink()
                p.symlink_to(f"{iepoch}epoch.pth")
                _improved = []
                for _phase, k, _mode in trainer_options.best_model_criterion:
                    # e.g. _phase, k, _mode = "train", "loss", "min"
                    if reporter.has(_phase, k):
                        best_epoch = reporter.get_best_epoch(_phase, k, _mode)
                        # Creates sym links if it's the best result
                        if best_epoch == iepoch:
                            p = output_dir / f"{_phase}.{k}.best.pth"
                            if p.is_symlink() or p.exists():
                                p.unlink()
                            p.symlink_to(f"{iepoch}epoch.pth")
                            _improved.append(f"{_phase}.{k}")
                if len(_improved) == 0:
                    logging.info("There are no improvements in this epoch")
                else:
                    logging.info(
                        "The best model has been updated: " + ", ".join(_improved)
                    )
                # 6. Remove the model files excluding n-best epoch and latest epoch
                _removed = []
                # Get the union set of the n-best among multiple criterion
                nbests = set().union(
                    *[
                        set(reporter.sort_epochs(ph, k, m)[:keep_nbest_models])
                        for ph, k, m in trainer_options.best_model_criterion
                        if reporter.has(ph, k)
                    ]
                )
                for e in range(1, iepoch):
                    p = output_dir / f"{e}epoch.pth"
                    if p.exists() and e not in nbests:
                        p.unlink()
                        _removed.append(str(p))
                if len(_removed) != 0:
                    logging.info("The model files were removed: " + ", ".join(_removed))
            # 7. If any updating haven't happened, stops the training
            if all_steps_are_invalid:
                logging.warning(
                    f"The gradients at all steps are invalid in this epoch. "
                    f"Something seems wrong. This training was stopped at {iepoch}epoch"
                )
                break
            # 8. Check early stopping
            if trainer_options.patience is not None:
                if reporter.check_early_stopping(
                    trainer_options.patience, *trainer_options.early_stopping_criterion
                ):
                    break
        # for-else: runs only when the loop completed without break.
        else:
            logging.info(
                f"The training was finished at {trainer_options.max_epoch} epochs "
            )
        if not distributed_option.distributed or distributed_option.dist_rank == 0:
            # Generated n-best averaged model
            average_nbest_models(
                reporter=reporter,
                output_dir=output_dir,
                best_model_criterion=trainer_options.best_model_criterion,
                nbest=keep_nbest_models,
            )
@classmethod
def train_one_epoch(
    cls,
    model: torch.nn.Module,
    iterator: Iterable[Tuple[List[str], Dict[str, torch.Tensor]]],
    optimizers: Sequence[torch.optim.Optimizer],
    schedulers: Sequence[Optional[AbsScheduler]],
    scaler: Optional[GradScaler],
    reporter: SubReporter,
    summary_writer: Optional[SummaryWriter],
    options: TrainerOptions,
    distributed_option: DistributedOption,
) -> bool:
    """Train the model for one epoch.

    Handles gradient accumulation (``options.accum_grad``), optional AMP
    (via ``scaler``), per-batch optimizer selection (the model may return
    an ``optim_idx``), gradient clipping/noise, and distributed
    stop-flag synchronisation between workers.

    Returns:
        True if no optimizer step succeeded this epoch (every gradient
        norm was non-finite); the caller treats that as a fatal signal.
    """
    assert check_argument_types()

    grad_noise = options.grad_noise
    accum_grad = options.accum_grad
    grad_clip = options.grad_clip
    grad_clip_type = options.grad_clip_type
    log_interval = options.log_interval
    no_forward_run = options.no_forward_run
    ngpu = options.ngpu
    use_wandb = options.use_wandb
    distributed = distributed_option.distributed

    if log_interval is None:
        try:
            log_interval = max(len(iterator) // 20, 10)
        except TypeError:
            # The iterator has no __len__ (e.g. a stream): use a fixed interval.
            log_interval = 100

    model.train()
    all_steps_are_invalid = True
    # [For distributed] Because iteration counts are not always equals between
    # processes, send stop-flag to the other processes if iterator is finished
    iterator_stop = torch.tensor(0).to("cuda" if ngpu > 0 else "cpu")

    start_time = time.perf_counter()
    for iiter, (_, batch) in enumerate(
        reporter.measure_iter_time(iterator, "iter_time"), 1
    ):
        assert isinstance(batch, dict), type(batch)

        if distributed:
            torch.distributed.all_reduce(iterator_stop, ReduceOp.SUM)
            if iterator_stop > 0:
                # Another worker's iterator already finished: stop together.
                break

        batch = to_device(batch, "cuda" if ngpu > 0 else "cpu")
        if no_forward_run:
            # Dry-run mode: count the step as valid but skip computation.
            all_steps_are_invalid = False
            continue

        with autocast(scaler is not None):
            with reporter.measure_time("forward_time"):
                retval = model(**batch)

            # Note(kamo):
            # Supporting two patterns for the returned value from the model
            #   a. dict type
            if isinstance(retval, dict):
                loss = retval["loss"]
                stats = retval["stats"]
                weight = retval["weight"]
                optim_idx = retval.get("optim_idx")
                if optim_idx is not None and not isinstance(optim_idx, int):
                    if not isinstance(optim_idx, torch.Tensor):
                        raise RuntimeError(
                            "optim_idx must be int or 1dim torch.Tensor, "
                            f"but got {type(optim_idx)}"
                        )
                    if optim_idx.dim() >= 2:
                        raise RuntimeError(
                            "optim_idx must be int or 1dim torch.Tensor, "
                            f"but got {optim_idx.dim()}dim tensor"
                        )
                    if optim_idx.dim() == 1:
                        # A 1-dim tensor is accepted only when all entries agree
                        # (e.g. replicated across data-parallel replicas).
                        for v in optim_idx:
                            if v != optim_idx[0]:
                                raise RuntimeError(
                                    "optim_idx must be 1dim tensor "
                                    "having same values for all entries"
                                )
                        optim_idx = optim_idx[0].item()
                    else:
                        optim_idx = optim_idx.item()

            #   b. tuple or list type
            else:
                loss, stats, weight = retval
                optim_idx = None

            stats = {k: v for k, v in stats.items() if v is not None}
            if ngpu > 1 or distributed:
                # Apply weighted averaging for loss and stats
                loss = (loss * weight.type(loss.dtype)).sum()

                # if distributed, this method can also apply all_reduce()
                stats, weight = recursive_average(stats, weight, distributed)

                # Now weight is summation over all workers
                loss /= weight
                if distributed:
                    # NOTE(kamo): Multiply world_size because DistributedDataParallel
                    # automatically normalizes the gradient by world_size.
                    loss *= torch.distributed.get_world_size()

            # Scale down so accumulated gradients average over accum_grad batches.
            loss /= accum_grad

        reporter.register(stats, weight)

        with reporter.measure_time("backward_time"):
            if scaler is not None:
                # Scales loss. Calls backward() on scaled loss
                # to create scaled gradients.
                # Backward passes under autocast are not recommended.
                # Backward ops run in the same dtype autocast chose
                # for corresponding forward ops.
                scaler.scale(loss).backward()
            else:
                loss.backward()

        if iiter % accum_grad == 0:
            if scaler is not None:
                # Unscales the gradients of optimizer's assigned params in-place
                for iopt, optimizer in enumerate(optimizers):
                    if optim_idx is not None and iopt != optim_idx:
                        continue
                    scaler.unscale_(optimizer)

            # gradient noise injection
            if grad_noise:
                add_gradient_noise(
                    model,
                    reporter.get_total_count(),
                    duration=100,
                    eta=1.0,
                    scale_factor=0.55,
                )

            # compute the gradient norm to check if it is normal or not
            grad_norm = torch.nn.utils.clip_grad_norm_(
                model.parameters(),
                max_norm=grad_clip,
                norm_type=grad_clip_type,
            )
            # PyTorch<=1.4, clip_grad_norm_ returns float value
            if not isinstance(grad_norm, torch.Tensor):
                grad_norm = torch.tensor(grad_norm)

            if not torch.isfinite(grad_norm):
                logging.warning(
                    f"The grad norm is {grad_norm}. Skipping updating the model."
                )
                # Must invoke scaler.update() if unscale_() is used in the iteration
                # to avoid the following error:
                #   RuntimeError: unscale_() has already been called
                #   on this optimizer since the last update().
                # Note that if the gradient has inf/nan values,
                # scaler.step skips optimizer.step().
                if scaler is not None:
                    for iopt, optimizer in enumerate(optimizers):
                        if optim_idx is not None and iopt != optim_idx:
                            continue
                        scaler.step(optimizer)
                        scaler.update()
            else:
                all_steps_are_invalid = False
                with reporter.measure_time("optim_step_time"):
                    for iopt, (optimizer, scheduler) in enumerate(
                        zip(optimizers, schedulers)
                    ):
                        if optim_idx is not None and iopt != optim_idx:
                            continue
                        if scaler is not None:
                            # scaler.step() first unscales the gradients of
                            # the optimizer's assigned params.
                            scaler.step(optimizer)
                            # Updates the scale for next iteration.
                            scaler.update()
                        else:
                            optimizer.step()
                        if isinstance(scheduler, AbsBatchStepScheduler):
                            scheduler.step()
                        optimizer.zero_grad()

            # Register lr and train/load time[sec/step],
            # where step refers to accum_grad * mini-batch
            reporter.register(
                dict(
                    {
                        f"optim{i}_lr{j}": pg["lr"]
                        for i, optimizer in enumerate(optimizers)
                        for j, pg in enumerate(optimizer.param_groups)
                        if "lr" in pg
                    },
                    train_time=time.perf_counter() - start_time,
                ),
            )
            start_time = time.perf_counter()

        # NOTE(kamo): Call log_message() after next()
        reporter.next()
        if iiter % log_interval == 0:
            logging.info(reporter.log_message(-log_interval))
            if summary_writer is not None:
                reporter.tensorboard_add_scalar(summary_writer, -log_interval)
            if use_wandb:
                reporter.wandb_log()
    else:
        # Loop exhausted without break: tell the other workers we're done.
        if distributed:
            iterator_stop.fill_(1)
            torch.distributed.all_reduce(iterator_stop, ReduceOp.SUM)

    return all_steps_are_invalid
@classmethod
@torch.no_grad()
def validate_one_epoch(
    cls,
    model: torch.nn.Module,
    iterator: Iterable[Dict[str, torch.Tensor]],
    reporter: SubReporter,
    options: TrainerOptions,
    distributed_option: DistributedOption,
) -> None:
    """Run one full evaluation pass, registering stats with the reporter.

    Mirrors the distributed stop-flag handshake used during training so
    that workers with differently sized iterators stay in sync.
    """
    assert check_argument_types()
    ngpu = options.ngpu
    no_forward_run = options.no_forward_run
    distributed = distributed_option.distributed

    model.eval()

    device = "cuda" if ngpu > 0 else "cpu"
    # [For distributed] Iteration counts can differ between processes;
    # this shared flag tells the others once a worker's iterator ends.
    stop_flag = torch.tensor(0).to(device)
    for (_, batch) in iterator:
        assert isinstance(batch, dict), type(batch)
        if distributed:
            torch.distributed.all_reduce(stop_flag, ReduceOp.SUM)
            if stop_flag > 0:
                break

        batch = to_device(batch, device)
        if no_forward_run:
            continue

        retval = model(**batch)
        if isinstance(retval, dict):
            stats, weight = retval["stats"], retval["weight"]
        else:
            _, stats, weight = retval
        if ngpu > 1 or distributed:
            # Apply weighted averaging for stats.
            # if distributed, this method can also apply all_reduce()
            stats, weight = recursive_average(stats, weight, distributed)

        reporter.register(stats, weight)
        reporter.next()
    else:
        # Our iterator finished first: raise the flag for the other workers.
        if distributed:
            stop_flag.fill_(1)
            torch.distributed.all_reduce(stop_flag, ReduceOp.SUM)
@classmethod
@torch.no_grad()
def plot_attention(
    cls,
    model: torch.nn.Module,
    output_dir: Optional[Path],
    summary_writer: Optional[SummaryWriter],
    iterator: Iterable[Tuple[List[str], Dict[str, torch.Tensor]]],
    reporter: SubReporter,
    options: TrainerOptions,
) -> None:
    """Render per-sample attention-weight images to disk and/or tensorboard.

    For each batch, gathers every attention matrix the model exposes and
    writes one PNG per (attention-name, sample-id) pair under
    ``output_dir/<id>/<name>.<epoch>ep.png``.
    """
    assert check_argument_types()
    import matplotlib

    ngpu = options.ngpu
    no_forward_run = options.no_forward_run

    # Non-interactive backend: this typically runs on headless servers.
    matplotlib.use("Agg")
    import matplotlib.pyplot as plt
    from matplotlib.ticker import MaxNLocator

    model.eval()
    for ids, batch in iterator:
        assert isinstance(batch, dict), type(batch)
        assert len(next(iter(batch.values()))) == len(ids), (
            len(next(iter(batch.values()))),
            len(ids),
        )
        batch = to_device(batch, "cuda" if ngpu > 0 else "cpu")
        if no_forward_run:
            continue

        # 1. Forwarding model and gathering all attentions
        #    calculate_all_attentions() uses single gpu only.
        att_dict = calculate_all_attentions(model, batch)

        # 2. Plot attentions: This part is slow due to matplotlib
        for k, att_list in att_dict.items():
            assert len(att_list) == len(ids), (len(att_list), len(ids))
            for id_, att_w in zip(ids, att_list):

                if isinstance(att_w, torch.Tensor):
                    att_w = att_w.detach().cpu().numpy()

                if att_w.ndim == 2:
                    # Insert a leading "heads" axis so 2d and 3d share one path.
                    att_w = att_w[None]
                elif att_w.ndim > 3 or att_w.ndim == 1:
                    raise RuntimeError(f"Must be 2 or 3 dimension: {att_w.ndim}")

                w, h = plt.figaspect(1.0 / len(att_w))
                fig = plt.Figure(figsize=(w * 1.3, h * 1.3))
                axes = fig.subplots(1, len(att_w))
                if len(att_w) == 1:
                    # subplots() returns a bare Axes when ncols == 1.
                    axes = [axes]

                for ax, aw in zip(axes, att_w):
                    ax.imshow(aw.astype(np.float32), aspect="auto")
                    ax.set_title(f"{k}_{id_}")
                    ax.set_xlabel("Input")
                    ax.set_ylabel("Output")
                    ax.xaxis.set_major_locator(MaxNLocator(integer=True))
                    ax.yaxis.set_major_locator(MaxNLocator(integer=True))

                if output_dir is not None:
                    p = output_dir / id_ / f"{k}.{reporter.get_epoch()}ep.png"
                    p.parent.mkdir(parents=True, exist_ok=True)
                    fig.savefig(p)

                if summary_writer is not None:
                    summary_writer.add_figure(
                        f"{k}_{id_}", fig, reporter.get_epoch()
                    )
        # Advance the reporter one step per batch.
        reporter.next()
| 40.709845 | 88 | 0.534905 |
0cd6d2490cc5f724f149a5963c423f99a0ac90ec | 3,424 | py | Python | asterioids-pygame-project/source_code_step_10/space_rocks/game.py | syberflea/materials | 54f44725b40edf00c1b523d7a85b34a85014d7eb | [
"MIT"
] | 3,682 | 2018-05-07T19:45:24.000Z | 2022-03-31T15:19:10.000Z | asterioids-pygame-project/source_code_step_10/space_rocks/game.py | sribarrow/materials | c17c4a4d6f8487e59eac1df8c88ca92b73d6d2a5 | [
"MIT"
] | 148 | 2018-05-15T21:18:49.000Z | 2022-03-21T11:25:39.000Z | asterioids-pygame-project/source_code_step_10/space_rocks/game.py | sribarrow/materials | c17c4a4d6f8487e59eac1df8c88ca92b73d6d2a5 | [
"MIT"
] | 5,535 | 2018-05-25T23:36:08.000Z | 2022-03-31T16:55:52.000Z | import pygame
from models import Asteroid, Spaceship
from utils import get_random_position, load_sprite, print_text
class SpaceRocks:
    """The game: owns the window, all sprites, and the fixed-step main loop."""

    # Minimum spawn distance (pixels) between a new asteroid and the ship.
    MIN_ASTEROID_DISTANCE = 250

    def __init__(self):
        self._init_pygame()
        self.screen = pygame.display.set_mode((800, 600))
        self.background = load_sprite("space", False)
        self.clock = pygame.time.Clock()
        self.font = pygame.font.Font(None, 64)
        self.message = ""

        # NOTE: sprites hold bound `.append` callbacks to these exact lists,
        # so the lists must always be mutated in place, never rebound.
        self.asteroids = []
        self.bullets = []
        self.spaceship = Spaceship((400, 300), self.bullets.append)

        for _ in range(6):
            # Re-roll until the spawn point is far enough from the ship.
            position = get_random_position(self.screen)
            while (
                position.distance_to(self.spaceship.position)
                <= self.MIN_ASTEROID_DISTANCE
            ):
                position = get_random_position(self.screen)
            self.asteroids.append(Asteroid(position, self.asteroids.append))

    def main_loop(self):
        """Run the input/update/draw cycle forever."""
        while True:
            self._handle_input()
            self._process_game_logic()
            self._draw()

    def _init_pygame(self):
        pygame.init()
        pygame.display.set_caption("Space Rocks")

    def _handle_input(self):
        for event in pygame.event.get():
            wants_quit = event.type == pygame.QUIT or (
                event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE
            )
            if wants_quit:
                quit()
            if (
                self.spaceship
                and event.type == pygame.KEYDOWN
                and event.key == pygame.K_SPACE
            ):
                self.spaceship.shoot()

        pressed = pygame.key.get_pressed()
        if self.spaceship:
            if pressed[pygame.K_RIGHT]:
                self.spaceship.rotate(clockwise=True)
            elif pressed[pygame.K_LEFT]:
                self.spaceship.rotate(clockwise=False)
            if pressed[pygame.K_UP]:
                self.spaceship.accelerate()

    def _process_game_logic(self):
        for game_object in self._get_game_objects():
            game_object.move(self.screen)

        # Ship vs asteroids: first hit ends the game.
        if self.spaceship:
            for asteroid in self.asteroids:
                if asteroid.collides_with(self.spaceship):
                    self.spaceship = None
                    self.message = "You lost!"
                    break

        # Bullets vs asteroids: each bullet may destroy at most one asteroid.
        for bullet in self.bullets[:]:
            hit = next(
                (a for a in self.asteroids if a.collides_with(bullet)), None
            )
            if hit is not None:
                self.asteroids.remove(hit)
                self.bullets.remove(bullet)
                hit.split()

        # Drop bullets that left the screen (in-place to keep list identity).
        screen_rect = self.screen.get_rect()
        self.bullets[:] = [
            b for b in self.bullets if screen_rect.collidepoint(b.position)
        ]

        if not self.asteroids and self.spaceship:
            self.message = "You won!"

    def _draw(self):
        self.screen.blit(self.background, (0, 0))
        for game_object in self._get_game_objects():
            game_object.draw(self.screen)
        if self.message:
            print_text(self.screen, self.message, self.font)
        pygame.display.flip()
        self.clock.tick(60)

    def _get_game_objects(self):
        objects = self.asteroids + self.bullets
        if self.spaceship:
            objects.append(self.spaceship)
        return objects
| 30.846847 | 77 | 0.5625 |
6770b06296751a6cab58a0ea73c68da5469d8880 | 89 | py | Python | app/main/__init__.py | DennisKipkirui/Pitch_app | d2272b12c61df545bf4a16e7235631becbf2a901 | [
"Unlicense"
] | 1 | 2020-07-17T06:52:43.000Z | 2020-07-17T06:52:43.000Z | app/main/__init__.py | DennisKipkirui/Pitch_app | d2272b12c61df545bf4a16e7235631becbf2a901 | [
"Unlicense"
] | null | null | null | app/main/__init__.py | DennisKipkirui/Pitch_app | d2272b12c61df545bf4a16e7235631becbf2a901 | [
"Unlicense"
] | 1 | 2020-07-18T10:58:43.000Z | 2020-07-18T10:58:43.000Z | from flask import Blueprint
main = Blueprint('main',__name__)
from .import errors,views | 17.8 | 33 | 0.786517 |
7c9c871ce31cd4de64d9c67717fb566efbbe4da2 | 96 | py | Python | sample_dashboard/google_analytics/apps.py | Georgitanev/django_dashboard | 5be2680712fcedf33965fd66a69d46e1e32ab04b | [
"MIT"
] | null | null | null | sample_dashboard/google_analytics/apps.py | Georgitanev/django_dashboard | 5be2680712fcedf33965fd66a69d46e1e32ab04b | [
"MIT"
] | null | null | null | sample_dashboard/google_analytics/apps.py | Georgitanev/django_dashboard | 5be2680712fcedf33965fd66a69d46e1e32ab04b | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class SalesConfig(AppConfig):
    """Django application configuration for the ``google_analytics`` app."""

    # App label Django uses to register the application.
    # NOTE(review): the class is named SalesConfig but configures
    # google_analytics — presumably copied from another app; confirm.
    name = "google_analytics"
| 16 | 33 | 0.770833 |
4d4bf1a1e81845f3b9b84d3b4b6b336dbb4c4bfd | 1,914 | py | Python | scripts/generate_meta_info_pairdata.py | wacky6/Real-ESRGAN | c9023b3d7a5b711b0505a3e39671e3faab9de1fe | [
"BSD-3-Clause"
] | 58 | 2021-12-21T03:57:31.000Z | 2022-03-26T15:04:02.000Z | scripts/generate_meta_info_pairdata.py | wacky6/Real-ESRGAN | c9023b3d7a5b711b0505a3e39671e3faab9de1fe | [
"BSD-3-Clause"
] | 5 | 2021-12-24T07:11:50.000Z | 2022-02-10T01:20:27.000Z | scripts/generate_meta_info_pairdata.py | wacky6/Real-ESRGAN | c9023b3d7a5b711b0505a3e39671e3faab9de1fe | [
"BSD-3-Clause"
] | 4 | 2022-01-27T14:46:35.000Z | 2022-02-13T11:52:10.000Z | import argparse
import glob
import os
def main(args):
    """Write a meta-info txt file pairing ground-truth and low-quality images.

    Each line of ``args.meta_info`` is ``<gt_relpath>, <lq_relpath>``, where
    paths are made relative to ``args.root[0]`` / ``args.root[1]``. The two
    input folders are matched after sorting, so they must contain the same
    number of files in corresponding order.

    Args:
        args: Namespace with ``input`` (two folders: gt, lq), ``root``
            (two root folders for relpath) and ``meta_info`` (output txt).
    """
    img_paths_gt = sorted(glob.glob(os.path.join(args.input[0], '*')))
    img_paths_lq = sorted(glob.glob(os.path.join(args.input[1], '*')))

    assert len(img_paths_gt) == len(img_paths_lq), ('GT folder and LQ folder should have the same length, but got '
                                                    f'{len(img_paths_gt)} and {len(img_paths_lq)}.')

    # Use a context manager so the file is closed even if relpath/write fails
    # (the original leaked the handle on any exception).
    with open(args.meta_info, 'w') as txt_file:
        for img_path_gt, img_path_lq in zip(img_paths_gt, img_paths_lq):
            img_name_gt = os.path.relpath(img_path_gt, args.root[0])
            img_name_lq = os.path.relpath(img_path_lq, args.root[1])
            print(f'{img_name_gt}, {img_name_lq}')
            txt_file.write(f'{img_name_gt}, {img_name_lq}\n')
if __name__ == '__main__':
    """Generate meta info (txt file) for paired images.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--input',
        nargs='+',
        default=['datasets/DF2K/DIV2K_train_HR_sub', 'datasets/DF2K/DIV2K_train_LR_bicubic_X4_sub'],
        help='Input folder, should be [gt_folder, lq_folder]')
    parser.add_argument('--root', nargs='+', default=[None, None], help='Folder root, will use the ')
    parser.add_argument(
        '--meta_info',
        type=str,
        default='datasets/DF2K/meta_info/meta_info_DIV2K_sub_pair.txt',
        help='txt path for meta info')
    args = parser.parse_args()

    # Exactly one gt folder and one lq folder (and matching roots) required.
    assert len(args.input) == 2, 'Input folder should have two elements: gt folder and lq folder'
    assert len(args.root) == 2, 'Root path should have two elements: root for gt folder and lq folder'
    # Make sure the output directory for the meta file exists.
    os.makedirs(os.path.dirname(args.meta_info), exist_ok=True)
    for i in range(2):
        # Strip a trailing slash so dirname() below yields the parent folder.
        if args.input[i].endswith('/'):
            args.input[i] = args.input[i][:-1]
        # Default each root to the parent of the corresponding input folder.
        if args.root[i] is None:
            args.root[i] = os.path.dirname(args.input[i])

    main(args)
| 39.875 | 115 | 0.634796 |
3f6cf3d4f6ebdc61a5f730f6eb99f2f0bc0b2325 | 1,738 | py | Python | torchlib/module/layers/fft_layers.py | antsfamily/torchtool | fd0d6e6fe6701206b15f95af145d6178a87233f9 | [
"MIT"
] | 1 | 2019-08-15T15:32:36.000Z | 2019-08-15T15:32:36.000Z | torchlib/module/layers/fft_layers.py | antsfamily/torchtool | fd0d6e6fe6701206b15f95af145d6178a87233f9 | [
"MIT"
] | null | null | null | torchlib/module/layers/fft_layers.py | antsfamily/torchtool | fd0d6e6fe6701206b15f95af145d6178a87233f9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2019-11-07 17:00:48
# @Author : Zhi Liu (zhiliu.mind@gmail.com)
# @Link : http://iridescent.ink
# @Version : $1.0$
import torch as th
import torch.nn.functional as F
import torchlib as tl
class FFTLayer1d(th.nn.Module):
    """Module wrapper applying torchlib's FFT along axis 0 of the input.

    If ``nfft`` is None it is fixed to the second dimension of the first
    input seen; shorter inputs are zero-padded up to ``nfft``.
    """

    def __init__(self, nfft=None):
        super(FFTLayer1d, self).__init__()
        # Target FFT length; resolved lazily on first forward() if None.
        self.nfft = nfft

    def forward(self, x):
        # x is expected to be 3-dim: (n, d, 2) with real/imag in the last
        # axis (see the abs(re + 1j*im) usage in the demo below).
        n, d, _ = x.size()
        if self.nfft is None:
            self.nfft = d
        if d != self.nfft:
            # NOTE(review): F.pad expects an even-length pad list; this
            # 3-element list looks wrong and would raise — confirm intent.
            x = F.pad(x, [0, self.nfft - d, 0], mode='constant', value=0)
        # y = th.fft.fft(x, n=None, dim=0, norm=None)
        # NOTE(review): passing n (the batch size) as the FFT length looks
        # suspicious vs. the commented torch call above — verify tl.fft's
        # signature.
        y = tl.fft(x, n, axis=0, norm=None)
        return y
if __name__ == '__main__':

    import numpy as np
    import torch as th
    import matplotlib.pyplot as plt

    # Demo: FFT of a 100 Hz complex exponential sampled at 1 kHz for 0.1 s.
    PI = np.pi
    f0 = 100
    Fs = 1000
    Ts = 0.1
    Ns = int(Fs * Ts)
    f = np.linspace(0., Fs, Ns)
    t = np.linspace(0, Ts, Ns)

    x_np = np.cos(2. * PI * f0 * t) + 1j * np.sin(2. * PI * f0 * t)

    # NOTE(review): requires a CUDA device; fails on CPU-only machines.
    device = th.device('cuda:0')
    # x_th = th.tensor(x_np, dtype=th.complex64)
    # Complex signal represented as an (Ns, 2) real tensor: [real, imag].
    x_th = th.tensor([x_np.real, x_np.imag], dtype=th.float32).transpose(1, 0)
    x_th = x_th.to(device)
    print(x_th.shape, type(x_th))

    # Batch of three identical signals: shape (3, Ns, 2).
    x_ths = th.tensor([x_th.cpu().numpy(), x_th.cpu().numpy(),
                       x_th.cpu().numpy()], dtype=th.float32)
    print(x_ths.shape)

    fftlayer = FFTLayer1d()
    ys = fftlayer.forward(x_ths)

    # Magnitude spectrum from the real/imag channels.
    ys = th.abs(ys[:, :, 0] + 1j * ys[:, :, 1]).cpu()

    # Plot the three spectra side by side.
    plt.figure()
    plt.subplot(131)
    plt.plot(f, ys[0])
    plt.grid()
    plt.subplot(132)
    plt.plot(f, ys[1])
    plt.grid()
    plt.subplot(133)
    plt.plot(f, ys[2])
    plt.grid()
    plt.show()
| 23.808219 | 78 | 0.546605 |
c69bcd062fd2add7775ac68577ec73f3dd41e27e | 1,566 | py | Python | dask/dataframe/tests/test_extensions.py | srijan-deepsource/dask | 0673d9084e02f985f3fdf5ba6ede80e8de5ac15c | [
"BSD-3-Clause"
] | 20 | 2015-01-19T14:04:10.000Z | 2020-01-14T03:43:19.000Z | dask/dataframe/tests/test_extensions.py | srijan-deepsource/dask | 0673d9084e02f985f3fdf5ba6ede80e8de5ac15c | [
"BSD-3-Clause"
] | 12 | 2015-01-22T22:00:43.000Z | 2020-07-28T19:22:16.000Z | dask/dataframe/tests/test_extensions.py | srijan-deepsource/dask | 0673d9084e02f985f3fdf5ba6ede80e8de5ac15c | [
"BSD-3-Clause"
] | 7 | 2015-01-04T18:50:00.000Z | 2020-07-29T11:00:04.000Z | from decimal import Decimal
import pytest
import dask.dataframe as dd
from dask.dataframe.utils import assert_eq, PANDAS_VERSION
pd = pytest.importorskip("pandas", minversion="0.23.4")
from pandas.tests.extension.decimal.array import DecimalArray, DecimalDtype
from dask.dataframe.extensions import make_array_nonempty, make_scalar
@make_array_nonempty.register(DecimalDtype)
def _(dtype):
    """Give dask a non-empty sample DecimalArray for meta inference."""
    kwargs = {}
    # _from_sequence only accepts a dtype kwarg from pandas 0.24.0rc1 on.
    if PANDAS_VERSION >= "0.24.0rc1":
        kwargs["dtype"] = dtype
    return DecimalArray._from_sequence([Decimal("0"), Decimal("NaN")], **kwargs)
@make_scalar.register(Decimal)
def _(x):
    """Give dask a sample Decimal scalar for meta inference."""
    return Decimal("1")
def test_register_extension_type():
    """Decimal-backed Series and DataFrames round-trip through dask."""
    arr = DecimalArray._from_sequence([Decimal("1.0")] * 10)
    ser = pd.Series(arr)
    dser = dd.from_pandas(ser, 2)
    assert_eq(ser, dser)

    df = pd.DataFrame({"A": ser})
    ddf = dd.from_pandas(df, 2)
    assert_eq(df, ddf)
def test_reduction():
    """mean() over a Decimal extension series matches pandas."""
    pytest.importorskip("pandas", minversion="0.24.0")
    ser = pd.Series(DecimalArray._from_sequence([Decimal("0"), Decimal("1")]))
    dser = dd.from_pandas(ser, 2)
    assert_eq(ser.mean(skipna=False), dser.mean(skipna=False))

    # It's unclear whether this can be reliably provided, at least with the current
    # implementation, which uses pandas.DataFrame.sum(), returning a (homogenous)
    # series which has potentially cast values.
    # assert_eq(ser.to_frame().mean(skipna=False), dser.to_frame().mean(skipna=False))
def test_scalar():
    """make_meta returns a Decimal scalar unchanged (registered above)."""
    result = dd.utils.make_meta(Decimal("1.0"))
    assert result == Decimal("1.0")
| 29 | 86 | 0.706897 |
a7f4c3629119458d22490618c7884c0009b8900d | 24,201 | py | Python | eodatasets3/wagl.py | ohehir/eo-datasets | 836bb8613e1549b8ae82a5ac1198a99bbdf08a4d | [
"Apache-2.0"
] | null | null | null | eodatasets3/wagl.py | ohehir/eo-datasets | 836bb8613e1549b8ae82a5ac1198a99bbdf08a4d | [
"Apache-2.0"
] | null | null | null | eodatasets3/wagl.py | ohehir/eo-datasets | 836bb8613e1549b8ae82a5ac1198a99bbdf08a4d | [
"Apache-2.0"
] | null | null | null | """
Package WAGL HDF5 Outputs
This will convert the HDF5 file (and sibling fmask/gqa files) into
GeoTIFFS (COGs) with datacube metadata using the DEA naming conventions
for files.
"""
import contextlib
import os
import re
import sys
from datetime import timedelta, datetime
from pathlib import Path
from typing import List, Sequence, Optional, Iterable, Any, Tuple, Dict, Mapping
from uuid import UUID
import attr
import numpy
import rasterio
from affine import Affine
from boltons.iterutils import get_path, PathAccessError
from click import secho
from rasterio import DatasetReader
from rasterio.crs import CRS
from rasterio.enums import Resampling
from eodatasets3 import serialise, utils, images, DatasetAssembler
from eodatasets3.images import GridSpec
from eodatasets3.model import DatasetDoc
from eodatasets3.serialise import loads_yaml
from eodatasets3.ui import bool_style
from eodatasets3.utils import default_utc
try:
import h5py
except ImportError:
sys.stderr.write(
"eodatasets3 has not been installed with the wagl extras. \n"
" Try `pip install eodatasets3[wagl]\n"
)
raise
# Product layers wagl can produce; by default only the surface-reflectance
# corrections with/without terrain illumination are packaged.
POSSIBLE_PRODUCTS = ("nbar", "nbart", "lambertian", "sbt")
DEFAULT_PRODUCTS = ("nbar", "nbart")

# (platform, product) -> measurement names used for the RGB thumbnail.
_THUMBNAILS = {
    ("landsat-5", "nbar"): ("nbar:red", "nbar:green", "nbar:blue"),
    ("landsat-5", "nbart"): ("nbart:red", "nbart:green", "nbart:blue"),
    ("landsat-7", "nbar"): ("nbar:red", "nbar:green", "nbar:blue"),
    ("landsat-7", "nbart"): ("nbart:red", "nbart:green", "nbart:blue"),
    ("landsat-8", "nbar"): ("nbar:red", "nbar:green", "nbar:blue"),
    ("landsat-8", "nbart"): ("nbart:red", "nbart:green", "nbart:blue"),
}

# Tell GDAL to decode zip member names as UTF-8.
os.environ["CPL_ZIP_ENCODING"] = "UTF-8"

# Matches the band suffix of a tif filename, e.g. "..._B01.tif".
FILENAME_TIF_BAND = re.compile(
    r"(?P<prefix>(?:.*_)?)(?P<band_name>B[0-9][A0-9]|B[0-9]*|B[0-9a-zA-z]*)"
    r"(?P<extension>\....)"
)
# Extracts the level-1 product suite (e.g. "L1TP") from a granule name.
PRODUCT_SUITE_FROM_GRANULE = re.compile("(L1[GTPCS]{1,2})")
def _find_h5_paths(h5_obj: h5py.Group, dataset_class: str = "") -> List[str]:
"""
Find all objects in a h5 of the given class, returning their path.
(class examples: IMAGE, TABLE. SCALAR)
"""
items = []
def _find(name, obj):
if obj.attrs.get("CLASS") == dataset_class:
items.append(name)
h5_obj.visititems(_find)
return items
def _unpack_products(
    p: DatasetAssembler, product_list: Iterable[str], h5group: h5py.Group
) -> None:
    """
    Unpack and package the NBAR and NBART products.

    Copies every IMAGE dataset belonging to each requested product out of
    the hdf5 group as a measurement, and writes an RGB thumbnail for
    platform/product pairs listed in _THUMBNAILS.
    """
    # listing of all datasets of IMAGE CLASS type
    img_paths = _find_h5_paths(h5group, "IMAGE")

    for product in product_list:
        with do(f"Starting {product}", heading=True):
            # NOTE: the comprehension variable `p` shadows the assembler `p`
            # (harmless in py3 — comprehension scope — but easy to misread).
            for pathname in [
                p for p in img_paths if "/{}/".format(product.upper()) in p
            ]:
                with do(f"Path {pathname!r}"):
                    dataset = h5group[pathname]
                    band_name = utils.normalise_band_name(dataset.attrs["alias"])
                    write_measurement_h5(
                        p,
                        f"{product}:{band_name}",
                        dataset,
                        overview_resampling=Resampling.average,
                        file_id=_file_id(dataset),
                    )

            if (p.platform, product) in _THUMBNAILS:
                red, green, blue = _THUMBNAILS[(p.platform, product)]
                with do(f"Thumbnailing {product}"):
                    p.write_thumbnail(
                        red, green, blue, kind=product, static_stretch=(1, 3000)
                    )
def write_measurement_h5(
    p: DatasetAssembler,
    name: str,
    g: h5py.Dataset,
    overviews=images.DEFAULT_OVERVIEWS,
    overview_resampling=Resampling.nearest,
    expand_valid_data=True,
    file_id: str = None,
):
    """Copy a single hdf5 dataset onto the assembler as a measurement."""
    # Chunked (real h5py) datasets are read fully into memory first;
    # anything array-like is passed straight through.
    data = g[:] if hasattr(g, "chunks") else g

    grid = images.GridSpec(
        shape=g.shape,
        transform=Affine.from_gdal(*g.attrs["geotransform"]),
        crs=CRS.from_wkt(g.attrs["crs_wkt"]),
    )
    p.write_measurement_numpy(
        name=name,
        array=data,
        grid_spec=grid,
        nodata=g.attrs.get("no_data_value"),
        overviews=overviews,
        overview_resampling=overview_resampling,
        expand_valid_data=expand_valid_data,
        file_id=file_id,
    )
def _file_id(dataset: h5py.Dataset) -> str:
    """Derive a file id for the dataset from its attributes, e.g. 'band01'."""
    # Attributes typically look like:
    #   band_id='1', band_name='BAND-1', alias='Blue'
    raw_id = dataset.attrs["band_id"]
    # Purely numeric ids get normalised to the 'band01' naming convention.
    return utils.normalise_band_name(raw_id)
def _unpack_observation_attributes(
    p: DatasetAssembler,
    product_list: Iterable[str],
    h5group: h5py.Group,
    infer_datetime_range=False,
):
    """
    Unpack the angles + other supplementary datasets produced by wagl.
    Currently only the mode resolution group gets extracted.
    """
    resolution_groups = sorted(g for g in h5group.keys() if g.startswith("RES-GROUP-"))

    # Use the highest resolution as the ground sample distance.
    del p.properties["eo:gsd"]
    p.properties["eo:gsd"] = min(
        min(h5group[grp].attrs["resolution"]) for grp in resolution_groups
    )

    if len(resolution_groups) not in (1, 2):
        raise NotImplementedError(
            f"Unexpected set of res-groups. "
            f"Expected either two (with pan) or one (without pan), "
            f"got {resolution_groups!r}"
        )
    # Res groups are ordered in descending resolution, so res-group-0 is the highest resolution.
    # (ie. res-group-0 in landsat 7/8 is Panchromatic)
    # We only care about packaging OA data for the "common" bands: not panchromatic.
    # So we always pick the lowest resolution: the last (or only) group.
    res_grp = h5group[resolution_groups[-1]]

    def _write(section: str, dataset_names: Sequence[str]):
        """
        Write supplementary attributes as measurement.
        """
        for dataset_name in dataset_names:
            o = f"{section}/{dataset_name}"
            with do(f"Path {o!r} "):
                measurement_name = utils.normalise_band_name(dataset_name)
                write_measurement_h5(
                    p,
                    f"oa:{measurement_name}",
                    res_grp[o],
                    # We only use the product bands for valid data calc, not supplementary.
                    # According to Josh: Supplementary pixels outside of the product bounds are implicitly invalid.
                    expand_valid_data=False,
                    overviews=None,
                )

    _write(
        "SATELLITE-SOLAR",
        [
            "SATELLITE-VIEW",
            "SATELLITE-AZIMUTH",
            "SOLAR-ZENITH",
            "SOLAR-AZIMUTH",
            "RELATIVE-AZIMUTH",
            "TIME-DELTA",
        ],
    )
    _write("INCIDENT-ANGLES", ["INCIDENT-ANGLE", "AZIMUTHAL-INCIDENT"])
    _write("EXITING-ANGLES", ["EXITING-ANGLE", "AZIMUTHAL-EXITING"])
    _write("RELATIVE-SLOPE", ["RELATIVE-SLOPE"])
    _write("SHADOW-MASKS", ["COMBINED-TERRAIN-SHADOW"])

    # Optionally derive the acquisition datetime range from the per-pixel
    # time offsets (only used for some sensors; see _create_contiguity).
    timedelta_data = (
        res_grp["SATELLITE-SOLAR/TIME-DELTA"] if infer_datetime_range else None
    )
    with do("Contiguity", timedelta=bool(timedelta_data)):
        _create_contiguity(
            p,
            product_list,
            resolution_yx=tuple(res_grp.attrs["resolution"]),
            timedelta_data=timedelta_data,
        )
def _create_contiguity(
    p: DatasetAssembler,
    product_list: Iterable[str],
    resolution_yx: Tuple[float, float],
    timedelta_product: str = "nbar",
    timedelta_data: numpy.ndarray = None,
):
    """
    Create the contiguity (all pixels valid) dataset.

    Write a contiguity mask file based on the intersection of valid data pixels across all
    bands from the input files.

    If ``timedelta_data`` is supplied, also derive the dataset's datetime
    range from the min/max per-pixel time offsets within the
    ``timedelta_product`` footprint.
    """
    for product in product_list:
        contiguity = None
        for grid, band_name, path in p.iter_measurement_paths():
            if not band_name.startswith(f"{product.lower()}:"):
                continue
            # Only our given res group (no pan band in Landsat)
            if grid.resolution_yx != resolution_yx:
                continue

            with rasterio.open(path) as ds:
                ds: DatasetReader
                if contiguity is None:
                    # First matching band: start with an all-valid mask.
                    contiguity = numpy.ones((ds.height, ds.width), dtype="uint8")
                    geobox = GridSpec.from_rio(ds)
                elif ds.shape != contiguity.shape:
                    raise NotImplementedError(
                        "Contiguity from measurements of different shape"
                    )

                # AND each band's valid-pixel (>0) mask into the result.
                for band in ds.indexes:
                    contiguity &= ds.read(band) > 0

        if contiguity is None:
            secho(f"No images found for requested product {product}", fg="red")
            continue

        p.write_measurement_numpy(
            f"oa:{product.lower()}_contiguity",
            contiguity,
            geobox,
            nodata=255,
            overviews=None,
            expand_valid_data=False,
        )

        # masking the timedelta_data with contiguity mask to get max and min timedelta within the NBAR product
        # footprint for Landsat sensor. For Sentinel sensor, it inherits from level 1 yaml file
        if timedelta_data is not None and product.lower() == timedelta_product:
            valid_timedelta_data = numpy.ma.masked_where(
                contiguity == 0, timedelta_data
            )

            def offset_from_center(v: numpy.datetime64):
                # Offsets are stored as seconds relative to the centre time.
                return p.datetime + timedelta(
                    microseconds=v.astype(float) * 1_000_000.0
                )

            p.datetime_range = (
                offset_from_center(numpy.ma.min(valid_timedelta_data)),
                offset_from_center(numpy.ma.max(valid_timedelta_data)),
            )
@contextlib.contextmanager
def do(name: str, heading=False, **fields):
    """Informational logging around a step.

    Headings print on their own line in blue; non-headings stay on one
    line and append "(done)" when the body finishes.

    TODO: move this to the cli. It shouldn't be part of library usage.
    """

    def _fmt(value: Any):
        if isinstance(value, bool):
            return bool_style(value)
        if isinstance(value, Path):
            return repr(str(value))
        return repr(value)

    if heading:
        name = f"\n{name}"
    rendered = " ".join(f"{k}:{_fmt(v)}" for k, v in fields.items())
    # nl=heading: headings end their line; single-line mode keeps the cursor.
    secho(f"{name} {rendered} ", nl=heading, fg="blue" if heading else None)
    yield
    if not heading:
        secho("(done)")
def _extract_reference_code(p: DatasetAssembler, granule: str) -> Optional[str]:
matches = None
if p.platform.startswith("landsat"):
matches = re.match(r"L\w\d(?P<reference_code>\d{6}).*", granule)
elif p.platform.startswith("sentinel-2"):
matches = re.match(r".*_T(?P<reference_code>\d{1,2}[A-Z]{3})_.*", granule)
if matches:
[reference_code] = matches.groups()
# TODO name properly
return reference_code
return None
@attr.s(auto_attribs=True)
class Granule:
    """
    A single granule in a hdf5 file, with optional corresponding fmask/gqa/etc files.

    You probably want to make one by using `Granule.for_path()`
    """

    # Granule name (the top-level key inside the wagl hdf5 file).
    name: str
    wagl_hdf5: Path
    wagl_metadata: Dict
    source_level1_metadata: DatasetDoc

    fmask_doc: Optional[Dict] = None
    fmask_image: Optional[Path] = None
    gqa_doc: Optional[Dict] = None
    tesp_doc: Optional[Dict] = None

    @classmethod
    def for_path(
        cls,
        wagl_hdf5: Path,
        granule_names: Optional[Sequence[str]] = None,
        level1_metadata_path: Optional[Path] = None,
        fmask_image_path: Optional[Path] = None,
        fmask_doc_path: Optional[Path] = None,
        gqa_doc_path: Optional[Path] = None,
        tesp_doc_path: Optional[Path] = None,
    ):
        """
        Create granules by scanning the given hdf5 file.

        Optionally specify additional files and level1 path.

        If they are not specified, they are looked for using WAGL's output
        naming conventions.

        This is a generator: the hdf5 file stays open while granules are
        being consumed.

        NOTE(review): the explicitly-passed paths (level1/fmask/gqa/tesp)
        are rebound inside the loop, so with multiple granules the first
        granule's resolved paths carry over to later ones — confirm this
        is only ever called with single-granule overrides.
        """
        if not wagl_hdf5.exists():
            raise ValueError(f"Input hdf5 doesn't exist {wagl_hdf5}")

        with h5py.File(wagl_hdf5, "r") as fid:
            granule_names = granule_names or fid.keys()

            for granule_name in granule_names:
                if granule_name not in fid:
                    raise ValueError(
                        f"Granule {granule_name!r} not found in file {wagl_hdf5}"
                    )

                # wagl stores its processing metadata as an embedded yaml doc.
                wagl_doc_field = get_path(fid, (granule_name, "METADATA", "CURRENT"))
                if not wagl_doc_field:
                    raise ValueError(
                        f"Granule contains no wagl metadata: {granule_name} in {wagl_hdf5}"
                    )
                [wagl_doc] = loads_yaml(wagl_doc_field[()])

                if not level1_metadata_path:
                    # Derive the level1 metadata path from the source tar
                    # recorded in the wagl doc.
                    level1_tar_path = Path(
                        get_path(wagl_doc, ("source_datasets", "source_level1"))
                    )
                    level1_metadata_path = level1_tar_path.with_suffix(
                        ".odc-metadata.yaml"
                    )
                if not level1_metadata_path.exists():
                    raise ValueError(
                        f"No level1 metadata found at {level1_metadata_path}"
                    )
                level1 = serialise.from_path(level1_metadata_path)

                fmask_image_path = fmask_image_path or wagl_hdf5.with_name(
                    f"{granule_name}.fmask.img"
                )
                if not fmask_image_path.exists():
                    raise ValueError(f"No fmask image found at {fmask_image_path}")

                fmask_doc_path = fmask_doc_path or fmask_image_path.with_suffix(".yaml")
                if not fmask_doc_path.exists():
                    raise ValueError(f"No fmask found at {fmask_doc_path}")
                with fmask_doc_path.open("r") as fl:
                    [fmask_doc] = loads_yaml(fl)

                gqa_doc_path = gqa_doc_path or wagl_hdf5.with_name(
                    f"{granule_name}.gqa.yaml"
                )
                if not gqa_doc_path.exists():
                    raise ValueError(f"No gqa found at {gqa_doc_path}")
                with gqa_doc_path.open("r") as fl:
                    [gqa_doc] = loads_yaml(fl)

                # Optional doc
                if tesp_doc_path:
                    # But if they gave us a path, we're strict about it existing.
                    if not tesp_doc_path.exists():
                        raise ValueError(
                            f"Supplied tesp doc path doesn't exist: {tesp_doc_path}"
                        )
                else:
                    tesp_doc_path = wagl_hdf5.with_name(f"{granule_name}.tesp.yaml")
                if tesp_doc_path.exists():
                    with tesp_doc_path.open("r") as fl:
                        [tesp_doc] = loads_yaml(fl)
                # NOTE(review): if no tesp yaml exists, `tesp_doc` below is
                # never assigned and this raises NameError — confirm the
                # intended behaviour (e.g. default it to None first).

                yield cls(
                    name=granule_name,
                    wagl_hdf5=wagl_hdf5,
                    wagl_metadata=wagl_doc,
                    source_level1_metadata=level1,
                    fmask_doc=fmask_doc,
                    fmask_image=fmask_image_path,
                    gqa_doc=gqa_doc,
                    tesp_doc=tesp_doc,
                )
def package_file(
    out_directory: Path,
    hdf_file: Path,
    included_products: Iterable[str] = DEFAULT_PRODUCTS,
    include_oa: bool = True,
) -> Dict[UUID, Path]:
    """
    Simple alternative to package().

    Takes a single HDF5 and infers other paths (gqa etc) via naming conventions.

    Returns a dictionary of the output datasets: Mapping UUID to the their metadata path.
    """
    packaged: Dict[UUID, Path] = {}
    for found_granule in Granule.for_path(hdf_file):
        dataset_uuid, dataset_metadata_path = package(
            out_directory,
            found_granule,
            included_products=included_products,
            include_oa=include_oa,
        )
        packaged[dataset_uuid] = dataset_metadata_path
    return packaged
def package(
    out_directory: Path,
    granule: Granule,
    included_products: Iterable[str] = DEFAULT_PRODUCTS,
    include_oa: bool = True,
) -> Tuple[UUID, Path]:
    """
    Package an L2 product.

    :param include_oa:
        Whether to unpack the observation attributes (and write the fmask
        image, when one was supplied on the granule).
    :param out_directory:
        The base directory for output datasets. A DEA-naming-conventions folder hierarchy
        will be created inside this folder.
    :param granule:
        Granule information. You probably want to make one with Granule.from_path()
    :param included_products:
        A list of imagery products to include in the package.
        Defaults to all products.
    :return:
        The dataset UUID and output metadata path
    """
    # Product names are matched case-insensitively throughout.
    included_products = tuple(s.lower() for s in included_products)
    with h5py.File(granule.wagl_hdf5, "r") as fid:
        granule_group = fid[granule.name]
        with DatasetAssembler(
            out_directory,
            # WAGL stamps a good, random ID already.
            dataset_id=granule.wagl_metadata.get("id"),
            naming_conventions="dea",
        ) as p:
            level1 = granule.source_level1_metadata
            # Inherit shared properties (platform, datetime, etc.) from the
            # source level-1 dataset.
            p.add_source_dataset(level1, auto_inherit_properties=True)
            # It's a GA ARD product.
            p.producer = "ga.gov.au"
            p.product_family = "ard"
            wagl_doc = _apply_wagl_metadata(p, granule_group)
            p.maturity = (
                # When level 1 is NRT, ARD is always NRT.
                "nrt"
                if level1.maturity == "nrt"
                else _determine_maturity(
                    acq_date=p.datetime,
                    processed=p.processed,
                    wagl_doc=wagl_doc,
                )
            )
            org_collection_number = utils.get_collection_number(
                p.producer, p.properties["landsat:collection_number"]
            )
            # Dataset version: "<collection>.2.0".
            p.dataset_version = f"{org_collection_number}.2.0"
            p.region_code = _extract_reference_code(p, granule.name)
            # Attach the accompanying quality documents.
            _read_gqa_doc(p, granule.gqa_doc)
            _read_fmask_doc(p, granule.fmask_doc)
            if granule.tesp_doc:
                _take_software_versions(p, granule.tesp_doc)
            _unpack_products(p, included_products, granule_group)
            if include_oa:
                with do("Starting OA", heading=True):
                    _unpack_observation_attributes(
                        p,
                        included_products,
                        granule_group,
                        infer_datetime_range=level1.platform.startswith("landsat"),
                    )
                if granule.fmask_image:
                    with do(f"Writing fmask from {granule.fmask_image} "):
                        p.write_measurement(
                            "oa:fmask",
                            granule.fmask_image,
                            # fmask covers the whole scene footprint, so don't
                            # recompute valid-data polygons from it.
                            expand_valid_data=False,
                            overview_resampling=Resampling.mode,
                        )
            with do("Finishing package"):
                return p.done()
def _flatten_dict(d: Mapping, prefix=None, separator=".") -> Iterable[Tuple[str, Any]]:
"""
>>> dict(_flatten_dict({'a' : 1, 'b' : {'inner' : 2},'c' : 3}))
{'a': 1, 'b.inner': 2, 'c': 3}
>>> dict(_flatten_dict({'a' : 1, 'b' : {'inner' : {'core' : 2}}}, prefix='outside', separator=':'))
{'outside:a': 1, 'outside:b:inner:core': 2}
"""
for k, v in d.items():
name = f"{prefix}{separator}{k}" if prefix else k
if isinstance(v, Mapping):
yield from _flatten_dict(v, prefix=name, separator=separator)
else:
yield name, v
def _read_gqa_doc(p: DatasetAssembler, doc: Dict):
    """Record GQA information on the assembler: software versions, the raw
    document, and flattened residual fields as `gqa:*` properties."""
    _take_software_versions(p, doc)
    p.extend_user_metadata("gqa", doc)

    # TODO: more of the GQA fields?
    flattened_residuals = _flatten_dict(doc["residual"], separator="_")
    for field_name, field_value in flattened_residuals:
        p.properties[f"gqa:{field_name}"] = field_value
def _read_fmask_doc(p: DatasetAssembler, doc: Dict):
    """Record fmask class percentages and software versions on the assembler."""
    class_percentages = doc["percent_class_distribution"]
    for class_name, percentage in class_percentages.items():
        # From Josh: fmask cloud cover trumps the L1 cloud cover.
        if class_name == "cloud":
            # Remove the inherited L1 value before setting the fmask one.
            del p.properties["eo:cloud_cover"]
            p.properties["eo:cloud_cover"] = percentage
        p.properties[f"fmask:{class_name}"] = percentage

    _take_software_versions(p, doc)
    p.extend_user_metadata("fmask", doc)
def _take_software_versions(p: DatasetAssembler, doc: Dict):
    """Pop the 'software_versions' section out of `doc` (mutating it) and
    record each entry on the assembler."""
    for software_name, info in doc.pop("software_versions", {}).items():
        p.note_software_version(
            software_name, info.get("repo_url"), info.get("version")
        )
def find_a_granule_name(wagl_hdf5: Path) -> str:
    """
    Try to extract granule name from wagl filename,

    >>> find_a_granule_name(Path('LT50910841993188ASA00.wagl.h5'))
    'LT50910841993188ASA00'
    >>> find_a_granule_name(Path('my-test-granule.h5'))
    Traceback (most recent call last):
    ...
    ValueError: No granule specified, and cannot find it on input filename 'my-test-granule'.
    """
    # Everything before the first dot of the stem, eg. 'LT5...wagl' -> 'LT5...'.
    candidate = wagl_hdf5.stem.partition(".")[0]
    if candidate.startswith("L"):
        return candidate
    raise ValueError(
        f"No granule specified, and cannot find it on input filename {wagl_hdf5.stem!r}."
    )
def _apply_wagl_metadata(p: DatasetAssembler, granule_group: h5py.Group) -> Dict:
    """Read the WAGL metadata document(s) from the granule, record them on the
    assembler, and return the (merged) wagl metadata document."""
    try:
        # Take the first METADATA scalar dataset as the main doc; any further
        # ones are ancillary documents merged in below.
        wagl_path, *ancil_paths = [
            pth
            for pth in (_find_h5_paths(granule_group, "SCALAR"))
            if "METADATA" in pth
        ]
    except ValueError:
        # Unpacking `wagl_path, *...` raises ValueError when no paths matched.
        raise ValueError("No nbar metadata found in granule")
    # Expect exactly one yaml document in the main metadata dataset.
    [wagl_doc] = loads_yaml(granule_group[wagl_path][()])
    try:
        p.processed = get_path(wagl_doc, ("system_information", "time_processed"))
    except PathAccessError:
        raise ValueError(f"WAGL dataset contains no time processed. Path {wagl_path}")
    # Merge each extra metadata doc's 'ancillary' section in as a numbered
    # "wagl_N" section (numbering starts at 2; the main doc is implicitly 1).
    for i, path in enumerate(ancil_paths, start=2):
        wagl_doc.setdefault(f"wagl_{i}", {}).update(
            list(loads_yaml(granule_group[path][()]))[0]["ancillary"]
        )
    _take_software_versions(p, wagl_doc)
    p.extend_user_metadata("wagl", wagl_doc)
    return wagl_doc
def _determine_maturity(acq_date: datetime, processed: datetime, wagl_doc: Dict):
"""
Determine maturity field of a dataset.
Based on the fallback logic in nbar pages of CMI, eg: https://cmi.ga.gov.au/ga_ls5t_nbart_3
"""
ancillary_tiers = {
key.lower(): o["tier"]
for key, o in wagl_doc["ancillary"].items()
if "tier" in o
}
if "water_vapour" not in ancillary_tiers:
# Perhaps this should be a warning, but I'm being strict until told otherwise.
# (a warning is easy to ignore)
raise ValueError(
f"No water vapour ancillary tier. Got {list(ancillary_tiers.keys())!r}"
)
water_vapour_is_definitive = ancillary_tiers["water_vapour"].lower() == "definitive"
if (processed - acq_date) < timedelta(hours=48):
return "nrt"
if not water_vapour_is_definitive:
return "interim"
# For accurate BRDF, both Aqua and Terra need to be operating.
# Aqua launched May 2002, and we add a ~2 month buffer of operation.
if acq_date < default_utc(datetime(2002, 7, 1)):
return "final"
if "brdf" not in ancillary_tiers:
# Perhaps this should be a warning, but I'm being strict until told otherwise.
# (a warning is easy to ignore)
raise ValueError(
f"No brdf tier available. Got {list(ancillary_tiers.keys())!r}"
)
brdf_tier = ancillary_tiers["brdf"].lower()
if "definitive" in brdf_tier:
return "final"
elif "fallback" in brdf_tier:
return "interim"
else:
# This value should not occur for production data, only for experiments
return "user"
| 33.990169 | 115 | 0.592455 |
e86e3281a2bd23a8c454b2a18ba951459b01645e | 3,265 | py | Python | antareslauncher/use_cases/launch/launch_controller.py | AntaresSimulatorTeam/antares-launcher | 8915f4ef0ce88565bd130b1230574247a9685a65 | [
"Apache-2.0"
] | null | null | null | antareslauncher/use_cases/launch/launch_controller.py | AntaresSimulatorTeam/antares-launcher | 8915f4ef0ce88565bd130b1230574247a9685a65 | [
"Apache-2.0"
] | 8 | 2021-04-02T12:16:28.000Z | 2021-09-17T08:19:56.000Z | antareslauncher/use_cases/launch/launch_controller.py | AntaresSimulatorTeam/antares-launcher | 8915f4ef0ce88565bd130b1230574247a9685a65 | [
"Apache-2.0"
] | 1 | 2021-03-11T09:08:43.000Z | 2021-03-11T09:08:43.000Z | from antareslauncher.data_repo.data_reporter import DataReporter
from antareslauncher.data_repo.idata_repo import IDataRepo
from antareslauncher.display.idisplay import IDisplay
from antareslauncher.file_manager.file_manager import FileManager
from antareslauncher.remote_environnement.iremote_environment import (
IRemoteEnvironment,
)
from antareslauncher.study_dto import StudyDTO
from antareslauncher.use_cases.launch.study_submitter import StudySubmitter
from antareslauncher.use_cases.launch.study_zip_cleaner import StudyZipCleaner
from antareslauncher.use_cases.launch.study_zip_uploader import (
StudyZipfileUploader,
)
from antareslauncher.use_cases.launch.study_zipper import StudyZipper
class StudyLauncher:
    """Runs a single study through the submission pipeline.

    Stages: zip the study, upload the zip, remove the sent zip file, then
    submit the job. After each stage the study DTO is persisted through the
    reporter so progress is recorded.
    """

    def __init__(
        self,
        zipper: StudyZipper,
        study_uploader: StudyZipfileUploader,
        zipfile_cleaner: StudyZipCleaner,
        study_submitter: StudySubmitter,
        reporter: DataReporter,
    ):
        self._zipper = zipper
        self._study_uploader = study_uploader
        self._zipfile_cleaner = zipfile_cleaner
        self._study_submitter = study_submitter
        self.reporter = reporter
        self._current_study: StudyDTO = None

    def _record_state(self):
        """Persist the current state of the study being processed."""
        self.reporter.save_study(self._current_study)

    def _zip_study(self):
        self._current_study = self._zipper.zip(self._current_study)
        self._record_state()

    def _upload_zipfile(self):
        self._current_study = self._study_uploader.upload(self._current_study)
        self._record_state()

    def _remove_input_zipfile(self):
        # Only clean up once the zip was actually sent to the server.
        if self._current_study.zip_is_sent is True:
            self._current_study = self._zipfile_cleaner.remove_input_zipfile(
                self._current_study
            )
        self._record_state()

    def _submit_job(self):
        self._current_study = self._study_submitter.submit_job(self._current_study)
        self._record_state()

    def launch_study(self, study):
        """Zip, upload, clean up and submit the given study."""
        self._current_study = study
        self._zip_study()
        self._upload_zipfile()
        self._remove_input_zipfile()
        self._submit_job()
class LaunchController:
    """Coordinates sending every study in the repository to the server."""

    def __init__(
        self,
        repo: IDataRepo,
        env: IRemoteEnvironment,
        file_manager: FileManager,
        display: IDisplay,
    ):
        self.repo = repo
        self.env = env
        self.file_manager = file_manager
        self.display = display
        self.study_launcher = self._build_study_launcher()

    def _build_study_launcher(self) -> StudyLauncher:
        """Wire together the collaborators needed to process one study."""
        return StudyLauncher(
            StudyZipper(self.file_manager, self.display),
            StudyZipfileUploader(self.env, self.display),
            StudyZipCleaner(self.file_manager, self.display),
            StudySubmitter(self.env, self.display),
            DataReporter(self.repo),
        )

    def launch_all_studies(self):
        """Processes all the studies and send them to the server to process the job

        Steps of processing:

        1. zip the study
        2. upload the study
        3. submit the slurm job
        """
        for study in self.repo.get_list_of_studies():
            self.study_launcher.launch_study(study)
| 33.316327 | 83 | 0.700459 |
04c290c738ba877afac3d8f11b1780922c3cd131 | 3,071 | py | Python | src/components/game-server/juego/partida.py | adp1002/practica-dms-2019-2020 | 9ae0c71140d537bb43c0f8ec8a81b8fff38dec21 | [
"MIT"
] | null | null | null | src/components/game-server/juego/partida.py | adp1002/practica-dms-2019-2020 | 9ae0c71140d537bb43c0f8ec8a81b8fff38dec21 | [
"MIT"
] | null | null | null | src/components/game-server/juego/partida.py | adp1002/practica-dms-2019-2020 | 9ae0c71140d537bb43c0f8ec8a81b8fff38dec21 | [
"MIT"
] | null | null | null | from juego.fabrica_abstracta_juegos import FabricaJuegoMesa
class Partida:
""" Partida de un juego entre dos jugadores.
---
La clase almacena el estado de la partida.
"""
def __init__(self, fabrica_juego):
""" Constructor.
---
Parámetros:
- fabrica_juego: FabricaJuegoMesa
- jugador1: Jugador 1.
- jugador2: Jugador 2.
"""
self.__jugador1 = None
self.__jugador2 = None
self.__fabrica = fabrica_juego
self.__tablero = fabrica_juego.crear_tablero()
self.__arbitro = fabrica_juego.crear_arbitro(self.__tablero)
self.__turno = 0
self.__ganador = None
def registrar_jugador(self, jugador):
""" Método que añade un jugador a la partida.
---
Parámetros:
- jugador: Jugador a registrar.
Returns:
True si hay espacio en la partida, sino False.
"""
if self.__jugador1 is None:
self.__jugador1 = jugador
self.__jugador1.establecer_tipo('X')
elif self.__jugador2 is None:
self.__jugador2 = jugador
self.__jugador2.establecer_tipo('O')
else:
return False
return True
def jugar(self, x, y):
""" Método que realiza un movimiento.
---
Parámetros:
- x: Fila del tablero.
- y: Columna del tablero.
"""
if self.__arbitro.es_valido(x, y):
self.__tablero.colocar(x, y,
self.__fabrica.crear_pieza(self.obtener_turno().obtener_tipo()))
self.__turno += 1
else:
raise Exception('Partida llena')
def obtener_ganador(self):
""" Método que devuelve el ganador de la partida.
---
Returns:
El Jugador ganador, si no hay ganador None
"""
if self.__arbitro.hay_ganador():
return self.__jugador1 if self.__turno % 2 != 0 else self.__jugador2
return None
def obtener_perdedor(self):
""" Método que devuelve el perdedor de la partida.
---
Returns:
El Jugador perdedor, si no hay perdedor None
"""
if self.__arbitro.hay_ganador():
return self.__jugador1 if self.__turno % 2 == 0 else self.__jugador2
return None
def esta_acabado(self):
""" Método que decide si la partida ha terminado.
---
Returns:
True si la partida esta terminada, sino False
"""
return self.__arbitro.esta_acabado()
def obtener_turno(self):
""" Método que obtiene el jugador con el turno.
---
Returns:
Jugador con el turno.
"""
return self.__jugador1 if self.__turno % 2 == 0 else self.__jugador2
def obtener_tablero(self):
""" Método que devuelve el tablero de la partida.
---
Returns:
Tablero de la partida.
"""
return self.__tablero
| 30.71 | 80 | 0.558776 |
0c94e762abb81b50821fbe97b1a44d6b42e9e2ad | 14,588 | py | Python | tensorflow/python/keras/utils/losses_utils.py | jay-jang/tensorflow | 6c3b7f4c3e89718c1812e9132a3546cb0dc8f53c | [
"Apache-2.0"
] | 5 | 2021-04-01T15:14:48.000Z | 2021-04-02T02:56:07.000Z | tensorflow/python/keras/utils/losses_utils.py | jay-jang/tensorflow | 6c3b7f4c3e89718c1812e9132a3546cb0dc8f53c | [
"Apache-2.0"
] | 2 | 2021-01-26T13:15:46.000Z | 2021-01-26T16:46:46.000Z | tensorflow/python/keras/utils/losses_utils.py | jay-jang/tensorflow | 6c3b7f4c3e89718c1812e9132a3546cb0dc8f53c | [
"Apache-2.0"
] | 3 | 2021-01-26T11:51:18.000Z | 2021-01-26T12:13:40.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Utilities related to loss functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.engine import keras_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.losses.Reduction', v1=[])
class ReductionV2(object):
    """Types of loss reduction.

    The supported values are:

    * `AUTO`: The reduction is chosen by the usage context; in almost all
      cases this resolves to `SUM_OVER_BATCH_SIZE`. When used with
      `tf.distribute.Strategy` outside of built-in training loops such as
      `tf.keras` `compile`/`fit`, the reduction must be `SUM` or `NONE`, and
      using `AUTO` raises an error.
    * `NONE`: Weighted losses with one dimension reduced (axis=-1, or the
      axis specified by the loss function). With built-in Keras training
      loops like `fit`/`evaluate`, the unreduced vector loss is passed to
      the optimizer but the reported loss is a scalar.
    * `SUM`: Scalar sum of weighted losses.
    * `SUM_OVER_BATCH_SIZE`: Scalar `SUM` divided by the number of elements
      in the losses. Not supported with `tf.distribute.Strategy` outside of
      built-in training loops; instead, divide an explicit `SUM` by the
      global batch size:

    ```
    with strategy.scope():
      loss_obj = tf.keras.losses.CategoricalCrossentropy(
          reduction=tf.keras.losses.Reduction.NONE)
      ....
      loss = tf.reduce_sum(loss_obj(labels, predictions)) *
          (1. / global_batch_size)
    ```

    See the [custom training guide](
    https://www.tensorflow.org/tutorials/distribute/custom_training) for
    more details.
    """

    AUTO = 'auto'
    NONE = 'none'
    SUM = 'sum'
    SUM_OVER_BATCH_SIZE = 'sum_over_batch_size'

    @classmethod
    def all(cls):
        """Return the tuple of every supported reduction key."""
        return (cls.AUTO, cls.NONE, cls.SUM, cls.SUM_OVER_BATCH_SIZE)

    @classmethod
    def validate(cls, key):
        """Raise ValueError if `key` is not a supported reduction key."""
        if key in cls.all():
            return
        raise ValueError('Invalid Reduction Key %s.' % key)
def remove_squeezable_dimensions(
        labels, predictions, expected_rank_diff=0, name=None):
    """Squeeze last dim if ranks differ from expected by exactly 1.

    In the common case where we expect shapes to match, `expected_rank_diff`
    defaults to 0, and we squeeze the last dimension of the larger rank if they
    differ by 1.

    But, for example, if `labels` contains class IDs and `predictions` contains 1
    probability per class, we expect `predictions` to have 1 more dimension than
    `labels`, so `expected_rank_diff` would be 1. In this case, we'd squeeze
    `labels` if `rank(predictions) - rank(labels) == 0`, and
    `predictions` if `rank(predictions) - rank(labels) == 2`.

    This will use static shape if available. Otherwise, it will add graph
    operations, which could result in a performance hit.

    Args:
        labels: Label values, a `Tensor` whose dimensions match `predictions`.
        predictions: Predicted values, a `Tensor` of arbitrary dimensions.
        expected_rank_diff: Expected result of `rank(predictions) - rank(labels)`.
        name: Name of the op.

    Returns:
        Tuple of `labels` and `predictions`, possibly with last dim squeezed.
    """
    with K.name_scope(name or 'remove_squeezable_dimensions'):
        # Ragged tensors carry their own shape information; only dense inputs
        # are converted here.
        if not isinstance(predictions, ragged_tensor.RaggedTensor):
            predictions = ops.convert_to_tensor_v2_with_dispatch(predictions)
        if not isinstance(labels, ragged_tensor.RaggedTensor):
            labels = ops.convert_to_tensor_v2_with_dispatch(labels)

        predictions_shape = predictions.shape
        predictions_rank = predictions_shape.ndims
        labels_shape = labels.shape
        labels_rank = labels_shape.ndims
        if (labels_rank is not None) and (predictions_rank is not None):
            # Use static rank: we can decide at graph-construction time which
            # (if either) tensor to squeeze.
            rank_diff = predictions_rank - labels_rank
            if (rank_diff == expected_rank_diff + 1 and
                    predictions_shape.dims[-1].is_compatible_with(1)):
                predictions = array_ops.squeeze(predictions, [-1])
            elif (rank_diff == expected_rank_diff - 1 and
                  labels_shape.dims[-1].is_compatible_with(1)):
                labels = array_ops.squeeze(labels, [-1])
            return labels, predictions

        # Use dynamic rank: the decision must be made with graph ops (cond),
        # guarded by any static knowledge of the last dimension being 1.
        rank_diff = array_ops.rank(predictions) - array_ops.rank(labels)
        if (predictions_rank is None) or (
                predictions_shape.dims[-1].is_compatible_with(1)):
            predictions = control_flow_ops.cond(
                math_ops.equal(expected_rank_diff + 1, rank_diff),
                lambda: array_ops.squeeze(predictions, [-1]),
                lambda: predictions)
        if (labels_rank is None) or (
                labels_shape.dims[-1].is_compatible_with(1)):
            labels = control_flow_ops.cond(
                math_ops.equal(expected_rank_diff - 1, rank_diff),
                lambda: array_ops.squeeze(labels, [-1]),
                lambda: labels)
        return labels, predictions
def squeeze_or_expand_dimensions(y_pred, y_true=None, sample_weight=None):
    """Squeeze or expand last dimension if needed.

    1. Squeezes last dim of `y_pred` or `y_true` if their rank differs by 1
    (using `remove_squeezable_dimensions`).
    2. Squeezes or expands last dim of `sample_weight` if its rank differs by 1
    from the new rank of `y_pred`.
    If `sample_weight` is scalar, it is kept scalar.

    This will use static shape if available. Otherwise, it will add graph
    operations, which could result in a performance hit.

    Args:
        y_pred: Predicted values, a `Tensor` of arbitrary dimensions.
        y_true: Optional label `Tensor` whose dimensions match `y_pred`.
        sample_weight: Optional weight scalar or `Tensor` whose dimensions match
            `y_pred`.

    Returns:
        Tuple of `y_pred`, `y_true` and `sample_weight`. Each of them possibly has
        the last dimension squeezed,
        `sample_weight` could be extended by one dimension.
        If `sample_weight` is None, (y_pred, y_true) is returned.
    """
    y_pred_shape = y_pred.shape
    y_pred_rank = y_pred_shape.ndims
    if y_true is not None:

        # If sparse matrix is provided as `y_true`, the last dimension in `y_pred`
        # may be > 1. Eg: y_true = [0, 1, 2] (shape=(3,)),
        # y_pred = [[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]] (shape=(3, 3))
        # In this case, we should not try to remove squeezable dimension.
        y_true_shape = y_true.shape
        y_true_rank = y_true_shape.ndims
        if (y_true_rank is not None) and (y_pred_rank is not None):
            # Use static rank for `y_true` and `y_pred`.
            if (y_pred_rank - y_true_rank != 1) or y_pred_shape[-1] == 1:
                y_true, y_pred = remove_squeezable_dimensions(
                    y_true, y_pred)
        else:
            # Use dynamic rank. Only squeeze when the rank difference is not
            # exactly 1, or when it is 1 but the last dim of y_pred is 1.
            rank_diff = array_ops.rank(y_pred) - array_ops.rank(y_true)
            squeeze_dims = lambda: remove_squeezable_dimensions(  # pylint: disable=g-long-lambda
                y_true, y_pred)
            is_last_dim_1 = math_ops.equal(1, array_ops.shape(y_pred)[-1])
            maybe_squeeze_dims = lambda: control_flow_ops.cond(  # pylint: disable=g-long-lambda
                is_last_dim_1, squeeze_dims, lambda: (y_true, y_pred))
            y_true, y_pred = control_flow_ops.cond(
                math_ops.equal(1, rank_diff), maybe_squeeze_dims, squeeze_dims)

    if sample_weight is None:
        return y_pred, y_true

    weights_shape = sample_weight.shape
    weights_rank = weights_shape.ndims
    if weights_rank == 0:  # If weights is scalar, do nothing.
        return y_pred, y_true, sample_weight

    if (y_pred_rank is not None) and (weights_rank is not None):
        # Use static rank.
        if weights_rank - y_pred_rank == 1:
            sample_weight = array_ops.squeeze(sample_weight, [-1])
        elif y_pred_rank - weights_rank == 1:
            sample_weight = array_ops.expand_dims(sample_weight, [-1])
        return y_pred, y_true, sample_weight

    # Use dynamic rank.
    weights_rank_tensor = array_ops.rank(sample_weight)
    rank_diff = weights_rank_tensor - array_ops.rank(y_pred)
    maybe_squeeze_weights = lambda: array_ops.squeeze(sample_weight, [-1])

    def _maybe_expand_weights():
        expand_weights = lambda: array_ops.expand_dims(sample_weight, [-1])
        return control_flow_ops.cond(
            math_ops.equal(rank_diff, -1), expand_weights, lambda: sample_weight)

    def _maybe_adjust_weights():
        return control_flow_ops.cond(
            math_ops.equal(rank_diff, 1), maybe_squeeze_weights,
            _maybe_expand_weights)

    # squeeze or expand last dim of `sample_weight` if its rank differs by 1
    # from the new rank of `y_pred`.
    sample_weight = control_flow_ops.cond(
        math_ops.equal(weights_rank_tensor, 0), lambda: sample_weight,
        _maybe_adjust_weights)
    return y_pred, y_true, sample_weight
def _safe_mean(losses, num_present):
    """Compute the mean of `losses`, returning zero when `num_present` is zero.

    Args:
        losses: `Tensor` whose elements contain individual loss measurements.
        num_present: The number of measurable elements in `losses`.

    Returns:
        A scalar representing the mean of `losses`. If `num_present` is zero,
        then zero is returned.
    """
    # div_no_nan yields 0 instead of NaN/Inf for a zero denominator.
    return math_ops.div_no_nan(
        math_ops.reduce_sum(losses), num_present, name='value')
def _num_elements(losses):
    """Return the element count of `losses` as a tensor of the same dtype."""
    with K.name_scope('num_elements') as scope_name:
        element_count = array_ops.size(losses, name=scope_name)
        return math_ops.cast(element_count, dtype=losses.dtype)
def reduce_weighted_loss(weighted_losses,
                         reduction=ReductionV2.SUM_OVER_BATCH_SIZE):
    """Reduces the individual weighted loss measurements."""
    # NONE: return the per-element losses untouched.
    if reduction == ReductionV2.NONE:
        return weighted_losses
    total = math_ops.reduce_sum(weighted_losses)
    if reduction == ReductionV2.SUM_OVER_BATCH_SIZE:
        # Mean over all elements; safe against an empty tensor.
        return _safe_mean(total, _num_elements(weighted_losses))
    return total
def compute_weighted_loss(losses,
                          sample_weight=None,
                          reduction=ReductionV2.SUM_OVER_BATCH_SIZE,
                          name=None):
    """Computes the weighted loss.

    Args:
        losses: `Tensor` of shape `[batch_size, d1, ... dN]`.
        sample_weight: Optional `Tensor` whose rank is either 0, or the same rank as
            `losses`, or be broadcastable to `losses`.
        reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss.
            Default value is `SUM_OVER_BATCH_SIZE`.
        name: Optional name for the op.

    Raises:
        ValueError: If the shape of `sample_weight` is not compatible with `losses`.

    Returns:
        Weighted loss `Tensor` of the same type as `losses`. If `reduction` is
        `NONE`, this has the same shape as `losses`; otherwise, it is scalar.
    """
    ReductionV2.validate(reduction)

    # If this function is called directly, then we just default 'AUTO' to
    # 'SUM_OVER_BATCH_SIZE'. Eg. Canned estimator use cases.
    if reduction == ReductionV2.AUTO:
        reduction = ReductionV2.SUM_OVER_BATCH_SIZE
    if sample_weight is None:
        sample_weight = 1.0
    with K.name_scope(name or 'weighted_loss'):
        # Save the `reduction` argument for loss normalization when distributing
        # to multiple replicas. Used only for estimator + v1 optimizer flow.
        ops.get_default_graph()._last_loss_reduction = reduction  # pylint: disable=protected-access

        # KerasTensors are symbolic placeholders and must not be converted.
        if not isinstance(losses, keras_tensor.KerasTensor):
            losses = ops.convert_to_tensor_v2_with_dispatch(losses)
        # Remember the caller's dtype; the result is cast back to it at the end.
        input_dtype = losses.dtype

        if not isinstance(sample_weight, keras_tensor.KerasTensor):
            sample_weight = ops.convert_to_tensor_v2_with_dispatch(sample_weight)

        # TODO(psv): Handle casting here in a better way, eg. if losses is float64
        # we do not want to lose precision.
        losses = math_ops.cast(losses, 'float32')
        sample_weight = math_ops.cast(sample_weight, 'float32')
        # Update dimensions of `sample_weight` to match with `losses` if possible.
        losses, _, sample_weight = squeeze_or_expand_dimensions(  # pylint: disable=unbalanced-tuple-unpacking
            losses, None, sample_weight)
        weighted_losses = math_ops.multiply(losses, sample_weight)
        # Apply reduction function to the individual weighted losses.
        loss = reduce_weighted_loss(weighted_losses, reduction)
        # Convert the result back to the input type.
        loss = math_ops.cast(loss, input_dtype)
        return loss
def scale_loss_for_distribution(loss_value):
    """Scales and returns the given loss value by the number of replicas."""
    num_replicas = (
        distribution_strategy_context.get_strategy().num_replicas_in_sync)
    if num_replicas <= 1:
        return loss_value
    return loss_value * (1. / num_replicas)
def cast_losses_to_common_dtype(losses):
    """Cast a list of losses to a common dtype.

    If any loss is floating-point, they will all be casted to the most-precise
    floating-point loss. Otherwise the losses are not casted. We also skip casting
    losses if there are any complex losses.

    Args:
        losses: A list of losses.

    Returns:
        `losses`, but they have been casted to a common dtype.
    """
    highest_float = None
    for loss in losses:
        if loss.dtype.is_floating:
            # Track the widest floating dtype seen so far (by byte size).
            if highest_float is None or loss.dtype.size > highest_float.size:
                highest_float = loss.dtype
            elif {loss.dtype, highest_float} == {'bfloat16', 'float16'}:
                # NOTE(review): this compares a set containing DType objects
                # against a set of strings; set equality matches elements by
                # hash, so it is unclear this branch can ever fire. Presumably
                # the intent is to promote a bfloat16/float16 mix to float32
                # (neither can represent the other) -- confirm.
                highest_float = 'float32'
        if loss.dtype.is_complex:
            return losses  # If we find any complex losses, do not cast any losses
    if highest_float:
        losses = [math_ops.cast(loss, highest_float) for loss in losses]
    return losses
| 40.635097 | 106 | 0.714834 |
6e102e78b755b61883a9eb060938da9674e1e10c | 7,544 | py | Python | neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py | congnt95/neutron | 6a73a362c5ff5b7c28c15a49f47a9900c0d2b4e1 | [
"Apache-2.0"
] | 1,080 | 2015-01-04T08:35:00.000Z | 2022-03-27T09:15:52.000Z | neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py | congnt95/neutron | 6a73a362c5ff5b7c28c15a49f47a9900c0d2b4e1 | [
"Apache-2.0"
] | 24 | 2015-02-21T01:48:28.000Z | 2021-11-26T02:38:56.000Z | neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py | congnt95/neutron | 6a73a362c5ff5b7c28c15a49f47a9900c0d2b4e1 | [
"Apache-2.0"
] | 1,241 | 2015-01-02T10:47:10.000Z | 2022-03-27T09:42:23.000Z | # Copyright (c) 2013 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from neutron_lib.agent import topics
from neutron_lib.api import extensions
from neutron_lib import constants
from neutron_lib.plugins import constants as plugin_constants
from neutron_lib.plugins import directory
from neutron_lib import rpc as n_rpc
from oslo_log import log as logging
import oslo_messaging
from neutron.api.rpc.agentnotifiers import utils as ag_utils
LOG = logging.getLogger(__name__)
# default messaging timeout is 60 sec, so 2 here is chosen to not block API
# call for more than 2 minutes
AGENT_NOTIFY_MAX_ATTEMPTS = 2
class L3AgentNotifyAPI(object):
"""API for plugin to notify L3 agent."""
def __init__(self, topic=topics.L3_AGENT):
target = oslo_messaging.Target(topic=topic, version='1.0')
self.client = n_rpc.get_client(target)
def _notification_host(self, context, method, host, use_call=False,
**kwargs):
"""Notify the agent that is hosting the router."""
LOG.debug('Notify agent at %(host)s the message '
'%(method)s', {'host': host,
'method': method})
cctxt = self.client.prepare(server=host)
rpc_method = (ag_utils.retry(cctxt.call, AGENT_NOTIFY_MAX_ATTEMPTS)
if use_call else cctxt.cast)
rpc_method(context, method, **kwargs)
def _agent_notification(self, context, method, router_ids, operation,
shuffle_agents):
"""Notify changed routers to hosting l3 agents."""
adminContext = context if context.is_admin else context.elevated()
plugin = directory.get_plugin(plugin_constants.L3)
for router_id in router_ids:
hosts = plugin.get_hosts_to_notify(adminContext, router_id)
if shuffle_agents:
random.shuffle(hosts)
for host in hosts:
LOG.debug('Notify agent at %(topic)s.%(host)s the message '
'%(method)s',
{'topic': topics.L3_AGENT,
'host': host,
'method': method})
cctxt = self.client.prepare(topic=topics.L3_AGENT,
server=host,
version='1.1')
cctxt.cast(context, method, routers=[router_id])
def _agent_notification_arp(self, context, method, router_id,
operation, data):
"""Notify arp details to l3 agents hosting router."""
if not router_id:
return
dvr_arptable = {'router_id': router_id, 'arp_table': data}
LOG.debug('Fanout dvr_arptable update: %s', dvr_arptable)
cctxt = self.client.prepare(fanout=True, version='1.2')
cctxt.cast(context, method, payload=dvr_arptable)
def _notification(self, context, method, router_ids, operation,
shuffle_agents, schedule_routers=True):
"""Notify all the agents that are hosting the routers."""
plugin = directory.get_plugin(plugin_constants.L3)
if not plugin:
LOG.error('No plugin for L3 routing registered. Cannot notify '
'agents with the message %s', method)
return
if extensions.is_extension_supported(
plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS):
adminContext = (context.is_admin and
context or context.elevated())
if schedule_routers:
plugin.schedule_routers(adminContext, router_ids)
self._agent_notification(
context, method, router_ids, operation, shuffle_agents)
else:
cctxt = self.client.prepare(fanout=True)
cctxt.cast(context, method, routers=router_ids)
def _notification_fanout(self, context, method, router_id=None, **kwargs):
"""Fanout the information to all L3 agents.
This function will fanout the router_id or ext_net_id
to the L3 Agents.
"""
ext_net_id = kwargs.get('ext_net_id')
if router_id:
kwargs['router_id'] = router_id
LOG.debug('Fanout notify agent at %(topic)s the message '
'%(method)s on router %(router_id)s',
{'topic': topics.L3_AGENT,
'method': method,
'router_id': router_id})
if ext_net_id:
LOG.debug('Fanout notify agent at %(topic)s the message '
'%(method)s for external_network %(ext_net_id)s',
{'topic': topics.L3_AGENT,
'method': method,
'ext_net_id': ext_net_id})
cctxt = self.client.prepare(fanout=True)
cctxt.cast(context, method, **kwargs)
def agent_updated(self, context, admin_state_up, host):
    """Notify the L3 agent on `host` that its admin state changed."""
    self._notification_host(context, 'agent_updated', host,
                            payload={'admin_state_up': admin_state_up})
def router_deleted(self, context, router_id):
    """Fanout a router-deleted event to every L3 agent."""
    self._notification_fanout(context, 'router_deleted', router_id)
def routers_updated(self, context, router_ids, operation=None, data=None,
                    shuffle_agents=False, schedule_routers=True):
    """Notify the agents hosting `router_ids` that the routers changed.

    No-op for an empty list.  `data` is accepted for interface
    compatibility but is not forwarded here.
    """
    if router_ids:
        self._notification(context, 'routers_updated', router_ids,
                           operation, shuffle_agents, schedule_routers)
def add_arp_entry(self, context, router_id, arp_table, operation=None):
    """Fanout an ARP-entry addition for `router_id` to the L3 agents."""
    self._agent_notification_arp(context, 'add_arp_entry', router_id,
                                 operation, arp_table)
def del_arp_entry(self, context, router_id, arp_table, operation=None):
    """Fanout an ARP-entry removal for `router_id` to the L3 agents."""
    self._agent_notification_arp(context, 'del_arp_entry', router_id,
                                 operation, arp_table)
def delete_fipnamespace_for_ext_net(self, context, ext_net_id):
    """Fanout deletion of the FIP namespace of an external network."""
    self._notification_fanout(
        context, 'fipnamespace_delete_on_ext_net',
        ext_net_id=ext_net_id)
def router_removed_from_agent(self, context, router_id, host):
    """Tell the agent on `host` that a router was unscheduled from it."""
    self._notification_host(context, 'router_removed_from_agent', host,
                            payload={'router_id': router_id})
def router_added_to_agent(self, context, router_ids, host):
    """Tell the agent on `host` that routers were scheduled onto it."""
    # need to use call here as we want to be sure agent received
    # notification and router will not be "lost". However using call()
    # itself is not a guarantee, calling code should handle exceptions and
    # retry
    self._notification_host(context, 'router_added_to_agent', host,
                            use_call=True, payload=router_ids)
def routers_updated_on_host(self, context, router_ids, host):
    """Notify only the agent on `host` that the given routers changed."""
    self._notification_host(context, 'routers_updated', host,
                            routers=router_ids)
| 44.639053 | 78 | 0.621023 |
d85061cfb9c89e8c95868604a1eabdb6053b235a | 26,925 | py | Python | metalibm_core/targets/common/llvm_ir.py | metalibm/metalibm | e3133bb95e13f797bb902ef7cd1d2f8f352c4454 | [
"MIT"
] | 12 | 2019-10-29T21:30:58.000Z | 2022-02-05T16:28:01.000Z | metalibm_core/targets/common/llvm_ir.py | metalibm/metalibm | e3133bb95e13f797bb902ef7cd1d2f8f352c4454 | [
"MIT"
] | 20 | 2021-03-11T19:46:48.000Z | 2022-02-05T16:03:29.000Z | metalibm_core/targets/common/llvm_ir.py | metalibm/metalibm | e3133bb95e13f797bb902ef7cd1d2f8f352c4454 | [
"MIT"
] | 4 | 2021-03-10T15:06:58.000Z | 2021-07-14T17:39:53.000Z | # -*- coding: utf-8 -*-
###############################################################################
# This file is part of metalibm (https://github.com/kalray/metalibm)
###############################################################################
# MIT License
#
# Copyright (c) 2018 Kalray
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###############################################################################
# created: Apr 4th, 2018
# last-modified: Apr 4th, 2018
#
# author(s): Nicolas Brunie (nicolas.brunie@kalray.eu)
###############################################################################
from metalibm_core.core.ml_formats import (
ML_Bool,
v2bool, v4bool, v8bool,
ML_Int32, ML_Int64, ML_Binary32, ML_Binary64,
ML_Int128, ML_Int256,
v2int32, v2int64, v2float32, v2float64,
v4int32, v4int64, v4float32, v4float64,
v8int32, v8int64, v8float32, v8float64,
ML_FP_Format,
)
from metalibm_core.core.target import TargetRegister
from metalibm_core.core.ml_operations import (
Addition, Subtraction, Multiplication,
Negation,
BitLogicRightShift, BitLogicLeftShift,
BitLogicAnd,
NearestInteger,
ExponentInsertion,
LogicalAnd, LogicalNot, LogicalOr,
Test, Comparison,
Return,
FunctionObject,
Conversion, TypeCast,
VectorElementSelection,
Constant,
FusedMultiplyAdd,
ReciprocalSeed,
)
from metalibm_core.core.legalizer import (
min_legalizer, max_legalizer, legalize_test, legalize_exp_insertion,
legalize_comp_sign, legalize_fma_to_std,
legalize_reciprocal_seed,
)
from metalibm_core.code_generation.generator_utility import (
ConstantOperator, FunctionOperator,
type_strict_match, type_strict_match_list
)
from metalibm_core.code_generation.complex_generator import (
ComplexOperator
)
from metalibm_core.code_generation.code_constant import LLVM_IR_Code
from metalibm_core.code_generation.abstract_backend import (
LOG_BACKEND_INIT
)
from metalibm_core.code_generation.generic_processor import (
GenericProcessor
)
from metalibm_core.code_generation.llvm_utils import llvm_ir_format
from metalibm_core.utility.log_report import Log
def llvm_negation_function(precision):
    """Build a code generation operator for the Negation operation.

    LLVM-IR has no dedicated neg instruction, so negation is emitted as
    a subtraction from zero (fsub for floating-point, sub for integer).
    """
    if ML_FP_Format.is_fp_format(precision):
        op, zero = "fsub", "0.0"
    else:
        op, zero = "sub", "0"
    template = "{op} {precision} {zero}, {{}}".format(
        zero=zero,
        op=op,
        precision=llvm_ir_format(precision),
    )
    return LLVMIrTemplateOperator(template, arity=1)
def llvm_not_function(precision):
    """Build a code generation operator for the (unary) LogicalNot.

    LLVM-IR only provides binary bitwise operations, so logical not is
    expressed as `xor 1, x`.
    """
    template = "{op} {precision} {one}, {{}}".format(
        op="xor",
        one=1,
        precision=llvm_ir_format(precision),
    )
    return LLVMIrTemplateOperator(template, arity=1)
def llvm_ret_function(precision):
    """Build the generator for a function return (LLVM-IR `ret`)."""
    return LLVMIrFunctionOperator(
        "ret", arity=1, void_function=True, output_precision=precision
    )
def llvm_bitcast_function(dst_precision, src_precision):
    """Build a generator for a bit-preserving cast (LLVM-IR `bitcast`)."""
    src_format = llvm_ir_format(src_precision)
    dst_format = llvm_ir_format(dst_precision)
    template = "bitcast {src_format} {{}} to {dst_format}".format(
        src_format=src_format,
        dst_format=dst_format,
    )
    return LLVMIrTemplateOperator(template, arity=1)
def llvm_extract_element_function(src_precision, index_precision):
    """Build a generator for vector lane extraction (`extractelement`)."""
    src_format = llvm_ir_format(src_precision)
    index_format = llvm_ir_format(index_precision)
    template = "extractelement {src_format} {{}}, {index_format} {{}}".format(
        src_format=src_format,
        index_format=index_format,
    )
    return LLVMIrTemplateOperator(template, arity=1)
def llvm_fcomp_function(predicate, precision):
    """Build a floating-point comparison generator (`fcmp <predicate>`)."""
    return LLVMIrFunctionOperator("fcmp {}".format(predicate), arity=2, output_precision=precision)
def llvm_icomp_function(predicate, precision):
    """Build an integer comparison generator (`icmp <predicate>`)."""
    return LLVMIrFunctionOperator("icmp {}".format(predicate), arity=2, output_precision=precision)
def llvm_op_function(name, precision, arity=2):
    """Build a generator for a generic n-ary LLVM-IR operation.

    Bug fix: the `arity` parameter was previously ignored (the call
    hard-coded arity=2); it is now forwarded, keeping the default of 2
    for all existing call sites.
    """
    return LLVMIrFunctionOperator(name, arity=arity, output_precision=precision)
class LLVMIrFunctionOperator(FunctionOperator):
    """FunctionOperator specialization emitting LLVM-IR instruction syntax."""
    # Prefix used when naming the temporary result variable.
    default_prefix = "tmp"

    def generate_call_code(self, result_arg_list):
        """Render `<opcode> <type> <arg>, <arg>, ...` for this operation."""
        return "{function_name} {output_precision} {arg_list}".format(
            output_precision=llvm_ir_format(self.output_precision),
            function_name=self.function_name,
            arg_list=", ".join(
                [var_arg.get() for var_arg in result_arg_list]
            )
        )
class LLVMIrIntrinsicOperator(LLVMIrFunctionOperator):
    """Operator for LLVM intrinsics, which must be declared before use
    and called with `call @<name>(<typed args>)` syntax."""

    def __init__(self, function_name, input_formats=None, **kw):
        # input_formats: list of argument formats used to build the
        # intrinsic's declaration prototype.
        self.input_formats = [] if input_formats is None else input_formats
        LLVMIrFunctionOperator.__init__(self, function_name, **kw)

    def register_prototype(self, optree, code_object):
        """Record the intrinsic declaration in the generated module."""
        if self.declare_prototype:
            code_object.declare_function(
                self.function_name, self.declare_prototype
            )

    @property
    def declare_prototype(self):
        # Built lazily from the stored formats rather than cached, so the
        # prototype always reflects the current input/output precisions.
        return FunctionObject(
            self.function_name,
            self.input_formats,
            self.output_precision,
            self,
        )

    @declare_prototype.setter
    def declare_prototype(self, value):
        # discard declare_prototype change: the parent constructor assigns
        # this attribute, but for intrinsics the prototype is derived, not
        # stored.
        pass

    def generate_call_code(self, result_arg_list):
        """Render `call <ret-type> @<name>(<type> <arg>, ...)`."""
        return "call {output_precision} @{function_name}({arg_list})".format(
            output_precision=llvm_ir_format(self.output_precision),
            function_name=self.function_name,
            arg_list=", ".join(
                ["%s %s" % (llvm_ir_format(var_arg.precision), var_arg.get()) for var_arg in result_arg_list]
            )
        )
class LLVMIrTemplateOperator(LLVMIrFunctionOperator):
    """Operator whose `function_name` is a str.format template with one
    `{}` placeholder per argument (e.g. "bitcast float {} to i32")."""

    def generate_call_code(self, result_arg_list):
        return self.function_name.format(
            *tuple(var_arg.get() for var_arg in result_arg_list)
        )
def generate_comp_mapping(predicate, fdesc, idesc):
    """Build the type->operator mapping for one comparison specifier.

    fdesc / idesc are the fcmp / icmp predicate mnemonics.
    NOTE(review): the `predicate` parameter is never used in the body —
    callers pass Comparison.<X> for readability only; confirm before
    removing.  Only 4-wide vector formats are mapped (no v2/v8) —
    presumably intentional, verify against the vectorizer's needs.
    """
    return dict(
        # floating-point comparison mapping
        [(
            type_strict_match_list([ML_Bool, ML_Int32], [precision], [precision]),
            llvm_fcomp_function(fdesc, precision)
        ) for precision in [
            ML_Binary32, ML_Binary64,
        ]] +
        # vectorial floating-point comparison mapping
        [(
            type_strict_match(v4bool, precision, precision),
            llvm_fcomp_function(fdesc, precision)
        ) for precision in [
            v4float32, v4float64,
        ]] +
        # integer comparison mapping
        [(
            type_strict_match_list([ML_Bool, ML_Int32], [precision], [precision]),
            llvm_icomp_function(idesc, precision)
        ) for precision in [
            ML_Int32, ML_Int64, ML_Int128, ML_Int256
        ]] +
        # vectorial integer comparison mapping
        [(
            type_strict_match(v4bool, precision, precision),
            llvm_icomp_function(idesc, precision)
        ) for precision in [
            v4int32
        ]]
    )
def legalize_integer_nearest(optree):
    """ transform a NearestInteger node floating-point to integer
        into a sequence of floating-point NearestInteger and Conversion.
        This conversion is lossy """
    op_input = optree.get_input(0)
    # Pick the integer format matching the node's (floating-point)
    # precision; KeyError for unsupported formats is intentional.
    int_precision = {
        v4float32: v4int32,
        ML_Binary32: ML_Int32
    }[optree.get_precision()]
    # Round to integer first, then convert back to the original format.
    return Conversion(
        NearestInteger(
            op_input,
            precision=int_precision
        ),
        precision=optree.get_precision()
    )
def legalize_vector_reduction_test(optree):
    """ Legalize a vector test (e.g. IsMaskNotAnyZero) to a sub-graph
        of basic operations """
    op_input = optree.get_input(0)
    vector_size = op_input.get_precision().get_vector_size()
    # Widen the boolean mask to 32-bit lanes ...
    conv_format = {
        2: v2int32,
        4: v4int32,
        8: v8int32,
    }[vector_size]
    # ... then view the whole vector as one wide scalar integer.
    cast_format = {
        2: ML_Int64,
        4: ML_Int128,
        8: ML_Int256,
    }[vector_size]
    # NOTE(review): comparing the packed mask == 0 is true only when
    # every lane is zero — for a test named IsMaskNotAnyZero an all-ones
    # comparison would be expected; confirm the mask convention used by
    # the caller before changing.
    return Comparison(
        TypeCast(
            Conversion(op_input, precision=conv_format),
            precision=cast_format
        ),
        Constant(0, precision=cast_format),
        specifier=Comparison.Equal,
        precision=ML_Bool
    )
# Mapping: operation class -> specifier -> predicate -> type signature
# -> code generation operator, used for LLVM-IR instruction selection.
#
# Fixes in this revision:
#  * NearestInteger ML_Binary64 -> ML_Int64 previously emitted
#    "fptosi float ... to i64"; an ML_Binary64 operand is an LLVM
#    `double`, so "float" produced invalid IR.
#  * Comparison.NotEqual previously used "fcmp ne"; "ne" is not a valid
#    fcmp predicate (fcmp only accepts ordered/unordered mnemonics), so
#    the unordered not-equal "une" (the complement of "oeq" used for
#    Equal, and the IEEE semantics of `!=`) is used instead.
llvm_ir_code_generation_table = {
    Conversion: {
        None: {
            lambda _: True: {
                type_strict_match(ML_Int64, ML_Int32):
                    LLVMIrTemplateOperator("sext i32 {} to i64", arity=1),
                type_strict_match(v2int32, v2bool):
                    LLVMIrTemplateOperator("sext <2 x i1> {} to <2 x i32>", arity=1),
                type_strict_match(v4int32, v4bool):
                    LLVMIrTemplateOperator("sext <4 x i1> {} to <4 x i32>", arity=1),
                type_strict_match(v8int32, v8bool):
                    LLVMIrTemplateOperator("sext <8 x i1> {} to <8 x i32>", arity=1),
            },
        },
    },
    NearestInteger: {
        None: {
            lambda _: True: {
                type_strict_match(ML_Int32, ML_Binary32):
                    LLVMIrTemplateOperator("fptosi float {} to i32", arity=1),
                type_strict_match(ML_Binary32, ML_Binary32):
                    LLVMIrIntrinsicOperator("llvm.nearbyint.f32", arity=1, output_precision=ML_Binary32, input_formats=[ML_Binary32]),
                type_strict_match(ML_Int64, ML_Binary64):
                    # fixed: an ML_Binary64 operand is an LLVM double
                    LLVMIrTemplateOperator("fptosi double {} to i64", arity=1),
                type_strict_match(ML_Binary64, ML_Binary64):
                    LLVMIrIntrinsicOperator("llvm.nearbyint.f64", arity=1, output_precision=ML_Binary64, input_formats=[ML_Binary64]),
                # vector version
                # NOTE(review): LLVM mangles vector intrinsics as
                # llvm.nearbyint.v4f32; confirm ".f32" is remapped
                # downstream before relying on this entry.
                type_strict_match(v4float32, v4float32):
                    LLVMIrIntrinsicOperator("llvm.nearbyint.f32", arity=1, output_precision=v4float32, input_formats=[v4float32]),
                type_strict_match(v4int32, v4float32):
                    LLVMIrTemplateOperator("fptosi <4 x float> {} to <4 x i32>", arity=1),
                    #ComplexOperator(optree_modifier=legalize_integer_nearest),
            },
        },
    },
    FusedMultiplyAdd: {
        FusedMultiplyAdd.Standard: {
            (lambda _: True): {
                type_strict_match(ML_Binary32, ML_Binary32, ML_Binary32, ML_Binary32):
                    LLVMIrIntrinsicOperator("llvm.fma.f32", arity=3, output_precision=ML_Binary32, input_formats=3 * [ML_Binary32]),
                type_strict_match(ML_Binary64, ML_Binary64, ML_Binary64, ML_Binary64):
                    LLVMIrIntrinsicOperator("llvm.fma.f64", arity=3, output_precision=ML_Binary64, input_formats=3 * [ML_Binary64]),
            }
        },
        # non-standard FMA variants are expanded to standard operations
        FusedMultiplyAdd.Subtract: {
            (lambda _: True): {
                type_strict_match(ML_Binary32, ML_Binary32, ML_Binary32, ML_Binary32):
                    ComplexOperator(optree_modifier=legalize_fma_to_std),
                type_strict_match(ML_Binary64, ML_Binary64, ML_Binary64, ML_Binary64):
                    ComplexOperator(optree_modifier=legalize_fma_to_std),
            },
        },
        FusedMultiplyAdd.Negate: {
            (lambda _: True): {
                type_strict_match(ML_Binary32, ML_Binary32, ML_Binary32, ML_Binary32):
                    ComplexOperator(optree_modifier=legalize_fma_to_std),
                type_strict_match(ML_Binary64, ML_Binary64, ML_Binary64, ML_Binary64):
                    ComplexOperator(optree_modifier=legalize_fma_to_std),
            },
        },
        FusedMultiplyAdd.SubtractNegate: {
            (lambda _: True): {
                type_strict_match(ML_Binary32, ML_Binary32, ML_Binary32, ML_Binary32):
                    ComplexOperator(optree_modifier=legalize_fma_to_std),
                type_strict_match(ML_Binary64, ML_Binary64, ML_Binary64, ML_Binary64):
                    ComplexOperator(optree_modifier=legalize_fma_to_std),
            },
        },
    },
    Addition: {
        None: {
            (lambda _: True):
                dict(
                    [
                        (
                            type_strict_match(precision, precision, precision),
                            llvm_op_function("add", precision)
                        ) for precision in [
                            ML_Int32, ML_Int64,
                            v2int32, v4int32, v8int32,
                            v2int64, v4int64, v8int64,
                        ]
                    ] + [
                        (
                            type_strict_match(precision, precision, precision),
                            llvm_op_function("fadd", precision),
                        ) for precision in [
                            ML_Binary32, ML_Binary64,
                            v2float32, v4float32, v8float32,
                            v2float64, v4float64, v8float64,
                        ]
                    ]
                )
        },
    },
    Negation: {
        None: {
            (lambda _: True):
                dict(
                    [
                        (
                            type_strict_match(precision, precision),
                            llvm_negation_function(precision)
                        ) for precision in [
                            ML_Binary32, ML_Binary64,
                            ML_Int32, ML_Int64,
                            v2int32, v4int32, v8int32,
                            v2int64, v4int64, v8int64,
                            v2float32, v4float32, v8float32,
                            v2float64, v4float64, v8float64,
                        ]
                    ]
                )
        },
    },
    BitLogicAnd: {
        None: {
            (lambda _: True):
                dict(
                    [
                        (
                            type_strict_match(precision, precision, precision),
                            llvm_op_function("and", precision)
                        ) for precision in [
                            ML_Int32, ML_Int64,
                            v2int32, v4int32, v8int32,
                            v2int64, v4int64, v8int64,
                        ]
                    ]
                )
        },
    },
    LogicalAnd: {
        None: {
            (lambda _: True):
                dict(
                    [
                        (
                            type_strict_match(precision, precision, precision),
                            llvm_op_function("and", precision)
                        ) for precision in [
                            ML_Bool,
                            v2bool, v4bool, v8bool,
                        ]
                    ]
                )
        },
    },
    LogicalOr: {
        None: {
            (lambda _: True):
                dict(
                    [
                        (
                            type_strict_match(precision, precision, precision),
                            llvm_op_function("or", precision)
                        ) for precision in [
                            ML_Bool,
                            v2bool, v4bool, v8bool,
                        ]
                    ]
                )
        },
    },
    LogicalNot: {
        None: {
            (lambda _: True):
                dict(
                    [
                        (
                            type_strict_match(precision, precision),
                            llvm_not_function(precision)
                        ) for precision in [
                            ML_Bool,
                            v2bool, v4bool, v8bool,
                        ]
                    ]
                )
        },
    },
    BitLogicRightShift: {
        None: {
            (lambda _: True):
                dict(
                    [
                        (
                            type_strict_match(precision, precision, precision),
                            llvm_op_function("lshr", precision)
                        ) for precision in [
                            ML_Int32, ML_Int64,
                            v2int32, v4int32, v8int32,
                            v2int64, v4int64, v8int64,
                        ]
                    ]
                )
        },
    },
    BitLogicLeftShift: {
        None: {
            (lambda _: True):
                dict(
                    [
                        (
                            type_strict_match(precision, precision, precision),
                            llvm_op_function("shl", precision)
                        ) for precision in [
                            ML_Int32, ML_Int64,
                            v2int32, v4int32, v8int32,
                            v2int64, v4int64, v8int64,
                        ]
                    ]
                )
        },
    },
    TypeCast: {
        None: {
            (lambda _: True): {
                type_strict_match(ML_Int32, ML_Binary32):
                    llvm_bitcast_function(ML_Int32, ML_Binary32),
                type_strict_match(ML_Binary32, ML_Int32):
                    llvm_bitcast_function(ML_Binary32, ML_Int32),
                type_strict_match(ML_Int64, ML_Binary64):
                    llvm_bitcast_function(ML_Int64, ML_Binary64),
                type_strict_match(ML_Binary64, ML_Int64):
                    llvm_bitcast_function(ML_Binary64, ML_Int64),
                type_strict_match(ML_Int128, v4int32):
                    llvm_bitcast_function(ML_Int128, v4int32),
                type_strict_match(ML_Int256, v8int32):
                    llvm_bitcast_function(ML_Int256, v8int32),
                type_strict_match(v4float32, v4int32):
                    llvm_bitcast_function(v4float32, v4int32),
                type_strict_match(v4int32, v4float32):
                    llvm_bitcast_function(v4int32, v4float32),
            },
        },
    },
    Subtraction: {
        None: {
            (lambda _: True):
                dict(
                    [
                        (
                            type_strict_match(precision, precision, precision),
                            llvm_op_function("sub", precision)
                        ) for precision in [
                            ML_Int32, ML_Int64,
                            v2int32, v4int32, v8int32,
                            v2int64, v4int64, v8int64,
                        ]
                    ] + [
                        (
                            type_strict_match(precision, precision, precision),
                            llvm_op_function("fsub", precision),
                        ) for precision in [
                            ML_Binary32, ML_Binary64,
                            v2float32, v4float32, v8float32,
                            v2float64, v4float64, v8float64,
                        ]
                    ]
                )
        },
    },
    Multiplication: {
        None: {
            (lambda _: True):
                dict(
                    [
                        (
                            type_strict_match(precision, precision, precision),
                            llvm_op_function("mul", precision)
                        ) for precision in [
                            ML_Int32, ML_Int64,
                            v2int32, v4int32, v8int32,
                            v2int64, v4int64, v8int64,
                        ]
                    ] + [
                        (
                            type_strict_match(precision, precision, precision),
                            llvm_op_function("fmul", precision),
                        ) for precision in [
                            ML_Binary32, ML_Binary64,
                            v2float32, v4float32, v8float32,
                            v2float64, v4float64, v8float64,
                        ]
                    ]
                )
        },
    },
    # operation class
    ExponentInsertion: {
        # operation specifier
        ExponentInsertion.Default: {
            # predicate : ML_Operation -> bool
            (lambda _: True): {
                type_strict_match(ML_Binary32, ML_Int32):
                    ComplexOperator(
                        optree_modifier=legalize_exp_insertion(ML_Binary32)
                    ),
                type_strict_match(v4float32, v4int32):
                    ComplexOperator(
                        optree_modifier=legalize_exp_insertion(v4float32)
                    ),
            },
        },
    },
    Return: {
        None: {
            lambda _: True:
                dict(
                    (
                        type_strict_match(precision, precision),
                        llvm_ret_function(precision)
                    ) for precision in [
                        ML_Int32, ML_Int64, ML_Binary32, ML_Binary64,
                        v2float32, v4float32, v8float32,
                        v2float64, v4float64, v8float64,
                    ]
                )
        },
    },
    Comparison: {
        Comparison.GreaterOrEqual: {
            lambda _: True:
                generate_comp_mapping(Comparison.GreaterOrEqual, "oge", "ge")
        },
        Comparison.Greater: {
            lambda _: True:
                generate_comp_mapping(Comparison.Greater, "ogt", "sgt")
        },
        Comparison.Less: {
            lambda _: True:
                generate_comp_mapping(Comparison.Less, "olt", "slt")
        },
        Comparison.LessOrEqual: {
            lambda _: True:
                generate_comp_mapping(Comparison.LessOrEqual, "ole", "sle")
        },
        Comparison.Equal: {
            lambda _: True:
                generate_comp_mapping(Comparison.Equal, "oeq", "eq")
        },
        Comparison.NotEqual: {
            lambda _: True:
                # fixed: "ne" is not a valid fcmp predicate; use the
                # unordered not-equal ("une", complement of "oeq")
                generate_comp_mapping(Comparison.NotEqual, "une", "ne")
        },
    },
    VectorElementSelection: {
        None: {
            lambda _: True: {
                type_strict_match(ML_Bool, v4bool, ML_Int32):
                    llvm_extract_element_function(v4bool, ML_Int32),
                type_strict_match(ML_Binary32, v4float32, ML_Int32):
                    llvm_extract_element_function(v4float32, ML_Int32),
            }
        },
    },
    Test: {
        Test.IsInfOrNaN: {
            lambda _: True: {
                type_strict_match_list([ML_Int32, ML_Bool], [ML_Binary32]):
                    ComplexOperator(legalize_test),
                type_strict_match_list([v4bool], [v4float32]):
                    ComplexOperator(legalize_test),
            }
        },
        Test.IsSubnormal: {
            lambda _: True: {
                type_strict_match_list([ML_Int32, ML_Bool], [ML_Binary32]):
                    ComplexOperator(legalize_test),
            }
        },
        Test.IsNaN: {
            lambda _: True: {
                type_strict_match_list([ML_Int32, ML_Bool], [ML_Binary32]):
                    ComplexOperator(legalize_test),
            }
        },
        Test.IsInfty: {
            lambda _: True: {
                type_strict_match_list([ML_Int32, ML_Bool], [ML_Binary32]):
                    ComplexOperator(legalize_test),
            }
        },
        Test.IsSignalingNaN: {
            lambda _: True: {
                type_strict_match_list([ML_Int32, ML_Bool], [ML_Binary32]):
                    ComplexOperator(legalize_test),
            }
        },
        Test.CompSign: {
            lambda _: True: {
                type_strict_match_list([ML_Int32, ML_Bool], [ML_Binary32], [ML_Binary32]):
                    ComplexOperator(legalize_comp_sign),
            }
        },
        Test.IsMaskNotAnyZero: {
            lambda _: True: {
                type_strict_match(ML_Bool, v4bool):
                    ComplexOperator(optree_modifier=legalize_vector_reduction_test),
            }
        },
    },
    ReciprocalSeed: {
        None: {
            lambda optree: True: {
                type_strict_match(ML_Binary32, ML_Binary32):
                    ComplexOperator(optree_modifier=legalize_reciprocal_seed),
                type_strict_match(ML_Binary64, ML_Binary64):
                    ComplexOperator(optree_modifier=legalize_reciprocal_seed),
            },
        },
    },
}
## LLVM-IR capable backend
class LLVMBackend(GenericProcessor):
    """Instruction-selection backend targeting textual LLVM-IR
    (selected with --target llvm)."""

    target_name = "llvm"
    # Register lazily through a lambda: the class object is not bound to
    # the name LLVMBackend until the class body finishes executing.
    TargetRegister.register_new_target(target_name, lambda _: LLVMBackend)

    default_compiler = "clang"

    # code generation table map
    code_generation_table = {
        LLVM_IR_Code: llvm_ir_code_generation_table,
    }

    def __init__(self, *args):
        GenericProcessor.__init__(self, *args)
        # Pre-compute the supported-operation map for LLVM-IR lookups.
        self.simplified_rec_op_map[LLVM_IR_Code] = self.generate_supported_op_map(language=LLVM_IR_Code)

    ## return the compiler command line program to use to build
    #  test programs
    def get_compiler(self):
        return LLVMBackend.default_compiler

    ## Return a list of compiler option strings for the @p self target
    def get_compilation_options(self, ML_SRC_DIR):
        """ return list of compiler options """
        return [" "]

    def instanciate_pass_pipeline(self, pass_scheduler, processor, extra_passes, language=LLVM_IR_Code):
        """ instanciate an optimization pass pipeline for VectorBackend targets """
        # Passes required to lower the graph to SSA form before emitting
        # LLVM-IR; prepended to any user-provided extra passes.
        EXTRA_VECTOR_PASSES = [
            "beforecodegen:lowering_exception",
            "beforecodegen:gen_basic_block",
            "beforecodegen:basic_block_simplification",
            "beforecodegen:ssa_translation",
        ]
        return GenericProcessor.instanciate_pass_pipeline(self, pass_scheduler, processor,
                                                          EXTRA_VECTOR_PASSES + extra_passes,
                                                          language=language)
# Emit an initialization trace when this backend module is imported.
Log.report(LOG_BACKEND_INIT, "Initializing llvm backend target")
| 36.044177 | 134 | 0.542433 |
e0c49ab82e41f935037eeed7ccd2b76334f0dbd4 | 1,062 | py | Python | scripts/python/sequencer_tcs_start_of_night.py | lsst-ts/ts_ocs_sequencer | e99b43e5264bcc22d664c12f1988c42411c73d5a | [
"BSD-3-Clause"
] | null | null | null | scripts/python/sequencer_tcs_start_of_night.py | lsst-ts/ts_ocs_sequencer | e99b43e5264bcc22d664c12f1988c42411c73d5a | [
"BSD-3-Clause"
] | null | null | null | scripts/python/sequencer_tcs_start_of_night.py | lsst-ts/ts_ocs_sequencer | e99b43e5264bcc22d664c12f1988c42411c73d5a | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# +
# import(s)
# -
from OcsSequencerEntity import *
# +
# main()
# -
if __name__ == '__main__':
    """Sequence the TCS start-of-night commands (enterControl, start,
    enable) through the OCS sequencer."""
    sequencer = None
    try:
        sequencer = OcsSequencerEntity('OCS', 'Sequencer', False)
    except OcsGenericEntityException as e:
        print(e.errstr)
    if sequencer:
        # enter control
        cmd = 'enterControl entity=tcs'
        msg = "sequencer.sequence('{0:s}')".format(cmd)
        sequencer.logger.info(msg)
        sequencer.sequence(cmd)
        # start (the start-id may be supplied as the first CLI argument;
        # previously wrapped in a pointless one-element list)
        startid = sys.argv[1] if len(sys.argv) > 1 else 'Normal'
        cmd = 'start entity=tcs startid={0:s}'.format(startid)
        msg = "sequencer.sequence('{0:s}')".format(cmd)
        sequencer.logger.info(msg)
        sequencer.sequence(cmd)
        # enable
        cmd = 'enable entity=tcs'
        msg = "sequencer.sequence('{0:s}')".format(cmd)
        sequencer.logger.info(msg)
        sequencer.sequence(cmd)
        # execute destructor
        sequencer.logger.info('del sequencer')
        del sequencer
| 23.6 | 65 | 0.584746 |
c86d85d5bb02074e6751c1de1bc341e5e4c579f0 | 5,669 | py | Python | Reproduction package/RQ3_CoM_Testing.py | lhmtriet/MSR2019 | ff5100c9ae3c8180852df92134a0b090d9b420ab | [
"MIT"
] | 2 | 2020-10-28T01:05:52.000Z | 2022-03-18T02:34:20.000Z | Reproduction package/RQ3_CoM_Testing.py | lhmtriet/MSR2019 | ff5100c9ae3c8180852df92134a0b090d9b420ab | [
"MIT"
] | null | null | null | Reproduction package/RQ3_CoM_Testing.py | lhmtriet/MSR2019 | ff5100c9ae3c8180852df92134a0b090d9b420ab | [
"MIT"
] | 2 | 2019-12-02T17:11:08.000Z | 2022-02-20T15:34:15.000Z | # Import libraries
import pandas as pd
import numpy as np
import time
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score, f1_score
from xgboost import XGBClassifier
from sklearn.linear_model import LogisticRegression
from lightgbm import LGBMClassifier
# Load datasets: the cleaned CVE descriptions plus CVSS2 labels.
df = pd.read_csv('Data/all_cves_cleaned.csv')
# Extract year
import re  # NOTE(review): mid-file import; conventionally belongs at the top
def extractYearFromId(id):
    """Return the year component of a CVE identifier as a string.

    Raises AttributeError when `id` does not look like 'CVE-YYYY-NNNN'
    (re.match returns None), matching the original behavior.
    """
    cve_pattern = re.compile(r'CVE-(\d+)-\d+')
    return cve_pattern.match(id).group(1)
# Year in CVE-ID
df['Year'] = df.ID.map(extractYearFromId).astype(np.int64)
# Extract non-null CVSS2 training & testing sets
notnull_indices = np.where(df.CVSS2_Avail.notnull())[0]
# Remove null values (rows lacking CVSS2 labels)
df_notnull = df.iloc[notnull_indices]
# Year to split dataset (time-based split, no shuffling)
split_year = 2016
# Training set: Vuln. before 2016
train_indices = np.where(df_notnull.Year < split_year)[0]
# Testing set: Vuln. from 2016
test_indices = np.where(df_notnull.Year >= split_year)[0]
# Feature source: the cleaned vulnerability descriptions
X = df_notnull.Cleaned_Desc
X_train, X_test = X.iloc[train_indices], X.iloc[test_indices]
# Extract the character n-grams as vocabulary and transform n-grams into the feature vectors
def feature_model(X_train, X_test, config):
    """Build character n-gram features for both splits.

    config selects the NLP representation: 2/6/7/8 enable tf-idf
    weighting with l2 normalization, otherwise raw counts are used.
    Returns (X_train_transformed, X_test_transformed) as float64
    sparse matrices.  The vocabulary is fitted on X_train only.
    """
    X_train = X_train.astype(str)
    X_test = X_test.astype(str)
    start_char_ngram = 3
    end_char_ngram = 6
    use_idf = False
    norm = None
    if config == 2 or config == 6 or config == 7 or config == 8:
        use_idf = True
        norm = 'l2'
    # First pass: learn the candidate character-n-gram vocabulary
    # (min_df=0.1 prunes rare n-grams at this stage only).
    vocab_generator = TfidfVectorizer(stop_words=['aka'], ngram_range=(start_char_ngram, end_char_ngram), use_idf=use_idf, min_df=0.1,
                                      analyzer='char', norm=norm)
    # Generate character vocabulary
    vocab_generator.fit(X_train)
    char_vocabs = vocab_generator.vocabulary_
    # Filter character vocabulary: keep only single-token n-grams of
    # length > 1 (drops n-grams spanning whitespace).
    slt_char_vocabs = []
    for w in char_vocabs.keys():
        toks = w.split()
        if len(toks) == 1 and len(toks[0]) > 1:
            slt_char_vocabs.append(w.strip())
    slt_char_vocabs = set(slt_char_vocabs)
    # Feature transformation with the filtered, fixed vocabulary.
    # NOTE(review): the transform range starts at start_char_ngram - 1
    # (i.e. 2) while the vocabulary was learned with (3, 6), and min_df
    # drops from 0.1 to 0 — confirm both asymmetries are intentional.
    char_vectorizer = TfidfVectorizer(stop_words=['aka'], ngram_range=(start_char_ngram - 1, end_char_ngram), use_idf=use_idf, min_df=0,
                                      analyzer='char', norm=norm, vocabulary=slt_char_vocabs)
    X_train_transformed = char_vectorizer.fit_transform(X_train)
    X_test_transformed = char_vectorizer.transform(X_test)
    X_train_transformed = X_train_transformed.astype(np.float64)
    X_test_transformed = X_test_transformed.astype(np.float64)
    return X_train_transformed, X_test_transformed
# Test the data
def evaluate(clf, X_train_transformed, X_test_transformed, y):
    """Fit `clf` on the pre-2016 rows and score it on the 2016+ rows.

    Uses the module-level train_indices/test_indices to split `y`.
    Returns a tab-separated string: accuracy, macro F1, weighted F1,
    train time and predict time (seconds).

    Fix: time.clock() was deprecated since Python 3.3 and removed in
    3.8; time.perf_counter() is the documented replacement for wall
    timing.
    """
    y_train, y_test = y[train_indices], y[test_indices]
    t_start = time.perf_counter()
    clf.fit(X_train_transformed, y_train)
    train_time = time.perf_counter() - t_start
    p_start = time.perf_counter()
    y_pred = clf.predict(X_test_transformed)
    pred_time = time.perf_counter() - p_start
    return "{:.3f}".format(accuracy_score(y_test, y_pred)) + "\t" + "{:.3f}".format(
        f1_score(y_test, y_pred, average='macro')) + "\t" + "{:.3f}".format(
        f1_score(y_test, y_pred, average='weighted')) + "\t" + "{:.3f}".format(train_time) + "\t" + "{:.3f}".format(pred_time)
# The seven CVSS2 output labels predicted from the description text.
labels = ['CVSS2_Conf', 'CVSS2_Integrity', 'CVSS2_Avail', 'CVSS2_AccessVect',
          'CVSS2_AccessComp', 'CVSS2_Auth', 'CVSS2_Severity']
# Per-label best classifier found during validation.
# NOTE(review): XGBClassifier's documented multi-class objectives are
# 'multi:softmax' / 'multi:softprob'; 'multiclass' looks like a typo
# carried over from LightGBM — confirm against the tuning runs.
clfs = {'CVSS2_Conf': {'LGBM': LGBMClassifier(num_leaves=100, max_depth=-1, objective='multiclass', n_jobs=-1, random_state=42)},
        'CVSS2_Integrity': {'XGB': XGBClassifier(objective='multiclass', max_depth=0, max_leaves=100, grow_policy='lossguide',
                                                 n_jobs=-1, random_state=42, tree_method='hist')},
        'CVSS2_Avail': {'LGBM': LGBMClassifier(num_leaves=100, max_depth=-1, objective='multiclass', n_jobs=-1, random_state=42)},
        'CVSS2_AccessVect': {'XGB': XGBClassifier(objective='multiclass', max_depth=0, max_leaves=100, grow_policy='lossguide',
                                                  n_jobs=-1, random_state=42, tree_method='hist')},
        'CVSS2_AccessComp': {'LGBM': LGBMClassifier(num_leaves=100, max_depth=-1, objective='multiclass', n_jobs=-1, random_state=42)},
        'CVSS2_Auth': {'LR': LogisticRegression(C=0.1, multi_class='ovr', n_jobs=-1, solver='lbfgs', max_iter=1000,
                                                random_state=42)},
        'CVSS2_Severity': {'LGBM': LGBMClassifier(num_leaves=100, max_depth=-1, objective='multiclass', n_jobs=-1, random_state=42)}}
# Per-label best NLP representation (see get_config for the meaning).
configs = {'CVSS2_Conf': 1, 'CVSS2_Integrity': 4, 'CVSS2_Avail': 1, 'CVSS2_AccessVect': 7, 'CVSS2_AccessComp': 1, 'CVSS2_Auth': 3, 'CVSS2_Severity': 5}
def get_config(config):
    """Print a human-readable description of an NLP-representation id.

    1 = bag-of-words, 2 = bag-of-words + tf-idf, 3-5 = n-grams,
    6+ = n-grams + tf-idf.
    """
    if config == 1:
        description = "Bag-of-word without tf-idf"
    elif config == 2:
        description = "Bag-of-word with tf-idf"
    elif config <= 5:
        description = "N-gram without tf-idf"
    else:
        description = "N-gram with tf-idf"
    print(description)
# Iterate over each label and corresponding optimal classifier and NLP representation for testing
# Iterate over each label and its tuned classifier + NLP representation,
# reporting test metrics and timings.
# Fix: time.clock() was removed in Python 3.8; time.perf_counter() is
# the replacement for measuring elapsed wall time.
for label in labels:
    print("Current output:", label, "\n")
    t_time = time.perf_counter()
    config = configs[label]
    get_config(config)
    cur_clf = clfs[label]
    # Feature extraction is done once per label (it depends only on the
    # chosen representation, not on the classifier).
    X_train_transformed, X_test_transformed = feature_model(X_train, X_test, config)
    print("Building vocab time:", time.perf_counter() - t_time)
    print("Classifier\tAccuracy\tMacro F-Score\tWeighted F-Score\tTrain time\tPredict time\n")
    for clf_name, clf in cur_clf.items():
        print(clf_name + "\t" + "", end='')
        y = df_notnull[label].values
        print(evaluate(clf, X_train_transformed, X_test_transformed, y))
    print("##############################################")
fa46355eed5d812ea7495975171cd9f431b80973 | 2,730 | py | Python | lib/surface/app/instances/list.py | bshaffer/google-cloud-sdk | f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9 | [
"Apache-2.0"
] | null | null | null | lib/surface/app/instances/list.py | bshaffer/google-cloud-sdk | f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9 | [
"Apache-2.0"
] | null | null | null | lib/surface/app/instances/list.py | bshaffer/google-cloud-sdk | f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*- #
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The `app instances list` command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.app import appengine_api_client
from googlecloudsdk.calliope import base
# Base URL prefix for App Engine resource URIs, versioned to match the
# API client in use.
APPENGINE_PATH_START = 'https://appengine.googleapis.com/{0}/'.format(
    appengine_api_client.AppengineApiClient.ApiVersion())
def _GetUri(resource):
  """Build the full API URI for an instance resource (object or dict)."""
  # TODO(b/29539463): Use parser when instances collection adds simple URIs
  # and a Get method
  try:
    name = resource.instance.name
  except AttributeError:
    # Fall back to dict-shaped resources.
    name = resource['instance']['name']
  return APPENGINE_PATH_START + name
class List(base.ListCommand):
  """List the instances affiliated with the current App Engine project."""

  detailed_help = {
      'EXAMPLES': """\
          To list all App Engine instances, run:

              $ {command}

          To list all App Engine instances for a given service, run:

              $ {command} -s myservice

          To list all App Engine instances for a given version, run:

              $ {command} -v v1
          """,
  }

  @staticmethod
  def Args(parser):
    """Register flags and the table/URI display configuration."""
    parser.add_argument('--service', '-s',
                        help=('If specified, only list instances belonging to '
                              'the given service.'))
    parser.add_argument('--version', '-v',
                        help=('If specified, only list instances belonging to '
                              'the given version.'))
    parser.display_info.AddFormat("""
          table(
            service:sort=1,
            version:sort=2,
            id:sort=3,
            instance.vmStatus.yesno(no="N/A"),
            instance.vmDebugEnabled.yesno(yes="YES", no=""):label=DEBUG_MODE
          )
    """)
    parser.display_info.AddUriFunc(_GetUri)
    # TODO(b/29539463) Resources of this API are not parsable.
    parser.display_info.AddCacheUpdater(None)

  def Run(self, args):
    """Fetch all instances, optionally filtered by service/version."""
    api_client = appengine_api_client.GetApiClientForTrack(self.ReleaseTrack())
    return api_client.GetAllInstances(args.service, args.version)
| 33.703704 | 79 | 0.668498 |
780e27e9b11b1b098fe321c0a2854c70068ce2c0 | 6,259 | py | Python | check_distro.py | ros-arch/arch_ros_package_monitor | 8940d98b672b3897ae66395da7d2336b9fb0a191 | [
"BSD-3-Clause"
] | 1 | 2022-02-16T03:29:15.000Z | 2022-02-16T03:29:15.000Z | check_distro.py | ros-arch/arch_ros_package_monitor | 8940d98b672b3897ae66395da7d2336b9fb0a191 | [
"BSD-3-Clause"
] | null | null | null | check_distro.py | ros-arch/arch_ros_package_monitor | 8940d98b672b3897ae66395da7d2336b9fb0a191 | [
"BSD-3-Clause"
] | 1 | 2020-06-27T06:28:04.000Z | 2020-06-27T06:28:04.000Z | #!/usr/bin/env python3
# Copyright © 2020 Felix Exner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the organization nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Felix Exner ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Felix Exner BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import sys
import catkin_pkg
from helpers.aur import AURAdapter
from helpers.github import GHAdapter
from helpers.rosdistro_adapter import RosdistroAdapter
from helpers.package import Package
def aur_pkg_name_from_name(pkg_name, distro_name):
    """Derive the AUR package name for a ROS package of a given distribution.

    Underscores in the ROS package name are mapped to dashes, per the
    ros-<distro>-<name> AUR naming convention.
    """
    normalized = pkg_name.replace('_', '-')
    return "ros-%s-%s" % (distro_name, normalized)
def _print_group(header, packages, installed_only):
    """Print one report section: ``header`` followed by one package per line.

    :param header: section heading, printed verbatim (includes leading newline).
    :param packages: Package objects to list.
    :param installed_only: when True, skip packages that are not installed locally.
    """
    print(header)
    for pkg in packages:
        if installed_only and not pkg.is_installed():
            # Caller asked for installed packages only.
            continue
        print(pkg)
def main():
    """Compare the packages of a ROS distribution against their AUR state.

    Builds the package list from rosdistro, queries the AUR (and optionally
    GitHub) for each package, sorts the results into missing / outdated /
    out-of-sync / ahead buckets and prints the sections requested on the
    command line.
    """
    parser = argparse.ArgumentParser(
        description='A small package to get an overview of Archlinux ROS packages')
    parser.add_argument('--distro_name', type=str,
                        help='The ROS distribution that should be used. Defaults to "noetic"',
                        default='noetic')
    parser.add_argument('--hide_outdated', dest='show_outdated', action='store_false',
                        help='Hide packages that are outdated in AUR')
    parser.add_argument('--hide_outofsync', dest='show_outofsync', action='store_false',
                        help="Hide packages where the github version doesn't match the AUR version")
    parser.add_argument('--show_installed_only', dest='show_installed', action='store_true',
                        help='Show only outdated packages that are installed.')
    parser.add_argument('--hide_missing', dest='show_missing', action='store_false',
                        help='Hide packages that are missing in AUR',
                        default=True)
    parser.add_argument('--hide_ahead', dest='show_ahead', action='store_false',
                        help='Hide packages that are ahead in AUR')
    parser.set_defaults(show_outdated=True)
    parser.set_defaults(show_outofsync=True)
    parser.set_defaults(show_installed=False)
    parser.set_defaults(show_missing=True)
    parser.set_defaults(show_ahead=True)
    args = parser.parse_args()
    print('Checking distro "%s". this might take a while...' % args.distro_name)
    rosdistro = RosdistroAdapter(args.distro_name)
    package_distribution_list = rosdistro.get_package_list()
    aur_adapter = AURAdapter(args.distro_name)
    gh_adapter = GHAdapter(args.distro_name)
    outdated_pkgs = []
    outofsync_pkgs = []
    missing_pkgs = []
    error_pkgs = []  # collected for completeness; not part of any report yet
    ahead_pkgs = []
    for pkg_name in package_distribution_list:
        pkg = Package(pkg_name)
        try:
            pkg_info = rosdistro.get_package_by_name(pkg_name)
            pkg.add_rosdistro_information(pkg_info)
            aur_pkg_name = aur_pkg_name_from_name(pkg_name, args.distro_name)
            aur_pkg = aur_adapter.get_package_info(aur_pkg_name)
            if aur_pkg:
                pkg.add_aur_information(aur_pkg)
                # Querying GitHub is slow, so only do it when the
                # out-of-sync report was actually requested.
                if args.show_outofsync:
                    gh_pkg = gh_adapter.get_package_info(aur_pkg_name)
                    if gh_pkg:
                        pkg.add_gh_information(gh_pkg)
                if pkg.is_outdated():
                    outdated_pkgs.append(pkg)
                if pkg.is_outofsync():
                    outofsync_pkgs.append(pkg)
            elif pkg.is_missing():
                missing_pkgs.append(pkg)
            if pkg.is_ahead():
                ahead_pkgs.append(pkg)
        except TypeError as err:
            error_pkgs.append(pkg)
            print("Parsing error: %s\n%s" % (pkg_name, err), file=sys.stderr)
        except catkin_pkg.package.InvalidPackage as err:
            error_pkgs.append(pkg)
            print("Invalid package: %s\n%s" % (pkg_name, err), file=sys.stderr)
    # The four report sections shared an identical skip/print loop; it now
    # lives in _print_group.
    if args.show_missing:
        _print_group("\nMissing packages:", missing_pkgs, args.show_installed)
    if args.show_outdated:
        _print_group("\nOutdated packages:", outdated_pkgs, args.show_installed)
    if args.show_outofsync:
        _print_group("\nOut of sync packages:", outofsync_pkgs, args.show_installed)
    if args.show_ahead:
        _print_group("\nAhead packages:", ahead_pkgs, args.show_installed)
if __name__ == "__main__":
    main()
| 40.121795 | 100 | 0.657134 |
bfb506356bd2c436fa4d4c4afcf6d107334152c3 | 1,063 | py | Python | textprase/spiders/hexun.py | miemiekurisu/fnews | 33560d0561c1cf7d1a828df5e46a2d7475e39c4e | [
"Apache-2.0"
] | null | null | null | textprase/spiders/hexun.py | miemiekurisu/fnews | 33560d0561c1cf7d1a828df5e46a2d7475e39c4e | [
"Apache-2.0"
] | null | null | null | textprase/spiders/hexun.py | miemiekurisu/fnews | 33560d0561c1cf7d1a828df5e46a2d7475e39c4e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import scrapy
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from textprase.items import TextpraseItem
from scrapy.selector import HtmlXPathSelector
class HexunSpider(scrapy.Spider):
    """Spider that scrapes headline titles and links from hexun.com."""
    name = "hexun"
    allowed_domains = ["hexun.com"]
    start_urls = (
        'http://www.hexun.com/',
    )
    def parse(self, response):
        """Return one TextpraseItem (title + url) per entry in the news list."""
        selector = scrapy.Selector(response)
        collected = []
        for entry in selector.xpath('//ul[@class="news-list"]/li'):
            item = TextpraseItem()
            item['title'] = entry.xpath('a/text()').extract()
            item['url'] = entry.xpath('a/@href').extract()
            collected.append(item)
        return collected
| 27.25641 | 82 | 0.665099 |
868f2a5e16a6efc1b29e9f1c076fcd182c5714c5 | 3,297 | py | Python | test/algorithms/excited_state_solvers/eigensolver_factories/test_numpy_eigensolver_factory.py | divshacker/qiskit-nature | 08f6dcec5e4ac8c08f5b84e764ee78cc3d12facb | [
"Apache-2.0"
] | 1 | 2021-06-20T15:31:01.000Z | 2021-06-20T15:31:01.000Z | test/algorithms/excited_state_solvers/eigensolver_factories/test_numpy_eigensolver_factory.py | divshacker/qiskit-nature | 08f6dcec5e4ac8c08f5b84e764ee78cc3d12facb | [
"Apache-2.0"
] | null | null | null | test/algorithms/excited_state_solvers/eigensolver_factories/test_numpy_eigensolver_factory.py | divshacker/qiskit-nature | 08f6dcec5e4ac8c08f5b84e764ee78cc3d12facb | [
"Apache-2.0"
] | 3 | 2021-07-02T06:57:58.000Z | 2021-07-06T12:32:38.000Z | # This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test NumPyMinimumEigensovler Factory """
import unittest
from test import QiskitNatureTestCase
import numpy as np
from qiskit.algorithms import NumPyEigensolver
from qiskit_nature.algorithms import NumPyEigensolverFactory
from qiskit_nature import QiskitNatureError
from qiskit_nature.drivers import UnitsType
from qiskit_nature.drivers.second_quantization import PySCFDriver
from qiskit_nature.problems.second_quantization import ElectronicStructureProblem
class TestNumPyEigensolverFactory(QiskitNatureTestCase):
    """Test NumPyEigensolver Factory"""
    # NOTE: The actual usage of this class is mostly tested in combination with the ground-state
    # eigensolvers (one module above).
    def setUp(self):
        super().setUp()
        try:
            # H2 at 0.75 Angstrom in a minimal (sto3g) basis; the whole test
            # case is skipped when the PySCF driver is not installed.
            self.driver = PySCFDriver(
                atom="H .0 .0 .0; H .0 .0 0.75",
                unit=UnitsType.ANGSTROM,
                charge=0,
                spin=0,
                basis="sto3g",
            )
        except QiskitNatureError:
            self.skipTest("PYSCF driver does not appear to be installed")
        self.electronic_structure_problem = ElectronicStructureProblem(self.driver)
        # pylint: disable=unused-argument
        def filter_criterion(eigenstate, eigenvalue, aux_values):
            # Keep only eigenstates whose first auxiliary value is (close to) 2.
            return np.isclose(aux_values[0][0], 2.0)
        self.k = 99
        self._numpy_eigensolver_factory = NumPyEigensolverFactory(
            filter_criterion=filter_criterion, k=self.k
        )
    def test_setters_getters(self):
        """Test Getter/Setter"""
        # filter_criterion: replace the one installed in setUp and read it back.
        self.assertIsNotNone(self._numpy_eigensolver_factory.filter_criterion)
        # pylint: disable=unused-argument
        def filter_criterion(eigenstate, eigenvalue, aux_values):
            return np.isclose(aux_values[0][0], 3.0)
        self._numpy_eigensolver_factory.filter_criterion = filter_criterion
        self.assertEqual(self._numpy_eigensolver_factory.filter_criterion, filter_criterion)
        # k: number of eigenvalues requested from the solver.
        self.assertEqual(self._numpy_eigensolver_factory.k, self.k)
        self._numpy_eigensolver_factory.k = 100
        self.assertEqual(self._numpy_eigensolver_factory.k, 100)
        # use_default_filter_criterion
        self.assertFalse(self._numpy_eigensolver_factory.use_default_filter_criterion)
        self._numpy_eigensolver_factory.use_default_filter_criterion = True
        self.assertTrue(self._numpy_eigensolver_factory.use_default_filter_criterion)
        # get_solver: the produced solver must reflect the factory settings above.
        solver = self._numpy_eigensolver_factory.get_solver(self.electronic_structure_problem)
        self.assertIsInstance(solver, NumPyEigensolver)
        self.assertEqual(solver.k, 100)
        self.assertEqual(solver.filter_criterion, filter_criterion)
if __name__ == "__main__":
unittest.main()
| 37.896552 | 96 | 0.721868 |
f194c4bf6074fd7e61553317d507b1801868edd2 | 3,690 | py | Python | lesson_3_shortest_path_GMNS/FIFO label correcting algorithm.py | asu-trans-ai-lab/traffic-engineering-and-analysis | 19e5718dabee7e3eb200937b5ce2d7e5e2d3fbf1 | [
"CC0-1.0"
] | null | null | null | lesson_3_shortest_path_GMNS/FIFO label correcting algorithm.py | asu-trans-ai-lab/traffic-engineering-and-analysis | 19e5718dabee7e3eb200937b5ce2d7e5e2d3fbf1 | [
"CC0-1.0"
] | null | null | null | lesson_3_shortest_path_GMNS/FIFO label correcting algorithm.py | asu-trans-ai-lab/traffic-engineering-and-analysis | 19e5718dabee7e3eb200937b5ce2d7e5e2d3fbf1 | [
"CC0-1.0"
] | 1 | 2020-12-03T16:01:35.000Z | 2020-12-03T16:01:35.000Z | # _*_coding:utf-8 _*_
# @Time :2020/3/1/10:47
# @Author : Dr.Prase
#@ File :modified label correcting algorithm.py
#@Software :PyCharm
"""import basic packages"""
import pandas as pd
import numpy as np
import copy
# ---- global containers (module-level names kept; they form the script state) ----
g_node_list = []            # node ids, in node.csv order
g_node_zone = {}            # node id -> zone (last column of node.csv)
g_link_list = []            # (from_node, to_node) pairs
g_adjacent_arc_list = {}    # node id -> downstream node ids
g_shortest_path = []        # result rows written to agent.csv
g_node_status = []          # 1 while a node sits in the scan-eligible list
g_number_of_nodes = 0
g_origin = None             # source node (row whose 4th column equals 1)
node_predecessor = []       # predecessor per node (0 = origin, -1 = unset)
node_label_cost = []        # distance label per node
SE_LIST = []                # FIFO scan-eligible list
Max_label_cost = 99999      # acts as "infinity" for unreachable nodes
"""import network data file and initialize corresponding variables"""
# Read the network nodes.
df_node = pd.read_csv('node.csv').iloc[:, :].values
for row in range(len(df_node)):
    node_id = df_node[row, 0]
    g_node_list.append(node_id)
    g_node_zone[node_id] = df_node[row, -1]
    g_number_of_nodes += 1
    g_adjacent_arc_list[node_id] = []
    if df_node[row, 3] == 1:
        g_origin = node_id
g_node_status = [0] * g_number_of_nodes
Distance = np.ones((g_number_of_nodes, g_number_of_nodes)) * Max_label_cost
node_predecessor = [-1] * g_number_of_nodes
node_label_cost = [Max_label_cost] * g_number_of_nodes
node_predecessor[g_origin - 1] = 0
node_label_cost[g_origin - 1] = 0
# Read the arcs.
df_link = pd.read_csv('road_link.csv').iloc[:, :].values
for row in range(len(df_link)):
    from_node, to_node, length = df_link[row, 1], df_link[row, 2], df_link[row, 3]
    g_link_list.append((from_node, to_node))
    Distance[from_node - 1, to_node - 1] = length
    g_adjacent_arc_list[from_node].append(to_node)
SE_LIST = [g_origin]
g_node_status[g_origin - 1] = 1
"""finding shortest path: scan arcs and check their optimality conditions"""
while SE_LIST:
    head = SE_LIST.pop(0)  # FIFO: always take the oldest entry
    g_node_status[head - 1] = 0
    for tail in g_adjacent_arc_list[head]:
        candidate = node_label_cost[head - 1] + Distance[head - 1, tail - 1]
        if node_label_cost[tail - 1] > candidate:
            node_label_cost[tail - 1] = candidate
            node_predecessor[tail - 1] = head
            if g_node_status[tail - 1] == 0:
                SE_LIST.append(tail)
                g_node_status[tail - 1] = 1
"""generate shortest paths according to predecessor nodes"""
agent_id = 1
o_zone_id = g_node_zone[g_origin]
for destination in g_node_list:
    if g_origin == destination:
        continue
    d_zone_id = g_node_zone[destination]
    if node_label_cost[destination - 1] == Max_label_cost:
        # Unreachable: record a blank path with the "infinite" label.
        g_shortest_path.append([agent_id, o_zone_id, d_zone_id, " ",
                                node_label_cost[destination - 1]])
    else:
        # Walk the predecessor chain back to the origin, prepending node ids.
        to_node = copy.copy(destination)
        path = "%s" % to_node
        while node_predecessor[to_node - 1] != g_origin:
            to_node = node_predecessor[to_node - 1]
            path = "%s;" % to_node + path
        path = "%s;" % g_origin + path
        g_shortest_path.append([agent_id, o_zone_id, d_zone_id, path,
                                node_label_cost[destination - 1]])
        print('from {} to {} the path is {},length is {}'
              .format(g_origin, destination, path, node_label_cost[destination - 1]))
    agent_id += 1
"""put result into csv file"""
# Transfer data into a DataFrame and persist it.
g_shortest_path = np.array(g_shortest_path)
col = ['agent_id', 'o_zone_id', 'd_zone_id', 'node_sequence', 'distance']
file_data = pd.DataFrame(g_shortest_path, index=range(len(g_shortest_path)), columns=col)
file_data.to_csv('agent.csv', index=False)
| 41.931818 | 106 | 0.707046 |
dcc7bb9224b03a7fdab09143ec06101a0af9c212 | 2,152 | py | Python | Day-31/FlashCard.py | Nishi-16-K/100DaysCodeChallenge-Python- | 96df953bbc60c2bf8802cf31ed6c593469521482 | [
"MIT"
] | 1 | 2021-08-29T12:44:23.000Z | 2021-08-29T12:44:23.000Z | Day-31/FlashCard.py | Nishi-16-K/100DaysofCodeChallenge-Python | 96df953bbc60c2bf8802cf31ed6c593469521482 | [
"MIT"
] | null | null | null | Day-31/FlashCard.py | Nishi-16-K/100DaysofCodeChallenge-Python | 96df953bbc60c2bf8802cf31ed6c593469521482 | [
"MIT"
] | null | null | null | from tkinter import *
import pandas
import random
BACKGROUND_COLOR = "#B1DDC6"
current_card = {}
to_learn = {}
# Load the saved learning progress if it exists; otherwise fall back to the
# full word list. BUG FIX: the original read "data/french_words.csv" in BOTH
# branches, so the progress file written by is_known() ("data/words_to.csv")
# was never loaded back.
try:
    data = pandas.read_csv("data/words_to.csv")
except FileNotFoundError:
    original_data = pandas.read_csv("data/french_words.csv")
    to_learn = original_data.to_dict(orient="records")
else:
    to_learn = data.to_dict(orient="records")
def next_card():
    """Show the French side of a new random card and restart the flip timer."""
    global current_card, flip_timer
    # Cancel the pending flip so the new card gets its full 3 seconds.
    window.after_cancel(flip_timer)
    current_card = random.choice(to_learn)
    canvas.itemconfig(card_title, text="French", fill = "black")
    canvas.itemconfig(card_word, text=current_card["French"], fill = "black")
    canvas.itemconfig(card_background, image= card_front_img)
    flip_timer = window.after(3000, func=flip_card)
def flip_card():
    """Reveal the English translation of the current card (card back side)."""
    canvas.itemconfig(card_title, text="English", fill = "white")
    canvas.itemconfig(card_word, text=current_card["English"], fill = "white")
    canvas.itemconfig(card_background, image = card_back_img)
def is_known():
    """Drop the current card from the learning set, persist it, and move on."""
    to_learn.remove(current_card)
    # Save what is left so progress survives restarts.
    pandas.DataFrame(to_learn).to_csv("data/words_to.csv", index=False)
    next_card()
# ---- UI construction (order matters: widgets must exist before next_card runs) ----
window = Tk()
window.title("Nishi's Flashy")
window.config(padx=50,pady=50, bg= BACKGROUND_COLOR)
# Arm the first flip timer so next_card() has something to cancel.
flip_timer = window.after(3000, func=flip_card)
canvas = Canvas(width = 800, height=526)
card_front_img = PhotoImage(file="images/card_front.png")
card_back_img = PhotoImage(file="images/card_back.png")
card_background = canvas.create_image(400, 263,image=card_front_img)
card_title = canvas.create_text(400, 150, text= "Title", font=("Ariel", 40, "italic"))
card_word = canvas.create_text(400, 263, text="word", font=("Arial", 60, "bold"))
canvas.config(bg=BACKGROUND_COLOR, highlightthickness = 0)
canvas.grid(row=0, column=0, columnspan=2)
# "Wrong" button: keep the card in rotation and show the next one.
cross_image = PhotoImage(file="images/wrong.png")
unknown_button = Button(image=cross_image, highlightthickness = 0, command=next_card)
unknown_button.grid(row=1, column=0)
# "Right" button: remove the card from the learning set.
check_image = PhotoImage(file="images/right.png")
known_button = Button(image=check_image, highlightthickness = 0, command=is_known)
known_button.grid(row=1, column=1)
next_card()
window.mainloop()
| 34.709677 | 86 | 0.748606 |
d50d67041b9607f0dc098311df82e24073f1282e | 4,423 | py | Python | huaweicloud-sdk-vpc/huaweicloudsdkvpc/v3/model/page_info.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 64 | 2020-06-12T07:05:07.000Z | 2022-03-30T03:32:50.000Z | huaweicloud-sdk-vpc/huaweicloudsdkvpc/v3/model/page_info.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 11 | 2020-07-06T07:56:54.000Z | 2022-01-11T11:14:40.000Z | huaweicloud-sdk-vpc/huaweicloudsdkvpc/v3/model/page_info.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 24 | 2020-06-08T11:42:13.000Z | 2022-03-04T06:44:08.000Z | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class PageInfo:
    """Pagination metadata returned alongside list responses.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """
    sensitive_list = []
    openapi_types = {
        'previous_marker': 'str',
        'current_count': 'int',
        'next_marker': 'str'
    }
    attribute_map = {
        'previous_marker': 'previous_marker',
        'current_count': 'current_count',
        'next_marker': 'next_marker'
    }
    def __init__(self, previous_marker=None, current_count=None, next_marker=None):
        """PageInfo - a model defined in huaweicloud sdk"""
        self._previous_marker = None
        self._current_count = None
        self._next_marker = None
        self.discriminator = None
        # Assign through the property setters so any future validation applies.
        self.previous_marker = previous_marker
        self.current_count = current_count
        if next_marker is not None:
            self.next_marker = next_marker
    @property
    def previous_marker(self):
        """Marker of the first record on the current page.

        :rtype: str
        """
        return self._previous_marker
    @previous_marker.setter
    def previous_marker(self, previous_marker):
        """Set the marker of the first record on the current page."""
        self._previous_marker = previous_marker
    @property
    def current_count(self):
        """Number of records on the current page.

        :rtype: int
        """
        return self._current_count
    @current_count.setter
    def current_count(self, current_count):
        """Set the number of records on the current page."""
        self._current_count = current_count
    @property
    def next_marker(self):
        """Marker of the last record on the current page; absent on the last page.

        :rtype: str
        """
        return self._next_marker
    @next_marker.setter
    def next_marker(self, next_marker):
        """Set the marker of the last record on the current page."""
        self._next_marker = next_marker
    def to_dict(self):
        """Serialize the model into a plain ``dict``."""
        def _convert(item):
            # Recursively serialize nested model objects.
            return item.to_dict() if hasattr(item, "to_dict") else item
        result = {}
        for name, _ in six.iteritems(self.openapi_types):
            value = getattr(self, name)
            if isinstance(value, list):
                result[name] = [_convert(element) for element in value]
            elif hasattr(value, "to_dict"):
                result[name] = value.to_dict()
            elif isinstance(value, dict):
                result[name] = {key: _convert(val) for key, val in value.items()}
            elif name in self.sensitive_list:
                result[name] = "****"
            else:
                result[name] = value
        return result
    def to_str(self):
        """Return the JSON string representation of the model."""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Two instances are equal when all their attributes match."""
        return isinstance(other, PageInfo) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not self == other
| 26.48503 | 83 | 0.574271 |
44e34702007d7bcbf4c4ebe247c1f4559a51392b | 7,809 | py | Python | decitala/hm/contour.py | Luke-Poeppel/decitala | dc5e26123605606014e32f44bde11a7f85cc93b1 | [
"MIT"
] | 6 | 2021-03-24T13:43:10.000Z | 2021-07-22T08:00:35.000Z | decitala/hm/contour.py | Luke-Poeppel/decitala | dc5e26123605606014e32f44bde11a7f85cc93b1 | [
"MIT"
] | 177 | 2021-03-22T17:17:40.000Z | 2021-09-22T12:56:45.000Z | decitala/hm/contour.py | Luke-Poeppel/decitala | dc5e26123605606014e32f44bde11a7f85cc93b1 | [
"MIT"
] | null | null | null | ####################################################################################################
# File: contour.py
# Purpose: Pitch contour tools for the birdsong transcriptions.
#
# Author: Luke Poeppel
#
# Location: Kent, 2021
####################################################################################################
from .schultz import spc
from .contour_utils import (
_track_extrema,
_recheck_extrema,
_pitch_contour,
_adjacency_and_intervening_checks
)
# Mapping from a normalized pitch contour to the name of the plainchant neume
# it corresponds to (used by contour_to_neume below).
NEUMES = {
	(1, 0): "Clivis",
	(0, 1): "Podatus",
	(0, 1, 2): "Scandicus",
	(2, 1, 0): "Climacus",
	(0, 1, 0): "Torculus",
	(1, 0, 1): "Porrectus"
}
# Morris's Prime Contour Classes (1993, 220-221)
# "Linear Prime Classes" (Schultz 92)
# NOTE: Schultz uses the same linear prime classes to refer to symmetries
# of these classes: e.g. <0 2 1> and <1 0 2> = L.
PRIME_CONTOUR_CLASSES = {
	(0,): "A",
	(0, 0): "B",
	(0, 1): "D",
	(0, 1, 0): "G",
	(0, 2, 1): "L",
	(1, 0, 2, 1): "P",
	(1, 0, 3, 2): "X",
	(1, 3, 0, 2): "Y",
	(1, 0, 2, 0, 1): "12a",
	(1, 0, 3, 0, 2): "12b"
}
class ContourException(Exception):
	"""Raised for invalid contour input or failed contour lookups."""
	pass
def strip_monotonic_pitch_content(pitch_content):
	"""
	The pitch content extracted in the :obj:`decitala.search` module consists of lists of tuples.
	This function strips monotonic pitch content to a single list. If non-monotonic pitch content
	is provided, the function chooses the lowest pitch.

	:param list pitch_content: pitch content of the format returned in
							:obj:`decitala.search.rolling_hash_search`.
	:return: a list of MIDI tones.
	:rtype: list

	>>> pitch_content = [(60,), (61,), (65,)]
	>>> strip_monotonic_pitch_content(pitch_content)
	[60, 61, 65]
	"""
	# ``min`` implements the documented behavior: for monotonic input each
	# tuple holds one element (so ``min`` is the identity), and for chords the
	# lowest pitch is chosen. The previous ``x[0]`` silently took whichever
	# pitch happened to be listed first.
	return [min(x) for x in pitch_content]
def normalize_pitch_content(data, midi_start=60):
	"""
	Normalizes a list of MIDI tones to a starting value.

	:param list data: a list of MIDI tones.
	:param int midi_start: the MIDI starting point to which the data are normalized.
	:return: a list of the pitch content, normalized to the starting value.
	:rtype: list

	>>> normalize_pitch_content(data=[58, 60, 62], midi_start=60)
	[60, 62, 64]
	"""
	# Guard against empty input: the unconditional ``data[0]`` raised IndexError.
	if not data:
		return []
	diff = data[0] - midi_start
	return [x - diff for x in data]
def uds_contour(data):
	"""
	Returns the "up-down-stay" contour (UDS) of a given list of MIDI tones. Normalized
	to start at 0.

	:param list data: a list of MIDI tones.
	:return: a list holding the UDS contour of the given data.
	:rtype: list

	>>> midis = [47, 42, 45, 51, 51, 61, 58]
	>>> uds_contour(midis)
	[0, -1, 1, 1, 0, 1, -1]
	"""
	# The first element is 0 by definition; each later entry compares a tone
	# with its predecessor (1 = up, -1 = down, 0 = stay).
	out = [0]
	for prev, curr in zip(data, data[1:]):
		if curr > prev:
			out.append(1)
		elif curr < prev:
			out.append(-1)
		else:
			out.append(0)
	return out
def pitch_contour(pitch_content, as_str=False):
	"""
	This function returns the contour of given pitch content. It accepts either a list of MIDI
	tones, or the data returned in the :obj:`decitala.search` module. Like
	:obj:`decitala.hm.contour.strip_monotonic_pitch_content`, if non-monotonic pitch content is
	provided, it chooses the lowest pitch.

	:param list pitch_content: pitch content from the output of rolling_search.
	:param bool as_str: whether to return the pitch content as a string (standard format),
						like ``"<0 1 1>"``.
	:return: the contour of the given ``pitch_content``.
	:rtype: list or str

	>>> pitch_content_1 = [(80,), (91,), (78,), (85,)]
	>>> pitch_contour(pitch_content_1)
	[1, 3, 0, 2]
	>>> pitch_content_2 = [80, 84, 84]
	>>> pitch_contour(pitch_content_2, as_str=True)
	'<0 1 1>'
	"""
	# Thin wrapper: the actual computation lives in ``contour_utils._pitch_contour``.
	return _pitch_contour(pitch_content=pitch_content, as_str=as_str)
def contour_to_neume(contour):
	"""
	Oversimplified function for checking the associated neume of a given pitch contour. Only two and
	three onset contours are supported.

	:param contour: A pitch contour (iterable).
	:return: The associated neume.
	:rtype: str
	:raises ContourException: if the contour has more than three elements or no
							matching neume exists.

	>>> contour = [1, 0, 1]
	>>> contour_to_neume(contour)
	'Porrectus'
	"""
	# The original ``assert`` disappeared under ``python -O`` and carried a
	# ContourException *instance* as its message (never raised); raise explicitly.
	if len(contour) > 3:
		raise ContourException("Contour input must have at most three elements.")
	try:
		return NEUMES[tuple(contour)]
	except KeyError:
		raise ContourException(f"The contour {contour} was not found in the given set.")
def contour_class(
	contour,
	allow_symmetries=False
):
	"""
	Returns the associated pitch contour class (a letter) from Morris (1993, 220-221)
	of a contour.

	:param contour: a pitch contour (iterable).
	:param bool allow_symmetries: whether to allow permutations of the given contour to be found.
								Default is ``False``. Note that ``X`` and ``Y`` are weird cases
								for this symmetry. May currently fail (don't understand it).
	:rtype: str
	:raises ContourException: if the contour is not prime.

	>>> contour_class((1, 0, 3, 2))
	'X'
	>>> contour_class((0, 1, 0), allow_symmetries=False)
	'G'
	>>> contour_class((0, 0, 1), allow_symmetries=True)
	'G'
	"""
	try:
		if not(allow_symmetries):
			return PRIME_CONTOUR_CLASSES[contour]
		elif contour in {(1, 0, 3, 2), (1, 3, 0, 2)}:  # IDK about this case.
			return PRIME_CONTOUR_CLASSES[contour]
		else:
			# Symmetry match: same length and same number of distinct values.
			match = None
			for key in PRIME_CONTOUR_CLASSES.keys():
				if len(key) == len(contour) and len(set(key)) == len(set(contour)):
					match = PRIME_CONTOUR_CLASSES[key]
					break
			return match
	except KeyError:
		# BUG FIX: the exception was constructed but never raised, so a failed
		# lookup silently returned ``None``.
		raise ContourException(f"The contour {contour} is not prime.")
####################################################################################################
# Contour reduction tools.
# Implementation of Morris contour reduction algorithm (1993).
def _morris_reduce(contour, depth):
	"""
	Steps 4-7 of the contour reduction algorithm.

	``contour`` is a list of ``(value, flag)`` pairs; unflagged entries are
	pruned and the depth counter is advanced by one.
	"""
	contour = [x for x in contour if x[1]] # Step 4
	depth += 1 # Step 5
	# Step 6. Flag maxima and *delete* repetitions.
	# NOTE(review): the helpers appear to mutate ``contour`` in place (they are
	# defined in ``contour_utils``) -- confirm before refactoring.
	_recheck_extrema(contour=contour, mode="max")
	_adjacency_and_intervening_checks(contour, mode="max", algorithm="morris")
	# Step 7. Flag minima and *delete* repetitions.
	_recheck_extrema(contour=contour, mode="min")
	_adjacency_and_intervening_checks(contour, mode="min", algorithm="morris")
	return contour, depth
def prime_contour(contour):
	"""
	Implementation of Robert Morris' Contour-Reduction algorithm (Morris, 1993). "The algorithm prunes
	pitches from a contour until it is reduced to a prime." (Schultz)

	:param contour: A pitch contour (iterable).
	:return: the prime contour of the given pitch contour, along with the depth of the reduction.
	:rtype: tuple

	>>> contour_a = [0, 1]
	>>> prime_contour(contour_a)
	([0, 1], 0)
	>>> contour_b = [0, 4, 3, 2, 5, 5, 1]
	>>> prime_contour(contour_b)[0]
	[0, 2, 1]
	"""
	depth = 0
	# If the segment is of length <= 2, it is prime by definition.
	if len(contour) <= 2:
		return (pitch_contour(contour), depth)
	# If all the values are extremas, it is already prime.
	# (Renamed from ``prime_contour``: the old local shadowed this function's
	# own name inside its body.)
	reduced = _track_extrema(contour)
	initial_flags = [x[1] for x in reduced]
	if all(x for x in initial_flags):
		return (pitch_contour(contour), depth)
	still_unflagged_values = True
	while still_unflagged_values:
		reduced, depth = _morris_reduce(reduced, depth)
		if all(x[1] for x in reduced):  # Step 3
			still_unflagged_values = False
	# Remove flags.
	reduced = [x[0] for x in reduced]
	return (pitch_contour(reduced), depth)
def schultz_prime_contour(contour):
	"""
	Implementation of Schultz's (2008) modification of Morris' contour-reduction algorithm.
	See the Schultz module for the implementation. (It was complicated enough to warrant its
	own module...)

	:param contour: A pitch contour (iterable).
	:return: the Schultz Prime Contour (SPC) of the given contour, along with the depth of the
			reduction.
	:rtype: tuple

	>>> nightingale_5 = [2, 5, 3, 1, 4, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0]
	>>> schultz_prime_contour(nightingale_5)
	([1, 2, 0], 3)
	"""
	# BUG FIX (doc): the doctest previously invoked ``spc`` directly instead of
	# the function it documents, so it never exercised this wrapper.
	return spc(contour)
9c3a8f5a65502481cfed8f9e37c869d8c274d48c | 1,984 | py | Python | hop/subscribe.py | lpsinger/hop-client | d9cc7a7257a06dcaf192ef06f7ec360912aefe8e | [
"BSD-3-Clause"
] | 9 | 2020-05-13T20:26:33.000Z | 2022-02-02T22:31:24.000Z | hop/subscribe.py | lpsinger/hop-client | d9cc7a7257a06dcaf192ef06f7ec360912aefe8e | [
"BSD-3-Clause"
] | 108 | 2020-04-10T19:18:39.000Z | 2022-03-07T19:57:49.000Z | hop/subscribe.py | lpsinger/hop-client | d9cc7a7257a06dcaf192ef06f7ec360912aefe8e | [
"BSD-3-Clause"
] | 3 | 2021-03-08T16:44:53.000Z | 2022-02-02T22:30:50.000Z | import json
import logging
import sys
from . import cli
from . import io
from . import models
logger = logging.getLogger("hop")
def print_message(message, json_dump=False):
"""Print the content of a message.
Args:
message: message to print
json_dump: boolean indicating whether to print as raw json
Returns:
None
"""
if json_dump:
if isinstance(message, models.MessageModel):
message = json.dumps(message.asdict())
else:
message = json.dumps(message)
print(message, file=sys.stdout, flush=True)
def _add_parser_args(parser):
cli.add_client_opts(parser)
cli.add_logging_opts(parser)
# consumer options
parser.add_argument(
"-s",
"--start-at",
choices=io.StartPosition.__members__,
default=str(io.StartPosition.LATEST).upper(),
help="Set the message offset offset to start at. Default: LATEST.",
)
parser.add_argument(
"-e",
"--until-eos",
action="store_true",
help="If set, only subscribe until EOS is received (end of stream). "
"Otherwise, listen to messages indefinitely.",
)
parser.add_argument(
"-g", "--group-id",
default=None,
help="Consumer group ID. If unset, a random ID will be generated."
)
parser.add_argument(
"-j", "--json", help="Request message output as raw json", action="store_true",
)
parser.add_argument(
"-t", "--test", help="Process test messages instead of ignoring them.", action="store_true",
)
def _main(args):
    """Subscribe to messages.

    Opens a read-mode stream using the parsed CLI arguments and prints each
    received message until the stream ends (or indefinitely, per --until-eos).
    """
    cli.set_up_logger(args)

    # args.start_at holds the enum member name (e.g. "LATEST"); map it back
    # to the StartPosition enum value.
    start_at = io.StartPosition[args.start_at]
    stream = io.Stream(auth=(not args.no_auth), start_at=start_at, until_eos=args.until_eos)

    # ignoretest is the inverse of --test: by default test messages are skipped.
    with stream.open(args.url, "r", group_id=args.group_id, ignoretest=(not args.test)) as s:
        for message in s:
            print_message(message, args.json)
72700fb9663d318c52738b2b555a4a99415b5a4f | 6,385 | py | Python | ros/src/tl_detector/dashboard.py | aj2020tls/CarND-Capstone | 31b7cacb6da32d2008d14851f34c3e9ba6b94041 | [
"MIT"
] | null | null | null | ros/src/tl_detector/dashboard.py | aj2020tls/CarND-Capstone | 31b7cacb6da32d2008d14851f34c3e9ba6b94041 | [
"MIT"
] | 2 | 2018-05-09T13:17:48.000Z | 2018-06-05T16:57:58.000Z | ros/src/tl_detector/dashboard.py | aj2020tls/CarND-Capstone | 31b7cacb6da32d2008d14851f34c3e9ba6b94041 | [
"MIT"
] | 4 | 2018-05-09T13:07:28.000Z | 2018-06-05T06:35:38.000Z | #!/usr/bin/env python
import numpy as np
import rospy
from std_msgs.msg import Int32
from styx_msgs.msg import Lane, Waypoint
from geometry_msgs.msg import PoseStamped
from styx_msgs.msg import TrafficLightArray, TrafficLight
from sensor_msgs.msg import Image as msgImage
from cv_bridge import CvBridge
import Tkinter
import Image, ImageTk
import tkMessageBox
import cv2
from Tkinter import NW
import yaml
class Dashboard(object):
    """Tk-based debug dashboard for the traffic-light detector.

    Renders the track outline, stop-line positions, the ego pose and the
    camera feed from ROS topics onto a single 800x400 canvas: the camera
    image occupies the left half, the map occupies a 300x300 area starting
    at canvas x=450.
    """

    def __init__(self):
        # Latest /current_pose message; None until the first callback fires.
        self.pose = None
        self.stopline_wp_idx = -1
        self.waypoints_2d = None
        self.waypoint_tree = None
        # Ego pose projected into canvas coordinates (x, y).
        self.transformed_pose = None
        self.has_image = False
        self.camera_image = None
        # Converts ROS image messages to OpenCV arrays.
        self.bridge = CvBridge()
        # Keep a reference so Tk does not garbage-collect the PhotoImage.
        self.image_to_display = None
        # World-coordinate bounding box of the track, used to scale onto the
        # canvas; placeholder values until /base_waypoints arrives.
        self.min_x = 0
        self.min_y = 0
        self.max_x = 1
        self.max_y = 1
        # Counts /current_pose messages, used to throttle redrawing.
        self.message_index = 0
        config_string = rospy.get_param("/traffic_light_config")
        # NOTE(review): yaml.load without an explicit Loader is deprecated and
        # unsafe on untrusted input — consider yaml.safe_load.
        self.config = yaml.load(config_string)
        self.stop_line_positions = self.config['stop_line_positions']
        # Stop-line positions in canvas coordinates; filled by waypoints_cb.
        self.slp_transformed = None
        #self.base_waypoints = None
        rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)
        rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
        sub6 = rospy.Subscriber('/image_color', msgImage, self.image_cb)
        #rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)
        top = Tkinter.Tk()
        self.C = Tkinter.Canvas(top, bg="white", height=400, width=800)
        self.C.pack()
        # mainloop() blocks until the window is closed; rospy.spin() only runs
        # afterwards, so while the GUI is up the ROS callbacks are serviced
        # from rospy's background threads.
        top.mainloop()
        rospy.spin()

    def pose_cb(self, msg):
        """Cache the ego pose and redraw it (plus stop lines) on the canvas."""
        self.pose = msg
        self.message_index += 1
        # NOTE(review): this condition is truthy for 19 of every 20 messages;
        # if the intent was "draw every 20th message" it should probably be
        # `== 0` — confirm before relying on it.
        if (self.message_index % 20):
            if (self.slp_transformed):
                # Mark every stop line with a yellow dot.
                for light in self.slp_transformed:
                    self.C.create_oval(light[0]-5, light[1]-5,
                                       light[0]+5, light[1]+5, fill='yellow')
            # Scale the world position into the 300x300 map area at x>=450.
            self.transformed_pose = (450 + 300 * (self.pose.pose.position.x - self.min_x) / (self.max_x - self.min_x),
                                     10 + 300 * (self.pose.pose.position.y - self.min_y) / (self.max_y - self.min_y))
            # Ego vehicle shown as a red dot.
            self.C.create_oval(self.transformed_pose[0]-5, self.transformed_pose[1]-5,
                               self.transformed_pose[0]+5, self.transformed_pose[1]+5, fill='red')

    def waypoints_cb(self, waypoints):
        """Store the static track waypoints, draw the track outline, and
        precompute canvas positions for the stop lines."""
        self.base_waypoints = waypoints
        # Track bounding box in world coordinates, used for canvas scaling.
        self.min_x = min([w.pose.pose.position.x for w in self.base_waypoints.waypoints])
        self.min_y = min([w.pose.pose.position.y for w in self.base_waypoints.waypoints])
        self.max_x = max([w.pose.pose.position.x for w in self.base_waypoints.waypoints])
        self.max_y = max([w.pose.pose.position.y for w in self.base_waypoints.waypoints])
        # All waypoints mapped into canvas coordinates.
        self.transformed = [(450 + 300 * (w.pose.pose.position.x - self.min_x) / (self.max_x - self.min_x),
                             10 + 300 * (w.pose.pose.position.y - self.min_y) / (self.max_y - self.min_y)) for w in self.base_waypoints.waypoints]
        # Flatten [(x, y), ...] into [x, y, x, y, ...] as create_polygon expects.
        self.C.create_polygon([e for l in self.transformed for e in l], fill='', outline='black', width = 10)
        self.slp_transformed = [(450 + 300 * (w[0] - self.min_x) / (self.max_x - self.min_x),
                                 10 + 300 * (w[1] - self.min_y) / (self.max_y - self.min_y)) for w in self.stop_line_positions]

    def traffic_cb(self, msg):
        """Cache the simulator's ground-truth traffic light states."""
        self.lights = msg.lights

    def image_cb(self, msg):
        """Convert the incoming camera frame and blit it onto the canvas."""
        self.has_image = True
        self.camera_image = msg
        cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")
        #x0, y0, x1, y1 = self.project_to_image_plane(light.pose.pose.position)
        #if x0 == x1 or x0 < 0 or x1 > cv_image.shape[1] or \
        #   y0 == y1 or y0 < 0 or y1 > cv_image.shape[0]:
        #    return TrafficLight.UNKNOWN
        #light_image = cv_image[y0:y1, x0:x1, :]
        # Halve the frame so it fits the canvas, then swap BGR -> RGB for PIL.
        small = cv2.resize(cv_image, (0,0), fx=0.5, fy=0.5)
        b,g,r = cv2.split(small)
        small = cv2.merge((r,g,b))
        im = Image.fromarray(small)
        self.image_to_display = ImageTk.PhotoImage(image=im)
        self.C.create_image((0, 0), image = self.image_to_display, anchor=NW)

    def project_to_image_plane(self, point_in_world):
        """Project a 3D world point into camera pixels; returns a bounding
        box (x0, y0, x1, y1), or (0, 0, 0, 0) if the transform lookup fails.

        NOTE(review): this method references `tf`, `np` and `self.listener`,
        none of which are imported/initialized in this file — calling it would
        raise NameError/AttributeError. It appears to be dead code carried
        over from tl_detector; confirm before use.
        """
        # reference: https://discussions.udacity.com/t/focal-length-wrong/358568/23
        fx = 2574
        fy = 2744
        image_width = self.config['camera_info']['image_width']
        image_height = self.config['camera_info']['image_height']
        # Empirically tuned principal-point offsets (see reference thread).
        x_offset = (image_width / 2) - 30
        y_offset = image_height + 50
        corner_offset = 1.5
        try:
            now = rospy.Time.now()
            self.listener.waitForTransform(
                "/base_link", "/world", self.pose.header.stamp, rospy.Duration(1.0))
            transT, rotT = self.listener.lookupTransform(
                "/base_link", "/world", self.pose.header.stamp)
        except (tf.Exception, tf.LookupException, tf.ConnectivityException):
            rospy.logerr("Failed to find camera to map transform")
            return 0, 0, 0, 0
        # Homogeneous transform world -> vehicle frame.
        RT = np.mat(self.listener.fromTranslationRotation(transT, rotT))
        point_3d = np.mat([[point_in_world.x],
                           [point_in_world.y],
                           [point_in_world.z],
                           [1.0]])
        point_3d_vehicle = (RT * point_3d)[:-1, :]
        camera_height_offset = 1.1
        # Axis remap: vehicle (x fwd, y left, z up) -> camera (x right, y down, z fwd).
        camera_x = -point_3d_vehicle[1]
        camera_y = -(point_3d_vehicle[2] - camera_height_offset)
        camera_z = point_3d_vehicle[0]
        # Pinhole projection of the box corners around the point.
        x0 = int((camera_x - corner_offset) * fx / camera_z) + x_offset
        y0 = int((camera_y - corner_offset) * fy / camera_z) + y_offset
        x1 = int((camera_x + corner_offset) * fx / camera_z) + x_offset
        y1 = int((camera_y + corner_offset) * fy / camera_z) + y_offset
        return x0, y0, x1, y1
if __name__ == '__main__':
    try:
        # Register this process as a ROS node before constructing the GUI;
        # Dashboard() blocks inside Tk's mainloop until the window closes.
        rospy.init_node("dashboard")
        Dashboard()
    except rospy.ROSInterruptException:
        rospy.logerr('Could not start dashboard node.')
| 36.907514 | 149 | 0.592796 |
11b690d91e2ef552d0aa52577a98229db58e3f94 | 4,158 | py | Python | examples/model_selection/grid_search_text_feature_extraction.py | ZiBuDo/MyCareer | 41a686f199e5e88ff4d863c54304b8128bd87580 | [
"CC0-1.0"
] | null | null | null | examples/model_selection/grid_search_text_feature_extraction.py | ZiBuDo/MyCareer | 41a686f199e5e88ff4d863c54304b8128bd87580 | [
"CC0-1.0"
] | null | null | null | examples/model_selection/grid_search_text_feature_extraction.py | ZiBuDo/MyCareer | 41a686f199e5e88ff4d863c54304b8128bd87580 | [
"CC0-1.0"
] | null | null | null | """
==========================================================
Sample pipeline for text feature extraction and evaluation
==========================================================
The dataset used in this example is the 20 newsgroups dataset which will be
automatically downloaded and then cached and reused for the document
classification example.
You can adjust the number of categories by giving their names to the dataset
loader or setting them to None to get the 20 of them.
Here is a sample output of a run on a quad-core machine::
Loading 20 newsgroups dataset for categories:
['alt.atheism', 'talk.religion.misc']
1427 documents
2 categories
Performing grid search...
pipeline: ['vect', 'tfidf', 'clf']
parameters:
{'clf__alpha': (1.0000000000000001e-05, 9.9999999999999995e-07),
'clf__n_iter': (10, 50, 80),
'clf__penalty': ('l2', 'elasticnet'),
'tfidf__use_idf': (True, False),
'vect__max_n': (1, 2),
'vect__max_df': (0.5, 0.75, 1.0),
'vect__max_features': (None, 5000, 10000, 50000)}
done in 1737.030s
Best score: 0.940
Best parameters set:
clf__alpha: 9.9999999999999995e-07
clf__n_iter: 50
clf__penalty: 'elasticnet'
tfidf__use_idf: True
vect__max_n: 2
vect__max_df: 0.75
vect__max_features: 50000
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Mathieu Blondel <mathieu@mblondel.org>
# License: BSD 3 clause
from __future__ import print_function
from pprint import pprint
from time import time
import logging
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
data = fetch_20newsgroups(subset='train', categories=categories)
print("%d documents" % len(data.filenames))
print("%d categories" % len(data.target_names))
print()
###############################################################################
# define a pipeline combining a text feature extractor with a simple
# classifier
pipeline = Pipeline([
('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier()),
])
# uncommenting more parameters will give better exploring power but will
# increase processing time in a combinatorial way
parameters = {
'vect__max_df': (0.5, 0.75, 1.0),
#'vect__max_features': (None, 5000, 10000, 50000),
'vect__ngram_range': ((1, 1), (1, 2)), # unigrams or bigrams
#'tfidf__use_idf': (True, False),
#'tfidf__norm': ('l1', 'l2'),
'clf__alpha': (0.00001, 0.000001),
'clf__penalty': ('l2', 'elasticnet'),
#'clf__n_iter': (10, 50, 80),
}
if __name__ == "__main__":
# multiprocessing requires the fork to happen in a __main__ protected
# block
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)
print("Performing grid search...")
print("pipeline:", [name for name, _ in pipeline.steps])
print("parameters:")
pprint(parameters)
t0 = time()
grid_search.fit(data.data, data.target)
print("done in %0.3fs" % (time() - t0))
print()
print("Best score: %0.3f" % grid_search.best_score_)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
| 31.984615 | 79 | 0.660654 |
a5fb1075b4a45be1c719b027a7635c5da0f5b577 | 2,404 | py | Python | data.py | Seth-Scott/quiz_game | 01fa0a33d114432df6f6e12a2da88328ba0572c3 | [
"MIT"
] | null | null | null | data.py | Seth-Scott/quiz_game | 01fa0a33d114432df6f6e12a2da88328ba0572c3 | [
"MIT"
] | null | null | null | data.py | Seth-Scott/quiz_game | 01fa0a33d114432df6f6e12a2da88328ba0572c3 | [
"MIT"
] | null | null | null | question_data = [
{"category": "Science: Computers", "type": "boolean", "difficulty": "easy",
"question": "Linus Torvalds created Linux and Git.", "correct_answer": "True", "incorrect_answers": ["False"]},
{"category": "Science: Computers", "type": "boolean", "difficulty": "easy",
"question": "The logo for Snapchat is a Bell.", "correct_answer": "False", "incorrect_answers": ["True"]},
{"category": "Science: Computers", "type": "boolean", "difficulty": "easy",
"question": "Pointers were not used in the original C programming language; they were added later on in C++.",
"correct_answer": "False", "incorrect_answers": ["True"]},
{"category": "Science: Computers", "type": "boolean", "difficulty": "easy",
"question": "RAM stands for Random Access Memory.", "correct_answer": "True", "incorrect_answers": ["False"]},
{"category": "Science: Computers", "type": "boolean", "difficulty": "easy",
"question": "Ada Lovelace is often considered the first computer programmer.", "correct_answer": "True",
"incorrect_answers": ["False"]}, {"category": "Science: Computers", "type": "boolean", "difficulty": "easy",
"question": ""HTML" stands for Hypertext Markup Language.",
"correct_answer": "True", "incorrect_answers": ["False"]},
{"category": "Science: Computers", "type": "boolean", "difficulty": "easy",
"question": "In most programming languages, the operator ++ is equivalent to the statement "+= 1".",
"correct_answer": "True", "incorrect_answers": ["False"]},
{"category": "Science: Computers", "type": "boolean", "difficulty": "easy",
"question": "Time on Computers is measured via the EPOX System.", "correct_answer": "False",
"incorrect_answers": ["True"]}, {"category": "Science: Computers", "type": "boolean", "difficulty": "easy",
"question": "The Windows ME operating system was released in the year 2000.",
"correct_answer": "True", "incorrect_answers": ["False"]},
{"category": "Science: Computers", "type": "boolean", "difficulty": "easy",
"question": "The Python programming language gets its name from the British comedy group "Monty Python."",
"correct_answer": "True", "incorrect_answers": ["False"]}]
| 89.037037 | 121 | 0.619384 |
9b568bcb7588be6f3cc066e7d5fbc2d7a50ed8bf | 1,450 | py | Python | MiGRIDS/Analyzer/PerformanceAnalyzers/getPrimaryREContribution.py | mmuellerstoffels/GBSTools | aebd8aa6667a2284aaa16424f9b9d22ca3a2a375 | [
"MIT"
] | 8 | 2019-02-18T14:18:55.000Z | 2022-03-04T12:34:24.000Z | MiGRIDS/Analyzer/PerformanceAnalyzers/getPrimaryREContribution.py | mmuellerstoffels/GBSTools | aebd8aa6667a2284aaa16424f9b9d22ca3a2a375 | [
"MIT"
] | 3 | 2019-02-13T09:42:08.000Z | 2019-05-10T16:59:02.000Z | MiGRIDS/Analyzer/PerformanceAnalyzers/getPrimaryREContribution.py | acep-uaf/GBSTools | aebd8aa6667a2284aaa16424f9b9d22ca3a2a375 | [
"MIT"
] | 3 | 2019-06-10T19:49:22.000Z | 2021-05-08T08:42:57.000Z | # Project: GBS Tool
# Author: Dr. Marc Mueller-Stoffels, marc@denamics.com, denamics GmbH
# Date: May 8, 2018
# License: MIT License (see LICENSE file of this package for more information)
def getPrimaryREContribution(time, firmLoadP, firmGenP, varGenP):
    '''
    Returns the fraction 'Renewable kWh/Total Firm kWh', i.e., the share of
    the firm demand that was served by variable (renewable) generation.
    :param time: [Series] time vector in seconds
    :param firmLoadP: [Series] firm load real power vector
    :param firmGenP: [Series] firm generation (load following) real power vector
    :param varGenP: [Series] variable generation (non-load following) real power vector
    :return: renewableContribution: [float] fraction of renewable contribution to total firm demand.
    '''
    # Demand left over after the firm (load-following) generators have run.
    unmetP = firmLoadP - firmGenP
    # Variable generation only counts toward the demand it could actually
    # serve: cap it element-wise at the unmet load.
    contribP = unmetP.where(varGenP > unmetP, varGenP)
    # Elapsed time (hours) since the first sample, used to turn power into energy.
    elapsedHours = (time - time[0]) / (60 * 60)
    firmLoadE = (firmLoadP * elapsedHours).sum()
    reE = (contribP * elapsedHours).sum()
    # Fraction of total firm energy met by renewables.
    return reE / firmLoadE
dc3e91b6326909a3a07b4ef17050d08cbe9d0fed | 1,955 | py | Python | FWCore/Integration/test/testDropOnInputRead2001_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | FWCore/Integration/test/testDropOnInputRead2001_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | FWCore/Integration/test/testDropOnInputRead2001_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST3")
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'file:testDropOnInput2001.root'
),
inputCommands = cms.untracked.vstring(
'keep *',
'drop *_prodA_*_*',
'drop *_prodD_*_*',
'drop *_prodF_*_*'
),
dropDescendantsOfDroppedBranches = cms.untracked.bool(True)
)
process.a1 = cms.EDAnalyzer("TestFindProduct",
inputTags = cms.untracked.VInputTag(
cms.InputTag("prodC"),
cms.InputTag("prodB")
),
inputTagsNotFound = cms.untracked.VInputTag(
cms.InputTag("prodA"),
cms.InputTag("prodD"),
cms.InputTag("prodE"),
cms.InputTag("prodF"),
cms.InputTag("prodG")
)
)
process.test1 = cms.EDAnalyzer("TestParentage",
inputTag = cms.InputTag("A101"),
expectedAncestors = cms.vstring("K100")
)
process.test2 = cms.EDAnalyzer("TestParentage",
inputTag = cms.InputTag("K100"),
expectedAncestors = cms.vstring()
)
process.test3 = cms.EDAnalyzer("TestParentage",
inputTag = cms.InputTag("K102"),
expectedAncestors = cms.vstring("K100", "NK101")
)
process.test4 = cms.EDAnalyzer("TestParentage",
inputTag = cms.InputTag("K103"),
expectedAncestors = cms.vstring("K100", "NK101")
)
process.test5 = cms.EDAnalyzer("TestParentage",
inputTag = cms.InputTag("K104"),
expectedAncestors = cms.vstring("K100", "NK101")
)
process.path = cms.Path(process.a1 *
process.test1 *
process.test2 *
process.test3 *
process.test4 *
process.test5
)
| 30.546875 | 79 | 0.533504 |
5d70f60d02c2b9fd2e8f72a85f67ec7e093033e9 | 9,155 | py | Python | allennlp/predictors/semantic_role_labeler.py | justindujardin/allennlp | c4559f3751775aa8bc018db417edc119d29d8051 | [
"Apache-2.0"
] | 2 | 2021-04-27T19:56:28.000Z | 2021-08-19T05:34:37.000Z | allennlp/predictors/semantic_role_labeler.py | justindujardin/allennlp | c4559f3751775aa8bc018db417edc119d29d8051 | [
"Apache-2.0"
] | 5 | 2021-05-03T14:40:33.000Z | 2021-05-03T14:40:34.000Z | allennlp/predictors/semantic_role_labeler.py | justindujardin/allennlp | c4559f3751775aa8bc018db417edc119d29d8051 | [
"Apache-2.0"
] | 1 | 2020-03-01T13:01:34.000Z | 2020-03-01T13:01:34.000Z | from typing import List
from overrides import overrides
from spacy.tokens import Doc
from allennlp.common.util import JsonDict, sanitize, group_by_count
from allennlp.data import DatasetReader, Instance
from allennlp.data.tokenizers.spacy_tokenizer import SpacyTokenizer
from allennlp.models import Model
from allennlp.predictors.predictor import Predictor
@Predictor.register("semantic-role-labeling")
class SemanticRoleLabelerPredictor(Predictor):
    """
    Predictor for the [`SemanticRoleLabeler`](../models/semantic_role_labeler.md) model.
    """

    def __init__(
        self, model: Model, dataset_reader: DatasetReader, language: str = "en_core_web_sm"
    ) -> None:
        super().__init__(model, dataset_reader)
        # pos_tags=True is required: verbs are located via token.pos_ in
        # tokens_to_instances below.
        self._tokenizer = SpacyTokenizer(language=language, pos_tags=True)

    def predict(self, sentence: str) -> JsonDict:
        """
        Predicts the semantic roles of the supplied sentence and returns a dictionary
        with the results.

        .. code-block:: js

            {"words": [...],
             "verbs": [
                {"verb": "...", "description": "...", "tags": [...]},
                ...
                {"verb": "...", "description": "...", "tags": [...]},
            ]}

        # Parameters

        sentence, `str`
            The sentence to parse via semantic role labeling.

        # Returns

        A dictionary representation of the semantic roles in the sentence.
        """
        return self.predict_json({"sentence": sentence})

    def predict_tokenized(self, tokenized_sentence: List[str]) -> JsonDict:
        """
        Predicts the semantic roles of the supplied sentence tokens and returns a dictionary
        with the results.

        # Parameters

        tokenized_sentence, `List[str]`
            The sentence tokens to parse via semantic role labeling.

        # Returns

        A dictionary representation of the semantic roles in the sentence.
        """
        # Build a spacy Doc from the pre-tokenized words, then run the
        # remaining pipeline components (tagger etc.) over it by hand.
        spacy_doc = Doc(self._tokenizer.spacy.vocab, words=tokenized_sentence)
        for pipe in filter(None, self._tokenizer.spacy.pipeline):
            pipe[1](spacy_doc)

        tokens = [token for token in spacy_doc]
        instances = self.tokens_to_instances(tokens)

        if not instances:
            # No verbs found: nothing to label.
            return sanitize({"verbs": [], "words": tokens})

        return self.predict_instances(instances)

    @staticmethod
    def make_srl_string(words: List[str], tags: List[str]) -> str:
        """Render a BIO tag sequence as a human-readable frame string, e.g.
        '[ARG0: John] [V: ate] an apple' — contiguous chunks are bracketed."""
        frame = []
        chunk = []

        for (token, tag) in zip(words, tags):
            if tag.startswith("I-"):
                # Continue the current chunk.
                chunk.append(token)
            else:
                # Close out any open chunk before starting a new one.
                if chunk:
                    frame.append("[" + " ".join(chunk) + "]")
                    chunk = []

                if tag.startswith("B-"):
                    chunk.append(tag[2:] + ": " + token)
                elif tag == "O":
                    frame.append(token)

        if chunk:
            frame.append("[" + " ".join(chunk) + "]")

        return " ".join(frame)

    @overrides
    def _json_to_instance(self, json_dict: JsonDict):
        # Intentionally unimplemented: one JSON input maps to *several*
        # instances (one per verb), so the standard single-instance hook
        # does not apply. See _sentence_to_srl_instances.
        raise NotImplementedError("The SRL model uses a different API for creating instances.")

    def tokens_to_instances(self, tokens):
        """Create one SRL instance per verb token found in *tokens*."""
        words = [token.text for token in tokens]
        instances: List[Instance] = []
        for i, word in enumerate(tokens):
            if word.pos_ == "VERB":
                # One-hot indicator marking which token is the predicate.
                verb_labels = [0 for _ in words]
                verb_labels[i] = 1
                instance = self._dataset_reader.text_to_instance(tokens, verb_labels)
                instances.append(instance)
        return instances

    def _sentence_to_srl_instances(self, json_dict: JsonDict) -> List[Instance]:
        """
        The SRL model has a slightly different API from other models, as the model is run
        forward for every verb in the sentence. This means that for a single sentence, we need
        to generate a `List[Instance]`, where the length of this list corresponds to the number
        of verbs in the sentence. Additionally, all of these verbs share the same return dictionary
        after being passed through the model (as really we care about all the frames of the sentence
        together, rather than separately).

        # Parameters

        json_dict : `JsonDict`, required.
            JSON that looks like `{"sentence": "..."}`.

        # Returns

        instances : `List[Instance]`
            One instance per verb.
        """
        sentence = json_dict["sentence"]
        tokens = self._tokenizer.tokenize(sentence)
        return self.tokens_to_instances(tokens)

    @overrides
    def predict_batch_json(self, inputs: List[JsonDict]) -> List[JsonDict]:
        """
        Expects JSON that looks like `[{"sentence": "..."}, {"sentence": "..."}, ...]`
        and returns JSON that looks like

        .. code-block:: js

            [
                {"words": [...],
                 "verbs": [
                    {"verb": "...", "description": "...", "tags": [...]},
                    ...
                    {"verb": "...", "description": "...", "tags": [...]},
                ]},
                {"words": [...],
                 "verbs": [
                    {"verb": "...", "description": "...", "tags": [...]},
                    ...
                    {"verb": "...", "description": "...", "tags": [...]},
                ]}
            ]
        """
        # For SRL, we have more instances than sentences, but the user specified
        # a batch size with respect to the number of sentences passed, so we respect
        # that here by taking the batch size which we use to be the number of sentences
        # we are given.
        batch_size = len(inputs)
        instances_per_sentence = [self._sentence_to_srl_instances(json) for json in inputs]

        flattened_instances = [
            instance
            for sentence_instances in instances_per_sentence
            for instance in sentence_instances
        ]

        if not flattened_instances:
            # No verbs anywhere in the batch: return empty frames per input.
            return sanitize(
                [{"verbs": [], "words": self._tokenizer.tokenize(x["sentence"])} for x in inputs]
            )

        # Make the instances into batches and check the last batch for
        # padded elements as the number of instances might not be perfectly
        # divisible by the batch size.
        batched_instances = group_by_count(flattened_instances, batch_size, None)
        batched_instances[-1] = [
            instance for instance in batched_instances[-1] if instance is not None
        ]
        # Run the model on the batches.
        outputs = []
        for batch in batched_instances:
            outputs.extend(self._model.forward_on_instances(batch))

        # Re-associate the flat model outputs with their source sentences:
        # outputs are in flattened order, verbs_per_sentence tells us how many
        # consecutive outputs belong to each input sentence.
        verbs_per_sentence = [len(sent) for sent in instances_per_sentence]
        return_dicts: List[JsonDict] = [{"verbs": []} for x in inputs]

        output_index = 0
        for sentence_index, verb_count in enumerate(verbs_per_sentence):
            if verb_count == 0:
                # We didn't run any predictions for sentences with no verbs,
                # so we don't have a way to extract the original sentence.
                # Here we just tokenize the input again.
                original_text = self._tokenizer.tokenize(inputs[sentence_index]["sentence"])
                return_dicts[sentence_index]["words"] = original_text
                continue

            for _ in range(verb_count):
                output = outputs[output_index]
                words = output["words"]
                tags = output["tags"]
                description = self.make_srl_string(words, tags)
                return_dicts[sentence_index]["words"] = words
                return_dicts[sentence_index]["verbs"].append(
                    {"verb": output["verb"], "description": description, "tags": tags}
                )
                output_index += 1

        return sanitize(return_dicts)

    def predict_instances(self, instances: List[Instance]) -> JsonDict:
        """Run the model on pre-built instances (one per verb) and collate the
        per-verb frames into a single result dictionary."""
        outputs = self._model.forward_on_instances(instances)

        results = {"verbs": [], "words": outputs[0]["words"]}
        for output in outputs:
            tags = output["tags"]
            description = self.make_srl_string(output["words"], tags)
            results["verbs"].append(
                {"verb": output["verb"], "description": description, "tags": tags}
            )

        return sanitize(results)

    @overrides
    def predict_json(self, inputs: JsonDict) -> JsonDict:
        """
        Expects JSON that looks like `{"sentence": "..."}`
        and returns JSON that looks like

        .. code-block:: js

            {"words": [...],
             "verbs": [
                {"verb": "...", "description": "...", "tags": [...]},
                ...
                {"verb": "...", "description": "...", "tags": [...]},
            ]}
        """
        instances = self._sentence_to_srl_instances(inputs)

        if not instances:
            return sanitize({"verbs": [], "words": self._tokenizer.tokenize(inputs["sentence"])})

        return self.predict_instances(instances)
| 36.62 | 100 | 0.566029 |
47f70adc8d023abced9af78b56c196206ecdbc40 | 3,208 | py | Python | models/conta.py | danilo-aalmeida/python-simple-bank | 44780c568994d97323099b0a33cd085723c7834d | [
"MIT"
] | null | null | null | models/conta.py | danilo-aalmeida/python-simple-bank | 44780c568994d97323099b0a33cd085723c7834d | [
"MIT"
] | null | null | null | models/conta.py | danilo-aalmeida/python-simple-bank | 44780c568994d97323099b0a33cd085723c7834d | [
"MIT"
] | null | null | null | from models.cliente import Cliente
from utils.helper import formata_float_str_moeda
class Conta:
    """Simple bank account with a balance (saldo) plus an overdraft limit.

    The spendable amount ``saldo_total`` is cached as ``saldo + limite`` and
    refreshed after every mutating operation. Account numbers are assigned
    sequentially from the class-wide ``codigo`` counter.
    """

    # Class-wide counter used to assign sequential account numbers.
    codigo: int = 1001

    def __init__(self: object, cliente: Cliente) -> None:
        self.__numero: int = Conta.codigo
        self.__cliente: Cliente = cliente
        self.__saldo: float = 0.0
        self.__limite: float = 100.00
        self.__saldo_total: float = self._calcula_saldo_total
        Conta.codigo += 1

    @property
    def numero(self: object) -> int:
        return self.__numero

    @property
    def cliente(self: object) -> Cliente:
        return self.__cliente

    @property
    def saldo(self: object) -> float:
        return self.__saldo

    @saldo.setter
    def saldo(self: object, valor: float) -> None:
        self.__saldo = valor

    @property
    def limite(self: object) -> float:
        return self.__limite

    @limite.setter
    def limite(self: object, valor: float) -> None:
        # NOTE(review): changing the limit does not refresh saldo_total;
        # callers are responsible for updating it — confirm this is intended.
        self.__limite = valor

    @property
    def saldo_total(self: object) -> float:
        return self.__saldo_total

    @saldo_total.setter
    def saldo_total(self: object, valor: float) -> None:
        self.__saldo_total = valor

    @property
    def _calcula_saldo_total(self: object) -> float:
        """Spendable amount: current balance plus the overdraft limit."""
        return self.saldo + self.limite

    def _debita(self: object, valor: float) -> None:
        """Remove ``valor`` from the account, dipping into the limit if needed.

        Shared by ``sacar`` and ``transferir`` (previously duplicated in both).
        Assumes the caller already validated ``0 < valor <= saldo_total``.
        """
        if self.saldo >= valor:
            self.saldo = self.saldo - valor
        else:
            # Balance is insufficient: absorb the (negative) remainder into
            # the overdraft limit and zero out the balance.
            restante: float = self.saldo - valor
            self.limite = self.limite + restante
            self.saldo = 0
        self.saldo_total = self._calcula_saldo_total

    def depositar(self: object, valor: float) -> None:
        """Credit ``valor`` to the balance; non-positive amounts are rejected."""
        if valor > 0:
            self.saldo = self.saldo + valor
            self.saldo_total = self._calcula_saldo_total
            print('Deposito efetuado com sucesso!')
        else:
            print('Erro ao efetuar deposito. Tente novamente.')

    def sacar(self: object, valor: float) -> None:
        """Withdraw ``valor`` if it fits within the spendable amount."""
        if 0 < valor <= self.saldo_total:
            self._debita(valor)
            print('Saque efetuado com sucesso')
        else:
            print('Saque nao realizado. Tente novamente')

    def transferir(self: object, destino: object, valor: float):
        """Move ``valor`` from this account into ``destino``."""
        if 0 < valor <= self.saldo_total:
            self._debita(valor)
            destino.saldo = destino.saldo + valor
            destino.saldo_total = destino._calcula_saldo_total
            print('Transferencia efetuada com sucesso')
        else:
            print('Transferencia nao realizada. Tente novamente')

    def __str__(self: object) -> str:
        return (f'Número da conta: {self.numero} \nCliente: {self.cliente.nome} \n'
                f'Saldo total: {formata_float_str_moeda(self.saldo_total)}')
| 33.416667 | 83 | 0.598192 |
7ea6156b6b77f86227a21724bfb6005664ac0a9d | 8,579 | py | Python | os_brick/encryptors/cryptsetup.py | zoharm/os-brick | 6c3a89d8e6cb3ea5729e29b447b824d4067e9284 | [
"Apache-2.0"
] | null | null | null | os_brick/encryptors/cryptsetup.py | zoharm/os-brick | 6c3a89d8e6cb3ea5729e29b447b824d4067e9284 | [
"Apache-2.0"
] | null | null | null | os_brick/encryptors/cryptsetup.py | zoharm/os-brick | 6c3a89d8e6cb3ea5729e29b447b824d4067e9284 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import array
import binascii
import os
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_log import versionutils
from os_brick.encryptors import base
from os_brick import exception
LOG = logging.getLogger(__name__)
class CryptsetupEncryptor(base.VolumeEncryptor):
"""A VolumeEncryptor based on dm-crypt.
This VolumeEncryptor uses dm-crypt to encrypt the specified volume.
"""
    def __init__(self, root_helper,
                 connection_info,
                 keymgr,
                 execute=None,
                 *args, **kwargs):
        """Derive the dm-crypt device name and paths from the connection info.

        :raises exception.VolumeEncryptionNotSupported: if the connector did
            not provide a ``device_path`` (e.g. libvirt network volume
            drivers), since there is then no block device to map.
        """
        super(CryptsetupEncryptor, self).__init__(
            root_helper=root_helper,
            connection_info=connection_info,
            keymgr=keymgr,
            execute=execute,
            *args, **kwargs)

        # Fail if no device_path was set when connecting the volume, e.g. in
        # the case of libvirt network volume drivers.
        data = connection_info['data']
        if not data.get('device_path'):
            volume_id = data.get('volume_id') or connection_info.get('serial')
            raise exception.VolumeEncryptionNotSupported(
                volume_id=volume_id,
                volume_type=connection_info['driver_volume_type'])

        # the device's path as given to libvirt -- e.g., /dev/disk/by-path/...
        self.symlink_path = connection_info['data']['device_path']

        # a unique name for the volume -- e.g., the iSCSI participant name
        self.dev_name = 'crypt-%s' % os.path.basename(self.symlink_path)

        # NOTE(lixiaoy1): This is to import fix for 1439869 from Nova.
        # NOTE(tsekiyama): In older version of nova, dev_name was the same
        # as the symlink name. Now it has 'crypt-' prefix to avoid conflict
        # with multipath device symlink. To enable rolling update, we use the
        # old name when the encrypted volume already exists.
        old_dev_name = os.path.basename(self.symlink_path)
        wwn = data.get('multipath_id')
        if self._is_crypt_device_available(old_dev_name):
            self.dev_name = old_dev_name
            LOG.debug("Using old encrypted volume name: %s", self.dev_name)
        elif wwn and wwn != old_dev_name:
            # FibreChannel device could be named '/dev/mapper/<WWN>'.
            if self._is_crypt_device_available(wwn):
                self.dev_name = wwn
                LOG.debug("Using encrypted volume name from wwn: %s",
                          self.dev_name)

        # the device's actual path on the compute host -- e.g., /dev/sd_
        self.dev_path = os.path.realpath(self.symlink_path)
    def _is_crypt_device_available(self, dev_name):
        """Return True if /dev/mapper/<dev_name> exists and is a crypt device.

        Probes the mapping with ``cryptsetup status``; a nonexistent path or a
        failing status check (e.g. a plain disk or multipath device under that
        name) yields False.
        """
        if not os.path.exists('/dev/mapper/%s' % dev_name):
            return False

        try:
            self._execute('cryptsetup', 'status', dev_name, run_as_root=True)
        except processutils.ProcessExecutionError as e:
            # If /dev/mapper/<dev_name> is a non-crypt block device (such as a
            # normal disk or multipath device), exit_code will be 1. In the
            # case, we will omit the warning message.
            if e.exit_code != 1:
                LOG.warning('cryptsetup status %(dev_name)s exited '
                            'abnormally (status %(exit_code)s): %(err)s',
                            {"dev_name": dev_name, "exit_code": e.exit_code,
                             "err": e.stderr})
            return False
        return True
def _get_passphrase(self, key):
"""Convert raw key to string."""
return binascii.hexlify(key).decode('utf-8')
def _open_volume(self, passphrase, **kwargs):
    """Open the LUKS partition on the volume using passphrase.

    :param passphrase: the passphrase used to access the volume
    :param kwargs: may carry optional 'cipher' and 'key_size' entries
        that are forwarded to cryptsetup.
    """
    LOG.debug("opening encrypted volume %s", self.dev_path)
    # --key-file=- keeps cryptsetup from stripping trailing newlines from
    # the passphrase supplied on stdin.
    cmd = ["cryptsetup", "create", "--key-file=-"]
    cipher = kwargs.get("cipher")
    if cipher is not None:
        cmd += ["--cipher", cipher]
    key_size = kwargs.get("key_size")
    if key_size is not None:
        cmd += ["--key-size", key_size]
    cmd += [self.dev_name, self.dev_path]
    self._execute(*cmd, process_input=passphrase,
                  check_exit_code=True, run_as_root=True,
                  root_helper=self._root_helper)
def _get_mangled_passphrase(self, key):
"""Convert the raw key into a list of unsigned int's and then a string
"""
# NOTE(lyarwood): This replicates the methods used prior to Newton to
# first encode the passphrase as a list of unsigned int's before
# decoding back into a string. This method strips any leading 0's
# of the resulting hex digit pairs, resulting in a different
# passphrase being returned.
encoded_key = array.array('B', key).tolist()
return ''.join(hex(x).replace('0x', '') for x in encoded_key)
def attach_volume(self, context, **kwargs):
    """Shadow the device and pass an unencrypted version to the instance.

    Transparent disk encryption is achieved by mounting the volume via
    dm-crypt and passing the resulting device to the instance. The
    instance is unaware of the underlying encryption due to modifying the
    original symbolic link to refer to the device mounted by dm-crypt.

    :param context: request context, used to fetch the volume key via
        self._get_key()
    :param kwargs: forwarded to _open_volume() (may carry 'cipher' and
        'key_size')
    """
    # TODO(lyarwood): Remove this encryptor and refactor the LUKS based
    # encryptors in the U release.
    versionutils.report_deprecated_feature(
        LOG,
        "The plain CryptsetupEncryptor is deprecated and will be removed "
        "in a future release. Existing users are encouraged to retype "
        "any existing volumes using this encryptor to the 'luks' "
        "LuksEncryptor or 'luks2' Luks2Encryptor encryptors as soon as "
        "possible.")
    key = self._get_key(context).get_encoded()
    passphrase = self._get_passphrase(key)
    try:
        self._open_volume(passphrase, **kwargs)
    except processutils.ProcessExecutionError as e:
        if e.exit_code == 2:
            # NOTE(lyarwood): Workaround bug#1633518 by attempting to use
            # a mangled passphrase to open the device..
            LOG.info("Unable to open %s with the current passphrase, "
                     "attempting to use a mangled passphrase to open "
                     "the volume.", self.dev_path)
            self._open_volume(self._get_mangled_passphrase(key), **kwargs)
        # NOTE(review): when exit_code != 2 the error is swallowed here
        # and the symlink below is still rewritten -- confirm intended.
    # modify the original symbolic link to refer to the decrypted device
    self._execute('ln', '--symbolic', '--force',
                  '/dev/mapper/%s' % self.dev_name, self.symlink_path,
                  root_helper=self._root_helper,
                  run_as_root=True, check_exit_code=True)
def _close_volume(self, **kwargs):
    """Close the device, i.e. remove its dm-crypt mapping."""
    LOG.debug("closing encrypted volume %s", self.dev_path)
    # 'cryptsetup remove' exits with 4 ("wrong device specified") when the
    # mapping does not exist.  We assume the caller named the right device
    # and that it simply was never opened, so exit code 4 is accepted to
    # keep this operation idempotent.
    self._execute('cryptsetup', 'remove', self.dev_name,
                  run_as_root=True, check_exit_code=[0, 4],
                  root_helper=self._root_helper)
def detach_volume(self, **kwargs):
    """Tear down the dm-crypt mapping created by attach_volume()."""
    self._close_volume(**kwargs)
| 43.994872 | 78 | 0.630726 |
e6acb439b3e07667b8914ace465fe5857b5b6a9a | 3,359 | py | Python | IPython/frontend/qt/console/tests/test_ansi_code_processor.py | wackywendell/ipyurwid | c4b45eab791bc23aac99e3b6d9b89d0c265bace2 | [
"BSD-3-Clause"
] | 1 | 2016-06-22T11:43:13.000Z | 2016-06-22T11:43:13.000Z | IPython/frontend/qt/console/tests/test_ansi_code_processor.py | 08saikiranreddy/ipython | 3498382180ad409592f46a9dd0d190ca917bfbff | [
"BSD-3-Clause-Clear"
] | null | null | null | IPython/frontend/qt/console/tests/test_ansi_code_processor.py | 08saikiranreddy/ipython | 3498382180ad409592f46a9dd0d190ca917bfbff | [
"BSD-3-Clause-Clear"
] | null | null | null | # Standard library imports
import unittest
# Local imports
from IPython.frontend.qt.console.ansi_code_processor import AnsiCodeProcessor
class TestAnsiCodeProcessor(unittest.TestCase):
    """Unit tests for AnsiCodeProcessor's handling of ANSI escape codes.

    Fix: use assertEqual instead of the deprecated assertEquals alias
    (removed in Python 3.12).
    """

    def setUp(self):
        # Fresh processor per test; its .actions list is reset on each
        # substring yielded by split_string().
        self.processor = AnsiCodeProcessor()

    def testClear(self):
        """Screen-erase (2J) and line-erase (K) codes yield 'erase' actions."""
        string = '\x1b[2J\x1b[K'
        i = -1
        for i, substring in enumerate(self.processor.split_string(string)):
            if i == 0:
                self.assertEqual(len(self.processor.actions), 1)
                action = self.processor.actions[0]
                self.assertEqual(action.action, 'erase')
                self.assertEqual(action.area, 'screen')
                self.assertEqual(action.erase_to, 'all')
            elif i == 1:
                self.assertEqual(len(self.processor.actions), 1)
                action = self.processor.actions[0]
                self.assertEqual(action.action, 'erase')
                self.assertEqual(action.area, 'line')
                self.assertEqual(action.erase_to, 'end')
            else:
                self.fail('Too many substrings.')
        self.assertEqual(i, 1, 'Too few substrings.')

    def testColors(self):
        """SGR color codes set/reset the processor's foreground color."""
        string = "first\x1b[34mblue\x1b[0mlast"
        i = -1
        for i, substring in enumerate(self.processor.split_string(string)):
            if i == 0:
                self.assertEqual(substring, 'first')
                self.assertEqual(self.processor.foreground_color, None)
            elif i == 1:
                self.assertEqual(substring, 'blue')
                self.assertEqual(self.processor.foreground_color, 4)
            elif i == 2:
                self.assertEqual(substring, 'last')
                self.assertEqual(self.processor.foreground_color, None)
            else:
                self.fail('Too many substrings.')
        self.assertEqual(i, 2, 'Too few substrings.')

    def testScroll(self):
        """Scroll-up (S) and scroll-down (T) codes yield 'scroll' actions."""
        string = '\x1b[5S\x1b[T'
        i = -1
        for i, substring in enumerate(self.processor.split_string(string)):
            if i == 0:
                self.assertEqual(len(self.processor.actions), 1)
                action = self.processor.actions[0]
                self.assertEqual(action.action, 'scroll')
                self.assertEqual(action.dir, 'up')
                self.assertEqual(action.unit, 'line')
                self.assertEqual(action.count, 5)
            elif i == 1:
                self.assertEqual(len(self.processor.actions), 1)
                action = self.processor.actions[0]
                self.assertEqual(action.action, 'scroll')
                self.assertEqual(action.dir, 'down')
                self.assertEqual(action.unit, 'line')
                self.assertEqual(action.count, 1)
            else:
                self.fail('Too many substrings.')
        self.assertEqual(i, 1, 'Too few substrings.')

    def testSpecials(self):
        """A form feed is translated into a one-page scroll-down action."""
        string = '\f'  # form feed
        self.assertEqual(list(self.processor.split_string(string)), [''])
        self.assertEqual(len(self.processor.actions), 1)
        action = self.processor.actions[0]
        self.assertEqual(action.action, 'scroll')
        self.assertEqual(action.dir, 'down')
        self.assertEqual(action.unit, 'page')
        self.assertEqual(action.count, 1)
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| 39.517647 | 77 | 0.579041 |
5a72221c5a6b303441a8a54de31c055b41f58a75 | 5,451 | py | Python | nicos_mlz/stressi/setups/monochromator.py | mlz-ictrl/nicos | a6de0bc194ba42e3dc04a033713b41b5499ba8e1 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 12 | 2019-11-06T15:40:36.000Z | 2022-01-01T16:23:00.000Z | nicos_mlz/stressi/setups/monochromator.py | ess-dmsc/nicos | 755d61d403ff7123f804c45fc80c7ff4d762993b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 91 | 2020-08-18T09:20:26.000Z | 2022-02-01T11:07:14.000Z | nicos_mlz/stressi/setups/monochromator.py | mlz-ictrl/nicos | a6de0bc194ba42e3dc04a033713b41b5499ba8e1 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 6 | 2020-01-11T10:52:30.000Z | 2022-02-25T12:35:23.000Z | description = 'Monochromator CARESS HWB Devices'
group = 'lowlevel'

# Base Tango URI of the motor box; each device appends its channel path.
tango_base = 'tango://motorbox04.stressi.frm2.tum.de:10000/box/'

devices = dict(
    # Monochromator rocking angle: raw motor + encoder, combined into an axis.
    omgm_m = device('nicos.devices.entangle.Motor',
        tangodevice = tango_base + 'channel1/motor',
        fmtstr = '%.2f',
        lowlevel = True,
        requires = {'level': 'admin'},
    ),
    omgm_c = device('nicos.devices.entangle.Sensor',
        tangodevice = tango_base + 'channel1/coder',
        fmtstr = '%.2f',
        lowlevel = True,
    ),
    omgm = device('nicos.devices.generic.Axis',
        description = 'OMGM',
        motor = 'omgm_m',
        coder = 'omgm_c',
        precision = 0.005,
    ),
    # Crystal-changer translation: raw motor/encoder, axis, and a switcher
    # that maps crystal names to translation positions.
    transm_m = device('nicos.devices.entangle.Motor',
        tangodevice = tango_base + 'channel2/motor',
        fmtstr = '%.3f',
        userlimits = (-200, 200),
        lowlevel = True,
        requires = {'level': 'admin'},
    ),
    transm_c = device('nicos.devices.entangle.Sensor',
        tangodevice = tango_base + 'channel2/coder',
        fmtstr = '%.3f',
        lowlevel = True,
    ),
    transm_a = device('nicos.devices.generic.Axis',
        description = 'HWB TRANSM',
        motor = 'transm_m',
        coder = 'transm_c',
        precision = 0.001,
        lowlevel = True,
    ),
    transm = device('nicos.devices.generic.Switcher',
        description = 'Monochromator changer',
        moveable = 'transm_a',
        mapping = {
            'Si': 0.292,
            'PG': 30.292,
            'Ge': 60.292,
        },
        precision = 0.01,
        unit = '',
        requires = {'level': 'admin'},
    ),
    # Take-off angle: currently a manual (virtual) device; the commented
    # lines keep the original Tango motor configuration for reference.
    # tthm = device('nicos.devices.entangle.Motor',
    tthm = device('nicos.devices.generic.ManualMove',
        description = 'Take off angle',
        default = 84.0,
        # tangodevice = tangobase + '/',
        fmtstr = '%.2f',
        unit = 'deg',
        abslimits = (50., 100.),
    ),
    tthm_r = device('nicos_mlz.stressi.devices.wavelength.TransformedMoveable',
        description = 'Base hardware device',
        dev = 'tthm',
        informula = '1./0.956 * x - 11.5 / 0.956',
        outformula = '0.956 * x + 11.5',
        precision = 0.001,
        lowlevel = True,
    ),
    # Incoming wavelength, derived from take-off angle, crystal and omgm.
    wav = device('nicos_mlz.stressi.devices.wavelength.Wavelength',
        description = 'The incoming wavelength',
        omgm = 'omgm',
        crystal = 'transm',
        base = 'tthm_r',
        plane = '100',
        unit = 'AA',
        fmtstr = '%.2f',
        abslimits = (0.9, 2.5),
        requires = {'level': 'admin'},
    ),
    # Per-crystal CHI tilt axes (motor + encoder + axis) for Si, PG and Ge.
    chim_si_m = device('nicos.devices.entangle.Motor',
        tangodevice = tango_base + 'channel6/motor',
        fmtstr = '%.2f',
        lowlevel = True,
    ),
    chim_si_c = device('nicos.devices.entangle.Sensor',
        tangodevice = tango_base + 'channel6/coder',
        fmtstr = '%.2f',
        lowlevel = True,
    ),
    chim_si = device('nicos.devices.generic.Axis',
        description = 'CHI Si monochromator',
        motor = 'chim_si_m',
        coder = 'chim_si_c',
        fmtstr = '%.2f',
        precision = 0.01,
    ),
    chim_pg_m = device('nicos.devices.entangle.Motor',
        tangodevice = tango_base + 'channel7/motor',
        fmtstr = '%.2f',
        lowlevel = True,
    ),
    chim_pg_c = device('nicos.devices.entangle.Sensor',
        tangodevice = tango_base + 'channel7/coder',
        fmtstr = '%.2f',
        lowlevel = True,
    ),
    chim_pg = device('nicos.devices.generic.Axis',
        description = 'CHI PG monochromator',
        motor = 'chim_pg_m',
        coder = 'chim_pg_c',
        fmtstr = '%.2f',
        precision = 0.01,
    ),
    chim_ge_m = device('nicos.devices.entangle.Motor',
        tangodevice = tango_base + 'channel8/motor',
        fmtstr = '%.2f',
        lowlevel = True,
    ),
    chim_ge_c = device('nicos.devices.entangle.Sensor',
        tangodevice = tango_base + 'channel8/coder',
        fmtstr = '%.2f',
        lowlevel = True,
    ),
    chim_ge = device('nicos.devices.generic.Axis',
        description = 'CHI Ge monochromator',
        motor = 'chim_ge_m',
        coder = 'chim_ge_c',
        fmtstr = '%.2f',
        precision = 0.01,
    ),
    # Focusing drives: Si (horizontal), PG (vertical), Ge (vertical, with
    # encoder).  Only foc_ge has a coder; the others run open loop.
    foc_si_m = device('nicos.devices.entangle.Motor',
        tangodevice = tango_base + 'channel3/motor',
        fmtstr = '%.2f',
        lowlevel = True,
    ),
    foc_si = device('nicos.devices.generic.Axis',
        description = 'Horizontal focus Si monochromator',
        motor = 'foc_si_m',
        fmtstr = '%.1f',
        precision = 0.1,
    ),
    foc_pg_m = device('nicos.devices.entangle.Motor',
        tangodevice = tango_base + 'channel4/motor',
        fmtstr = '%.2f',
        lowlevel = True,
    ),
    foc_pg = device('nicos.devices.generic.Axis',
        description = 'Vertical focus PG monochromator',
        motor = 'foc_pg_m',
        fmtstr = '%.1f',
        precision = 0.1,
    ),
    foc_ge_m = device('nicos.devices.entangle.Motor',
        tangodevice = tango_base + 'channel5/motor',
        fmtstr = '%.2f',
        lowlevel = True,
    ),
    foc_ge_c = device('nicos.devices.entangle.Sensor',
        tangodevice = tango_base + 'channel5/coder',
        fmtstr = '%.2f',
        lowlevel = True,
    ),
    foc_ge = device('nicos.devices.generic.Axis',
        description = 'Vertical focus Ge monochromator',
        motor = 'foc_ge_m',
        coder = 'foc_ge_c',
        fmtstr = '%.1f',
        precision = 0.1,
    ),
)
| 31.148571 | 79 | 0.545588 |
dce719b520bb21f21a27fda10f9e9d5ac9a7b954 | 3,945 | py | Python | StandardDataSets/1_5/collada/library_effects/effect/profile_COMMON/technique/constant/transparent/effect_constant_transparent_default/effect_constant_transparent_default.py | KhronosGroup/COLLADA-CTS | 61f2a560cbb2a06ee62da8025241f6b08d06bfd9 | [
"MIT"
] | 20 | 2015-03-19T08:02:57.000Z | 2020-10-16T15:16:11.000Z | StandardDataSets/collada/library_effects/effect/profile_COMMON/technique/constant/transparent/effect_constant_transparent_default/effect_constant_transparent_default.py | Acidburn0zzz/COLLADA-CTS | 39a36188cf8710bbc003df43ed70b965eb4386bd | [
"MIT"
] | 4 | 2017-04-19T18:42:05.000Z | 2017-06-17T03:03:28.000Z | StandardDataSets/collada/library_effects/effect/profile_COMMON/technique/constant/transparent/effect_constant_transparent_default/effect_constant_transparent_default.py | Acidburn0zzz/COLLADA-CTS | 39a36188cf8710bbc003df43ed70b965eb4386bd | [
"MIT"
] | 10 | 2015-03-26T02:52:24.000Z | 2022-02-24T08:43:48.000Z |
# Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
# See Core.Logic.FJudgementContext for the information
# of the 'context' parameter.
# This sample judging object does the following:
#
# JudgeBaseline: just verifies that the standard steps did not crash.
# JudgeSuperior: also verifies that the validation steps are not in error.
# JudgeExemplary: same as intermediate badge.
# We import an assistant script that includes the common verifications
# methods. The assistant buffers its checks, so that running them again
# does not incurs an unnecessary performance hint.
from StandardDataSets.scripts import JudgeAssistant
# Please feed your node list here:
# These globals are stored on the judging object below but are not consulted
# by its Judge* methods -- this test judges by image comparison only.
tagLst = []
attrName = ''
attrVal = ''
dataToCheck = ''
class SimpleJudgingObject:
    """Judging object for this COLLADA conformance test case.

    Baseline verifies that no step crashed, that Import/Export/Validate
    passed (Render only has to exist), and compares rendered images;
    superior and exemplary simply inherit the previous badge's verdict.
    """

    def __init__(self, _tagLst, _attrName, _attrVal, _data):
        self.tagList = _tagLst
        self.attrName = _attrName
        self.attrVal = _attrVal
        self.dataToCheck = _data
        # Badge verdicts, filled in by the Judge* methods below.
        self.status_baseline = False
        self.status_superior = False
        self.status_exemplary = False
        # The assistant buffers its checks, so repeating them is cheap.
        self.__assistant = JudgeAssistant.JudgeAssistant()

    def JudgeBaseline(self, context):
        """Basic badge: steps ran, validation passed, images judged."""
        assistant = self.__assistant
        # No step should crash.
        assistant.CheckCrashes(context)
        # Import/export/validate must exist and pass; Render must only exist.
        assistant.CheckSteps(context, ["Import", "Export", "Validate"],
                             ["Render"])
        if assistant.GetResults() == False:
            self.status_baseline = False
            return False
        # Compare the rendered images between import and export, then check
        # the images against the reference test for NON-equivalence.
        if assistant.CompareRenderedImages(context):
            assistant.CompareImagesAgainst(
                context, "effect_constant_transparent_a_one",
                None, None, 5, True, True)
        self.status_baseline = assistant.DeferJudgement(context)
        return self.status_baseline

    def JudgeSuperior(self, context):
        """Intermediate badge: no extra checks beyond baseline here."""
        self.status_superior = self.status_baseline
        return self.status_superior

    def JudgeExemplary(self, context):
        """Advanced badge: no extra checks beyond superior here."""
        self.status_exemplary = self.status_superior
        return self.status_exemplary
# This is where all the work occurs: "judgingObject" is an absolutely necessary token.
# The dynamic loader looks very specifically for a class instance named "judgingObject".
#
# The module-level globals above are passed positionally; the CTS harness
# then calls the Judge* methods on this instance.
judgingObject = SimpleJudgingObject(tagLst, attrName, attrVal, dataToCheck);
| 52.6 | 467 | 0.72294 |
3b4c90e5511aea9d541a73fb6a5a0a52b6fa236a | 1,855 | py | Python | lib/serialize.py | scudette/grr | d4257c5259af881e28a7d62e9837fa13352e2bf6 | [
"Apache-2.0"
] | 6 | 2015-04-03T02:25:28.000Z | 2021-11-17T21:42:59.000Z | lib/serialize.py | defaultnamehere/grr | ba1648b97a76f844ffb8e1891cc9e2680f9b1c6e | [
"Apache-2.0"
] | null | null | null | lib/serialize.py | defaultnamehere/grr | ba1648b97a76f844ffb8e1891cc9e2680f9b1c6e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""This module serializes AFF4 objects in various ways."""
import yaml
from grr.lib import aff4
from grr.lib import rdfvalue
def YamlDumper(aff4object):
    """Serialize the given aff4object to a YAML string.

    Each synced attribute is flattened to
    [rdfvalue class name, serialized bytes, age] triples keyed by the
    attribute predicate.
    """
    aff4object.Flush()

    attributes = {}
    for attribute, lazy_values in aff4object.synced_attributes.items():
        serialized = []
        for lazy_value in lazy_values:
            # Each entry is a LazyDecoder(); materialize the real RDFValue
            # before serializing it.
            rdf_value = lazy_value.ToRDFValue()
            serialized.append([rdf_value.__class__.__name__,
                               rdf_value.SerializeToString(),
                               str(rdf_value.age)])
        attributes[attribute.predicate] = serialized

    return yaml.dump({
        "aff4_class": aff4object.__class__.__name__,
        "_urn": aff4object.urn.SerializeToString(),
        "attributes": attributes,
        "age_policy": aff4object.age_policy,
    })
def YamlLoader(string):
    """Load an AFF4 object from a serialized YAML representation.

    :param string: YAML text previously produced by YamlDumper.
    :returns: a re-instantiated AFF4 object, marked dirty so it will be
        written back to the data store on the next flush.
    """
    # NOTE(security): yaml.load without an explicit Loader can construct
    # arbitrary Python objects; only feed this function trusted input
    # (i.e. YamlDumper output), or switch to yaml.safe_load.
    representation = yaml.load(string)
    result_cls = aff4.FACTORY.AFF4Object(representation["aff4_class"])
    aff4_attributes = {}
    for predicate, values in representation["attributes"].items():
        attribute = aff4.Attribute.PREDICATES[predicate]
        tmp = aff4_attributes[attribute] = []
        for rdfvalue_cls_name, value, age in values:
            # Rebuild each RDFValue from its class name, payload and age.
            rdfvalue_cls = aff4.FACTORY.RDFValue(rdfvalue_cls_name)
            value = rdfvalue_cls(value, age=rdfvalue.RDFDatetime(age))
            tmp.append(value)
    # Ensure the object is dirty so when we save it, it can be written to the data
    # store.
    result = result_cls(urn=representation["_urn"],
                        clone=aff4_attributes, mode="rw",
                        age=representation["age_policy"])
    # Swap synced into new attributes so the next Flush() persists them.
    result.new_attributes, result.synced_attributes = result.synced_attributes, {}
    result._dirty = True  # pylint: disable=protected-access
    return result
| 31.982759 | 80 | 0.702965 |
4d14653b91366e8c87a7fba7e8e938463e096660 | 1,043 | py | Python | python_module/test/unit/core/test_zeros_ones.py | stoneMo/MegEngine | 4b55350dd0a5e7d9aef3c25f10c4b6c0d47f7ac7 | [
"Apache-2.0"
] | 2 | 2021-09-25T15:33:37.000Z | 2022-02-08T11:06:35.000Z | python_module/test/unit/core/test_zeros_ones.py | ted51/MegEngine | f91881ffdc051ab49314b1bd12c4a07a862dc9c6 | [
"Apache-2.0"
] | null | null | null | python_module/test/unit/core/test_zeros_ones.py | ted51/MegEngine | f91881ffdc051ab49314b1bd12c4a07a862dc9c6 | [
"Apache-2.0"
] | 1 | 2020-11-09T06:29:51.000Z | 2020-11-09T06:29:51.000Z | # -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
from megengine.test import assertTensorClose
def test_zeros():
    """mge.zeros must match np.zeros for both tuple and tensor shapes."""
    expected = np.zeros((2, 2), dtype=np.int32)
    assertTensorClose(mge.zeros((2, 2), dtype=np.int32).numpy(), expected)
    shape_tensor = mge.tensor([2, 2], dtype=np.int32)
    assertTensorClose(mge.zeros(shape_tensor, dtype=np.int32).numpy(), expected)
def test_ones():
    """mge.ones must match np.ones for both tuple and tensor shapes."""
    expected = np.ones((2, 2), dtype=np.int32)
    assertTensorClose(mge.ones((2, 2), dtype=np.int32).numpy(), expected)
    shape_tensor = mge.tensor([2, 2], dtype=np.int32)
    assertTensorClose(mge.ones(shape_tensor, dtype=np.int32).numpy(), expected)
| 28.972222 | 88 | 0.661553 |
53534bed9d2d5de84b6e818e5de9b2f86e63dcf5 | 974 | py | Python | content/code/python/fizzbuzz.py | transferorbit/coderefinery_testing | b1011345cd6ed614e702b372bd2d987521bba130 | [
"CC-BY-4.0"
] | 8 | 2019-03-11T12:36:25.000Z | 2021-09-30T00:32:11.000Z | content/code/python/fizzbuzz.py | transferorbit/coderefinery_testing | b1011345cd6ed614e702b372bd2d987521bba130 | [
"CC-BY-4.0"
] | 126 | 2016-12-13T11:12:23.000Z | 2022-03-30T20:17:17.000Z | content/code/python/fizzbuzz.py | transferorbit/coderefinery_testing | b1011345cd6ed614e702b372bd2d987521bba130 | [
"CC-BY-4.0"
] | 37 | 2016-12-13T11:00:28.000Z | 2022-01-18T13:53:07.000Z | import pytest
def fizzbuzz(number):
    """Return the FizzBuzz value for *number*.

    Multiples of 3 map to "Fizz", of 5 to "Buzz", of both to "FizzBuzz";
    anything else is returned unchanged.  Raises TypeError for non-ints
    and ValueError for numbers below 1.
    """
    if not isinstance(number, int):
        raise TypeError
    if number < 1:
        raise ValueError
    word = ""
    if number % 3 == 0:
        word += "Fizz"
    if number % 5 == 0:
        word += "Buzz"
    return word or number
def test_fizzbuzz():
    """Check the first 20 values and rejection of invalid input."""
    expected = [1, 2, "Fizz", 4, "Buzz", "Fizz",
                7, 8, "Fizz", "Buzz", 11, "Fizz",
                13, 14, "FizzBuzz", 16, 17, "Fizz", 19, "Buzz"]
    assert [fizzbuzz(n) for n in range(1, 21)] == expected
    for bad_value in (-5, 0):
        with pytest.raises(ValueError):
            fizzbuzz(bad_value)
    for bad_type in (1.5, "rabbit"):
        with pytest.raises(TypeError):
            fizzbuzz(bad_type)
def main():
    """Print the FizzBuzz sequence for the numbers 1..99."""
    for number in range(1, 100):
        print(fizzbuzz(number))
# Script entry point: emit FizzBuzz for 1..99 when run directly.
if __name__ == "__main__":
    main()
| 23.756098 | 70 | 0.556468 |
d207b319b66dd41c9e6fbec9a707c84087815b3e | 22,004 | py | Python | python/infernal_cmscan.py | SamFent/webservice-clients | b4c1ab0d4e0535cc8e79a0d5e731aaafef3193f2 | [
"Apache-2.0"
] | 1 | 2020-06-04T12:31:32.000Z | 2020-06-04T12:31:32.000Z | python/infernal_cmscan.py | SamFent/webservice-clients | b4c1ab0d4e0535cc8e79a0d5e731aaafef3193f2 | [
"Apache-2.0"
] | 1 | 2020-06-04T07:48:54.000Z | 2020-06-04T07:48:54.000Z | python/infernal_cmscan.py | SamFent/webservice-clients | b4c1ab0d4e0535cc8e79a0d5e731aaafef3193f2 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright 2012-2018 EMBL - European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Python Client Automatically generated with:
# https://github.com/ebi-wp/webservice-clients-generator
#
# Infernal CM Scan (REST) web service Python client using xmltramp2.
#
# For further information see:
# https://www.ebi.ac.uk/Tools/webservices/
#
###############################################################################
from __future__ import print_function
import os
import sys
import time
import requests
import platform
from xmltramp2 import xmltramp
from optparse import OptionParser
try:
from urllib.parse import urlparse, urlencode
from urllib.request import urlopen, Request
from urllib.error import HTTPError
from urllib.request import __version__ as urllib_version
except ImportError:
from urlparse import urlparse
from urllib import urlencode
from urllib2 import urlopen, Request, HTTPError
from urllib2 import __version__ as urllib_version
# allow unicode(str) to be used in python 3
try:
unicode('')
except NameError:
unicode = str
# Base URL for service (REST endpoint of the Infernal cmscan tool).
baseUrl = u'https://www.ebi.ac.uk/Tools/services/rest/infernal_cmscan'
# Client version stamp (generation date of this client).
version = u'2019-07-03 12:51'

# Set interval for checking status (seconds between polls).
pollFreq = 3
# Output level: 0 = quiet, 1 = normal, 2 = verbose (adjusted by CLI flags).
outputLevel = 1
# Debug level; higher values print more via printDebugMessage().
debugLevel = 0
# Number of option arguments.
numOpts = len(sys.argv)
# Process command-line options
parser = OptionParser(add_help_option=False)
# Tool specific options (Try to print all the commands automatically)
# NOTE(review): the adjacent string literals in the '--sequence' help below
# concatenate WITHOUT separating spaces (e.g. 'The' + 'sequence' ->
# 'Thesequence') -- confirm the intended help wording before changing.
parser.add_option('--thresholdmodel', type=str, help=('Model-specific reporting thresholds.'))
parser.add_option('--sequence', type=str, help=('The input sequence can be entered directly into this form. The'
                  'sequence can be in GCG, FASTA, EMBL, GenBank, PIR, NBRF or PHYLIP'
                  'format. A partially formatted sequence is not accepted. Adding a'
                  'return to the end of the sequence may help certain applications'
                  'understand the input. Note that directly using data from word'
                  'processors may yield unpredictable results as hidden/control'
                  'characters may be present.'))
# General options
parser.add_option('-h', '--help', action='store_true', help='Show this help message and exit.')
parser.add_option('--email', help='E-mail address.')
parser.add_option('--title', help='Job title.')
parser.add_option('--outfile', help='File name for results.')
parser.add_option('--outformat', help='Output format for results.')
parser.add_option('--asyncjob', action='store_true', help='Asynchronous mode.')
parser.add_option('--jobid', help='Job identifier.')
parser.add_option('--polljob', action="store_true", help='Get job result.')
parser.add_option('--pollFreq', type='int', default=3, help='Poll frequency in seconds (default 3s).')
parser.add_option('--status', action="store_true", help='Get job status.')
parser.add_option('--resultTypes', action='store_true', help='Get result types.')
parser.add_option('--params', action='store_true', help='List input parameters.')
parser.add_option('--paramDetail', help='Get details for parameter.')
parser.add_option('--quiet', action='store_true', help='Decrease output level.')
parser.add_option('--verbose', action='store_true', help='Increase output level.')
parser.add_option('--version', action='store_true', help='Prints out the version of the Client and exit.')
parser.add_option('--debugLevel', type='int', default=debugLevel, help='Debugging level.')
parser.add_option('--baseUrl', default=baseUrl, help='Base URL for service.')

(options, args) = parser.parse_args()

# Increase output level
if options.verbose:
    outputLevel += 1

# Decrease output level
if options.quiet:
    outputLevel -= 1

# Debug level
if options.debugLevel:
    debugLevel = options.debugLevel

# Override the default poll frequency / service URL from the command line.
if options.pollFreq:
    pollFreq = options.pollFreq

if options.baseUrl:
    baseUrl = options.baseUrl
# Debug print
def printDebugMessage(functionName, message, level):
    """Write a '[function] message' line to stderr if level <= debugLevel."""
    if level <= debugLevel:
        print(u'[' + functionName + u'] ' + message, file=sys.stderr)
# User-agent for request (see RFC2616).
def getUserAgent():
    """Build the HTTP User-Agent header value for requests (see RFC2616)."""
    printDebugMessage(u'getUserAgent', u'Begin', 11)
    # Agent string for the underlying urllib library.
    base_agent = u'Python-urllib/%s' % urllib_version
    # Platform details may be unavailable; degrade gracefully.
    try:
        py_version = platform.python_version()
        py_system = platform.system()
    except ValueError:
        py_version, py_system = "Unknown", "Unknown"
    # Prepend the client-specific agent string.
    user_agent = u'EBI-Sample-Client/%s (%s; Python %s; %s) %s' % (
        version, os.path.basename(__file__),
        py_version, py_system, base_agent)
    printDebugMessage(u'getUserAgent', u'user_agent: ' + user_agent, 12)
    printDebugMessage(u'getUserAgent', u'End', 11)
    return user_agent
# Wrapper for a REST (HTTP GET) request
def restRequest(url):
    """Perform an HTTP GET against *url* and return the response body.

    Text responses are decoded to unicode; binary result types (PNG/JPEG
    images, gzip archives) are returned as raw bytes.  On HTTPError the
    body is re-fetched via requests and returned raw.
    """
    printDebugMessage(u'restRequest', u'Begin', 11)
    printDebugMessage(u'restRequest', u'url: ' + url, 11)
    try:
        # Set the User-agent.
        user_agent = getUserAgent()
        http_headers = {u'User-Agent': user_agent}
        req = Request(url, None, http_headers)
        # Make the request (HTTP GET).
        reqH = urlopen(req)
        resp = reqH.read()
        # BUG FIX: reqH.info() returns a message object, so comparing it
        # directly against content-type strings was always unequal.
        # Compare the actual Content-Type header value instead.
        contenttype = reqH.info().get('Content-Type', '')
        if (len(resp) > 0 and contenttype != u"image/png;charset=UTF-8"
                and contenttype != u"image/jpeg;charset=UTF-8"
                and contenttype != u"application/gzip;charset=UTF-8"):
            try:
                result = unicode(resp, u'utf-8')
            except UnicodeDecodeError:
                # Not valid UTF-8; hand back the raw bytes.
                result = resp
        else:
            result = resp
        reqH.close()
    # Errors are indicated by HTTP status codes.
    except HTTPError as ex:
        result = requests.get(url).content
    printDebugMessage(u'restRequest', u'End', 11)
    return result
# Get input parameters list
def serviceGetParameters():
    """Fetch the list of input parameter names from the service."""
    printDebugMessage(u'serviceGetParameters', u'Begin', 1)
    url = baseUrl + u'/parameters'
    printDebugMessage(u'serviceGetParameters', u'requestUrl: ' + url, 2)
    parsed = xmltramp.parse(restRequest(url))
    printDebugMessage(u'serviceGetParameters', u'End', 1)
    return parsed[u'id':]
# Print list of parameters
def printGetParameters():
    """Print the name of every input parameter, one per line."""
    printDebugMessage(u'printGetParameters', u'Begin', 1)
    for param_id in serviceGetParameters():
        print(param_id)
    printDebugMessage(u'printGetParameters', u'End', 1)
# Get input parameter information
def serviceGetParameterDetails(paramName):
    """Fetch the detailed description of one input parameter."""
    printDebugMessage(u'serviceGetParameterDetails', u'Begin', 1)
    printDebugMessage(u'serviceGetParameterDetails', u'paramName: ' + paramName, 2)
    url = baseUrl + u'/parameterdetails/' + paramName
    printDebugMessage(u'serviceGetParameterDetails', u'requestUrl: ' + url, 2)
    parsed = xmltramp.parse(restRequest(url))
    printDebugMessage(u'serviceGetParameterDetails', u'End', 1)
    return parsed
# Print description of a parameter
def printGetParameterDetails(paramName):
    """Print the type, description and allowed values of one parameter."""
    printDebugMessage(u'printGetParameterDetails', u'Begin', 1)
    doc = serviceGetParameterDetails(paramName)
    print(unicode(doc.name) + u"\t" + unicode(doc.type))
    print(doc.description)
    if hasattr(doc, 'values'):
        for value in doc.values:
            print(value.value)
            # Flag the service-side default value.
            if unicode(value.defaultValue) == u'true':
                print(u'default')
            print(u"\t" + unicode(value.label))
            if hasattr(value, u'properties'):
                for prop in value.properties:
                    print(u"\t" + unicode(prop.key) + u"\t" + unicode(prop.value))
    printDebugMessage(u'printGetParameterDetails', u'End', 1)
# Submit job
def serviceRun(email, title, params):
    """Submit a job (HTTP POST) and return its job identifier.

    Note: *params* is updated in place with the e-mail and title.
    """
    printDebugMessage(u'serviceRun', u'Begin', 1)
    # Insert e-mail and title into params.
    params[u'email'] = email
    if title:
        params[u'title'] = title
    requestUrl = baseUrl + u'/run/'
    printDebugMessage(u'serviceRun', u'requestUrl: ' + requestUrl, 2)

    # URL-encode the submission payload.
    requestData = urlencode(params)
    printDebugMessage(u'serviceRun', u'requestData: ' + requestData, 2)
    # Errors are indicated by HTTP status codes.
    try:
        # Set the HTTP User-agent and make the submission (HTTP POST).
        http_headers = {u'User-Agent': getUserAgent()}
        req = Request(requestUrl, None, http_headers)
        reqH = urlopen(req, requestData.encode(encoding=u'utf_8', errors=u'strict'))
        jobId = unicode(reqH.read(), u'utf-8')
        reqH.close()
    except HTTPError as ex:
        # Print the service's error message and terminate the client.
        print(xmltramp.parse(unicode(ex.read(), u'utf-8'))[0][0])
        quit()
    printDebugMessage(u'serviceRun', u'jobId: ' + jobId, 2)
    printDebugMessage(u'serviceRun', u'End', 1)
    return jobId
# Get job status
def serviceGetStatus(jobId):
    """Return the status string (e.g. RUNNING/FINISHED) for a job."""
    printDebugMessage(u'serviceGetStatus', u'Begin', 1)
    printDebugMessage(u'serviceGetStatus', u'jobId: ' + jobId, 2)
    url = baseUrl + u'/status/' + jobId
    printDebugMessage(u'serviceGetStatus', u'requestUrl: ' + url, 2)
    job_state = restRequest(url)
    printDebugMessage(u'serviceGetStatus', u'status: ' + job_state, 2)
    printDebugMessage(u'serviceGetStatus', u'End', 1)
    return job_state
# Print the status of a job
def printGetStatus(jobId):
    """Print the current status of *jobId*, plus a hint once finished."""
    printDebugMessage(u'printGetStatus', u'Begin', 1)
    job_state = serviceGetStatus(jobId)
    if outputLevel > 0:
        print("Getting status for job %s" % jobId)
    print(job_state)
    if job_state == "FINISHED" and outputLevel > 0:
        print("To get results: python %s --polljob --jobid %s"
              "" % (os.path.basename(__file__), jobId))
    printDebugMessage(u'printGetStatus', u'End', 1)
# Ask the server which result types are available for a finished job.
def serviceGetResultTypes(jobId):
    """Return the parsed <type> elements describing the available results."""
    printDebugMessage(u'serviceGetResultTypes', u'Begin', 1)
    printDebugMessage(u'serviceGetResultTypes', u'jobId: ' + jobId, 2)
    typesUrl = baseUrl + u'/resulttypes/' + jobId
    printDebugMessage(u'serviceGetResultTypes', u'requestUrl: ' + typesUrl, 2)
    doc = xmltramp.parse(restRequest(typesUrl))
    printDebugMessage(u'serviceGetResultTypes', u'End', 1)
    # xmltramp slice syntax selects every child element named 'type'.
    return doc[u'type':]
# Print the available result types for a job, one block per type.
def printGetResultTypes(jobId):
    """List each result type's identifier and its optional descriptive fields."""
    printDebugMessage(u'printGetResultTypes', u'Begin', 1)
    if outputLevel > 0:
        print("Getting result types for job %s" % jobId)
    resultTypeList = serviceGetResultTypes(jobId)
    if outputLevel > 0:
        print("Available result types:")
    for resultType in resultTypeList:
        print(resultType[u'identifier'])
        # Optional attributes are only printed when the element carries them.
        for attribute in (u'label', u'description', u'mediaType', u'fileSuffix'):
            if hasattr(resultType, attribute):
                print(u"\t", resultType[attribute])
    if outputLevel > 0:
        print("To get results:\n python %s --polljob --jobid %s\n"
              " python %s --polljob --outformat <type> --jobid %s"
              "" % (os.path.basename(__file__), jobId,
                    os.path.basename(__file__), jobId))
    printDebugMessage(u'printGetResultTypes', u'End', 1)
# Retrieve one result document for a finished job.
def serviceGetResult(jobId, type_):
    """Fetch the result of the given type for *jobId* and return it verbatim."""
    printDebugMessage(u'serviceGetResult', u'Begin', 1)
    printDebugMessage(u'serviceGetResult', u'jobId: ' + jobId, 2)
    printDebugMessage(u'serviceGetResult', u'type_: ' + type_, 2)
    resultUrl = baseUrl + u'/result/' + jobId + u'/' + type_
    result = restRequest(resultUrl)
    printDebugMessage(u'serviceGetResult', u'End', 1)
    return result
# Block until the job leaves the PENDING/RUNNING states.
def clientPoll(jobId):
    """Poll the service every ``pollFreq`` seconds until *jobId* has finished."""
    printDebugMessage(u'clientPoll', u'Begin', 1)
    while True:
        status = serviceGetStatus(jobId)
        if outputLevel > 0:
            print(status)
        if status != u'RUNNING' and status != u'PENDING':
            break
        # Still queued or executing: wait before asking again.
        time.sleep(pollFreq)
    printDebugMessage(u'clientPoll', u'End', 1)
# Get result for a jobid
# Allows more than one output file written when 'outformat' is defined.
def getResult(jobId):
    """Wait for *jobId* to finish, then write each requested result to disk.

    File names are ``<options.outfile or jobId>.<identifier>.<fileSuffix>``.
    ``options.outformat`` may hold a comma-separated list restricting which
    result types are written; when unset, every available type is written.
    """
    printDebugMessage(u'getResult', u'Begin', 1)
    printDebugMessage(u'getResult', u'jobId: ' + jobId, 1)
    if outputLevel > 1:
        print("Getting results for job %s" % jobId)
    # Check status and wait if necessary
    clientPoll(jobId)
    # Get available result types
    resultTypes = serviceGetResultTypes(jobId)
    for resultType in resultTypes:
        # Derive the filename for the result
        if options.outfile:
            filename = (options.outfile + u'.' + unicode(resultType[u'identifier']) +
                        u'.' + unicode(resultType[u'fileSuffix']))
        else:
            filename = (jobId + u'.' + unicode(resultType[u'identifier']) +
                        u'.' + unicode(resultType[u'fileSuffix']))
        # Write a result file
        outformat_parm = str(options.outformat).split(',')
        for outformat_type in outformat_parm:
            # Normalise the user-supplied type: strip spaces, map "None" to None.
            outformat_type = outformat_type.replace(' ', '')
            if outformat_type == 'None':
                outformat_type = None
            if not outformat_type or outformat_type == unicode(resultType[u'identifier']):
                if outputLevel > 1:
                    print("Getting %s" % unicode(resultType[u'identifier']))
                # Get the result
                result = serviceGetResult(jobId, unicode(resultType[u'identifier']))
                # Known binary media types must be written in binary mode.
                if (unicode(resultType[u'mediaType']) == u"image/png"
                        or unicode(resultType[u'mediaType']) == u"image/jpeg"
                        or unicode(resultType[u'mediaType']) == u"application/gzip"):
                    fmode = 'wb'
                else:
                    fmode = 'w'
                try:
                    fh = open(filename, fmode)
                    fh.write(result)
                    fh.close()
                except TypeError:
                    # Writing bytes to a text-mode handle raises TypeError;
                    # retry the whole write in binary mode.
                    fh.close()
                    fh = open(filename, "wb")
                    fh.write(result)
                    fh.close()
                if outputLevel > 0:
                    print("Creating result file: " + filename)
    printDebugMessage(u'getResult', u'End', 1)
# Read a file
def readFile(filename):
    """Return the entire contents of *filename* as text.

    Fixed to use a context manager: previously the handle was opened and
    closed manually, leaking the descriptor if ``read()`` raised.
    """
    printDebugMessage(u'readFile', u'Begin', 1)
    # 'with' guarantees the file is closed on success and on error alike.
    with open(filename, 'r') as fh:
        data = fh.read()
    printDebugMessage(u'readFile', u'End', 1)
    return data
def print_usage():
    """Print the command-line help text for this client to stdout."""
    print("""\
EMBL-EBI Infernal CM Scan Python Client:
RNA analysis with Infernal CM Scan.
[Required (for job submission)]
  --email               E-mail address.
  --sequence            The input sequence can be entered directly into this form.
                        The sequence can be in GCG, FASTA, EMBL, GenBank, PIR, NBRF
                        or PHYLIP format. A partially formatted sequence is not
                        accepted. Adding a return to the end of the sequence may
                        help certain applications understand the input. Note that
                        directly using data from word processors may yield
                        unpredictable results as hidden/control characters may be
                        present.
[Optional]
  --thresholdmodel      Model-specific reporting thresholds.
[General]
  -h, --help            Show this help message and exit.
  --asyncjob            Forces to make an asynchronous query.
  --title               Title for job.
  --status              Get job status.
  --resultTypes         Get available result types for job.
  --polljob             Poll for the status of a job.
  --pollFreq            Poll frequency in seconds (default 3s).
  --jobid               JobId that was returned when an asynchronous job was submitted.
  --outfile             File name for results (default is JobId; for STDOUT).
  --outformat           Result format(s) to retrieve. It accepts comma-separated values.
  --params              List input parameters.
  --paramDetail         Display details for input parameter.
  --verbose             Increase output.
  --version             Prints out the version of the Client and exit.
  --quiet               Decrease output.
  --baseUrl             Base URL. Defaults to:
                        https://www.ebi.ac.uk/Tools/services/rest/infernal_cmscan
Synchronous job:
  The results/errors are returned as soon as the job is finished.
  Usage: python infernal_cmscan.py --email <your@email.com> [options...] <SeqFile|SeqID(s)>
  Returns: results as an attachment
Asynchronous job:
  Use this if you want to retrieve the results at a later time. The results
  are stored for up to 24 hours.
  Usage: python infernal_cmscan.py --asyncjob --email <your@email.com> [options...] <SeqFile|SeqID(s)>
  Returns: jobid
Check status of Asynchronous job:
  Usage: python infernal_cmscan.py --status --jobid <jobId>
Retrieve job data:
  Use the jobid to query for the status of the job. If the job is finished,
  it also returns the results/errors.
  Usage: python infernal_cmscan.py --polljob --jobid <jobId> [--outfile string]
  Returns: string indicating the status of the job and if applicable, results
  as an attachment.
Further information:
  https://www.ebi.ac.uk/Tools/webservices and
    https://github.com/ebi-wp/webservice-clients
Support/Feedback:
  https://www.ebi.ac.uk/support/""")
# Top-level dispatch: decide what to do based on the parsed command-line
# options. Exactly one branch runs per invocation.
# No options... print help.
if numOpts < 2:
    print_usage()
elif options.help:
    print_usage()
# List parameters
elif options.params:
    printGetParameters()
# Get parameter details
elif options.paramDetail:
    printGetParameterDetails(options.paramDetail)
# Print Client version
elif options.version:
    print("Revision: %s" % version)
    sys.exit()
# Submit job
elif options.email and not options.jobid:
    params = {}
    if len(args) == 1 and "true" not in args and "false" not in args:
        if os.path.exists(args[0]):  # Read file into content
            params[u'sequence'] = readFile(args[0])
        else:  # Argument is a sequence id
            params[u'sequence'] = args[0]
    elif len(args) == 2 and "true" not in args and "false" not in args:
        if os.path.exists(args[0]) and os.path.exists(args[1]):  # Read file into content
            params[u'asequence'] = readFile(args[0])
            params[u'bsequence'] = readFile(args[1])
        else:  # Argument is a sequence id
            params[u'asequence'] = args[0]
            # BUG FIX: this previously assigned args[0], silently submitting
            # the a-sequence identifier twice instead of the b-sequence.
            params[u'bsequence'] = args[1]
    elif hasattr(options, "sequence") or (hasattr(options, "asequence") and hasattr(options, "bsequence")):  # Specified via option
        if hasattr(options, "sequence"):
            if os.path.exists(options.sequence):  # Read file into content
                params[u'sequence'] = readFile(options.sequence)
            else:  # Argument is a sequence id
                params[u'sequence'] = options.sequence
        elif hasattr(options, "asequence") and hasattr(options, "bsequence"):
            if os.path.exists(options.asequence) and os.path.exists(options.bsequence):  # Read file into content
                params[u'asequence'] = readFile(options.asequence)
                params[u'bsequence'] = readFile(options.bsequence)
            else:  # Argument is a sequence id
                params[u'asequence'] = options.asequence
                params[u'bsequence'] = options.bsequence
    # Pass default values and fix bools (without default value)
    if not options.thresholdmodel:
        params['thresholdmodel'] = 'cut_ga'
    else:
        params['thresholdmodel'] = options.thresholdmodel
    # Submit the job
    jobId = serviceRun(options.email, options.title, params)
    if options.asyncjob:  # Async mode
        print(jobId)
        if outputLevel > 0:
            print("To check status: python %s --status --jobid %s"
                  "" % (os.path.basename(__file__), jobId))
    else:
        # Sync mode
        if outputLevel > 0:
            print("JobId: " + jobId, file=sys.stderr)
        else:
            print(jobId)
        time.sleep(pollFreq)
        getResult(jobId)
# Get job status
elif options.jobid and options.status:
    printGetStatus(options.jobid)
elif options.jobid and (options.resultTypes or options.polljob):
    status = serviceGetStatus(options.jobid)
    if status == 'PENDING' or status == 'RUNNING':
        print("Error: Job status is %s. "
              "To get result types the job must be finished." % status)
        quit()
    # List result types for job
    if options.resultTypes:
        printGetResultTypes(options.jobid)
    # Get results for job
    elif options.polljob:
        getResult(options.jobid)
else:
    # Checks for 'email' parameter
    if not options.email:
        print('\nParameter "--email" is missing in your command. It is required!\n')
    print(u'Error: unrecognised argument combination', file=sys.stderr)
    print_usage()
| 38.334495 | 131 | 0.647928 |
748eadf58002ff3b772e775c841222885b270ad5 | 1,990 | py | Python | n_utils/ecr_utils.py | MarkusNousiainenWebscale/nameless-deploy-tools | f39060d4c6835f1d2236d5ce236d7da09318e01e | [
"Apache-2.0"
] | null | null | null | n_utils/ecr_utils.py | MarkusNousiainenWebscale/nameless-deploy-tools | f39060d4c6835f1d2236d5ce236d7da09318e01e | [
"Apache-2.0"
] | null | null | null | n_utils/ecr_utils.py | MarkusNousiainenWebscale/nameless-deploy-tools | f39060d4c6835f1d2236d5ce236d7da09318e01e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import base64
# Copyright 2016-2017 Nitor Creations Oy
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import str
from botocore.exceptions import ClientError
from threadlocal_aws.clients import ecr
def ensure_repo(name):
    """Make sure an ECR repository called *name* exists, then print login info.

    Emits a shell-friendly REPO="..." line and a ready-to-run ``docker login``
    command built from a freshly fetched authorization token.
    """
    repo = None
    try:
        # describe_repositories raises ClientError when the repo is missing.
        response = ecr().describe_repositories(repositoryNames=[name])
        if 'repositories' in response:
            repo = response['repositories'][0]
    except ClientError:
        response = ecr().create_repository(repositoryName=name)
        if 'repository' in response:
            repo = response['repository']
    if not repo:
        raise Exception("Failed to find or create repo")
    print("REPO=\"" + repo['repositoryUri'] + "\"")
    token_resp = ecr().get_authorization_token(registryIds=[repo['registryId']])
    if 'authorizationData' in token_resp:
        auth_data = token_resp['authorizationData'][0]
        # The authorization token base64-decodes to "user:password".
        parts = base64.b64decode(auth_data['authorizationToken']).decode("utf-8").split(":")
        user = parts[0]
        token = parts[1]
        print("docker login -u " + user + " -p " + token + " " + auth_data['proxyEndpoint'])
def repo_uri(name):
    """Return the URI of the ECR repository *name*, or None when unavailable."""
    response = ecr().describe_repositories(repositoryNames=[name])
    repositories = response.get('repositories')
    # Guard against an empty listing or a record without a URI field.
    if repositories and 'repositoryUri' in repositories[0]:
        return str(repositories[0]['repositoryUri'])
    return None
| 38.269231 | 97 | 0.693467 |
786f5232b2636b8835a6b0f90c8e4423855c8b88 | 2,330 | py | Python | synapse/replication/http/login.py | V02460/synapse | 782dd72037cf71fb3f9e4922b07c56df2f59de75 | [
"Apache-2.0"
] | null | null | null | synapse/replication/http/login.py | V02460/synapse | 782dd72037cf71fb3f9e4922b07c56df2f59de75 | [
"Apache-2.0"
] | null | null | null | synapse/replication/http/login.py | V02460/synapse | 782dd72037cf71fb3f9e4922b07c56df2f59de75 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2019 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from twisted.internet import defer
from synapse.http.servlet import parse_json_object_from_request
from synapse.replication.http._base import ReplicationEndpoint
logger = logging.getLogger(__name__)
class RegisterDeviceReplicationServlet(ReplicationEndpoint):
    """Ensure a device is registered, generating a new access token for the
    device.
    Used during registration and login.
    """
    # Endpoint name and URL path arguments consumed by ReplicationEndpoint
    # when building/routing the replication request URL.
    NAME = "device_check_registered"
    PATH_ARGS = ("user_id",)
    def __init__(self, hs):
        super(RegisterDeviceReplicationServlet, self).__init__(hs)
        self.registration_handler = hs.get_registration_handler()
    @staticmethod
    def _serialize_payload(user_id, device_id, initial_display_name, is_guest):
        """Build the JSON body sent to the worker for this request.

        Args:
            device_id (str|None): Device ID to use, if None a new one is
                generated.
            initial_display_name (str|None)
            is_guest (bool)
        """
        # Note: user_id is not part of the body; it travels in the URL path
        # (see PATH_ARGS above).
        return {
            "device_id": device_id,
            "initial_display_name": initial_display_name,
            "is_guest": is_guest,
        }
    @defer.inlineCallbacks
    def _handle_request(self, request, user_id):
        # Deserialize the payload produced by _serialize_payload.
        content = parse_json_object_from_request(request)
        device_id = content["device_id"]
        initial_display_name = content["initial_display_name"]
        is_guest = content["is_guest"]
        device_id, access_token = yield self.registration_handler.register_device(
            user_id, device_id, initial_display_name, is_guest
        )
        # Returns (HTTP status, JSON-serialisable response body).
        return 200, {"device_id": device_id, "access_token": access_token}
def register_servlets(hs, http_server):
    """Register this module's replication servlets with the HTTP server."""
    servlet = RegisterDeviceReplicationServlet(hs)
    servlet.register(http_server)
| 32.361111 | 82 | 0.704292 |
ff23d3e29c767fcde0cf4ba60e413663c08b9702 | 538 | py | Python | workflow/torch/to_shapes.py | Aiwizo/ml-workflow | 88e104fce571dd3b76914626a52f9001342c07cc | [
"Apache-2.0"
] | 4 | 2020-09-23T15:39:24.000Z | 2021-09-12T22:11:00.000Z | workflow/torch/to_shapes.py | Aiwizo/ml-workflow | 88e104fce571dd3b76914626a52f9001342c07cc | [
"Apache-2.0"
] | 4 | 2020-09-23T15:07:39.000Z | 2020-10-30T10:26:24.000Z | workflow/torch/to_shapes.py | Aiwizo/ml-workflow | 88e104fce571dd3b76914626a52f9001342c07cc | [
"Apache-2.0"
] | null | null | null | import torch
def to_shapes(*args):
    '''
    Debugging helper: walk a nested structure of tuples/lists/dicts and
    replace every object carrying a ``shape`` attribute (e.g. a tensor or
    ndarray) with that shape, leaving everything else untouched.
    '''
    # A single positional argument is unwrapped; several are kept as a tuple.
    x = args[0] if len(args) == 1 else args
    kind = type(x)
    if kind is tuple:
        return tuple(map(to_shapes, x))
    if kind is list:
        return [to_shapes(item) for item in x]
    if kind is dict:
        return {name: to_shapes(item) for name, item in x.items()}
    return x.shape if hasattr(x, 'shape') else x
| 22.416667 | 70 | 0.563197 |
e5c78b6ea9a29ac85fc62369e512b620fc87a34e | 12,592 | py | Python | python/cudf/cudf/tests/test_testing.py | Ahsantw/cudf | e099688d5ca7dd20104930485a829881a68c522a | [
"Apache-2.0"
] | 239 | 2018-10-10T09:55:22.000Z | 2018-10-28T20:47:23.000Z | python/cudf/cudf/tests/test_testing.py | Ahsantw/cudf | e099688d5ca7dd20104930485a829881a68c522a | [
"Apache-2.0"
] | 25 | 2018-10-10T14:46:32.000Z | 2018-10-28T22:16:14.000Z | python/cudf/cudf/tests/test_testing.py | Ahsantw/cudf | e099688d5ca7dd20104930485a829881a68c522a | [
"Apache-2.0"
] | 19 | 2018-10-10T12:42:51.000Z | 2018-10-26T16:33:22.000Z | # Copyright (c) 2020-2022, NVIDIA CORPORATION.
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
import cudf
from cudf.core.column.column import as_column, full
from cudf.testing import (
assert_frame_equal,
assert_index_equal,
assert_series_equal,
)
from cudf.testing._utils import (
NUMERIC_TYPES,
OTHER_TYPES,
assert_column_memory_eq,
assert_column_memory_ne,
assert_eq,
)
from cudf.testing.testing import assert_column_equal
@pytest.fixture(
    params=[
        pa.array([*range(10)]),
        pa.array(["hello", "world", "rapids", "AI"]),
        pa.array([[1, 2, 3], [4, 5], [6], [], [7]]),
        pa.array([{"f0": "hello", "f1": 42}, {"f0": "world", "f1": 3}]),
    ]
)
def arrow_arrays(request):
    # Parametrized fixture yielding pyarrow arrays of several element types
    # (ints, strings, lists, structs); used by the column-memory tests.
    return request.param
@pytest.mark.parametrize("rdata", [[1, 2, 5], [1, 2, 6], [1, 2, 5, 6]])
@pytest.mark.parametrize("exact", ["equiv", True, False])
@pytest.mark.parametrize("check_names", [True, False])
@pytest.mark.parametrize("rname", ["a", "b"])
@pytest.mark.parametrize("check_categorical", [True, False])
@pytest.mark.parametrize(
    "dtype", NUMERIC_TYPES + OTHER_TYPES + ["datetime64[ns]"]
)
def test_basic_assert_index_equal(
    rdata,
    exact,
    check_names,
    rname,
    check_categorical,
    dtype,
):
    """cudf.assert_index_equal must accept/raise exactly like pandas' version."""
    p_left = pd.Index([1, 2, 3], name="a", dtype=dtype)
    p_right = pd.Index(rdata, name=rname, dtype=dtype)
    left = cudf.from_pandas(p_left)
    right = cudf.from_pandas(p_right)
    # First run the pandas comparison and record which exception type (if
    # any) it raises for this parameter combination.
    kind = None
    try:
        pd.testing.assert_index_equal(
            p_left,
            p_right,
            exact=exact,
            check_names=check_names,
            check_categorical=check_categorical,
        )
    except BaseException as e:
        kind = type(e)
        msg = str(e)
    if kind is not None:
        # pandas raises TypeError for categoricals with different categories;
        # cudf is expected to raise AssertionError in that case instead.
        if (kind == TypeError) and (
            msg
            == (
                "Categoricals can only be compared "
                "if 'categories' are the same."
            )
        ):
            kind = AssertionError
        with pytest.raises(kind):
            assert_index_equal(
                left,
                right,
                exact=exact,
                check_names=check_names,
                check_categorical=check_categorical,
            )
    else:
        assert_index_equal(
            left,
            right,
            exact=exact,
            check_names=check_names,
            check_categorical=check_categorical,
        )
@pytest.mark.parametrize("rdata", [[1, 2, 5], [1, 2, 6], [1, 2, 5, 6]])
@pytest.mark.parametrize("check_names", [True, False])
@pytest.mark.parametrize("rname", ["a", "b"])
@pytest.mark.parametrize("check_category_order", [True, False])
@pytest.mark.parametrize("check_categorical", [True, False])
@pytest.mark.parametrize(
    "dtype", NUMERIC_TYPES + OTHER_TYPES + ["datetime64[ns]"]
)
def test_basic_assert_series_equal(
    rdata,
    rname,
    check_names,
    check_category_order,
    check_categorical,
    dtype,
):
    """cudf.assert_series_equal must accept/raise exactly like pandas' version."""
    p_left = pd.Series([1, 2, 3], name="a", dtype=dtype)
    p_right = pd.Series(rdata, name=rname, dtype=dtype)
    left = cudf.from_pandas(p_left)
    right = cudf.from_pandas(p_right)
    # Capture whichever exception type pandas raises, then expect the same
    # type from cudf (or success from both).
    kind = None
    try:
        pd.testing.assert_series_equal(
            p_left,
            p_right,
            check_names=check_names,
            check_categorical=check_categorical,
            check_category_order=check_category_order,
        )
    except BaseException as e:
        kind = type(e)
    if kind is not None:
        with pytest.raises(kind):
            assert_series_equal(
                left,
                right,
                check_names=check_names,
                check_categorical=check_categorical,
                check_category_order=check_category_order,
            )
    else:
        assert_series_equal(
            left,
            right,
            check_names=check_names,
            check_categorical=check_categorical,
            check_category_order=check_category_order,
        )
@pytest.mark.parametrize(
    "other",
    [
        as_column(["1", "2", "3"]),
        as_column([[1], [2], [3]]),
        as_column([{"a": 1}, {"a": 2}, {"a": 3}]),
    ],
)
def test_assert_column_equal_dtype_edge_cases(other):
    """Columns of incomparable dtypes must diff 100% — except empty/all-null."""
    # string series should be 100% different
    # even when the elements are the same
    base = as_column([1, 2, 3])
    # for these dtypes, the diff should always be 100% regardless of the values
    with pytest.raises(
        AssertionError, match=r".*values are different \(100.0 %\).*"
    ):
        assert_column_equal(base, other, check_dtype=False)
    # the exceptions are the empty and all null cases
    assert_column_equal(base.slice(0, 0), other.slice(0, 0), check_dtype=False)
    assert_column_equal(other.slice(0, 0), base.slice(0, 0), check_dtype=False)
    base = full(len(base), fill_value=cudf.NA, dtype=base.dtype)
    other = full(len(other), fill_value=cudf.NA, dtype=other.dtype)
    assert_column_equal(base, other, check_dtype=False)
    assert_column_equal(other, base, check_dtype=False)
@pytest.mark.parametrize(
    "rdtype", [["int8", "int16", "int64"], ["int64", "int16", "int8"]]
)
@pytest.mark.parametrize("rname", [["a", "b", "c"], ["b", "c", "a"]])
@pytest.mark.parametrize("index", [[1, 2, 3], [3, 2, 1]])
@pytest.mark.parametrize("check_exact", [True, False])
@pytest.mark.parametrize("check_dtype", [True, False])
@pytest.mark.parametrize("check_names", [True, False])
@pytest.mark.parametrize("check_like", [True, False])
@pytest.mark.parametrize("mismatch", [True, False])
def test_basic_assert_frame_equal(
    rdtype,
    rname,
    index,
    check_exact,
    check_dtype,
    check_names,
    check_like,
    mismatch,
):
    """cudf.assert_frame_equal must accept/raise exactly like pandas' version."""
    data = [1, 2, 1]
    p_left = pd.DataFrame(index=[1, 2, 3])
    p_left["a"] = np.array(data, dtype="int8")
    p_left["b"] = np.array(data, dtype="int16")
    # `mismatch` injects differing values into column "c" only.
    if mismatch:
        p_left["c"] = np.array([1, 2, 3], dtype="int64")
    else:
        p_left["c"] = np.array(data, dtype="int64")
    p_right = pd.DataFrame(index=index)
    for dtype, name in zip(rdtype, rname):
        p_right[name] = np.array(data, dtype=dtype)
    left = cudf.from_pandas(p_left)
    right = cudf.from_pandas(p_right)
    # Capture whichever exception type pandas raises, then expect the same
    # type from cudf (or success from both).
    kind = None
    try:
        pd.testing.assert_frame_equal(
            p_left,
            p_right,
            check_exact=check_exact,
            check_dtype=check_dtype,
            check_names=check_names,
            check_like=check_like,
        )
    except BaseException as e:
        kind = type(e)
    if kind is not None:
        with pytest.raises(kind):
            assert_frame_equal(
                left,
                right,
                check_exact=check_exact,
                check_dtype=check_dtype,
                check_names=check_names,
                check_like=check_like,
            )
    else:
        assert_frame_equal(
            left,
            right,
            check_exact=check_exact,
            check_dtype=check_dtype,
            check_names=check_names,
            check_like=check_like,
        )
# NOTE(review): "compaibility" is a typo for "compatibility"; renaming would
# change the collected pytest id, so it is only flagged here.
@pytest.mark.parametrize("rdata", [[0, 1, 2, 3], [0, 1, 2, 4]])
@pytest.mark.parametrize("check_datetimelike_compat", [True, False])
def test_datetime_like_compaibility(rdata, check_datetimelike_compat):
    """datetime-vs-string comparison must mirror pandas for both compat modes."""
    psr1 = pd.Series([0, 1, 2, 3], dtype="datetime64[ns]")
    psr2 = pd.Series(rdata, dtype="datetime64[ns]").astype("str")
    sr1 = cudf.from_pandas(psr1)
    sr2 = cudf.from_pandas(psr2)
    # Capture whichever exception type pandas raises, then expect the same
    # type from cudf (or success from both).
    kind = None
    try:
        pd.testing.assert_series_equal(
            psr1, psr2, check_datetimelike_compat=check_datetimelike_compat
        )
    except BaseException as e:
        kind = type(e)
    if kind is not None:
        with pytest.raises(kind):
            assert_series_equal(
                sr1, sr2, check_datetimelike_compat=check_datetimelike_compat
            )
    else:
        assert_series_equal(
            sr1, sr2, check_datetimelike_compat=check_datetimelike_compat
        )
@pytest.mark.parametrize(
    "rdata",
    [
        [[0, 1, 2, 3], ["G", "O", "N", "E"]],
        [[0, 1, 2, 4], ["G", "O", "N", "E"]],
    ],
)
def test_multiindex_equal(rdata):
    """MultiIndex comparison in cudf must mirror pandas (equal and not-equal)."""
    pidx1 = pd.MultiIndex.from_arrays(
        [[0, 1, 2, 3], ["G", "O", "N", "E"]], names=("n", "id")
    )
    pidx2 = pd.MultiIndex.from_arrays(rdata, names=("n", "id"))
    idx1 = cudf.from_pandas(pidx1)
    idx2 = cudf.from_pandas(pidx2)
    # Capture whichever exception type pandas raises, then expect the same
    # type from cudf (or success from both).
    kind = None
    try:
        pd.testing.assert_index_equal(pidx1, pidx2)
    except BaseException as e:
        kind = type(e)
    if kind is not None:
        with pytest.raises(kind):
            assert_index_equal(idx1, idx2)
    else:
        assert_index_equal(idx1, idx2)
@pytest.mark.parametrize("dtype", ["int8", "uint8", "float32"])
@pytest.mark.parametrize("check_exact", [True, False])
@pytest.mark.parametrize("check_dtype", [True, False])
def test_series_different_type_cases(dtype, check_exact, check_dtype):
    """Comparing same values under differing dtypes must mirror pandas."""
    data = [0, 1, 2, 3]
    psr1 = pd.Series(data, dtype="uint8")
    psr2 = pd.Series(data, dtype=dtype)
    sr1 = cudf.from_pandas(psr1)
    sr2 = cudf.from_pandas(psr2)
    # Capture whichever exception type pandas raises, then expect the same
    # type from cudf (or success from both).
    kind = None
    try:
        pd.testing.assert_series_equal(
            psr1, psr2, check_exact=check_exact, check_dtype=check_dtype
        )
    except BaseException as e:
        kind = type(e)
    if kind is not None:
        with pytest.raises(kind):
            assert_series_equal(
                sr1, sr2, check_exact=check_exact, check_dtype=check_dtype
            )
    else:
        assert_series_equal(
            sr1, sr2, check_exact=check_exact, check_dtype=check_dtype
        )
# NOTE(review): "eqaulity" is a typo for "equality"; renaming would change
# the collected pytest id, so it is only flagged here.
@pytest.mark.parametrize(
    "index",
    [cudf.Int8Index, cudf.Int16Index, cudf.Int32Index, cudf.Int64Index],
)
@pytest.mark.parametrize("exact", ["equiv", True, False])
def test_range_index_and_int_index_eqaulity(index, exact):
    """RangeIndex vs integer Index equivalence must mirror pandas per `exact`."""
    pidx1 = pd.RangeIndex(0, stop=5, step=1)
    pidx2 = pd.Index([0, 1, 2, 3, 4])
    idx1 = cudf.from_pandas(pidx1)
    idx2 = index([0, 1, 2, 3, 4])
    # Capture whichever exception type pandas raises, then expect the same
    # type from cudf (or success from both).
    kind = None
    try:
        pd.testing.assert_index_equal(pidx1, pidx2, exact=exact)
    except BaseException as e:
        kind = type(e)
    if kind is not None:
        with pytest.raises(kind):
            assert_index_equal(idx1, idx2, exact=exact)
    else:
        assert_index_equal(idx1, idx2, exact=exact)
@pytest.mark.parametrize(
    "left, right",
    [
        (1493282, 1493282),
        (1493282.0, 1493282.0 + 1e-8),
        ("abc", "abc"),
        (0, np.array(0)),
        (
            np.datetime64(123456, "ns"),
            pd.Timestamp(np.datetime64(123456, "ns")),
        ),
        ("int64", np.dtype("int64")),
        (np.nan, np.nan),
    ],
)
def test_basic_scalar_equality(left, right):
    # Each pair is equal (or near-equal within tolerance, e.g. the 1e-8
    # float case and NaN==NaN) and must pass assert_eq.
    assert_eq(left, right)
@pytest.mark.parametrize(
    "left, right",
    [
        (1493282, 1493274),
        (1493282.0, 1493282.0 + 1e-6),
        ("abc", "abd"),
        (0, np.array(1)),
        (
            np.datetime64(123456, "ns"),
            pd.Timestamp(np.datetime64(123457, "ns")),
        ),
        ("int64", np.dtype("int32")),
    ],
)
def test_basic_scalar_inequality(left, right):
    # Each pair differs (the 1e-6 float case exceeds tolerance), so
    # assert_eq must raise AssertionError.
    with pytest.raises(AssertionError, match=r".*not (almost )?equal.*"):
        assert_eq(left, right)
def test_assert_column_memory_basic(arrow_arrays):
    """Two columns built from the same arrow data must not share memory."""
    left = cudf.core.column.ColumnBase.from_arrow(arrow_arrays)
    right = cudf.core.column.ColumnBase.from_arrow(arrow_arrays)
    # Separate from_arrow calls allocate separate buffers, so the memory-eq
    # assertion must fail and the memory-ne assertion must pass.
    with pytest.raises(AssertionError):
        assert_column_memory_eq(left, right)
    assert_column_memory_ne(left, right)
def test_assert_column_memory_slice(arrow_arrays):
    """Slices of one column are distinct memory-wise from each other and the parent."""
    col = cudf.core.column.ColumnBase.from_arrow(arrow_arrays)
    left = col.slice(0, 1)
    right = col.slice(1, 2)
    # Different slices, and a slice vs. the full column, must each compare
    # as memory-not-equal.
    with pytest.raises(AssertionError):
        assert_column_memory_eq(left, right)
    assert_column_memory_ne(left, right)
    with pytest.raises(AssertionError):
        assert_column_memory_eq(left, col)
    assert_column_memory_ne(left, col)
    with pytest.raises(AssertionError):
        assert_column_memory_eq(right, col)
    assert_column_memory_ne(right, col)
def test_assert_column_memory_basic_same(arrow_arrays):
    """Two columns built over the same Buffer must compare as memory-equal."""
    data = cudf.core.column.ColumnBase.from_arrow(arrow_arrays)
    # Both columns wrap the very same underlying buffer object.
    buf = cudf.core.buffer.Buffer(data=data.base_data, owner=data)
    left = cudf.core.column.build_column(buf, dtype=np.int32)
    right = cudf.core.column.build_column(buf, dtype=np.int32)
    assert_column_memory_eq(left, right)
    with pytest.raises(AssertionError):
        assert_column_memory_ne(left, right)
| 28.618182 | 79 | 0.609752 |
3c7beec56c5900b409b6d556995444302deb1182 | 290 | py | Python | mmdet/core/post_processing/__init__.py | MinliangLin/TSD | d84ddc049d6b18c3a2408c90d2b7dd63b4e2d3a1 | [
"Apache-2.0"
] | 454 | 2020-04-17T10:58:36.000Z | 2022-03-16T13:04:33.000Z | mmdet/core/post_processing/__init__.py | MinliangLin/TSD | d84ddc049d6b18c3a2408c90d2b7dd63b4e2d3a1 | [
"Apache-2.0"
] | 37 | 2020-04-29T12:37:54.000Z | 2022-01-26T21:10:42.000Z | mmdet/core/post_processing/__init__.py | MinliangLin/TSD | d84ddc049d6b18c3a2408c90d2b7dd63b4e2d3a1 | [
"Apache-2.0"
] | 61 | 2020-04-30T04:28:08.000Z | 2022-01-26T08:14:13.000Z | from .bbox_nms import multiclass_nms
from .merge_augs import (
merge_aug_bboxes,
merge_aug_masks,
merge_aug_proposals,
merge_aug_scores,
)
__all__ = [
"multiclass_nms",
"merge_aug_proposals",
"merge_aug_bboxes",
"merge_aug_scores",
"merge_aug_masks",
]
| 18.125 | 36 | 0.706897 |
5f77edb71a23c595cfc35f9c866d54e037d8fbf8 | 4,356 | py | Python | kubernetes/client/models/v1beta1_job_template_spec.py | jashandeep-sohi/kubernetes-python | e057f273069de445a2d5a250ac5fe37d79671f3b | [
"Apache-2.0"
] | 1 | 2020-05-08T12:41:04.000Z | 2020-05-08T12:41:04.000Z | kubernetes/client/models/v1beta1_job_template_spec.py | jashandeep-sohi/kubernetes-python | e057f273069de445a2d5a250ac5fe37d79671f3b | [
"Apache-2.0"
] | null | null | null | kubernetes/client/models/v1beta1_job_template_spec.py | jashandeep-sohi/kubernetes-python | e057f273069de445a2d5a250ac5fe37d79671f3b | [
"Apache-2.0"
] | 2 | 2021-07-09T08:49:05.000Z | 2021-08-03T18:08:36.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.10.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1JobTemplateSpec(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps python attribute name -> swagger model type name.
    swagger_types = {
        'metadata': 'V1ObjectMeta',
        'spec': 'V1JobSpec'
    }
    # Maps python attribute name -> JSON key used on the wire.
    attribute_map = {
        'metadata': 'metadata',
        'spec': 'spec'
    }
    def __init__(self, metadata=None, spec=None):
        """
        V1beta1JobTemplateSpec - a model defined in Swagger
        """
        self._metadata = None
        self._spec = None
        # Set to None: this model does not use a polymorphic discriminator.
        self.discriminator = None
        if metadata is not None:
            self.metadata = metadata
        if spec is not None:
            self.spec = spec
    @property
    def metadata(self):
        """
        Gets the metadata of this V1beta1JobTemplateSpec.
        Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
        :return: The metadata of this V1beta1JobTemplateSpec.
        :rtype: V1ObjectMeta
        """
        return self._metadata
    @metadata.setter
    def metadata(self, metadata):
        """
        Sets the metadata of this V1beta1JobTemplateSpec.
        Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
        :param metadata: The metadata of this V1beta1JobTemplateSpec.
        :type: V1ObjectMeta
        """
        self._metadata = metadata
    @property
    def spec(self):
        """
        Gets the spec of this V1beta1JobTemplateSpec.
        Specification of the desired behavior of the job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
        :return: The spec of this V1beta1JobTemplateSpec.
        :rtype: V1JobSpec
        """
        return self._spec
    @spec.setter
    def spec(self, spec):
        """
        Sets the spec of this V1beta1JobTemplateSpec.
        Specification of the desired behavior of the job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
        :param spec: The spec of this V1beta1JobTemplateSpec.
        :type: V1JobSpec
        """
        self._spec = spec
    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        # Recursively serialize: lists of models, nested models, and dicts
        # containing models all collapse to plain python values.
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, V1beta1JobTemplateSpec):
            return False
        # Generated models compare by full attribute dict.
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| 28.103226 | 161 | 0.582415 |
5739e6b213c90874615c85e9ee59c655d7f6bbe9 | 302 | py | Python | test.py | DanielDobromylskyj/Custom-Multi-Network-Interface-idk | fc77f79c0997739cdd4c966bc6ab6af3185d3154 | [
"MIT"
] | null | null | null | test.py | DanielDobromylskyj/Custom-Multi-Network-Interface-idk | fc77f79c0997739cdd4c966bc6ab6af3185d3154 | [
"MIT"
] | null | null | null | test.py | DanielDobromylskyj/Custom-Multi-Network-Interface-idk | fc77f79c0997739cdd4c966bc6ab6af3185d3154 | [
"MIT"
] | null | null | null | from CMNI import network
def test(server, socket_number):
    """Exercise a round trip on one connection: send "123", print the reply."""
    print("Test Function Called")
    server.Send(socket_number, "123")
    reply = server.Recv(socket_number)
    print("Recved: ", reply)
if __name__ == "__main__":
server = network.Server()
server.Setup(8776)
server.Run(test)
| 18.875 | 37 | 0.672185 |
1bcdf03a5c917faeb18c980a450d724c2f510e1b | 19,769 | py | Python | captum/_utils/common.py | greentfrapp/captum | 03f89a5825ae389d117ebdb6b9924e97bc311a4b | [
"BSD-3-Clause"
] | 1 | 2020-12-23T14:00:26.000Z | 2020-12-23T14:00:26.000Z | captum/_utils/common.py | greentfrapp/captum | 03f89a5825ae389d117ebdb6b9924e97bc311a4b | [
"BSD-3-Clause"
] | 4 | 2020-11-20T21:05:39.000Z | 2022-03-18T15:28:01.000Z | captum/_utils/common.py | greentfrapp/captum | 03f89a5825ae389d117ebdb6b9924e97bc311a4b | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import typing
from enum import Enum
from inspect import signature
from typing import Any, Callable, Dict, List, Tuple, Union, cast, overload
import numpy as np
import torch
from torch import Tensor, device
from torch.nn import Module
from captum._utils.typing import (
BaselineType,
Literal,
TargetType,
TensorOrTupleOfTensorsGeneric,
TupleOrTensorOrBoolGeneric,
)
class ExpansionTypes(Enum):
    # Strategies for expanding per-example tensors n times along the batch
    # dimension: `repeat` concatenates whole-batch copies (a,b,a,b),
    # `repeat_interleave` duplicates each example in place (a,a,b,b).
    repeat = 1
    repeat_interleave = 2
def safe_div(
    numerator: Tensor, denom: Union[Tensor, float], default_value: Tensor
) -> Tensor:
    r"""
    A simple utility function to perform `numerator / denom`;
    wherever the division is undefined (denom == 0) the result is
    `default_value`.
    """
    if isinstance(denom, float):
        return numerator / denom if denom != 0.0 else default_value
    # Tensor denominator: divide by 1 at the zero positions to avoid inf/nan,
    # then substitute `default_value` there. (Previously those positions
    # evaluated to `numerator / default_value`, contradicting both the
    # docstring and the float branch above.)
    safe_denom = torch.where(denom != 0.0, denom, torch.ones_like(denom))
    return torch.where(denom != 0.0, numerator / safe_denom, default_value)
@typing.overload
def _is_tuple(inputs: Tensor) -> Literal[False]:
    ...


@typing.overload
def _is_tuple(inputs: Tuple[Tensor, ...]) -> Literal[True]:
    ...


def _is_tuple(inputs: Union[Tensor, Tuple[Tensor, ...]]) -> bool:
    """Return True when `inputs` is a tuple of tensors, False for a single tensor."""
    return isinstance(inputs, tuple)
def _validate_target(num_samples: int, target: TargetType) -> None:
if isinstance(target, list) or (
isinstance(target, torch.Tensor) and torch.numel(target) > 1
):
assert num_samples == len(target), (
"The number of samples provied in the"
"input {} does not match with the number of targets. {}".format(
num_samples, len(target)
)
)
def _validate_input(
    inputs: Tuple[Tensor, ...],
    baselines: Tuple[Union[Tensor, int, float], ...],
    draw_baseline_from_distrib: bool = False,
) -> None:
    """Assert that `baselines` is shape-compatible with `inputs`.

    The two tuples must have the same length. A tensor baseline must either
    match its input's shape, be broadcastable from a single example, or be a
    scalar. With `draw_baseline_from_distrib=True` only the per-example
    (non-batch) dimensions need to match, since the batch dimension holds a
    distribution of reference samples.
    """
    assert len(inputs) == len(baselines), (
        "Input and baseline must have the same "
        "dimensions, baseline has {} features whereas input has {}.".format(
            len(baselines), len(inputs)
        )
    )
    for input, baseline in zip(inputs, baselines):
        if draw_baseline_from_distrib:
            assert (
                isinstance(baseline, (int, float))
                or input.shape[1:] == baseline.shape[1:]
            ), (
                "The samples in input and baseline batches must have"
                " the same shape or the baseline corresponding to the"
                " input tensor must be a scalar."
                " Found baseline: {} and input: {} ".format(baseline, input)
            )
        else:
            assert (
                isinstance(baseline, (int, float))
                or input.shape == baseline.shape
                or baseline.shape[0] == 1
            ), (
                "Baseline can be provided as a tensor for just one input and"
                " broadcasted to the batch or input and baseline must have the"
                " same shape or the baseline corresponding to each input tensor"
                " must be a scalar. Found baseline: {} and input: {}".format(
                    baseline, input
                )
            )
def _zeros(inputs: Tuple[Tensor, ...]) -> Tuple[int, ...]:
r"""
Takes a tuple of tensors as input and returns a tuple that has the same
length as `inputs` with each element as the integer 0.
"""
return tuple(0 for input in inputs)
def _format_baseline(
    baselines: BaselineType, inputs: Tuple[Tensor, ...]
) -> Tuple[Union[Tensor, int, float], ...]:
    """Normalize `baselines` into a tuple aligned with `inputs`.

    `None` becomes a tuple of scalar zeros (one per input); a single
    non-tuple baseline is wrapped in a 1-tuple. Every element must be a
    tensor or a number.
    """
    if baselines is None:
        return _zeros(inputs)
    if not isinstance(baselines, tuple):
        baselines = (baselines,)
    for baseline in baselines:
        assert isinstance(
            baseline, (torch.Tensor, int, float)
        ), "baseline input argument must be either a torch.Tensor or a number \
            however {} detected".format(
            type(baseline)
        )
    return baselines
@overload
def _format_tensor_into_tuples(inputs: None) -> None:
    ...


@overload
def _format_tensor_into_tuples(
    inputs: Union[Tensor, Tuple[Tensor, ...]]
) -> Tuple[Tensor, ...]:
    ...


def _format_tensor_into_tuples(
    inputs: Union[None, Tensor, Tuple[Tensor, ...]]
) -> Union[None, Tuple[Tensor, ...]]:
    """Wrap a single tensor into a 1-tuple; pass `None` and tuples through."""
    if inputs is None:
        return None
    if not isinstance(inputs, tuple):
        # Anything that is not a tuple must be a bare tensor.
        assert isinstance(
            inputs, torch.Tensor
        ), "`inputs` must have type " "torch.Tensor but {} found: ".format(type(inputs))
        inputs = (inputs,)
    return inputs
def _format_input(inputs: Union[Tensor, Tuple[Tensor, ...]]) -> Tuple[Tensor, ...]:
    """Alias of `_format_tensor_into_tuples` for required (non-None) inputs."""
    return _format_tensor_into_tuples(inputs)
@overload
def _format_additional_forward_args(additional_forward_args: None) -> None:
...
@overload
def _format_additional_forward_args(
additional_forward_args: Union[Tensor, Tuple]
) -> Tuple:
...
@overload
def _format_additional_forward_args(additional_forward_args: Any) -> Union[None, Tuple]:
...
def _format_additional_forward_args(additional_forward_args: Any) -> Union[None, Tuple]:
if additional_forward_args is not None and not isinstance(
additional_forward_args, tuple
):
additional_forward_args = (additional_forward_args,)
return additional_forward_args
def _expand_additional_forward_args(
    additional_forward_args: Any,
    n_steps: int,
    expansion_type: ExpansionTypes = ExpansionTypes.repeat,
) -> Union[None, Tuple]:
    """Expand each tensor argument `n_steps` times along the batch dimension.

    Scalar (0-dim) tensors and non-tensor arguments pass through unchanged.
    `expansion_type` chooses between whole-batch repetition (`repeat`) and
    per-example duplication (`repeat_interleave`).
    """
    def _expand_tensor_forward_arg(
        additional_forward_arg: Tensor,
        n_steps: int,
        expansion_type: ExpansionTypes = ExpansionTypes.repeat,
    ) -> Tensor:
        # 0-dim tensors broadcast naturally, so no expansion is needed.
        if len(additional_forward_arg.size()) == 0:
            return additional_forward_arg
        if expansion_type == ExpansionTypes.repeat:
            return torch.cat([additional_forward_arg] * n_steps, dim=0)
        elif expansion_type == ExpansionTypes.repeat_interleave:
            return additional_forward_arg.repeat_interleave(n_steps, dim=0)
        else:
            raise NotImplementedError(
                "Currently only `repeat` and `repeat_interleave`"
                " expansion_types are supported"
            )

    if additional_forward_args is None:
        return None

    return tuple(
        _expand_tensor_forward_arg(additional_forward_arg, n_steps, expansion_type)
        if isinstance(additional_forward_arg, torch.Tensor)
        else additional_forward_arg
        for additional_forward_arg in additional_forward_args
    )
def _expand_target(
    target: TargetType,
    n_steps: int,
    expansion_type: ExpansionTypes = ExpansionTypes.repeat,
) -> TargetType:
    """Expand a per-example `target` to match an input expanded `n_steps` times.

    List targets and multi-element tensor targets are expanded with the same
    `repeat` / `repeat_interleave` semantics as the inputs; scalar and `None`
    targets apply to every example and pass through unchanged.
    """
    if isinstance(target, list):
        if expansion_type == ExpansionTypes.repeat:
            return target * n_steps
        elif expansion_type == ExpansionTypes.repeat_interleave:
            expanded_target = []
            for i in target:
                expanded_target.extend([i] * n_steps)
            return cast(Union[List[Tuple[int, ...]], List[int]], expanded_target)
        else:
            raise NotImplementedError(
                "Currently only `repeat` and `repeat_interleave`"
                " expansion_types are supported"
            )

    elif isinstance(target, torch.Tensor) and torch.numel(target) > 1:
        if expansion_type == ExpansionTypes.repeat:
            return torch.cat([target] * n_steps, dim=0)
        elif expansion_type == ExpansionTypes.repeat_interleave:
            return target.repeat_interleave(n_steps, dim=0)
        else:
            raise NotImplementedError(
                "Currently only `repeat` and `repeat_interleave`"
                " expansion_types are supported"
            )

    return target
def _expand_and_update_baselines(
    inputs: Tuple[Tensor, ...],
    n_samples: int,
    kwargs: dict,
    draw_baseline_from_distrib: bool = False,
):
    """Expand `kwargs["baselines"]` in place to match `n_samples` copies of
    each input example.

    When `draw_baseline_from_distrib` is True, baselines are sampled randomly
    (with replacement) from the reference distribution along dim 0; otherwise
    per-example baselines are repeat-interleaved. No-op when the key is absent.
    """
    def get_random_baseline_indices(bsz, baseline):
        num_ref_samples = baseline.shape[0]
        return np.random.choice(num_ref_samples, n_samples * bsz).tolist()

    # expand baselines to match the sizes of input
    if "baselines" not in kwargs:
        return
    baselines = kwargs["baselines"]
    baselines = _format_baseline(baselines, inputs)
    _validate_input(
        inputs, baselines, draw_baseline_from_distrib=draw_baseline_from_distrib
    )
    if draw_baseline_from_distrib:
        bsz = inputs[0].shape[0]
        baselines = tuple(
            baseline[get_random_baseline_indices(bsz, baseline)]
            if isinstance(baseline, torch.Tensor)
            else baseline
            for baseline in baselines
        )
    else:
        # Only expand true per-example baselines; scalars and single-example
        # (broadcastable) baselines are left as-is.
        baselines = tuple(
            baseline.repeat_interleave(n_samples, dim=0)
            if isinstance(baseline, torch.Tensor)
            and baseline.shape[0] == input.shape[0]
            and baseline.shape[0] > 1
            else baseline
            for input, baseline in zip(inputs, baselines)
        )
    # update kwargs with expanded baseline
    kwargs["baselines"] = baselines
def _expand_and_update_additional_forward_args(n_samples: int, kwargs: dict):
    """Expand `kwargs["additional_forward_args"]` in place by `n_samples`
    using repeat_interleave semantics. No-op when absent or None."""
    if "additional_forward_args" not in kwargs:
        return
    additional_forward_args = kwargs["additional_forward_args"]
    additional_forward_args = _format_additional_forward_args(additional_forward_args)
    if additional_forward_args is None:
        return
    additional_forward_args = _expand_additional_forward_args(
        additional_forward_args,
        n_samples,
        expansion_type=ExpansionTypes.repeat_interleave,
    )
    # update kwargs with the expanded additional forward args
    kwargs["additional_forward_args"] = additional_forward_args
def _expand_and_update_target(n_samples: int, kwargs: dict):
    """Expand `kwargs["target"]` in place by `n_samples` using
    repeat_interleave semantics. No-op when the key is absent."""
    if "target" not in kwargs:
        return
    target = kwargs["target"]
    target = _expand_target(
        target, n_samples, expansion_type=ExpansionTypes.repeat_interleave
    )
    # update kwargs with the expanded target
    kwargs["target"] = target
@typing.overload
def _format_output(
    is_inputs_tuple: Literal[True], output: Tuple[Tensor, ...]
) -> Tuple[Tensor, ...]:
    ...


@typing.overload
def _format_output(
    is_inputs_tuple: Literal[False], output: Tuple[Tensor, ...]
) -> Tensor:
    ...


@typing.overload
def _format_output(
    is_inputs_tuple: bool, output: Tuple[Tensor, ...]
) -> Union[Tensor, Tuple[Tensor, ...]]:
    ...


def _format_output(
    is_inputs_tuple: bool, output: Tuple[Tensor, ...]
) -> Union[Tensor, Tuple[Tensor, ...]]:
    r"""
    In case input is a tensor and the output is returned in form of a
    tuple we take the first element of the output's tuple to match the
    same shape signatures of the inputs
    """
    assert isinstance(output, tuple), "Output must be in shape of a tuple"
    assert is_inputs_tuple or len(output) == 1, (
        "The input is a single tensor however the output isn't."
        "The number of output tensors is: {}".format(len(output))
    )
    return output if is_inputs_tuple else output[0]
@typing.overload
def _format_outputs(
    is_multiple_inputs: Literal[False], outputs: List[Tuple[Tensor, ...]]
) -> Union[Tensor, Tuple[Tensor, ...]]:
    ...


@typing.overload
def _format_outputs(
    is_multiple_inputs: Literal[True], outputs: List[Tuple[Tensor, ...]]
) -> List[Union[Tensor, Tuple[Tensor, ...]]]:
    ...


@typing.overload
def _format_outputs(
    is_multiple_inputs: bool, outputs: List[Tuple[Tensor, ...]]
) -> Union[Tensor, Tuple[Tensor, ...], List[Union[Tensor, Tuple[Tensor, ...]]]]:
    ...


def _format_outputs(
    is_multiple_inputs: bool, outputs: List[Tuple[Tensor, ...]]
) -> Union[Tensor, Tuple[Tensor, ...], List[Union[Tensor, Tuple[Tensor, ...]]]]:
    """Apply `_format_output` to each element of `outputs`, returning a list
    when multiple inputs were given and a single formatted output otherwise."""
    assert isinstance(outputs, list), "Outputs must be a list"
    assert is_multiple_inputs or len(outputs) == 1, (
        "outputs should contain multiple inputs or have a single output"
        f"however the number of outputs is: {len(outputs)}"
    )

    return (
        [_format_output(len(output) > 1, output) for output in outputs]
        if is_multiple_inputs
        else _format_output(len(outputs[0]) > 1, outputs[0])
    )
def _run_forward(
    forward_func: Callable,
    inputs: Union[Tensor, Tuple[Tensor, ...]],
    target: TargetType = None,
    additional_forward_args: Any = None,
) -> Tensor:
    """Call `forward_func` on the (tuple-formatted) inputs and select `target`
    from its output.

    A forward function declared with zero parameters is invoked without any
    arguments (its inputs are assumed to be captured elsewhere).
    """
    forward_func_args = signature(forward_func).parameters
    if len(forward_func_args) == 0:
        output = forward_func()
        return output if target is None else _select_targets(output, target)

    # make everything a tuple so that it is easy to unpack without
    # using if-statements
    inputs = _format_input(inputs)
    additional_forward_args = _format_additional_forward_args(additional_forward_args)

    output = forward_func(
        *(*inputs, *additional_forward_args)
        if additional_forward_args is not None
        else inputs
    )
    return _select_targets(output, target)
def _select_targets(output: Tensor, target: TargetType) -> Tensor:
    """Index `output` according to `target`.

    Accepted target forms: `None` (return output unchanged); an int or tuple
    selecting the same column(s) for every example; a 1-D tensor or list of
    ints with one index per example; a list of tuples with one multi-dim
    index per example.
    """
    if target is None:
        return output

    num_examples = output.shape[0]
    dims = len(output.shape)
    device = output.device
    if isinstance(target, (int, tuple)):
        return _verify_select_column(output, target)
    elif isinstance(target, torch.Tensor):
        # A one-element tensor behaves like a plain int column index.
        if torch.numel(target) == 1 and isinstance(target.item(), int):
            return _verify_select_column(output, cast(int, target.item()))
        elif len(target.shape) == 1 and torch.numel(target) == num_examples:
            assert dims == 2, "Output must be 2D to select tensor of targets."
            return torch.gather(output, 1, target.reshape(len(output), 1))
        else:
            raise AssertionError(
                "Tensor target dimension %r is not valid. %r"
                % (target.shape, output.shape)
            )
    elif isinstance(target, list):
        assert len(target) == num_examples, "Target list length does not match output!"
        if isinstance(target[0], int):
            assert dims == 2, "Output must be 2D to select tensor of targets."
            return torch.gather(
                output, 1, torch.tensor(target, device=device).reshape(len(output), 1)
            )
        elif isinstance(target[0], tuple):
            # One multi-dimensional index per example: prepend the example
            # index and stack the selected scalars.
            return torch.stack(
                [
                    output[(i,) + cast(Tuple, targ_elem)]
                    for i, targ_elem in enumerate(target)
                ]
            )
        else:
            raise AssertionError("Target element type in list is not valid.")
    else:
        raise AssertionError("Target type %r is not valid." % target)
def _contains_slice(target: Union[int, Tuple[Union[int, slice], ...]]) -> bool:
if isinstance(target, tuple):
for index in target:
if isinstance(index, slice):
return True
return False
return isinstance(target, slice)
def _verify_select_column(
output: Tensor, target: Union[int, Tuple[Union[int, slice], ...]]
) -> Tensor:
target = (target,) if isinstance(target, int) else target
assert (
len(target) <= len(output.shape) - 1
), "Cannot choose target column with output shape %r." % (output.shape,)
return output[(slice(None), *target)]
def _verify_select_neuron(
    layer_output: Tuple[Tensor, ...],
    selector: Union[int, Tuple[Union[int, slice], ...], Callable],
) -> Tensor:
    """Resolve a neuron `selector` against a layer's output tensors.

    Callable selectors receive the raw output (unwrapped when the tuple has a
    single tensor). Index/slice selectors require a single-tensor layer; slice
    selections are summed into one scalar per example.
    """
    if callable(selector):
        return selector(layer_output if len(layer_output) > 1 else layer_output[0])

    assert len(layer_output) == 1, (
        "Cannot select neuron index from layer with multiple tensors,"
        "consider providing a neuron selector function instead."
    )

    selected_neurons = _verify_select_column(layer_output[0], selector)
    if _contains_slice(selector):
        # A slice selects several neurons; aggregate them per example.
        return selected_neurons.reshape(selected_neurons.shape[0], -1).sum(1)
    return selected_neurons
def _extract_device(
    module: Module,
    hook_inputs: Union[None, Tensor, Tuple[Tensor, ...]],
    hook_outputs: Union[None, Tensor, Tuple[Tensor, ...]],
) -> device:
    """Infer the torch device of `module` from its forward-hook inputs,
    its hook outputs, or (as a last resort) its parameters.

    Raises RuntimeError when all three sources are empty.
    """
    params = list(module.parameters())
    if (
        (hook_inputs is None or len(hook_inputs) == 0)
        and (hook_outputs is None or len(hook_outputs) == 0)
        and len(params) == 0
    ):
        raise RuntimeError(
            """Unable to extract device information for the module
            {}. Both inputs and outputs to the forward hook and
            `module.parameters()` are empty.
            The reason that the inputs to the forward hook are empty
            could be due to the fact that the arguments to that
            module {} are all named and are passed as named
            variables to its forward function.
            """.format(
                module, module
            )
        )

    if hook_inputs is not None and len(hook_inputs) > 0:
        return hook_inputs[0].device
    if hook_outputs is not None and len(hook_outputs) > 0:
        return hook_outputs[0].device

    return params[0].device
def _reduce_list(
    val_list: List[TupleOrTensorOrBoolGeneric],
    red_func: Callable[[List], Any] = torch.cat,
) -> TupleOrTensorOrBoolGeneric:
    """
    Applies reduction function to given list. If each element in the list is
    a Tensor, applies reduction function to all elements of the list, and returns
    the output Tensor / value. If each element is a boolean, apply any method (or).
    If each element is a tuple, applies reduction
    function to corresponding elements of each tuple in the list, and returns
    tuple of reduction function outputs with length matching the length of tuple
    val_list[0]. It is assumed that all tuples in the list have the same length
    and red_func can be applied to all elements in each corresponding position.
    """
    assert len(val_list) > 0, "Cannot reduce empty list!"
    if isinstance(val_list[0], torch.Tensor):
        # Move every tensor onto the first element's device before reducing.
        first_device = val_list[0].device
        return red_func([elem.to(first_device) for elem in val_list])
    elif isinstance(val_list[0], bool):
        return any(val_list)
    elif isinstance(val_list[0], tuple):
        # Reduce position-wise across the tuples, recursing per position.
        final_out = []
        for i in range(len(val_list[0])):
            final_out.append(
                _reduce_list([val_elem[i] for val_elem in val_list], red_func)
            )
    else:
        raise AssertionError(
            "Elements to be reduced can only be"
            "either Tensors or tuples containing Tensors."
        )
    return tuple(final_out)
def _sort_key_list(
keys: List[device], device_ids: Union[None, List[int]] = None
) -> List[device]:
"""
Sorts list of torch devices (keys) by given index list, device_ids. If keys
contains only one device, then the list is returned unchanged. If keys
contains a device for which the id is not contained in device_ids, then
an error is returned. This method is used to identify the order of DataParallel
batched devices, given the device ID ordering.
"""
if len(keys) == 1:
return keys
id_dict: Dict[int, device] = {}
assert device_ids is not None, "Device IDs must be provided with multiple devices."
for key in keys:
if key.index in id_dict:
raise AssertionError("Duplicate CUDA Device ID identified in device list.")
id_dict[key.index] = key
out_list = [
id_dict[device_id]
for device_id in filter(lambda device_id: device_id in id_dict, device_ids)
]
assert len(out_list) == len(keys), "Given Device ID List does not match"
"devices with computed tensors."
return out_list
def _flatten_tensor_or_tuple(inp: TensorOrTupleOfTensorsGeneric) -> Tensor:
if isinstance(inp, Tensor):
return inp.flatten()
return torch.cat([single_inp.flatten() for single_inp in inp])
| 33.281145 | 88 | 0.647175 |
a563e0a497cf6a3f4e485360459bfdc04f0e6b70 | 350 | py | Python | my_code.py | athenian-computational-thinking/add-n-numbers-assignment-template | e4ba92e3b56b8f67ee654cc738b6f6fa1981825d | [
"Apache-2.0"
] | null | null | null | my_code.py | athenian-computational-thinking/add-n-numbers-assignment-template | e4ba92e3b56b8f67ee654cc738b6f6fa1981825d | [
"Apache-2.0"
] | null | null | null | my_code.py | athenian-computational-thinking/add-n-numbers-assignment-template | e4ba92e3b56b8f67ee654cc738b6f6fa1981825d | [
"Apache-2.0"
] | null | null | null | def add_n(n):
# add code here
return total
if __name__ == '__main__':
    # Test your code with this first
    # Change the argument to try different values
    print(add_n(4))

    # After you are satisfied with your results, use input() to prompt the user for a value:
    # num = int(input("Enter a number: "))
    # print(add_n(num))
| 21.875 | 92 | 0.642857 |
f4c52a1a37b3c0c489f5f34128df077e75d5d2a1 | 1,126 | py | Python | datadog_checks_dev/tests/tooling/commands/test_create.py | justinsousa/integrations-core | 223e337492a04de517bc35ec85ddf921108fd8d2 | [
"BSD-3-Clause"
] | 2 | 2019-05-28T03:48:29.000Z | 2019-07-05T07:05:58.000Z | datadog_checks_dev/tests/tooling/commands/test_create.py | justinsousa/integrations-core | 223e337492a04de517bc35ec85ddf921108fd8d2 | [
"BSD-3-Clause"
] | 4 | 2019-07-03T02:53:19.000Z | 2019-07-10T14:52:14.000Z | datadog_checks_dev/tests/tooling/commands/test_create.py | justinsousa/integrations-core | 223e337492a04de517bc35ec85ddf921108fd8d2 | [
"BSD-3-Clause"
] | 1 | 2020-01-15T16:58:51.000Z | 2020-01-15T16:58:51.000Z | # (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
import sys
from datadog_checks.dev import EnvVars, run_command
from datadog_checks.dev._env import TESTING_PLUGIN
from datadog_checks.dev.utils import chdir, remove_path
# Directory containing this test module.
HERE = os.path.dirname(os.path.abspath(__file__))
# Repository root: four directory levels above this test module.
CORE_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(HERE))))
def test_new_check_test():
    """End-to-end check scaffolding test: create a new check with the ddev
    `create` command, pip-install it, run its generated test suite, then
    uninstall and clean up the scaffolded directory."""
    check_path = os.path.join(CORE_ROOT, 'my_check')
    try:
        # Scaffold the check non-interactively (-q) into the repo root.
        run_command(
            [sys.executable, '-m', 'datadog_checks.dev', 'create', '-q', '-l', CORE_ROOT, 'my-check'],
            capture=True,
            check=True,
        )
        run_command([sys.executable, '-m', 'pip', 'install', check_path], capture=True, check=True)
        with chdir(check_path):
            # Drop the testing-plugin env var so the nested pytest run is clean.
            with EnvVars(ignore=[TESTING_PLUGIN]):
                run_command([sys.executable, '-m', 'pytest'], capture=True, check=True)
        run_command([sys.executable, '-m', 'pip', 'uninstall', '-y', 'my-check'], capture=True, check=True)
    finally:
        # Always remove the scaffolded check, even when a step above fails.
        remove_path(check_path)
ea4bf810f01e2033ab62dd16a4e00aafbbc32f1d | 1,118 | py | Python | app/config/setting.py | Allen7D/mini-shop-server | 5f3ddd5a4e5e99a1e005f11abc620cefff2493fc | [
"MIT"
] | 533 | 2019-01-19T07:12:00.000Z | 2022-03-30T09:08:46.000Z | app/config/setting.py | Alimazing/mini-shop-server | 5f3ddd5a4e5e99a1e005f11abc620cefff2493fc | [
"MIT"
] | 8 | 2019-06-10T03:58:54.000Z | 2021-06-10T08:21:19.000Z | app/config/setting.py | Allen7D/mini-shop-server | 5f3ddd5a4e5e99a1e005f11abc620cefff2493fc | [
"MIT"
] | 161 | 2019-01-24T04:06:21.000Z | 2022-03-14T06:05:16.000Z | # _*_ coding: utf-8 _*_
"""
Created by Allen7D on 2018/4/2.
Flask 对配置项的限制,你必须保证命名全都大写,才能注入到current_app.config中
"""
from app.libs.enums import ClientTypeEnum
__author__ = 'Allen7D'
'''
应用于Swagger的URL,会自动添加协议前缀(http://或者https://),因为会切换协议前缀
local_setting.py中 SERVER_URL = '127.0.0.1:8010'
'''
SERVER_URL = 'server.mini-shop.ivinetrue.com' # 外部(云服务器)地址
# 所有红图的路径
API_PATH = 'app.api'
# all api by module(version)
# 可以控制Swagger API文档的显示顺序
ALL_RP_API_LIST = \
['v1.token'] + \
['cms.admin', 'cms.group', 'cms.auth', 'cms.menu', 'cms.element', 'cms.route', 'cms.oper_log', 'cms.login_log'] + \
['cms.file'] + \
['v1.user', 'v1.address',
'v1.banner', 'v1.theme', 'v1.category', 'v1.product', 'v1.order', 'v1.pay'] + \
['cms.user', 'cms.article'] + \
['cms.order', 'cms.banner', 'cms.banner_item'] + \
['cms.notice', 'cms.dict_type', 'cms.dict', 'cms.config', 'cms.server']
# 所有endpoint的meta信息
EP_META = {}
EP_INFO_LIST = []
EP_INFOS = {}
# 分页配置
PAGE_DEFAULT = 1
SIZE_DEFAULT = 10
# 登录类型(站内)
CLINET_INNER_TYPES = (ClientTypeEnum.USERNAME, ClientTypeEnum.EMAIL, ClientTypeEnum.MOBILE)
| 27.95 | 119 | 0.660107 |
77f44f88ddb974a13969ee206d0a6ea0cca4b136 | 9,000 | py | Python | train.py | helloful/AlphaZero_Gomoku-master | 17361edcd8d6c11416d92c160466c79f07545487 | [
"MIT"
] | null | null | null | train.py | helloful/AlphaZero_Gomoku-master | 17361edcd8d6c11416d92c160466c79f07545487 | [
"MIT"
] | null | null | null | train.py | helloful/AlphaZero_Gomoku-master | 17361edcd8d6c11416d92c160466c79f07545487 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
An implementation of the training pipeline of AlphaZero for Gomoku
@author: Junxiao Song
"""
from __future__ import print_function
import random
import numpy as np
from collections import defaultdict, deque
from game import Board, Game
from mcts_pure import MCTSPlayer as MCTS_Pure
from mcts_alphaZero import MCTSPlayer
# from policy_value_net import PolicyValueNet # Theano and Lasagne
# from policy_value_net_pytorch import PolicyValueNet # Pytorch
from policy_value_net_tensorflow import PolicyValueNet # Tensorflow
# from policy_value_net_keras import PolicyValueNet # Keras
class TrainPipeline():
    """AlphaZero training pipeline for Gomoku: alternates self-play data
    collection, policy-value network updates, and periodic evaluation
    against a pure-MCTS opponent."""
    def __init__(self, init_model=None):
        # params of the board and the game
        self.board_width = 15
        self.board_height = 15
        self.n_in_row = 5
        self.board = Board(width=self.board_width,
                           height=self.board_height,
                           n_in_row=self.n_in_row)
        self.game = Game(self.board)
        # training params
        self.learn_rate = 2e-3
        self.lr_multiplier = 1.0  # adaptively adjust the learning rate based on KL
        self.temp = 1.0  # the temperature param
        self.n_playout = 800  # num of simulations for each move
        self.c_puct = 5
        self.buffer_size = 10000
        self.batch_size = 512  # mini-batch size for training
        self.data_buffer = deque(maxlen=self.buffer_size)  # stores augmented self-play (MCTS) samples
        self.play_batch_size = 1
        self.epochs = 5  # num of train_steps for each update  # original author's note: should be 400 or 800 here
        self.kl_targ = 0.02
        self.check_freq = 50
        self.game_batch_num = 1500
        self.best_win_ratio = 0.0
        # num of simulations used for the pure mcts, which is used as
        # the opponent to evaluate the trained policy
        self.pure_mcts_playout_num = 1000  # kept at 1000
        if init_model:
            # start training from an initial policy-value net
            self.policy_value_net = PolicyValueNet(self.board_width,
                                                   self.board_height,
                                                   model_file=init_model)
        else:
            # start training from a new policy-value net
            self.policy_value_net = PolicyValueNet(self.board_width,
                                                   self.board_height)
        self.mcts_player = MCTSPlayer(self.policy_value_net.policy_value_fn,
                                      c_puct=self.c_puct,
                                      n_playout=self.n_playout,
                                      is_selfplay=1)

    def get_equi_data(self, play_data):
        """Augment the data set by rotation and flipping.

        play_data: [(state, mcts_prob, winner_z), ..., ...] where `state` is
        the current board planes and `mcts_prob` the per-point move
        probabilities. Each sample yields 8 equivalent samples (4 rotations
        x optional horizontal flip).
        """
        extend_data = []
        for state, mcts_porb, winner in play_data:
            for i in [1, 2, 3, 4]:
                # rotate counterclockwise
                equi_state = np.array([np.rot90(s, i) for s in state])
                equi_mcts_prob = np.rot90(np.flipud(
                    mcts_porb.reshape(self.board_height, self.board_width)), i)
                extend_data.append((equi_state,
                                    np.flipud(equi_mcts_prob).flatten(),
                                    winner))
                # flip horizontally
                equi_state = np.array([np.fliplr(s) for s in equi_state])
                equi_mcts_prob = np.fliplr(equi_mcts_prob)
                extend_data.append((equi_state,
                                    np.flipud(equi_mcts_prob).flatten(),
                                    winner))
        return extend_data

    def collect_selfplay_data(self, n_games=1):
        """collect self-play data for training"""
        for i in range(n_games):
            # play_data is zip(states, mcts_probs, winners_z)
            winner, play_data = self.game.start_self_play(self.mcts_player,
                                                          temp=self.temp)
            play_data = list(play_data)[:]  # tuples pairing each player/board state with its label
            self.episode_len = len(play_data)  # number of moves (states) in this game
            # augment the data (rotations and flips)
            play_data = self.get_equi_data(play_data)
            self.data_buffer.extend(play_data)

    def policy_update(self):
        """update the policy-value net"""
        mini_batch = random.sample(self.data_buffer, self.batch_size)
        state_batch = [data[0] for data in mini_batch]
        mcts_probs_batch = [data[1] for data in mini_batch]
        winner_batch = [data[2] for data in mini_batch]
        old_probs, old_v = self.policy_value_net.policy_value(state_batch)
        for i in range(self.epochs):
            loss, entropy = self.policy_value_net.train_step(
                    state_batch,
                    mcts_probs_batch,
                    winner_batch,
                    self.learn_rate*self.lr_multiplier)
            new_probs, new_v = self.policy_value_net.policy_value(state_batch)
            # KL divergence between the pre-update and post-update policies.
            kl = np.mean(np.sum(old_probs * (
                    np.log(old_probs + 1e-10) - np.log(new_probs + 1e-10)),
                    axis=1)
            )
            if kl > self.kl_targ * 4:  # early stopping if D_KL diverges badly
                break
        # adaptively adjust the learning rate
        if kl > self.kl_targ * 2 and self.lr_multiplier > 0.1:
            self.lr_multiplier /= 1.5
        elif kl < self.kl_targ / 2 and self.lr_multiplier < 10:
            self.lr_multiplier *= 1.5

        # Explained variance: how well the value head predicts game outcomes.
        explained_var_old = (1 -
                             np.var(np.array(winner_batch) - old_v.flatten()) /
                             np.var(np.array(winner_batch)))
        explained_var_new = (1 -
                             np.var(np.array(winner_batch) - new_v.flatten()) /
                             np.var(np.array(winner_batch)))
        print(("kl:{:.5f},"
               "lr_multiplier:{:.3f},"
               "loss:{},"
               "entropy:{},"
               "explained_var_old:{:.3f},"
               "explained_var_new:{:.3f}"
               ).format(kl,
                        self.lr_multiplier,
                        loss,
                        entropy,
                        explained_var_old,
                        explained_var_new))
        return loss, entropy

    def policy_evaluate(self, n_games=10):
        """
        Evaluate the trained policy by playing against the pure MCTS player
        Note: this is only for monitoring the progress of training
        """
        current_mcts_player = MCTSPlayer(self.policy_value_net.policy_value_fn,
                                         c_puct=self.c_puct,
                                         n_playout=self.n_playout)
        pure_mcts_player = MCTS_Pure(c_puct=5,
                                     n_playout=self.pure_mcts_playout_num)
        win_cnt = defaultdict(int)
        for i in range(n_games):
            # Alternate who moves first between games.
            winner = self.game.start_play(current_mcts_player,
                                          pure_mcts_player,
                                          start_player=i % 2,
                                          is_shown=0)
            win_cnt[winner] += 1
        # Ties (winner == -1) count as half a win.
        win_ratio = 1.0*(win_cnt[1] + 0.5*win_cnt[-1]) / n_games
        print("num_playouts:{}, win: {}, lose: {}, tie:{}".format(
                self.pure_mcts_playout_num,
                win_cnt[1], win_cnt[2], win_cnt[-1]))
        return win_ratio

    def run(self):
        """run the training pipeline"""
        try:
            for i in range(self.game_batch_num):
                self.collect_selfplay_data(self.play_batch_size)
                print("batch i:{}, episode_len:{}".format(
                        i+1, self.episode_len))
                if len(self.data_buffer) > self.batch_size:  # enough samples collected to train?
                    loss, entropy = self.policy_update()
                # check the performance of the current model,
                # and save the model params
                if (i+1) % self.check_freq == 0:
                    print("current self-play batch: {}".format(i+1))
                    win_ratio = self.policy_evaluate()
                    self.policy_value_net.save_model('./current_policy_model')
                    if win_ratio > self.best_win_ratio:
                        print("New best policy!!!!!!!!")
                        self.best_win_ratio = win_ratio
                        # update the best_policy
                        self.policy_value_net.save_model('./best_policy_model')
                        if (self.best_win_ratio == 1.0 and
                                self.pure_mcts_playout_num < 5000):
                            # Opponent is being swept: strengthen it and reset.
                            self.pure_mcts_playout_num += 1000
                            self.best_win_ratio = 0.0
        except KeyboardInterrupt:
            print('\n\rquit')
if __name__ == '__main__':
    # Build the pipeline with a fresh policy-value net and start training.
    training_pipeline = TrainPipeline()
    training_pipeline.run()
| 45 | 92 | 0.544333 |
ce8d8cd4404dcccc5cbed645f24cb6c45dba7e81 | 3,237 | py | Python | examples/train_iseg2017_new.py | McMasterAI/RadiologyandAI-MedicalZooPytorch | 606a1654f08b8bae7c265608694d55fecc1001ed | [
"MIT"
] | 995 | 2019-07-23T11:34:22.000Z | 2022-03-30T21:10:52.000Z | examples/train_iseg2017_new.py | pyushkevich/MedicalZooPytorch | c6831d8ddebfbc1b33c04f8cec0d01c2ceb828f6 | [
"MIT"
] | 18 | 2020-04-27T03:38:22.000Z | 2022-01-18T20:55:20.000Z | examples/train_iseg2017_new.py | pyushkevich/MedicalZooPytorch | c6831d8ddebfbc1b33c04f8cec0d01c2ceb828f6 | [
"MIT"
] | 209 | 2019-08-21T13:41:13.000Z | 2022-03-30T08:01:52.000Z | # Python libraries
import argparse
import os
# Lib files
import lib.medloaders as medical_loaders
import lib.medzoo as medzoo
import lib.train as train
import lib.utils as utils
from lib.losses3D import DiceLoss
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
seed = 1777777
def main():
    """Entry point: parse args, build loaders/model/loss, and run training."""
    args = get_arguments()
    utils.reproducibility(args, seed)
    utils.make_dirs(args.save)

    training_generator, val_generator, full_volume, affine = medical_loaders.generate_datasets(args,
                                                                                               path='.././datasets')
    model, optimizer = medzoo.create_model(args)
    criterion = DiceLoss(classes=args.classes)
    if args.cuda:
        model = model.cuda()
        print("Model transferred in GPU.....")

    trainer = train.Trainer(args, model, criterion, optimizer, train_data_loader=training_generator,
                            valid_data_loader=val_generator, lr_scheduler=None)
    print("START TRAINING...")
    trainer.training()
def get_arguments():
    """Build and parse the command-line arguments for iSeg-2017 training,
    and derive the checkpoint save path from the chosen model/dataset."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--batchSz', type=int, default=4)
    parser.add_argument('--dataset_name', type=str, default="iseg2017")
    parser.add_argument('--dim', nargs="+", type=int, default=(64, 64, 64))
    parser.add_argument('--nEpochs', type=int, default=200)
    parser.add_argument('--classes', type=int, default=4)
    parser.add_argument('--samples_train', type=int, default=1024)
    parser.add_argument('--samples_val', type=int, default=128)
    parser.add_argument('--inChannels', type=int, default=2)
    parser.add_argument('--inModalities', type=int, default=2)
    parser.add_argument('--threshold', default=0.1, type=float)
    parser.add_argument('--terminal_show_freq', default=50)
    parser.add_argument('--augmentation', action='store_true', default=False)
    parser.add_argument('--normalization', default='full_volume_mean', type=str,
                        help='Tensor normalization: options ,max_min,',
                        choices=('max_min', 'full_volume_mean', 'brats', 'max', 'mean'))
    parser.add_argument('--split', default=0.8, type=float, help='Select percentage of training data(default: 0.8)')
    parser.add_argument('--lr', default=1e-2, type=float,
                        help='learning rate (default: 1e-3)')
    parser.add_argument('--cuda', action='store_true', default=True)
    parser.add_argument('--loadData', default=True)
    parser.add_argument('--resume', default='', type=str, metavar='PATH',
                        help='path to latest checkpoint (default: none)')
    parser.add_argument('--model', type=str, default='VNET',
                        choices=('VNET', 'VNET2', 'UNET3D', 'DENSENET1', 'DENSENET2', 'DENSENET3', 'HYPERDENSENET'))
    parser.add_argument('--opt', type=str, default='sgd',
                        choices=('sgd', 'adam', 'rmsprop'))
    parser.add_argument('--log_dir', type=str,
                        default='../runs/')

    args = parser.parse_args()

    # Checkpoint directory name encodes model, timestamp, and dataset.
    args.save = '../saved_models/' + args.model + '_checkpoints/' + args.model + '_{}_{}_'.format(
        utils.datestr(), args.dataset_name)
    return args
if __name__ == '__main__':
main()
| 42.038961 | 116 | 0.638554 |
cdb9abba889c12ada5738c27aa7b2aeece393648 | 2,128 | py | Python | tests/parsers/winreg_plugins/mountpoints.py | kiddinn/plaso | c7d955724cfeeea09ce6166609fd4d3c77a99fc6 | [
"Apache-2.0"
] | null | null | null | tests/parsers/winreg_plugins/mountpoints.py | kiddinn/plaso | c7d955724cfeeea09ce6166609fd4d3c77a99fc6 | [
"Apache-2.0"
] | null | null | null | tests/parsers/winreg_plugins/mountpoints.py | kiddinn/plaso | c7d955724cfeeea09ce6166609fd4d3c77a99fc6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the MountPoints2 Windows Registry plugin."""
import unittest
from plaso.parsers.winreg_plugins import mountpoints
from tests.parsers.winreg_plugins import test_lib
class MountPoints2PluginTest(test_lib.RegistryPluginTestCase):
  """Tests for the MountPoints2 Windows Registry plugin."""

  def testFilters(self):
    """Tests the FILTERS class attribute."""
    plugin = mountpoints.MountPoints2Plugin()

    # The plugin must match the MountPoints2 key and nothing else.
    key_path = (
        'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\'
        'Explorer\\MountPoints2')
    self._AssertFiltersOnKeyPath(plugin, key_path)

    self._AssertNotFiltersOnKeyPath(plugin, 'HKEY_LOCAL_MACHINE\\Bogus')

  def testProcess(self):
    """Tests the Process function."""
    test_file_entry = self._GetTestFileEntry(['NTUSER-WIN7.DAT'])
    key_path = (
        'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\'
        'Explorer\\MountPoints2')

    win_registry = self._GetWinRegistryFromFileEntry(test_file_entry)
    registry_key = win_registry.GetKeyByPath(key_path)

    plugin = mountpoints.MountPoints2Plugin()
    storage_writer = self._ParseKeyWithPlugin(
        registry_key, plugin, file_entry=test_file_entry)

    self.assertEqual(storage_writer.number_of_events, 5)
    self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
    self.assertEqual(storage_writer.number_of_recovery_warnings, 0)

    events = list(storage_writer.GetEvents())

    # Expected values of the first extracted remote-drive mount point event.
    expected_event_values = {
        'date_time': '2011-08-23 17:10:14.9609605',
        'data_type': 'windows:registry:mount_points2',
        'key_path': key_path,
        'label': 'Home Drive',
        'name': '##controller#home#nfury',
        # This should just be the plugin name, as we're invoking it directly,
        # and not through the parser.
        'parser': plugin.plugin_name,
        'server_name': 'controller',
        'share_name': '\\home\\nfury',
        'type': 'Remote Drive'}

    self.CheckEventValues(storage_writer, events[0], expected_event_values)
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| 33.25 | 77 | 0.710996 |
59b82942aff9e77c846ed3095ed8e44b609cd489 | 621 | py | Python | migrations/versions/612cb7862395_.py | jhwhite/ability-score-calc-api | cca22fd8e04299069af43f2c2a4473159271eb00 | [
"MIT"
] | null | null | null | migrations/versions/612cb7862395_.py | jhwhite/ability-score-calc-api | cca22fd8e04299069af43f2c2a4473159271eb00 | [
"MIT"
] | 1 | 2016-04-19T01:55:25.000Z | 2016-04-19T01:55:25.000Z | migrations/versions/612cb7862395_.py | jhwhite/ability-score-calc-api | cca22fd8e04299069af43f2c2a4473159271eb00 | [
"MIT"
] | null | null | null | """empty message
Revision ID: 612cb7862395
Revises: 9336b2f5a1c7
Create Date: 2016-04-18 20:07:41.402402
"""
# revision identifiers, used by Alembic.
revision = '612cb7862395'
down_revision = '9336b2f5a1c7'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Schema upgrade: add a nullable 'age' varchar(20) column to aging_effects."""
    age_column = sa.Column('age', sa.String(length=20), nullable=True)
    op.add_column('aging_effects', age_column)
def downgrade():
    """Schema downgrade: remove the 'age' column again.

    Reverses upgrade(); any data stored in the column is lost.
    """
    op.drop_column('aging_effects', 'age')
| 23 | 89 | 0.698873 |
96db3dd3377f00590d74f03db2559a2a1704ed94 | 3,520 | py | Python | examples/csc_readout.py | AdamVStephen/gatt-python | 20fc809c83655a88616e1fd523d993014f906c25 | [
"MIT"
] | null | null | null | examples/csc_readout.py | AdamVStephen/gatt-python | 20fc809c83655a88616e1fd523d993014f906c25 | [
"MIT"
] | null | null | null | examples/csc_readout.py | AdamVStephen/gatt-python | 20fc809c83655a88616e1fd523d993014f906c25 | [
"MIT"
] | null | null | null | import pdb
import gatt
csc_measurement_service_uuid = "00001816-0000-1000-8000-00805f9b34fb"
csc_measurement_characteristic_uuid = "00002a5b-0000-1000-8000-00805f9b34fb"
class AnyDeviceManager(gatt.DeviceManager):
    """Device manager that logs every BLE device seen during discovery."""

    def device_discovered(self, device):
        # Report the address and advertised name of each discovered device.
        details = (device.mac_address, device.alias())
        print("Discovered [%s] %s" % details)
class AnyDevice(gatt.Device):
    """GATT device that reads firmware info and subscribes to CSC updates.

    After the services are resolved, the device walks every service: it reads
    the firmware revision characteristic (0x2A26) of the Device Information
    service (0x180A) and enables notifications on the CSC Measurement
    characteristic (0x2A5B) of the CSC service.
    """

    def connect_succeeded(self):
        """Log a successful connection."""
        super().connect_succeeded()
        print("[%s] Connected" % self.mac_address)

    def connect_failed(self):
        """Log a failed connection attempt."""
        super().connect_failed()
        print("[%s] Connection failed" % self.mac_address)

    def disconnect_succeeded(self):
        """Log a clean disconnect."""
        super().disconnect_succeeded()
        print("[%s] Disconnected" % self.mac_address)

    def services_resolved(self):
        """Walk resolved services; read the firmware version and enable CSC notifications."""
        super().services_resolved()
        print("[%s] resolved services" % self.mac_address)

        device_information_service = None
        cycle_information_service = None
        csc_characteristic = None
        # BUGFIX: initialize explicitly. The previous code referenced this
        # name unconditionally after the loop, raising NameError on devices
        # without a Device Information service / 0x2A26 characteristic.
        firmware_version_characteristic = None

        for service in self.services:
            print("[%s] Service [%s]" % (self.mac_address, service.uuid))
            if service.uuid == '0000180a-0000-1000-8000-00805f9b34fb':
                device_information_service = service
                print(">> device_information_service uuid = %s" % device_information_service.uuid)
            elif service.uuid == csc_measurement_service_uuid:
                cycle_information_service = service
                print(">>>> cycle_information_service uuid = %s" % cycle_information_service.uuid)

            for characteristic in service.characteristics:
                if device_information_service is not None:
                    if service.uuid == device_information_service.uuid:
                        if characteristic.uuid == '00002a26-0000-1000-8000-00805f9b34fb':
                            firmware_version_characteristic = characteristic
                            print(">> firmware_version_characteristic uuid = %s" % firmware_version_characteristic.uuid)
                            firmware_version_characteristic.read_value()
                if cycle_information_service is not None:
                    if service.uuid == csc_measurement_service_uuid:
                        if characteristic.uuid == csc_measurement_characteristic_uuid:
                            csc_characteristic = characteristic
                            print(">>>>>> csc_characteristic uuid = %s" % csc_characteristic.uuid)
                            csc_characteristic.enable_notifications()
                print("[%s] Characteristic [%s]" % (self.mac_address, characteristic.uuid))

        # Re-read the firmware version only when the characteristic was found.
        if firmware_version_characteristic is not None:
            firmware_version_characteristic.read_value()

    def characteristic_value_updated(self, characteristic, value):
        """Dispatch characteristic updates: firmware string or raw CSC payload."""
        print("characteristic_value_updated callback for uuid %s" % characteristic.uuid)
        if characteristic.uuid == '00002a26-0000-1000-8000-00805f9b34fb':
            print("Firmware version: ", value.decode("utf-8"))
        elif characteristic.uuid == csc_measurement_characteristic_uuid:
            print("CSC update : %s" % value)
def service_demo(mac_address="DC:66:7D:AF:3A:08"):
    """Connect to a known CSC sensor and process its GATT events until stopped."""
    mgr = gatt.DeviceManager(adapter_name='hci0')
    sensor = AnyDevice(mac_address, manager=mgr)
    sensor.connect()
    # Blocks inside the manager's event loop.
    mgr.run()
def simple_demo():
    """Scan for nearby BLE devices, logging each discovery."""
    scan_manager = AnyDeviceManager(adapter_name="hci0")
    scan_manager.start_discovery()
    # Blocks inside the manager's event loop.
    scan_manager.run()
if __name__ == '__main__':
    # pdb.set_trace()  # uncomment to debug interactively before startup
    # Default demo connects to the known sensor; switch to simple_demo()
    # for a discovery-only scan.
    service_demo()
    # simple_demo()
| 46.315789 | 121 | 0.650568 |
6304fe02499e3d3c66cf0e80917f4b9e20770f56 | 4,138 | py | Python | spectralcluster/autotune.py | ericwxia/SpectralCluster | 3adca51e967c5b2407a1f92a630f299cb06e64ba | [
"Apache-2.0"
] | 327 | 2019-01-19T00:41:14.000Z | 2022-03-31T12:48:23.000Z | spectralcluster/autotune.py | ericwxia/SpectralCluster | 3adca51e967c5b2407a1f92a630f299cb06e64ba | [
"Apache-2.0"
] | 33 | 2019-01-25T02:21:18.000Z | 2022-01-30T20:28:15.000Z | spectralcluster/autotune.py | ericwxia/SpectralCluster | 3adca51e967c5b2407a1f92a630f299cb06e64ba | [
"Apache-2.0"
] | 65 | 2019-01-19T00:46:35.000Z | 2022-03-15T18:28:46.000Z | """Auto-tuning hyper-parameters."""
import numpy as np
MIN_SEARCH_STEP = 1e-04
class AutoTune:
  """Auto-tunes the p_percentile hyper-parameter for spectral clustering.

  This auto-tuning method is implemented based on this paper:
  Park, Tae Jin, et al. "Auto-tuning spectral clustering for speaker
  diarization using normalized maximum eigengap." IEEE Signal Processing
  Letters, 2019.
  """

  def __init__(self,
               p_percentile_min=0.60,
               p_percentile_max=0.95,
               init_search_step=0.01,
               search_level=1):
    """Initialization of the autotune arguments.

    Args:
      p_percentile_min: minimum value of p_percentile
      p_percentile_max: maximum value of p_percentile
      init_search_step: initial search step size for auto-tuning
      search_level: number of hierarchical (coarse-to-fine) search passes
    """
    self.p_percentile_min = p_percentile_min
    self.p_percentile_max = p_percentile_max
    self.search_step = init_search_step
    self.search_level = search_level

  def get_percentile_range(self):
    """Get the current percentile search range.

    Returns:
      A list of evenly spaced candidate p_percentile values covering
      [p_percentile_min, p_percentile_max] with the current step size.
    """
    # FIX: use the builtin int(). np.int was deprecated in NumPy 1.20 and
    # removed in NumPy 1.24, where the previous code raised AttributeError.
    num_steps = int(
        np.ceil(
            (self.p_percentile_max - self.p_percentile_min) / self.search_step))
    return list(
        np.linspace(self.p_percentile_min, self.p_percentile_max, num_steps))

  def update_percentile_range(self, p_percentile_min, p_percentile_max,
                              search_step):
    """Update the percentile search range and return the new candidates."""
    self.p_percentile_min = p_percentile_min
    self.p_percentile_max = p_percentile_max
    self.search_step = search_step
    return self.get_percentile_range()

  def tune(self, p_percentile_to_ratio):
    """Tune the hyper-parameter p_percentile.

    Use a proxy ratio of DER to tune the hyper-parameter p_percentile. It also
    performs some side work to do affinity refinement, eigen decomposition, and
    estimate the number of clusters.

    Args:
      p_percentile_to_ratio: a callable to compute the `ratio` given a
        `p_percentile` value; it must return a tuple of
        (ratio, eigenvectors, n_clusters)

    Returns:
      eigenvectors: sorted eigenvectors. numpy array of shape
        (n_samples, n_samples)
      n_clusters: number of clusters as an integer
      best_p_percentile: p_percentile value that minimizes the ratio
    """
    p_percentile_range = self.get_percentile_range()
    # Memoize ratios so refined passes skip already-evaluated candidates.
    searched = dict()

    for _ in range(self.search_level):
      min_ratio = np.inf
      for index, p_percentile in enumerate(p_percentile_range):
        if p_percentile in searched:
          continue
        # ratio is a proxy value of DER. We minimize this ratio
        # to find a good p_percentile
        ratio, eigenvectors_p, n_clusters_p = p_percentile_to_ratio(
            p_percentile)
        searched[p_percentile] = ratio
        if ratio < min_ratio:
          min_ratio = ratio
          eigenvectors = eigenvectors_p
          n_clusters = n_clusters_p
          best_p_percentile = p_percentile
          best_p_percentile_index = index

      # Stop when the range is degenerate or the step is already tiny.
      if not p_percentile_range or len(
          p_percentile_range) == 1 or self.search_step < MIN_SEARCH_STEP:
        break

      # Refine the search range around the best candidate found so far:
      # search again from `start_index` to `end_index`, which lie
      # `local_search_dist` grid points away from `best_p_percentile_index`,
      # with the step size halved.
      local_search_dist = max(2, len(p_percentile_range) // 8)
      start_index = max(0, best_p_percentile_index - local_search_dist)
      end_index = min(
          len(p_percentile_range) - 1,
          best_p_percentile_index + local_search_dist)
      p_percentile_min = p_percentile_range[start_index]
      p_percentile_max = p_percentile_range[end_index]
      self.search_step = self.search_step / 2
      p_percentile_range = self.update_percentile_range(p_percentile_min,
                                                        p_percentile_max,
                                                        self.search_step)
    return eigenvectors, n_clusters, best_p_percentile
| 38.672897 | 80 | 0.681005 |
50d02e4a8d601dbde6c210f4c971565703823c23 | 1,212 | py | Python | mwaa/mwaa-cdk/dags/timestream-backfill.py | 094459/time-series-and-data-lakes | 75540661764b8bca91debf625278985ceba7b5ca | [
"MIT"
] | 9 | 2021-12-03T17:51:42.000Z | 2022-03-17T08:45:05.000Z | mwaa/mwaa-cdk/dags/timestream-backfill.py | 094459/time-series-and-data-lakes | 75540661764b8bca91debf625278985ceba7b5ca | [
"MIT"
] | null | null | null | mwaa/mwaa-cdk/dags/timestream-backfill.py | 094459/time-series-and-data-lakes | 75540661764b8bca91debf625278985ceba7b5ca | [
"MIT"
] | 1 | 2021-12-12T16:00:31.000Z | 2021-12-12T16:00:31.000Z | #Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#SPDX-License-Identifier: Apache-2.0
# when running this DAG make sure you select the backfill command
# {"command": "backfill -s 2021-09-10 -e 2021-10-10 timestream-airflow-demo"} #mwaa 1.x
# {"command": "dags backfill -s 2021-09-10 -e 2021-10-10 timestream-airflow-demo"} #mwaa 2.x
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from datetime import timedelta,time,datetime
import os
# The DAG id is derived from this file's name ("timestream-backfill").
DAG_ID = os.path.basename(__file__).replace(".py", "")

# Defaults applied to every task of the DAG.
default_args = {
    "owner": "airflow",
    "start_date": datetime(2020, 9, 9),
    "depends_on_past": False,
    "email_on_failure": False,
    "email_on_retry": False,
    "email": "youremail@host.com",  # placeholder; unused while email_* are False
    "retries": 0,
    "retry_delay": timedelta(minutes=5)
}
# NOTE(review): the three imports below appear unused in this module —
# likely leftovers; kept to preserve behavior.
from time import sleep
from datetime import datetime
from random import random
# Single-task DAG: runs the Airflow CLI command supplied at trigger time
# via dag_run.conf["command"] (see the backfill examples at the top of
# this file).
with DAG(dag_id=DAG_ID, schedule_interval=None, default_args=default_args, catchup=False) as dag:
    bash_task_1 = BashOperator(
        task_id="cli_command",
        bash_command="airflow {{ dag_run.conf['command'] }}"
    )
    # Bare expression; with one task there is no dependency chaining needed.
    bash_task_1
9df3d1591314bfbb9d52b17a073013383d77afd6 | 2,850 | py | Python | dual_tape_ez/dual_tape_ez.py | cmcmarrow/dual_tape_ez | 4028c82ee43677b016cb106206412b86f812cc76 | [
"Apache-2.0"
] | null | null | null | dual_tape_ez/dual_tape_ez.py | cmcmarrow/dual_tape_ez | 4028c82ee43677b016cb106206412b86f812cc76 | [
"Apache-2.0"
] | null | null | null | dual_tape_ez/dual_tape_ez.py | cmcmarrow/dual_tape_ez | 4028c82ee43677b016cb106206412b86f812cc76 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2021 Charles McMarrow
"""
# built-in
import argparse
from typing import List, Generator, Optional, Tuple, Union
# dual_tape_ez
import dual_tape_ez as dte
from . import assembler
from . import error
from . import vm
from .log import enable_log
class DualTapeEzAPI(error.DualTapeEzError):
    """Errors raised by the dual_tape_ez console interface."""

    @classmethod
    def hit_timeout(cls):
        # Factory for the error raised when execution exceeds --timeout.
        return cls("Hit timeout!")
def dual_tape_ez() -> None:
    """
    info: Console Interface into dual_tape_ez.
    :return: None
    """
    try:
        arg_parser = argparse.ArgumentParser(description="dual_tape_ez")
        arg_parser.add_argument("file", type=str, action="store",
                                help="path to dual_tape_ez script")
        arg_parser.add_argument("-a", "--author", default=False,
                                action="store_true",
                                help="get author of dual_tape_ez")
        arg_parser.add_argument("-l", "--log", default=False,
                                action="store_true",
                                help="enables debug log")
        arg_parser.add_argument("-v", "--version", default=False,
                                action="store_true",
                                help="get version of dual_tape_ez")
        arg_parser.add_argument("--timeout", default=-1, type=int,
                                help="max number of instructions that can run")
        cli_args = arg_parser.parse_args()

        if cli_args.author:
            print(dte.AUTHOR)
        if cli_args.version:
            print(f"v{dte.MAJOR}.{dte.MINOR}.{dte.MAINTENANCE}")

        # Drive the VM one state at a time; abort once the instruction
        # budget is reached (the default timeout of -1 never triggers).
        for step, _ in enumerate(dual_tape_ez_api(file=cli_args.file, log=cli_args.log)):
            if step == cli_args.timeout:
                raise DualTapeEzAPI.hit_timeout()
    except error.DualTapeEzError as exc:
        print(f"\nERROR: {exc}", flush=True)
    except KeyboardInterrupt:
        print("\nKeyboard Interrupt!", flush=True)
def dual_tape_ez_api(file: str,
                     inputs: Optional[Union[Tuple[str, ...], List[str]]] = None,
                     sys_output: bool = True,
                     catch_output: bool = False,
                     log: bool = False) -> Generator[vm.VMState, None, None]:
    """
    info: API to dual_tape_ez
    :param: file: str
    :param: inputs: Optional[Union[Tuple[str, ...], List[str]]]
    :param: sys_output: bool
    :param: catch_output: bool
    :param: log: bool
    :return: Generator[vm.VMState, None, None]
    """
    if log:
        enable_log()
    # Assemble the script, then hand the three tape components to the VM.
    entry, instruction_tape, data_tape = assembler.assembler(file=file)
    return vm.vm(entry, instruction_tape, data_tape, inputs, sys_output, catch_output)
| 32.386364 | 83 | 0.525965 |
7e233e99dcb4d33f00b918bc73aa319c23cb8580 | 8,193 | py | Python | lasertool.py | jpirnay/gui-files-meerk40t | 1f22252162be5d2a4e788990f8070b75ada4a0c9 | [
"MIT"
] | null | null | null | lasertool.py | jpirnay/gui-files-meerk40t | 1f22252162be5d2a4e788990f8070b75ada4a0c9 | [
"MIT"
] | null | null | null | lasertool.py | jpirnay/gui-files-meerk40t | 1f22252162be5d2a4e788990f8070b75ada4a0c9 | [
"MIT"
] | null | null | null | # -*- coding: UTF-8 -*-
#
# generated by wxGlade 1.1.0pre on Mon Apr 11 12:41:47 2022
#
import wx
# begin wxGlade: dependencies
# end wxGlade
# begin wxGlade: extracode
# end wxGlade
class clsLasertools(wx.Panel):
    """wxGlade-generated panel with two notebook pages of laser placement tools.

    Page "Find center" captures three laser positions (A, B, C) on a circle's
    circumference; page "Place rectangle" captures two positions on one side
    of a rectangle plus one on the other side. The on_* handlers below are
    generated stubs that still need to be implemented.
    """

    def __init__(self, *args, **kwds):
        # begin wxGlade: clsLasertools.__init__
        # NOTE: generated by wxGlade — keep the begin/end markers intact so
        # regeneration does not clobber hand edits outside them.
        kwds["style"] = kwds.get("style", 0) | wx.TAB_TRAVERSAL
        wx.Panel.__init__(self, *args, **kwds)
        sizer_main = wx.BoxSizer(wx.VERTICAL)
        self.nbook_lasertools = wx.Notebook(self, wx.ID_ANY)
        sizer_main.Add(self.nbook_lasertools, 1, wx.EXPAND, 0)
        # --- Page 1: "Find center" (three points on a circle) ---
        self.nb_circle = wx.Panel(self.nbook_lasertools, wx.ID_ANY)
        self.nbook_lasertools.AddPage(self.nb_circle, "Find center")
        sizer_circle = wx.BoxSizer(wx.VERTICAL)
        sizer_9 = wx.BoxSizer(wx.HORIZONTAL)
        sizer_circle.Add(sizer_9, 0, wx.EXPAND, 0)
        sizer_10 = wx.BoxSizer(wx.VERTICAL)
        sizer_9.Add(sizer_10, 0, wx.EXPAND, 0)
        sizer_1 = wx.BoxSizer(wx.HORIZONTAL)
        sizer_10.Add(sizer_1, 1, wx.EXPAND, 0)
        label_1 = wx.StaticText(self.nb_circle, wx.ID_ANY, "A")
        label_1.SetMinSize((20, 23))
        sizer_1.Add(label_1, 0, 0, 0)
        self.btnSet1 = wx.Button(self.nb_circle, wx.ID_ANY, "Get")
        self.btnSet1.SetToolTip("Place the laser over the desired point and press...")
        sizer_1.Add(self.btnSet1, 0, 0, 0)
        lbl_pos_1 = wx.StaticText(self.nb_circle, wx.ID_ANY, "<empty>")
        sizer_1.Add(lbl_pos_1, 0, 0, 0)
        sizer_2 = wx.BoxSizer(wx.HORIZONTAL)
        sizer_10.Add(sizer_2, 1, wx.EXPAND, 0)
        label_2 = wx.StaticText(self.nb_circle, wx.ID_ANY, "B")
        label_2.SetMinSize((20, 23))
        sizer_2.Add(label_2, 0, 0, 0)
        self.btnSet2 = wx.Button(self.nb_circle, wx.ID_ANY, "Get")
        self.btnSet2.SetToolTip("Place the laser over the desired point and press...")
        sizer_2.Add(self.btnSet2, 0, 0, 0)
        lbl_pos_2 = wx.StaticText(self.nb_circle, wx.ID_ANY, "<empty>")
        sizer_2.Add(lbl_pos_2, 0, 0, 0)
        sizer_3 = wx.BoxSizer(wx.HORIZONTAL)
        sizer_10.Add(sizer_3, 1, wx.EXPAND, 0)
        label_3 = wx.StaticText(self.nb_circle, wx.ID_ANY, "C")
        label_3.SetMinSize((20, 23))
        sizer_3.Add(label_3, 0, 0, 0)
        self.btnSet3 = wx.Button(self.nb_circle, wx.ID_ANY, "Get")
        self.btnSet3.SetToolTip("Place the laser over the desired point and press...")
        sizer_3.Add(self.btnSet3, 0, 0, 0)
        lbl_pos_3 = wx.StaticText(self.nb_circle, wx.ID_ANY, "<empty>")
        sizer_3.Add(lbl_pos_3, 0, 0, 0)
        # NOTE(review): absolute user-specific bitmap path — will fail on any
        # other machine; consider bundling the image with the application.
        image1 = wx.StaticBitmap(self.nb_circle, wx.ID_ANY, wx.Bitmap("C:\\Users\\MCHPIRNAYJ\\OneDrive - FUJITSU\\Desktop\\circle.png", wx.BITMAP_TYPE_ANY))
        image1.SetToolTip("Instruction: place the laser on three points on the circumference of the circle on the bed and confirm the position by clicking on the buttons below.\nMK will find the center for you and place the laser above it or will recreate the circle for futher processing.")
        sizer_9.Add(image1, 1, 0, 0)
        sizer_4 = wx.BoxSizer(wx.HORIZONTAL)
        sizer_circle.Add(sizer_4, 0, wx.EXPAND, 0)
        self.btn_move = wx.Button(self.nb_circle, wx.ID_ANY, "Move to center")
        sizer_4.Add(self.btn_move, 0, 0, 0)
        self.button_1 = wx.Button(self.nb_circle, wx.ID_ANY, "Create circle")
        sizer_4.Add(self.button_1, 0, 0, 0)
        self.check_reference_1 = wx.CheckBox(self.nb_circle, wx.ID_ANY, "Make reference")
        self.check_reference_1.SetMinSize((-1, 23))
        sizer_4.Add(self.check_reference_1, 0, 0, 0)
        # --- Page 2: "Place rectangle" (two points on one side + one opposite) ---
        self.nb_rectangle = wx.Panel(self.nbook_lasertools, wx.ID_ANY)
        self.nbook_lasertools.AddPage(self.nb_rectangle, "Place rectangle")
        sizer_rectangle = wx.BoxSizer(wx.VERTICAL)
        sizer_11 = wx.BoxSizer(wx.HORIZONTAL)
        sizer_rectangle.Add(sizer_11, 0, wx.EXPAND, 0)
        sizer_12 = wx.BoxSizer(wx.VERTICAL)
        sizer_11.Add(sizer_12, 0, wx.EXPAND, 0)
        sizer_5 = wx.BoxSizer(wx.HORIZONTAL)
        sizer_12.Add(sizer_5, 1, wx.EXPAND, 0)
        label_4 = wx.StaticText(self.nb_rectangle, wx.ID_ANY, "Side A 1")
        label_4.SetMinSize((45, 23))
        sizer_5.Add(label_4, 0, 0, 0)
        self.btnSet1_copy = wx.Button(self.nb_rectangle, wx.ID_ANY, "Get")
        self.btnSet1_copy.SetToolTip("Place the laser over the desired point and press...")
        sizer_5.Add(self.btnSet1_copy, 0, 0, 0)
        lbl_pos_4 = wx.StaticText(self.nb_rectangle, wx.ID_ANY, "<empty>")
        sizer_5.Add(lbl_pos_4, 0, 0, 0)
        sizer_6 = wx.BoxSizer(wx.HORIZONTAL)
        sizer_12.Add(sizer_6, 1, wx.EXPAND, 0)
        label_5 = wx.StaticText(self.nb_rectangle, wx.ID_ANY, "Side A 2")
        label_5.SetMinSize((45, 23))
        sizer_6.Add(label_5, 0, 0, 0)
        self.btnSet2_copy = wx.Button(self.nb_rectangle, wx.ID_ANY, "Get")
        self.btnSet2_copy.SetToolTip("Place the laser over the desired point and press...")
        sizer_6.Add(self.btnSet2_copy, 0, 0, 0)
        lbl_pos_5 = wx.StaticText(self.nb_rectangle, wx.ID_ANY, "<empty>")
        sizer_6.Add(lbl_pos_5, 0, 0, 0)
        sizer_7 = wx.BoxSizer(wx.HORIZONTAL)
        sizer_12.Add(sizer_7, 1, wx.EXPAND, 0)
        label_6 = wx.StaticText(self.nb_rectangle, wx.ID_ANY, "Side B")
        label_6.SetMinSize((45, 23))
        sizer_7.Add(label_6, 0, 0, 0)
        self.btnSet3_copy = wx.Button(self.nb_rectangle, wx.ID_ANY, "Get")
        self.btnSet3_copy.SetToolTip("Place the laser over the desired point and press...")
        sizer_7.Add(self.btnSet3_copy, 0, 0, 0)
        lbl_pos_6 = wx.StaticText(self.nb_rectangle, wx.ID_ANY, "<empty>")
        sizer_7.Add(lbl_pos_6, 0, 0, 0)
        # NOTE(review): same absolute-path concern as image1 above.
        image2 = wx.StaticBitmap(self.nb_rectangle, wx.ID_ANY, wx.Bitmap("C:\\Users\\MCHPIRNAYJ\\OneDrive - FUJITSU\\Desktop\\rectangle.png", wx.BITMAP_TYPE_ANY))
        image2.SetToolTip("Instruction: place the laser on two points of one side of a rectangle on the bed and confirm the position by clicking on the buttons below. Then choose one point on the other side of the corner.\nMK will create a rectangle for you for futher processing.")
        sizer_11.Add(image2, 1, 0, 0)
        sizer_8 = wx.BoxSizer(wx.HORIZONTAL)
        sizer_rectangle.Add(sizer_8, 0, wx.EXPAND, 0)
        self.btn_move_copy = wx.Button(self.nb_rectangle, wx.ID_ANY, "Create rectangle")
        sizer_8.Add(self.btn_move_copy, 0, 0, 0)
        self.check_reference_2 = wx.CheckBox(self.nb_rectangle, wx.ID_ANY, "Make reference")
        self.check_reference_2.SetMinSize((-1, 23))
        sizer_8.Add(self.check_reference_2, 0, 0, 0)
        self.nb_rectangle.SetSizer(sizer_rectangle)
        self.nb_circle.SetSizer(sizer_circle)
        self.SetSizer(sizer_main)
        sizer_main.Fit(self)
        self.Layout()
        # Event bindings: all six "Get" buttons share one handler.
        self.btnSet1.Bind(wx.EVT_BUTTON, self.on_click_get)
        self.btnSet2.Bind(wx.EVT_BUTTON, self.on_click_get)
        self.btnSet3.Bind(wx.EVT_BUTTON, self.on_click_get)
        self.btn_move.Bind(wx.EVT_BUTTON, self.on_bt_move_center)
        self.button_1.Bind(wx.EVT_BUTTON, self.on_btn_create_circle)
        self.btnSet1_copy.Bind(wx.EVT_BUTTON, self.on_click_get)
        self.btnSet2_copy.Bind(wx.EVT_BUTTON, self.on_click_get)
        self.btnSet3_copy.Bind(wx.EVT_BUTTON, self.on_click_get)
        self.btn_move_copy.Bind(wx.EVT_BUTTON, self.on_bt_move_center)
        # end wxGlade

    def on_click_get(self, event):  # wxGlade: clsLasertools.<event_handler>
        # Stub: should capture the current laser position for the clicked point.
        print("Event handler 'on_click_get' not implemented!")
        event.Skip()

    def on_bt_move_center(self, event):  # wxGlade: clsLasertools.<event_handler>
        # Stub: should move the laser to the computed center / create the shape.
        print("Event handler 'on_bt_move_center' not implemented!")
        event.Skip()

    def on_btn_create_circle(self, event):  # wxGlade: clsLasertools.<event_handler>
        # Stub: should create a circle element from the three captured points.
        print("Event handler 'on_btn_create_circle' not implemented!")
        event.Skip()

# end of class clsLasertools
| 41.80102 | 292 | 0.646894 |
fd21a7de107bc1a07a308fd965350e4dcf26e924 | 1,234 | py | Python | tests/numpy/matmul_delegation_test.py | targetsm/dace | 297b12804a334df8cc6fad5250d5fb0cce20dc6e | [
"BSD-3-Clause"
] | null | null | null | tests/numpy/matmul_delegation_test.py | targetsm/dace | 297b12804a334df8cc6fad5250d5fb0cce20dc6e | [
"BSD-3-Clause"
] | null | null | null | tests/numpy/matmul_delegation_test.py | targetsm/dace | 297b12804a334df8cc6fad5250d5fb0cce20dc6e | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2019-2020 ETH Zurich and the DaCe authors. All rights reserved.
import numpy as np
import dace
N, K, M = 24, 12, 48
@dace.program
def matmul_delegation_test(matrix0: dace.float32[N, K],
                           matrix1: dace.float32[K, M],
                           vector0: dace.float32[M], vector1: dace.float32[N],
                           result: dace.float32[1]):
    # Chain of matmuls with shrinking rank:
    # GEMM -> GEMV -> dot product
    # (N,K)@(K,M) -> (N,M)@(M,) -> (N,)@(N,) -> scalar, stored into result[0].
    result[0] = ((matrix0 @ matrix1) @ vector0) @ vector1
if __name__ == '__main__':
    # Random single-precision operands matching the program's declared shapes.
    matrix0 = np.random.rand(N, K).astype(np.float32)
    matrix1 = np.random.rand(K, M).astype(np.float32)
    vector0 = np.random.rand(M).astype(np.float32)
    vector1 = np.random.rand(N).astype(np.float32)
    result = np.empty([1], dtype=np.float32)

    matmul_delegation_test(
        matrix0=matrix0,
        matrix1=matrix1,
        vector0=vector0,
        vector1=vector1,
        result=result)

    # Reference value computed directly with NumPy.
    reference = ((matrix0 @ matrix1) @ vector0) @ vector1
    # BUGFIX: compare the magnitude of the relative error. The previous signed
    # comparison let arbitrarily large negative deviations pass silently.
    rel_error = np.abs(result - reference) / np.abs(reference)
    if rel_error > 1e-5:
        raise ValueError("Result mismatch: {} (expected {})".format(
            result, reference))
    else:
        print("Linear algebra multiplication delegation test verified.")
| 32.473684 | 78 | 0.613452 |
455b36728b70ddee669ac2b247448f30d2f1013f | 515 | py | Python | pipescaler/core/sorter.py | KarlTDebiec/PipeScaler | b990ece8f3dd2c3506c226ed871871997fc57beb | [
"BSD-3-Clause"
] | 1 | 2022-02-07T03:47:53.000Z | 2022-02-07T03:47:53.000Z | pipescaler/core/sorter.py | KarlTDebiec/PipeScaler | b990ece8f3dd2c3506c226ed871871997fc57beb | [
"BSD-3-Clause"
] | 49 | 2022-01-17T15:16:22.000Z | 2022-03-28T03:00:39.000Z | pipescaler/core/sorter.py | KarlTDebiec/PipeScaler | b990ece8f3dd2c3506c226ed871871997fc57beb | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# pipescaler/core/sorter.py
#
# Copyright (C) 2020-2021 Karl T Debiec
# All rights reserved.
#
# This software may be modified and distributed under the terms of the
# BSD license.
from __future__ import annotations
from abc import ABC
from pipescaler.core.stage import Stage
class Sorter(Stage, ABC):
    """Abstract base class for sorter stages."""

    @property
    def inlets(self):
        # Sorters consume exactly one inlet.
        return ["inlet"]

    def __call__(self, infile: str) -> str:
        # Concrete sorters decide how *infile* is routed.
        raise NotImplementedError()
| 20.6 | 72 | 0.683495 |
4afaba4ce292a938627179964132a28df32548d5 | 709 | py | Python | samples/invoice/search_pages.py | Hey-Marvelous/PayPal-Python-SDK | c56069f8971877e0632c1d58fdf240de9320ece3 | [
"BSD-Source-Code"
] | 653 | 2015-01-07T21:40:11.000Z | 2022-03-07T18:25:27.000Z | samples/invoice/search_pages.py | sucithrashanu/PayPal-Python-SDK | a129190dc105941b11340659c028385cdceea192 | [
"BSD-Source-Code"
] | 214 | 2015-01-05T15:42:18.000Z | 2020-05-05T13:15:12.000Z | samples/invoice/search_pages.py | sucithrashanu/PayPal-Python-SDK | a129190dc105941b11340659c028385cdceea192 | [
"BSD-Source-Code"
] | 300 | 2015-01-05T07:29:23.000Z | 2022-03-22T14:25:02.000Z | from paypalrestsdk import Invoice
import logging
import json
logging.basicConfig(level=logging.INFO)

my_invoices = []
page_size = 2

for page_index in range(3):
    # NOTE(review): 'page' advances by page_size per iteration — confirm this
    # matches the invoice-search API's paging semantics.
    search_options = {
        "start_invoice_date": "2016-01-01 PST",
        "end_invoice_date": "2030-03-26 PST",
        "status": ["SENT", "DRAFT", "PAID", "CANCELLED"],
        "total_count_required": True,
        "page": page_index * page_size,
        "page_size": page_size
    }
    search_result = Invoice.search(search_options)
    if not search_result.success():  # success() returns True or False
        print(search_result.error)
    else:
        # Keep only the invoice ids from this page.
        my_invoices.extend(invoice.id for invoice in search_result.invoices)

print(json.dumps(my_invoices, sort_keys=False, indent=4))
| 22.870968 | 57 | 0.624824 |
b60301bf340cafccf362e196b4472354b0556a54 | 38,058 | py | Python | kornia/enhance/adjust.py | mitmul/kornia | b0fe723269e635f4652c182297a6db3146f4929b | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2021-03-24T12:43:02.000Z | 2021-03-24T12:43:08.000Z | kornia/enhance/adjust.py | cceyda/kornia | 810e5189408cf97e81449e4a11454d803038a3f6 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | kornia/enhance/adjust.py | cceyda/kornia | 810e5189408cf97e81449e4a11454d803038a3f6 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | from typing import Union, Optional
from math import pi
import torch
import torch.nn as nn
import torch.nn.functional as F
from kornia.color.hsv import rgb_to_hsv, hsv_to_rgb
from kornia.utils.image import _to_bchw, _to_bcdhw
from kornia.utils.helpers import _torch_histc_cast
__all__ = [
"adjust_brightness",
"adjust_contrast",
"adjust_gamma",
"adjust_hue",
"adjust_saturation",
"adjust_hue_raw",
"adjust_saturation_raw",
"solarize",
"equalize",
"equalize3d",
"posterize",
"sharpness",
"invert",
"AdjustBrightness",
"AdjustContrast",
"AdjustGamma",
"AdjustHue",
"AdjustSaturation",
"Invert",
]
def adjust_saturation_raw(input: torch.Tensor, saturation_factor: Union[float, torch.Tensor]) -> torch.Tensor:
    r"""Scale the saturation channel of an image that is already in HSV format."""
    if not isinstance(input, torch.Tensor):
        raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")

    if not isinstance(saturation_factor, (float, torch.Tensor)):
        raise TypeError(
            f"The saturation_factor should be a float number or torch.Tensor."
            f"Got {type(saturation_factor)}"
        )

    if isinstance(saturation_factor, float):
        saturation_factor = torch.as_tensor(saturation_factor)
    factor = saturation_factor.to(device=input.device, dtype=input.dtype)

    # Append singleton dimensions so a possibly batched factor broadcasts
    # against every non-batch dimension of the input.
    factor = factor.reshape(factor.shape + (1,) * (input.dim() - 1))

    # Scale saturation only; hue and value pass through untouched.
    hue, sat, val = torch.chunk(input, chunks=3, dim=-3)
    sat_scaled: torch.Tensor = torch.clamp(sat * factor, min=0, max=1)
    return torch.cat([hue, sat_scaled, val], dim=-3)
def adjust_saturation(input: torch.Tensor, saturation_factor: Union[float, torch.Tensor]) -> torch.Tensor:
    r"""Adjust color saturation of an image.

    The input image is expected to be an RGB image in the range of [0, 1].

    Args:
        input (torch.Tensor): Image/Tensor to be adjusted in the shape of :math:`(*, 3, H, W)`.
        saturation_factor (Union[float, torch.Tensor]): How much to adjust the saturation. 0 will give a black
            and white image, 1 will give the original image while 2 will enhance the saturation by a factor of 2.

    Return:
        torch.Tensor: Adjusted image in the shape of :math:`(*, 3, H, W)`.

    Example:
        >>> x = torch.ones(1, 3, 3, 3)
        >>> adjust_saturation(x, 2.)
        tensor([[[[1., 1., 1.],
                  [1., 1., 1.],
                  [1., 1., 1.]],
        <BLANKLINE>
                 [[1., 1., 1.],
                  [1., 1., 1.],
                  [1., 1., 1.]],
        <BLANKLINE>
                 [[1., 1., 1.],
                  [1., 1., 1.],
                  [1., 1., 1.]]]])

        >>> x = torch.ones(2, 3, 3, 3)
        >>> y = torch.ones(2)
        >>> out = adjust_saturation(x, y)
        >>> torch.nn.functional.mse_loss(x, out)
        tensor(0.)
    """
    # Round-trip through HSV: scale the saturation there, then convert back.
    return hsv_to_rgb(adjust_saturation_raw(rgb_to_hsv(input), saturation_factor))
def adjust_hue_raw(input: torch.Tensor, hue_factor: Union[float, torch.Tensor]) -> torch.Tensor:
    r"""Shift the hue channel of an image that is already in HSV format."""
    if not isinstance(input, torch.Tensor):
        raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")

    if not isinstance(hue_factor, (float, torch.Tensor)):
        raise TypeError(
            f"The hue_factor should be a float number or torch.Tensor in the range between"
            f" [-PI, PI]. Got {type(hue_factor)}"
        )

    if isinstance(hue_factor, float):
        hue_factor = torch.as_tensor(hue_factor)
    shift = hue_factor.to(input.device, input.dtype)

    # Append singleton dimensions so a possibly batched shift broadcasts
    # against every non-batch dimension of the input.
    shift = shift.reshape(shift.shape + (1,) * (input.dim() - 1))

    hue, sat, val = torch.chunk(input, chunks=3, dim=-3)
    # Shift hue and wrap modulo 2*pi (torch.fmod keeps the operand's sign).
    hue_shifted: torch.Tensor = torch.fmod(hue + shift, 2 * pi)
    return torch.cat([hue_shifted, sat, val], dim=-3)
def adjust_hue(input: torch.Tensor, hue_factor: Union[float, torch.Tensor]) -> torch.Tensor:
    r"""Adjust hue of an image.

    The input image is expected to be an RGB image in the range of [0, 1].

    Args:
        input (torch.Tensor): Image to be adjusted in the shape of :math:`(*, 3, H, W)`.
        hue_factor (Union[float, torch.Tensor]): How much to shift the hue channel. Should be in [-PI, PI]. PI
            and -PI give complete reversal of hue channel in HSV space in positive and negative
            direction respectively. 0 means no shift. Therefore, both -PI and PI will give an
            image with complementary colors while 0 gives the original image.

    Return:
        torch.Tensor: Adjusted image in the shape of :math:`(*, 3, H, W)`.

    Example:
        >>> x = torch.ones(1, 3, 3, 3)
        >>> adjust_hue(x, 3.141516)
        tensor([[[[1., 1., 1.],
                  [1., 1., 1.],
                  [1., 1., 1.]],
        <BLANKLINE>
                 [[1., 1., 1.],
                  [1., 1., 1.],
                  [1., 1., 1.]],
        <BLANKLINE>
                 [[1., 1., 1.],
                  [1., 1., 1.],
                  [1., 1., 1.]]]])

        >>> x = torch.ones(2, 3, 3, 3)
        >>> y = torch.ones(2) * 3.141516
        >>> adjust_hue(x, y).shape
        torch.Size([2, 3, 3, 3])
    """
    # Round-trip through HSV: shift the hue channel there, then convert back.
    return hsv_to_rgb(adjust_hue_raw(rgb_to_hsv(input), hue_factor))
def adjust_gamma(
    input: torch.Tensor, gamma: Union[float, torch.Tensor], gain: Union[float, torch.Tensor] = 1.0
) -> torch.Tensor:
    r"""Perform gamma correction on an image.

    Computes ``gain * input ** gamma`` and clamps the result to [0, 1]. The
    input image is expected to be in the range of [0, 1].

    Args:
        input (torch.Tensor): image to be adjusted in the shape of :math:`(*, N)`.
        gamma (Union[float, torch.Tensor]): non-negative real exponent. Values
            larger than 1 darken the shadows, values smaller than 1 lighten
            dark regions.
        gain (Union[float, torch.Tensor], optional): the constant multiplier.
            Default 1.

    Return:
        torch.Tensor: adjusted image in the shape of :math:`(*, N)`.

    Raises:
        TypeError: if ``input`` is not a tensor or ``gamma``/``gain`` are not
            floats or tensors.
        ValueError: if ``gamma`` or ``gain`` contain negative values.

    Example:
        >>> x = torch.ones(2, 5, 3, 3)
        >>> y1 = torch.ones(2) * 1.0
        >>> y2 = torch.ones(2) * 2.0
        >>> adjust_gamma(x, y1, y2).shape
        torch.Size([2, 5, 3, 3])
    """
    if not isinstance(input, torch.Tensor):
        raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")
    if not isinstance(gamma, (float, torch.Tensor)):
        raise TypeError(f"The gamma should be a positive float or torch.Tensor. Got {type(gamma)}")
    if not isinstance(gain, (float, torch.Tensor)):
        raise TypeError(f"The gain should be a positive float or torch.Tensor. Got {type(gain)}")

    def _to_tensor(value: Union[float, torch.Tensor]) -> torch.Tensor:
        # Promote plain floats to 1-element tensors on the input's device/dtype.
        wrapped = torch.tensor([value]) if isinstance(value, float) else value
        return wrapped.to(input.device).to(input.dtype)

    gamma = _to_tensor(gamma)
    gain = _to_tensor(gain)

    if (gamma < 0.0).any():
        raise ValueError(f"Gamma must be non-negative. Got {gamma}")
    if (gain < 0.0).any():
        raise ValueError(f"Gain must be non-negative. Got {gain}")

    # Append one singleton dim per non-batch axis so the factors broadcast.
    for _ in input.shape[1:]:
        gamma = gamma.unsqueeze(-1)
        gain = gain.unsqueeze(-1)

    # Gamma-correct and truncate back into the valid pixel range.
    return torch.clamp(gain * input.pow(gamma), 0.0, 1.0)
def adjust_contrast(input: torch.Tensor, contrast_factor: Union[float, torch.Tensor]) -> torch.Tensor:
    r"""Adjust the contrast of an image by scaling its pixel values.

    This implementation aligns OpenCV, not PIL, so the output differs from
    TorchVision. The input image is expected to be in the range of [0, 1].

    Args:
        input (torch.Tensor): image to be adjusted in the shape of :math:`(*, N)`.
        contrast_factor (Union[float, torch.Tensor]): contrast adjust factor per
            element in the batch. 0 yields a completely black image, 1 keeps
            the input unchanged, any other non-negative value scales the
            brightness by that factor.

    Return:
        torch.Tensor: adjusted image in the shape of :math:`(*, N)`.

    Example:
        >>> x = torch.ones(2, 5, 3, 3)
        >>> y = torch.ones(2)
        >>> adjust_contrast(x, y).shape
        torch.Size([2, 5, 3, 3])
    """
    if not isinstance(input, torch.Tensor):
        raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")
    if not isinstance(contrast_factor, (float, torch.Tensor)):
        raise TypeError(f"The factor should be either a float or torch.Tensor. Got {type(contrast_factor)}")

    # Promote floats to a 1-element tensor and align device/dtype with input.
    if isinstance(contrast_factor, float):
        contrast_factor = torch.tensor([contrast_factor])
    contrast_factor = contrast_factor.to(input.device).to(input.dtype)

    if (contrast_factor < 0).any():
        raise ValueError(f"Contrast factor must be non-negative. Got {contrast_factor}")

    # Broadcast the per-sample factor over the remaining image dimensions.
    for _ in range(input.dim() - 1):
        contrast_factor = contrast_factor.unsqueeze(-1)

    # Scale each channel, then truncate back into the valid pixel range.
    return torch.clamp(input * contrast_factor, 0.0, 1.0)
def adjust_brightness(input: torch.Tensor, brightness_factor: Union[float, torch.Tensor]) -> torch.Tensor:
    r"""Adjust the brightness of an image by adding a constant offset.

    This implementation aligns OpenCV, not PIL, so the output differs from
    TorchVision. The input image is expected to be in the range of [0, 1].

    Args:
        input (torch.Tensor): image to be adjusted in the shape of :math:`(*, N)`.
        brightness_factor (Union[float, torch.Tensor]): brightness adjust factor
            per element in the batch. 0 leaves the input unchanged; any other
            value shifts the brightness by that amount.

    Return:
        torch.Tensor: adjusted image in the shape of :math:`(*, N)`.

    Example:
        >>> x = torch.ones(2, 5, 3, 3)
        >>> y = torch.ones(2)
        >>> adjust_brightness(x, y).shape
        torch.Size([2, 5, 3, 3])
    """
    if not isinstance(input, torch.Tensor):
        raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")
    if not isinstance(brightness_factor, (float, torch.Tensor)):
        raise TypeError(f"The factor should be either a float or torch.Tensor. Got {type(brightness_factor)}")

    # Promote floats to a 1-element tensor and align device/dtype with input.
    if isinstance(brightness_factor, float):
        brightness_factor = torch.tensor([brightness_factor])
    brightness_factor = brightness_factor.to(input.device).to(input.dtype)

    # Broadcast the per-sample factor over the remaining image dimensions.
    for _ in range(input.dim() - 1):
        brightness_factor = brightness_factor.unsqueeze(-1)

    # Shift each channel, then truncate back into the valid pixel range.
    return torch.clamp(input + brightness_factor, 0.0, 1.0)
def _solarize(input: torch.Tensor, thresholds: Union[float, torch.Tensor] = 0.5) -> torch.Tensor:
r"""For each pixel in the image, select the pixel if the value is less than the threshold.
Otherwise, subtract 1.0 from the pixel.
Args:
input (torch.Tensor): image or batched images to solarize.
thresholds (float or torch.Tensor): solarize thresholds.
If int or one element tensor, input will be solarized across the whole batch.
If 1-d tensor, input will be solarized element-wise, len(thresholds) == len(input).
Returns:
torch.Tensor: Solarized images.
"""
if not isinstance(input, torch.Tensor):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")
if not isinstance(thresholds, (float, torch.Tensor)):
raise TypeError(f"The factor should be either a float or torch.Tensor. " f"Got {type(thresholds)}")
if isinstance(thresholds, torch.Tensor) and len(thresholds.shape) != 0:
assert (
input.size(0) == len(thresholds) and len(thresholds.shape) == 1
), f"threshholds must be a 1-d vector of shape ({input.size(0)},). Got {thresholds}"
# TODO: I am not happy about this line, but no easy to do batch-wise operation
thresholds = thresholds.to(input.device).to(input.dtype)
thresholds = torch.stack([x.expand(*input.shape[1:]) for x in thresholds])
return torch.where(input < thresholds, input, 1.0 - input)
def solarize(
    input: torch.Tensor,
    thresholds: Union[float, torch.Tensor] = 0.5,
    additions: Optional[Union[float, torch.Tensor]] = None,
) -> torch.Tensor:
    r"""Solarize images, optionally shifting pixel values first.

    When ``additions`` is given, it is added to the image and the result is
    clamped to [0, 1] before solarizing; its value must lie in (-0.5, 0.5).
    Afterwards every pixel at or above its threshold is inverted.

    Args:
        input (torch.Tensor): image tensor with shapes like :math:`(B, C, H, W)` to solarize.
        thresholds (float or torch.Tensor): solarize thresholds.
            A scalar or one element tensor applies to the whole batch; a 1-d
            tensor applies per batch element (``len(thresholds) == len(input)``).
        additions (optional, float or torch.Tensor): value between -0.5 and 0.5
            added before solarizing. Default None (no addition). A scalar or
            one element tensor applies batch-wide; a 1-d tensor applies per
            element (``len(additions) == len(input)``).

    Returns:
        torch.Tensor: The solarized images with shape :math:`(B, C, H, W)`.

    Example:
        >>> x = torch.rand(2, 4, 3, 3)
        >>> thresholds = torch.tensor([0.8, 0.7])
        >>> solarize(x, thresholds).shape
        torch.Size([2, 4, 3, 3])
    """
    if not isinstance(input, torch.Tensor):
        raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")
    if not isinstance(thresholds, (float, torch.Tensor)):
        raise TypeError(f"The factor should be either a float or torch.Tensor. Got {type(thresholds)}")

    if isinstance(thresholds, float):
        thresholds = torch.tensor(thresholds)

    if additions is not None:
        if not isinstance(additions, (float, torch.Tensor)):
            raise TypeError(f"The factor should be either a float or torch.Tensor. Got {type(additions)}")
        if isinstance(additions, float):
            additions = torch.tensor(additions)
        assert torch.all(
            (additions < 0.5) * (additions > -0.5)
        ), f"The value of 'addition' is between -0.5 and 0.5. Got {additions}."
        if additions.dim() != 0:
            assert (
                input.size(0) == len(additions) and len(additions.shape) == 1
            ), f"additions must be a 1-d vector of shape ({input.size(0)},). Got {additions}"
            # TODO: I am not happy about this line, but no easy to do batch-wise operation
            additions = additions.to(input.device).to(input.dtype)
            # Expand each per-sample addition to the full image shape.
            additions = torch.stack([a.expand(*input.shape[1:]) for a in additions])
        # Pre-shift the pixel values and keep them in the valid range.
        input = (input + additions).clamp(0.0, 1.0)

    return _solarize(input, thresholds)
def posterize(input: torch.Tensor, bits: Union[int, torch.Tensor]) -> torch.Tensor:
    r"""Reduce the number of bits for each color channel.

    Non-differentiable function, torch.uint8 involved.

    Args:
        input (torch.Tensor): image tensor with shapes like :math:`(B, C, H, W)` to posterize.
        bits (int or torch.Tensor): number of high bits to keep. Must be in range [0, 8].
            If int or one element tensor, input will be posterized by this bits.
            If 1-d tensor, input will be posterized element-wisely, len(bits) == input.shape[0].
            If n-d tensor, input will be posterized element-channel-wisely,
            bits.shape == input.shape[:len(bits.shape)].

    Returns:
        torch.Tensor: Image with reduced color channels with shape :math:`(B, C, H, W)`.

    Example:
        >>> x = torch.rand(1, 6, 3, 3)
        >>> out = posterize(x, bits=8)
        >>> torch.testing.assert_allclose(x, out)
        >>> x = torch.rand(2, 6, 3, 3)
        >>> bits = torch.tensor([0, 8])
        >>> posterize(x, bits).shape
        torch.Size([2, 6, 3, 3])
    """
    if not isinstance(input, torch.Tensor):
        raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")
    if not isinstance(bits, (int, torch.Tensor)):
        raise TypeError(f"bits type is not an int or torch.Tensor. Got {type(bits)}")
    if isinstance(bits, int):
        bits = torch.tensor(bits)
    # TODO: find a proper way to check bound values on tensors.
    # if not torch.all((bits >= 0) * (bits <= 8)) and bits.dtype == torch.int:
    #     raise ValueError(f"bits must be integers within range [0, 8]. Got {bits}.")

    # TODO: Make a differentiable version
    # Current version:
    # Ref: https://github.com/open-mmlab/mmcv/pull/132/files#diff-309c9320c7f71bedffe89a70ccff7f3bR19
    # Ref: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py#L222
    # Potential approach: implementing kornia.LUT with floating points
    # https://github.com/albumentations-team/albumentations/blob/master/albumentations/augmentations/functional.py#L472
    def _left_shift(input: torch.Tensor, shift: torch.Tensor):
        # Move the kept high bits back to their original positions.
        return ((input * 255).to(torch.uint8) * (2 ** shift)).to(input.dtype) / 255.0

    def _right_shift(input: torch.Tensor, shift: torch.Tensor):
        # BUGFIX: use floor division (`//`) so the low bits are actually
        # discarded here. The previous float `/` only produced correct results
        # because `_left_shift` re-truncated through its uint8 cast; the helper
        # now implements a true right shift on its own.
        return ((input * 255).to(torch.uint8) // (2 ** shift)).to(input.dtype) / 255.0

    def _posterize_one(input: torch.Tensor, bits: torch.Tensor):
        # Single bits value condition: fast paths for the trivial cases.
        if bits == 0:
            return torch.zeros_like(input)
        if bits == 8:
            return input.clone()
        # Keep the `bits` highest bits by shifting out the lowest (8 - bits).
        bits = 8 - bits
        return _left_shift(_right_shift(input, bits), bits)

    # Scalar (or one-element) bits: posterize the whole input at once.
    if len(bits.shape) == 0 or (len(bits.shape) == 1 and len(bits) == 1):
        return _posterize_one(input, bits)

    res = []
    if len(bits.shape) == 1:
        # Per-sample bits: one value per batch element.
        input = _to_bchw(input)
        assert (
            bits.shape[0] == input.shape[0]
        ), f"Batch size must be equal between bits and input. Got {bits.shape[0]}, {input.shape[0]}."
        for i in range(input.shape[0]):
            res.append(_posterize_one(input[i], bits[i]))
        return torch.stack(res, dim=0)

    # n-d bits: one value per leading input dimension (e.g. per batch+channel).
    assert (
        bits.shape == input.shape[: len(bits.shape)]
    ), f"Batch and channel must be equal between bits and input. Got {bits.shape}, {input.shape[:len(bits.shape)]}."
    _input = input.view(-1, *input.shape[len(bits.shape) :])
    _bits = bits.flatten()
    for i in range(input.shape[0]):
        res.append(_posterize_one(_input[i], _bits[i]))
    return torch.stack(res, dim=0).reshape(*input.shape)
def sharpness(input: torch.Tensor, factor: Union[float, torch.Tensor]) -> torch.Tensor:
    r"""Apply sharpness to the input tensor.
    Implemented Sharpness function from PIL using torch ops. This implementation refers to:
    https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py#L326
    Args:
        input (torch.Tensor): image tensor with shapes like (C, H, W) or (B, C, H, W) to sharpen.
        factor (float or torch.Tensor): factor of sharpness strength. Must be above 0.
            If float or one element tensor, input will be sharpened by the same factor across the whole batch.
            If 1-d tensor, input will be sharpened element-wisely, len(factor) == len(input).
    Returns:
        torch.Tensor: Sharpened image or images with shape :math:`(B, C, H, W)`.
    Example:
        >>> _ = torch.manual_seed(0)
        >>> sharpness(torch.randn(1, 1, 5, 5), 0.5)
        tensor([[[[-1.1258, -1.1524, -0.2506, -0.4339, 0.8487],
        [ 0.6920, -0.1580, -1.0576, 0.1765, -0.1577],
        [ 1.4437, 0.1998, 0.1799, 0.6588, -0.1435],
        [-0.1116, -0.3068, 0.8381, 1.3477, 0.0537],
        [ 0.6181, -0.4128, -0.8411, -2.3160, -0.1023]]]])
    """
    # Normalize the input to a 4-dim (B, C, H, W) layout.
    input = _to_bchw(input)
    # Promote a plain float factor to a tensor on the input's device/dtype.
    if not isinstance(factor, torch.Tensor):
        factor = torch.tensor(factor, device=input.device, dtype=input.dtype)
    # A non-scalar factor must provide exactly one value per batch element.
    if len(factor.size()) != 0:
        assert factor.shape == torch.Size([input.size(0)]), (
            "Input batch size shall match with factor size if factor is not a 0-dim tensor. "
            f"Got {input.size(0)} and {factor.shape}"
        )
    # 3x3 smoothing kernel (weights 1 everywhere, 5 at the center; they sum to
    # 13, hence the division), replicated once per input channel.
    kernel = (
        torch.tensor([[1, 1, 1], [1, 5, 1], [1, 1, 1]], dtype=input.dtype, device=input.device)
        .view(1, 1, 3, 3)
        .repeat(input.size(1), 1, 1, 1)
        / 13
    )
    # This shall be equivalent to depthwise conv2d:
    # Ref: https://discuss.pytorch.org/t/depthwise-and-separable-convolutions-in-pytorch/7315/2
    # No padding, so the smoothed (degenerate) image is 2 px smaller per axis.
    degenerate = torch.nn.functional.conv2d(input, kernel, bias=None, stride=1, groups=input.size(1))
    degenerate = torch.clamp(degenerate, 0.0, 1.0)
    # For the borders of the resulting image, fill in the values of the original image.
    # Pad the all-ones mask with zeros: interior pixels pick the smoothed
    # values, the 1-px border falls back to the original input.
    mask = torch.ones_like(degenerate)
    padded_mask = torch.nn.functional.pad(mask, [1, 1, 1, 1])
    padded_degenerate = torch.nn.functional.pad(degenerate, [1, 1, 1, 1])
    result = torch.where(padded_mask == 1, padded_degenerate, input)
    # Blend smoothed and original: factor 0 yields the smoothed image, factor 1
    # the original; factors above 1 extrapolate, i.e. sharpen.
    if len(factor.size()) == 0:
        return _blend_one(result, input, factor)
    return torch.stack([_blend_one(result[i], input[i], factor[i]) for i in range(len(factor))])
def _blend_one(input1: torch.Tensor, input2: torch.Tensor, factor: torch.Tensor) -> torch.Tensor:
r"""Blend two images into one.
Args:
input1 (torch.Tensor): image tensor with shapes like :math:`(H, W)` or :math:`(D, H, W)`.
input2 (torch.Tensor): image tensor with shapes like :math:`(H, W)` or :math:`(D, H, W)`.
factor (torch.Tensor): factor 0-dim tensor.
Returns:
torch.Tensor: image tensor with the batch in the zero position.
"""
assert isinstance(input1, torch.Tensor), f"`input1` must be a tensor. Got {input1}."
assert isinstance(input2, torch.Tensor), f"`input1` must be a tensor. Got {input2}."
if isinstance(factor, torch.Tensor):
assert len(factor.size()) == 0, f"Factor shall be a float or single element tensor. Got {factor}."
if factor == 0.0:
return input1
if factor == 1.0:
return input2
diff = (input2 - input1) * factor
res = input1 + diff
if factor > 0.0 and factor < 1.0:
return res
return torch.clamp(res, 0, 1)
def _build_lut(histo, step):
# Compute the cumulative sum, shifting by step // 2
# and then normalization by step.
lut = (torch.cumsum(histo, 0) + (step // 2)) // step
# Shift lut, prepending with 0.
lut = torch.cat([torch.zeros(1, device=lut.device, dtype=lut.dtype), lut[:-1]])
# Clip the counts to be in range. This is done
# in the C code for image.point.
return torch.clamp(lut, 0, 255)
# Code taken from: https://github.com/pytorch/vision/pull/796
def _scale_channel(im: torch.Tensor) -> torch.Tensor:
    r"""Equalize a single image channel through its intensity histogram.

    Args:
        im (torch.Tensor): channel tensor with shapes like :math:`(H, W)` or
            :math:`(D, H, W)`, values expected in [0, 1].

    Returns:
        torch.Tensor: equalized channel of the same shape, values in [0, 1].

    Raises:
        ValueError: if the values fall outside [0, 1] (beyond float tolerance).
        TypeError: if the tensor is not 2- or 3-dimensional.
    """
    lo = im.min()
    hi = im.max()
    if lo.item() < 0.0 and not torch.isclose(lo, torch.tensor(0.0, dtype=lo.dtype)):
        raise ValueError(f"Values in the input tensor must greater or equal to 0.0. Found {lo.item()}.")
    if hi.item() > 1.0 and not torch.isclose(hi, torch.tensor(1.0, dtype=hi.dtype)):
        raise ValueError(f"Values in the input tensor must lower or equal to 1.0. Found {hi.item()}.")
    rank = im.dim()
    if rank not in (2, 3):
        raise TypeError(f"Input tensor must have 2 or 3 dimensions. Found {rank}.")

    # Work on the 8-bit intensity scale.
    im = im * 255
    histo = _torch_histc_cast(im, bins=256, min=0, max=255)
    # Only the populated bins participate in the step computation.
    nonzero_histo = histo[histo != 0].reshape(-1)
    step = (nonzero_histo.sum() - nonzero_histo[-1]) // 255
    if step == 0:
        # Degenerate histogram (single intensity): return the image unchanged.
        result = im
    else:
        # Index the LUT with the flattened pixel values (2-d indexing is not
        # supported by gather), then restore the original shape.
        result = torch.gather(_build_lut(histo, step), 0, im.flatten().long())
        result = result.reshape_as(im)
    return result / 255.0
def equalize(input: torch.Tensor) -> torch.Tensor:
    r"""Apply histogram equalization on the input tensor.

    Implements the Equalize function from PIL using PyTorch ops based on uint8
    format:
    https://github.com/tensorflow/tpu/blob/5f71c12a020403f863434e96982a840578fdd127/models/official/efficientnet/autoaugment.py#L355

    Each channel of each image is equalized independently.

    Args:
        input (torch.Tensor): image tensor to equalize with shapes like
            :math:`(C, H, W)` or :math:`(B, C, H, W)`.

    Returns:
        torch.Tensor: Equalized image tensor with shape :math:`(B, C, H, W)`.

    Example:
        >>> _ = torch.manual_seed(0)
        >>> x = torch.rand(1, 2, 3, 3)
        >>> equalize(x).shape
        torch.Size([1, 2, 3, 3])
    """
    input = _to_bchw(input)
    # Equalize every channel of every image independently, then reassemble.
    equalized = [torch.stack([_scale_channel(channel) for channel in image]) for image in input]
    return torch.stack(equalized)
def equalize3d(input: torch.Tensor) -> torch.Tensor:
    r"""Equalize the values of a 3D volumetric tensor.

    Implements the Equalize function for a sequence of images using PyTorch ops
    based on uint8 format:
    https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py#L352

    Each channel of each volume is equalized independently.

    Args:
        input (torch.Tensor): volume tensor with shapes like :math:`(C, D, H, W)`
            or :math:`(B, C, D, H, W)` to equalize.

    Returns:
        torch.Tensor: Equalized volume with shape :math:`(B, C, D, H, W)`.
    """
    input = _to_bcdhw(input)
    # Equalize every channel of every volume independently, then reassemble.
    equalized = [torch.stack([_scale_channel(channel) for channel in volume]) for volume in input]
    return torch.stack(equalized)
def invert(input: torch.Tensor, max_val: torch.Tensor = torch.tensor(1.0)) -> torch.Tensor:
    r"""Invert the values of an input tensor against a maximum value.

    Args:
        input (torch.Tensor): the input tensor to invert, of arbitrary shape.
        max_val (torch.Tensor): the expected maximum value in the input tensor.
            Its shape must match the input or at least broadcast against it.
            Default: 1.0.

    Returns:
        torch.Tensor: ``max_val - input``, same shape as the input.

    Example:
        >>> img = torch.rand(1, 2, 4, 4)
        >>> invert(img).shape
        torch.Size([1, 2, 4, 4])
        >>> img = 255. * torch.rand(1, 2, 3, 4, 4)
        >>> invert(img, torch.tensor(255.)).shape
        torch.Size([1, 2, 3, 4, 4])
    """
    assert isinstance(input, torch.Tensor), f"Input is not a torch.Tensor. Got: {type(input)}"
    assert isinstance(max_val, torch.Tensor), f"max_val is not a torch.Tensor. Got: {type(max_val)}"
    # Match the input dtype so the subtraction broadcasts cleanly.
    ceiling = max_val.to(input.dtype)
    return ceiling - input
class AdjustSaturation(nn.Module):
    r"""Module wrapper around the functional saturation adjustment.

    The input image is expected to be an RGB image in the range of [0, 1].

    Args:
        saturation_factor (Union[float, torch.Tensor]): how much to adjust the
            saturation. 0 gives a black and white image, 1 keeps the original
            image and 2 enhances the saturation by a factor of 2.

    Shape:
        - Input: image tensor of shape :math:`(*, 3, H, W)`.
        - Output: adjusted image of shape :math:`(*, 3, H, W)`.

    Example:
        >>> x = torch.ones(2, 3, 3, 3)
        >>> y = torch.ones(2)
        >>> out = AdjustSaturation(y)(x)
        >>> torch.nn.functional.mse_loss(x, out)
        tensor(0.)
    """

    def __init__(self, saturation_factor: Union[float, torch.Tensor]) -> None:
        super().__init__()
        # Stored as-is; conversion/validation happens in the functional call.
        self.saturation_factor: Union[float, torch.Tensor] = saturation_factor

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        # Delegate to the functional API.
        return adjust_saturation(input, self.saturation_factor)
class AdjustHue(nn.Module):
    r"""Module wrapper around the functional hue adjustment.

    The input image is expected to be an RGB image in the range of [0, 1].

    Args:
        hue_factor (Union[float, torch.Tensor]): how much to shift the hue
            channel, in [-PI, PI]. Both -PI and PI give a complete reversal of
            the hue channel in HSV space (complementary colors), while 0 leaves
            the image unchanged.

    Shape:
        - Input: image tensor of shape :math:`(*, 3, H, W)`.
        - Output: adjusted image of shape :math:`(*, 3, H, W)`.

    Example:
        >>> x = torch.ones(2, 3, 3, 3)
        >>> y = torch.ones(2) * 3.141516
        >>> AdjustHue(y)(x).shape
        torch.Size([2, 3, 3, 3])
    """

    def __init__(self, hue_factor: Union[float, torch.Tensor]) -> None:
        super().__init__()
        # Stored as-is; conversion/validation happens in the functional call.
        self.hue_factor: Union[float, torch.Tensor] = hue_factor

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        # Delegate to the functional API.
        return adjust_hue(input, self.hue_factor)
class AdjustGamma(nn.Module):
    r"""Module wrapper around the functional gamma correction.

    The input image is expected to be in the range of [0, 1].

    Args:
        gamma (Union[float, torch.Tensor]): non-negative real exponent. Values
            larger than 1 darken the shadows, values smaller than 1 lighten
            dark regions.
        gain (Union[float, torch.Tensor], optional): the constant multiplier.
            Default 1.

    Shape:
        - Input: image tensor of shape :math:`(*, N)`.
        - Output: adjusted image of shape :math:`(*, N)`.

    Example:
        >>> x = torch.ones(2, 5, 3, 3)
        >>> y1 = torch.ones(2) * 1.0
        >>> y2 = torch.ones(2) * 2.0
        >>> AdjustGamma(y1, y2)(x).shape
        torch.Size([2, 5, 3, 3])
    """

    def __init__(self, gamma: Union[float, torch.Tensor], gain: Union[float, torch.Tensor] = 1.0) -> None:
        super().__init__()
        # Stored as-is; conversion/validation happens in the functional call.
        self.gamma: Union[float, torch.Tensor] = gamma
        self.gain: Union[float, torch.Tensor] = gain

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        # Delegate to the functional API.
        return adjust_gamma(input, self.gamma, self.gain)
class AdjustContrast(nn.Module):
    r"""Module wrapper around the functional contrast adjustment.

    This implementation aligns OpenCV, not PIL, so the output differs from
    TorchVision. The input image is expected to be in the range of [0, 1].

    Args:
        contrast_factor (Union[float, torch.Tensor]): contrast adjust factor
            per element in the batch. 0 yields a completely black image, 1
            keeps the input unchanged, any other non-negative value scales the
            brightness by that factor.

    Shape:
        - Input: image tensor of shape :math:`(*, N)`.
        - Output: adjusted image of shape :math:`(*, N)`.

    Example:
        >>> x = torch.ones(2, 5, 3, 3)
        >>> y = torch.ones(2)
        >>> AdjustContrast(y)(x).shape
        torch.Size([2, 5, 3, 3])
    """

    def __init__(self, contrast_factor: Union[float, torch.Tensor]) -> None:
        super().__init__()
        # Stored as-is; conversion/validation happens in the functional call.
        self.contrast_factor: Union[float, torch.Tensor] = contrast_factor

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        # Delegate to the functional API.
        return adjust_contrast(input, self.contrast_factor)
class AdjustBrightness(nn.Module):
    r"""Module wrapper around the functional brightness adjustment.

    This implementation aligns OpenCV, not PIL, so the output differs from
    TorchVision. The input image is expected to be in the range of [0, 1].

    Args:
        brightness_factor (Union[float, torch.Tensor]): brightness adjust
            factor per element in the batch. 0 leaves the input unchanged; any
            other value shifts the brightness by that amount.

    Shape:
        - Input: image tensor of shape :math:`(*, N)`.
        - Output: adjusted image of shape :math:`(*, N)`.

    Example:
        >>> x = torch.ones(2, 5, 3, 3)
        >>> y = torch.ones(2)
        >>> AdjustBrightness(y)(x).shape
        torch.Size([2, 5, 3, 3])
    """

    def __init__(self, brightness_factor: Union[float, torch.Tensor]) -> None:
        super().__init__()
        # Stored as-is; conversion/validation happens in the functional call.
        self.brightness_factor: Union[float, torch.Tensor] = brightness_factor

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        # Delegate to the functional API.
        return adjust_brightness(input, self.brightness_factor)
class Invert(nn.Module):
    r"""Module wrapper that inverts an input tensor against a maximum value.

    Args:
        max_val (torch.Tensor): the expected maximum value in the input tensor.
            Its shape must match the input or at least broadcast against it.
            Default: 1.0.

    Example:
        >>> img = torch.rand(1, 2, 4, 4)
        >>> Invert()(img).shape
        torch.Size([1, 2, 4, 4])
        >>> img = 255. * torch.rand(1, 2, 3, 4, 4)
        >>> Invert(torch.tensor(255.))(img).shape
        torch.Size([1, 2, 3, 4, 4])
    """

    def __init__(self, max_val: torch.Tensor = torch.tensor(1.0)) -> None:
        super().__init__()
        if isinstance(max_val, nn.Parameter):
            # Parameters are registered automatically through attribute access.
            self.max_val = max_val
        else:
            # Register as a buffer so it follows the module across
            # device/dtype moves without being a trainable parameter.
            self.register_buffer("max_val", max_val)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        # Delegate to the functional API.
        return invert(input, self.max_val)
| 37.755952 | 132 | 0.606101 |
b2d071fe6103517635db7532a7bb05d01f8d3629 | 2,999 | py | Python | testQA.py | yachee-gupta/Factoid-based-Question-Answer-Chatbot | edb3f58f5ecd417942b62142f73dbdc2b78c7fdc | [
"MIT"
] | 45 | 2018-04-04T04:45:17.000Z | 2021-11-29T22:21:37.000Z | testQA.py | yachee-gupta/Factoid-based-Question-Answer-Chatbot | edb3f58f5ecd417942b62142f73dbdc2b78c7fdc | [
"MIT"
] | 4 | 2018-10-01T17:12:57.000Z | 2018-10-02T01:20:25.000Z | testQA.py | yachee-gupta/Factoid-based-Question-Answer-Chatbot | edb3f58f5ecd417942b62142f73dbdc2b78c7fdc | [
"MIT"
] | 18 | 2018-04-04T04:45:45.000Z | 2022-02-16T07:07:34.000Z | from DocumentRetrievalModel import DocumentRetrievalModel
from ProcessedQuestion import ProcessedQuestion
from StanfordDataset import StanfordDataset
from nltk.tokenize import word_tokenize
import csv
import math
def computeAccuracy(topic, sd=None):
    """Evaluate the QA pipeline on every question of one SQuAD topic.

    Builds a DocumentRetrievalModel over the topic's paragraphs, runs every
    question from the dev set through it, and counts a retrieval as correct
    when any token of the retrieved answer appears in one of the reference
    answers.

    Args:
        topic: title of the SQuAD topic to evaluate.
        sd: optional StanfordDataset instance to reuse. A fresh one is loaded
            when omitted. (BUGFIX: the default used to be ``StanfordDataset()``
            in the signature, which is evaluated once at import time — loading
            the dataset as an import side effect and sharing one instance
            across all calls.)

    Returns:
        dict with the topic name, the number of questions, the number of
        correct retrievals, and the overall accuracy as a percentage rounded
        to 2 decimals (-100.0 when the topic has no questions).
    """
    if sd is None:
        sd = StanfordDataset()
    testPara = sd.getParagraph(topic)
    drm = DocumentRetrievalModel(testPara, True, True)
    result = []
    # Per answer-type counters: [asked, matched] for PERSON/DATE/LOCATION/other.
    res = [[0, 0], [0, 0], [0, 0], [0, 0]]
    devData = sd.getTopic(topic)
    # NOTE: the original code reused the paragraph loop variable (`index`) for
    # the answer-type bucket, shadowing it; distinct names are used here.
    for para in devData['paragraphs']:
        for qNo in range(0, len(para['qas'])):
            pq = ProcessedQuestion(para['qas'][qNo]['question'], True, False, True)
            if pq.aType == 'PERSON':
                type_idx = 0
            elif pq.aType == 'DATE':
                type_idx = 1
            elif pq.aType == 'LOCATION':
                type_idx = 2
            else:
                type_idx = 3
            res[type_idx][0] += 1
            r = drm.query(pq)
            answers = [ans['text'].lower() for ans in para['qas'][qNo]['answers']]
            r = r.lower()
            isMatch = False
            for rt in word_tokenize(r):
                # Correct as soon as any token of the retrieved answer occurs
                # in any reference answer.
                if [rt in word_tokenize(ans) for ans in answers].count(True) > 0:
                    isMatch = True
                    res[type_idx][1] += 1
                    break
            result.append((type_idx, qNo, pq.question, r, str(answers), isMatch))
    noOfResult = len(result)
    correct = [r[5] for r in result].count(True)
    # Guard against topics without any question (avoid division by zero).
    if noOfResult == 0:
        accuracy = -1
    else:
        accuracy = correct / noOfResult
    return {"Topic": topic, "No of Ques": noOfResult, "Correct Retrieval": correct,
            "OverallAccuracy": round(accuracy * 100, 2)}
def runAll():
    """Run computeAccuracy over every topic and write the results to accuracy.csv.

    Prints per-topic accuracy plus a running average while evaluating. Topics
    without questions are skipped; note that the final printed average still
    divides by the total number of topics (original behaviour preserved).
    """
    sd = StanfordDataset()
    toCSV = []
    total = len(sd.titles)
    index = 1
    tA = 0
    for title in sd.titles:
        print("Testing all questions for \"" + title + "\"")
        # BUGFIX: pass the already-loaded dataset instead of leaving `sd`
        # unused and letting computeAccuracy fall back to its default.
        d = computeAccuracy(title, sd)
        if d["No of Ques"] == 0:
            continue
        tA += d['OverallAccuracy']
        print(d)
        print(str(index) + "/" + str(total) + ":", d['OverallAccuracy'], "/", tA / index)
        toCSV.append(d)
        index += 1
    print("OverallAccuracy : ", tA / total)
    if not toCSV:
        # Nothing evaluated: avoid IndexError on toCSV[0] below.
        print("No results to write; skipping accuracy.csv")
        return
    keys = toCSV[0].keys()
    # newline='' is required by the csv module to avoid blank rows on Windows.
    with open('accuracy.csv', 'w', newline='') as output_file:
        dict_writer = csv.DictWriter(output_file, keys)
        dict_writer.writeheader()
        dict_writer.writerows(toCSV)
    print("Written the accuracy measure in accuracy.csv file. Done")
runAll() | 35.282353 | 255 | 0.560854 |
e48aac964678939d8a791493fb6e0243dc8e3c6d | 8,511 | py | Python | dragonflow/conf/df_common_params.py | anlaneg/dragonflow | f684fd721cb953f4d0320725d708e79c9f35ef6c | [
"Apache-2.0"
] | null | null | null | dragonflow/conf/df_common_params.py | anlaneg/dragonflow | f684fd721cb953f4d0320725d708e79c9f35ef6c | [
"Apache-2.0"
] | null | null | null | dragonflow/conf/df_common_params.py | anlaneg/dragonflow | f684fd721cb953f4d0320725d708e79c9f35ef6c | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import portbindings
from oslo_config import cfg
from dragonflow._i18n import _
# Common Dragonflow configuration options, registered under the 'df' group.
# Fixes in this revision: several help strings were built from adjacent
# string literals missing a separating space (e.g. "secondsplus",
# "more thanneutron_listener_report_delay", "are(%(ovs)s",
# "socketis created"); non-English comments translated to English.
df_opts = [
    cfg.IPOpt('remote_db_ip',
              default='127.0.0.1',
              deprecated_for_removal=True,
              help=_('The remote db server ip address')),
    cfg.PortOpt('remote_db_port',
                default=4001,
                deprecated_for_removal=True,
                help=_('The remote db server port')),
    # List of northbound database host:port pairs.
    cfg.ListOpt('remote_db_hosts',
                default=['$remote_db_ip:$remote_db_port'],
                help=_('Remote DB cluster host:port pairs.')),
    cfg.StrOpt('nb_db_class',
               default='etcd_nb_db_driver',
               help=_('The driver to use for the NB database')),
    # IP address of the local host (VTEP endpoint).
    cfg.IPOpt('local_ip',
              default='127.0.0.1',
              help=_('Local host VTEP IP')),
    cfg.IPOpt('management_ip',
              default='127.0.0.1',
              help=_('Local host management IP')),
    # Tunnel encapsulation types supported by this node.
    cfg.ListOpt('tunnel_types',
                default=['geneve', 'vxlan', 'gre'],
                help=_("The encapsulation types for the tunnels")),
    cfg.BoolOpt('enable_dpdk',
                default=False,
                help=_("Enable dpdk")),
    cfg.ListOpt('apps_list',
                default=['l2', 'l3_proactive', 'dhcp'],
                help=_('List of openflow applications classes to load')),
    # Name of the OVS integration bridge.
    cfg.StrOpt('integration_bridge', default='br-int',
               help=_("Integration bridge to use. "
                      "Do not change this parameter unless you have a good "
                      "reason to. This is the name of the OVS integration "
                      "bridge. There is one per hypervisor. The integration "
                      "bridge acts as a virtual 'patch bay'. All VM VIFs are "
                      "attached to this bridge and then 'patched' according "
                      "to their network connectivity.")),
    cfg.BoolOpt('use_centralized_ipv6_DHCP',
                default=False,
                help=_("Enable IPv6 DHCP by using DHCP agent")),
    cfg.BoolOpt('enable_df_pub_sub',
                default=False,
                help=_("Enable use of Dragonflow built-in pub/sub")),
    cfg.StrOpt('pub_sub_driver',
               default='zmq_pubsub_driver',
               help=_('Drivers to use for the Dragonflow pub/sub')),
    cfg.BoolOpt('enable_neutron_notifier',
                default=False,
                help=_('Enable notifier for Dragonflow controller sending '
                       'data to neutron server')),
    cfg.StrOpt('neutron_notifier',
               default='nb_api_neutron_notifier_driver',
               help=_('Notifier for the Dragonflow controller events')),
    # Switch backend driver to use.
    cfg.StrOpt('switch_backend',
               default='vswitch_backend_driver',
               help=_('Backend switch drivers to use')),
    # List of available publisher IPs.
    cfg.ListOpt('publishers_ips',
                default=['$local_ip'],
                help=_('List of the Neutron Server Publisher IPs.')),
    cfg.PortOpt('publisher_port',
                default=8866,
                help=_('Neutron Server Publishers port')),
    # Publisher transport protocol.
    cfg.StrOpt('publisher_transport',
               default='tcp',
               help=_('Neutron Server Publishers transport protocol')),
    cfg.StrOpt('publisher_bind_address',
               default='*',
               help=_('Neutron Server Publishers bind address')),
    cfg.IntOpt(
        'publisher_timeout',
        default=300,
        help=_('Publisher idle timeout before it is removed from the table')
    ),
    cfg.IntOpt(
        'db_sync_time',
        default=120,
        help=_('Min periodically db comparison time')
    ),
    cfg.IntOpt(
        'publisher_rate_limit_timeout',
        default=180,
        help=_(
            'Limit update of publishers\' table timestamp to '
            '$publisher_rate_limit_count per this many seconds.'
        )
    ),
    cfg.IntOpt(
        'publisher_rate_limit_count',
        default=1,
        help=_(
            'Limit update of publishers\' table timestamp to '
            'this many times per $publisher_rate_limit_timeout seconds.'
        )
    ),
    cfg.FloatOpt('monitor_table_poll_time',
                 default=30,
                 help=_('Poll monitored tables every this number of seconds')),
    cfg.BoolOpt('enable_selective_topology_distribution',
                default=False,
                help=_('When enabled, each controller will get only the part '
                       'of the topology relevant to it.')),
    cfg.StrOpt(
        'ovsdb_local_address',
        default='/usr/local/var/run/openvswitch/db.sock',
        help=_('local controller connect to the ovsdb server socket address')
    ),
    cfg.IntOpt('distributed_lock_ttl',
               default=120,
               help=_('The TTL of the distributed lock. The lock will be '
                      'reset if it is timeout.')),
    # Fixed: missing space between "are" and "(%(ovs)s, ...)".
    cfg.StrOpt("vif_type",
               default=portbindings.VIF_TYPE_OVS,
               help=_("Type of VIF to be used for ports valid values are "
                      "(%(ovs)s, %(vhostuser)s) default %(ovs)s") % {
                   "ovs": portbindings.VIF_TYPE_OVS,
                   "vhostuser": portbindings.VIF_TYPE_VHOST_USER},
               choices=[portbindings.VIF_TYPE_OVS,
                        portbindings.VIF_TYPE_VHOST_USER]),
    # Fixed: missing space between "socket" and "is created".
    cfg.StrOpt("vhost_sock_dir",
               default="/var/run/openvswitch",
               help=_("The directory in which vhost virtio socket "
                      "is created by all the vswitch daemons")),
    cfg.IntOpt(
        'service_down_time',
        default=80,
        help=_('This time(in seconds) should be at least thrice of '
               'report_interval, to be sure the service is really down.')
    ),
    cfg.IntOpt(
        'report_interval',
        default=25,
        help=_('Time(in seconds) interval between two heartbeats')
    ),
    # Fixed: missing spaces at string-literal joins ("secondsplus",
    # "thanneutron_listener_report_delay").
    cfg.IntOpt('neutron_listener_report_interval',
               default=25,
               help=_('Neutron report heart beat every this number in seconds '
                      'plus a random delay, which should be no more than '
                      'neutron_listener_report_delay')),
    cfg.IntOpt('neutron_listener_report_delay',
               default=10,
               help=_('The max delay in seconds for Neutron to report heart'
                      'beat to df-db')),
    cfg.StrOpt('external_host_ip',
               help=_("Compute node external IP")),
    cfg.BoolOpt('auto_detect_port_behind_port',
                default=False,
                help=_("Automatically detect port-behind-port scenarios, "
                       "e.g., amphora, or macvlan")),
    cfg.StrOpt('datapath_layout_path',
               help=_("Path to datapath layout configuration"),
               default="/etc/neutron/dragonflow_datapath_layout.yaml"),
    cfg.BoolOpt('write_datapath_allocation',
                help=_("Write the datapath allocation data to file?"),
                default=True),
    cfg.StrOpt('datapath_allocation_output_path',
               help=_("Path to output the datapath allocation data"),
               default=("/var/run/dragonflow/"
                        "dragonflow_datapath_allocation.json")),
    cfg.BoolOpt('overwrite_datapath_allocation_output_path',
                help=_("Overwrite datapath allocation data?"),
                default=True),
    # FIXME (dimak) rename to something simpler once all tables are
    # auto-allocated.
    cfg.IntOpt('datapath_autoalloc_table_offset',
               default=201,
               help=_('Start offset for new datapath application tables')),
]
def register_opts():
    """Register all Dragonflow common options under the 'df' config group."""
    cfg.CONF.register_opts(df_opts, group='df')
def list_opts():
    """Return this module's options, keyed by their config group name.

    Used by oslo.config's sample-generation machinery.
    """
    return dict(df=df_opts)
| 42.343284 | 79 | 0.585713 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.