max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
orbitdeterminator/doppler/utils/utils.py | DewanshiDewan/orbitdeterminator | 158 | 12758851 | <reponame>DewanshiDewan/orbitdeterminator<filename>orbitdeterminator/doppler/utils/utils.py
import numpy as np
from scipy.integrate import odeint # Orbit propagation
from scipy.optimize import fsolve # For solving TDoA
from sgp4.api import Satrec
from astropy import units as u
from astropy.time import Time
from astropy.coordinates import EarthLocation, ITRS, ICRS, TEME, CartesianDifferential, CartesianRepresentation
from orbitdeterminator.doppler.utils.constants import *
def range_range_rate(x_sat: np.ndarray, x_obs: np.ndarray):
    """ Get range and slant range rate (radial relative velocity component).
    Vectorized.
    Args:
        x_sat (np.ndarray): satellite states (pos, vel), shape (6, n).
        x_obs (np.ndarray): observer states (pos, vel),
            shape (6, n) for a single observer or (6, n, n_obs) for several.
    Returns:
        r (np.ndarray): range (observer-satellite distance).
        rr (np.ndarray): range rate (slant range rate).
    Raises:
        ValueError: if x_obs is neither 2- nor 3-dimensional
            (previously this fell through to a NameError on `d`).
    """
    if x_obs.ndim == 2:    # Single observer (6, n)
        einsum_format = 'ij,ij->j'
        d = x_sat - x_obs  # Relative state (satellite - observer)
    elif x_obs.ndim == 3:  # Multiple observers (6, n, n_obs)
        einsum_format = 'ijk,ijk->jk'
        # Broadcast the satellite state against every observer
        d = np.repeat(np.expand_dims(x_sat, axis=2), x_obs.shape[2], axis=2) - x_obs
    else:
        raise ValueError("x_obs must have 2 or 3 dimensions, got %d" % x_obs.ndim)
    r = np.linalg.norm(d[0:3,], axis=0)          # Range (norm of relative position)
    l = d[0:3,] / r                              # Line-of-sight unit vectors
    rr = np.einsum(einsum_format, d[3:6,], l)    # Radial velocity component
    return r.T, rr.T
def doppler_shift(x_sat: np.ndarray, x_obs: np.ndarray, f_ref: float, c: float):
    """ Compute the Doppler shift for the given satellite and observer states.
    Vectorized.
    Args:
        x_sat (np.ndarray): satellite states (pos, vel).
        x_obs (np.ndarray): observer states (pos, vel).
        f_ref (float): reference (carrier) frequency.
        c (float): signal propagation speed.
    Returns:
        df (np.ndarray): frequency shift relative to the reference frequency.
    """
    _, range_rate = range_range_rate(x_sat, x_obs)
    # Classical (non-relativistic) Doppler: df = f_ref * v_radial / c
    return f_ref * (range_rate / c)
# Orbit derivative
def orbdyn_2body(x: np.ndarray, t: float, mu: float = 3.986004418e14):
    """ Two-body orbital dynamics: derivative of (x, y, z, x_dot, y_dot, z_dot).
    Args:
        x (np.ndarray): state vector(s), shape (6,) or (6, n).
        t (float): time (unused; required by odeint's callback signature).
        mu (float): standard gravitational parameter [m^3/s^2].
    Returns:
        dxdt (np.ndarray): state vector time derivative, same shape as x.
    """
    pos = x[0:3,]
    vel = x[3:6,]
    r = np.linalg.norm(pos, axis=0)
    dxdt = np.zeros(x.shape)
    dxdt[0:3,] = vel                   # d(position)/dt = velocity
    dxdt[3:6,] = -(mu / r**3) * pos    # inverse-square central gravity
    return dxdt
def orbdyn_2body_stm(x: np.ndarray, t: float, mu: float = 3.986004418e14):
    """ Two-body dynamics plus state transition matrix derivative.
    Phi_dot = A * Phi.
    Args:
        x (np.ndarray): state vector with the flattened STM appended,
            shape (6+6*6,) = (42,) or (42, n).
        t (float): time (unused; required by odeint's callback signature).
        mu (float): standard gravitational parameter [m^3/s^2].
    Returns:
        dxdt (np.ndarray): derivative of the state vector and flattened STM.
    """
    dxdt = np.zeros(x.shape)
    pos = x[0:3,]
    r = np.linalg.norm(pos, axis=0)
    dxdt[0:3,] = x[3:6,]              # position derivative = velocity
    dxdt[3:6,] = (-mu / r**3) * pos   # central gravity acceleration
    A = get_matrix_A(pos, mu=mu)      # dynamics Jacobian, (6,6) or (6,6,n)
    if x.ndim == 1:
        # Single state: plain matrix product
        Phi = x[6:,].reshape((6, 6))
        dxdt[6:,] = np.matmul(A, Phi).reshape(36)
    else:
        # Batched states: contract over the shared index per column
        Phi = x[6:,].reshape((6, 6, -1))
        dxdt[6:,] = np.einsum('ijl,jkl->ikl', A, Phi).reshape((36, -1))
    return dxdt
def get_matrix_A(x: np.ndarray, mu: float = 3.986004418e14):
    """ Build the system matrix A of the linearized two-body dynamics
    (x_dot = A*x). Vectorized.
    Args:
        x (np.ndarray): Cartesian position (first three rows are used).
        mu (float): standard gravitational parameter. Defaults to 3.98e14 m^3/s^2.
    Returns:
        A (np.ndarray): A matrix, shape (6, 6) or (6, 6, n).
    """
    r = np.linalg.norm(x[0:3,], axis=0)
    aa = -mu / r**5 * r**2     # = -mu / r**3, diagonal gravity-gradient part
    b = 3 * mu / r**5          # off-diagonal coupling factor
    px, py, pz = x[0,], x[1,], x[2,]
    AA = np.array([
        [aa + b * px**2, b * px * py, b * px * pz],
        [b * px * py, aa + b * py**2, b * py * pz],
        [b * px * pz, b * py * pz, aa + b * pz**2],
    ])
    zero_block = np.zeros(AA.shape)    # zero sub-blocks of A
    eye_block = np.zeros(AA.shape)     # identity sub-block (upper right)
    idx = np.arange(AA.shape[0])
    eye_block[idx, idx, ] = 1
    # A = [[0, I], [AA, 0]]
    return np.concatenate([
        np.concatenate([zero_block, eye_block], axis=1),
        np.concatenate([AA, zero_block], axis=1),
    ], axis=0)
def f_obs_range_rate(x_sat: np.ndarray, x_obs: np.ndarray):
    """ Observation function for range-rate measurements.
    Args:
        x_sat (np.ndarray): set of satellite states.
        x_obs (np.ndarray): set of observer states.
    Returns:
        rr (np.ndarray): range rate, shape (z_dim, n) = (1, n).
        H (np.ndarray): partial of the radial range rate w.r.t. the state,
            shape (z_dim, x_dim, n) = (1, 6, n).
    """
    _, rr = range_range_rate(x_sat, x_obs)
    H = get_matrix_range_rate_H(x_sat, x_obs)
    if x_obs.ndim == 2:
        # Single observer: promote the (n,) vector to a (1, n) measurement row
        rr = rr[np.newaxis, :]
    return rr, H
def f_obs_x_sat(x_sat: np.ndarray, x_obs: np.ndarray = None):
    """ Observation function for a direct full-state measurement,
    e.g. a GPS fix. Used for debugging.
    Args:
        x_sat (np.ndarray): set of satellite states, shape (x_dim, n).
        x_obs (np.ndarray): unused; kept for a uniform observation interface.
    Returns:
        x_sat (np.ndarray): the satellite states, unchanged.
        H (np.ndarray): observation matrix (stacked identities), (x_dim, x_dim, n).
    """
    x_dim, n = x_sat.shape
    # One identity observation matrix per measurement column
    H = np.repeat(np.eye(x_dim)[:, :, np.newaxis], n, axis=2)
    return x_sat, H
def get_matrix_range_rate_H(x_sat: np.ndarray, x_obs: np.ndarray):
    """ Measurement Jacobian for range-rate measurements. Vectorized.
    Args:
        x_sat (np.ndarray): set of satellite states.
        x_obs (np.ndarray): set of observer states, (6, n) or (6, n, n_obs).
    Returns:
        H (np.ndarray): partial of the radial range rate w.r.t. the state,
            shape (1, 6, n) for one observer, (n_obs, 6, n) for several.
    """
    single = (x_obs.ndim == 2)
    if single:    # Single observer (6, n)
        einsum_format = 'ij,ij->j'
        d = x_sat - x_obs
    else:         # Multiple observers (6, n, n_obs)
        einsum_format = 'ijk,ijk->jk'
        d = np.repeat(np.expand_dims(x_sat, axis=2), x_obs.shape[2], axis=2) - x_obs
    r = np.linalg.norm(d[0:3,], axis=0)              # Range
    # Stack [v/r ; p/r]: fancy indexing yields a fresh array, so the
    # in-place subtraction below cannot corrupt d.
    H = (d / r)[[3, 4, 5, 0, 1, 2], ]
    p_dot_v = np.einsum(einsum_format, d[0:3,], d[3:6])  # position . velocity
    # Position rows: d(range-rate)/d(pos) = v/r - p (p.v) / r^3
    H[0:3, :] -= (d[0:3,] * p_dot_v) / r**3
    if single:
        H = np.expand_dims(H, axis=0)    # -> (1, 6, n)
    else:
        H = np.transpose(H, (2, 0, 1))   # -> (n_obs, 6, n)
    return H
def tdoa_objective_function(vars, *data):
    """ Objective function for solving Time Differential of Arrival (TDoA).
    Roots satisfy: 0 = C * (TDoA + tau) - || x_sat - x_obs ||
    Args:
        vars (tuple): unknowns - xyz satellite position and time offset
            (x, y, z, t)
        data (tuple): additional arguments - observer positions and TDoA
            measurements (x_obs, tdoa)
    Returns:
        (tuple): residuals, one per observer (four observers expected)
    """
    x, y, z, tau = vars
    x_obs, tdoa = data
    p_sat = np.array([[x], [y], [z]], dtype=np.float64)
    residual = C * (tdoa + tau) - np.linalg.norm(x_obs - p_sat, axis=0)
    return tuple(residual.item(k) for k in range(4))
def get_tdoa_simulated(x_sat: np.ndarray, x_obs: np.ndarray, flag_tof: bool = False):
    """ Get simulated Time Differential of Arrival measurements.
    TODO: Take into account time of flight, right now it is instantaneous.
    TODO: Flip range and tdoa arrays dimensions to be (n_measurements, n_stations)
    Args:
        x_sat (np.ndarray): set of satellite state vectors.
        x_obs (np.ndarray): set of observer positions.
        flag_tof (bool): simulate using time of flight (not currently implemented).
    Returns:
        tdoa (np.ndarray): simulated TDoA measurements (relative to station 0).
        tof (np.ndarray): simulated times of flight observer -> satellite.
    Raises:
        NotImplementedError: if flag_tof is True.
    """
    if flag_tof:
        # Raise instead of `assert False`: asserts are stripped under `python -O`,
        # which would silently let the unimplemented path fall through.
        raise NotImplementedError("Time of flight not implemented!")
    r, _ = range_range_rate(x_sat, x_obs)
    tof = r / C              # signal travel time per station
    tdoa = tof - tof[0, :]   # differences w.r.t. the reference station (row 0)
    return tdoa, tof
def get_tdoa_simulated_r(r:np.ndarray):
    """ Same as get_tdoa_simulated, but takes the precomputed ranges as argument.
    TODO: Flip range and tdoa arrays dimensions to be (n_measurements, n_stations)
    Args:
        r (np.ndarray): set of observed ranges per station (n_stations, n_measurements).
    Returns:
        tdoa (np.ndarray): set of simulated TDoA measurements.
        tof (np.ndarray): set of simulated time of flights between the observer and the satellite.
    """
    tof = r / C                 # signal travel time per station
    tdoa = tof - tof[0,:]       # differences w.r.t. the reference station (row 0)
    return tdoa, tof
def solve_tdoa(tdoa:np.ndarray, x_obs:np.ndarray):
    """ Function to solve Time Differential of Arrival (TDoA) measurements.
    Multilaterates one satellite position per epoch with scipy's fsolve.
    Args:
        tdoa (np.ndarray): array of TDoA measurements. TODO: Array dimensions.
            TDoA array must include time differential for the reference station
            even being zero.
        x_obs (np.ndarray): array of observer positions (6, n, n_obs).
    Returns:
        p_sat (np.ndarray): array of multilaterated satellite positions (3, n).
        tau (np.ndarray): array of time offsets for reference station
    """
    n = x_obs.shape[1]
    p_sat = np.zeros((3, n))
    tau = np.zeros(n)
    x_obs_mean = np.mean(x_obs, axis=2)
    for i in range(n):
        # Initial guess: slightly above the mean observer position plus a
        # small positive clock offset (keeps fsolve away from the trivial root).
        vars_0 = [x_obs_mean[0,i]*1.01, x_obs_mean[1,i]*1.01, x_obs_mean[2,i]*1.01, 5e-3]
        data = (x_obs[0:3, i, :], tdoa[:, i])
        result = fsolve(tdoa_objective_function, vars_0, args=data)
        p_sat[:,i] = result[0:3]   # multilaterated position
        tau[i] = result[3]         # reference-station clock offset
    return p_sat, tau
def verify_sat_orbital(x_sat: np.ndarray, range_pos: np.ndarray, range_vel: np.ndarray):
    """ Verifies whether given state vectors represent a valid orbital state.
    This function is used to eliminate possible states that violate orbital
    constraints (position and velocity magnitude bounds, inclusive).
    Args:
        x_sat (np.ndarray): set of satellite states, shape (6, n).
        range_pos (np.ndarray): (min, max) allowed position vector norms.
        range_vel (np.ndarray): (min, max) allowed velocity vector norms.
    Returns:
        x_sat_ok (np.ndarray): satellite states that pass both checks.
        x_mask (np.ndarray): boolean validity mask per state.
    """
    pos_norm = np.linalg.norm(x_sat[0:3,], axis=0)
    vel_norm = np.linalg.norm(x_sat[3:6,], axis=0)
    x_mask = ((pos_norm >= range_pos[0]) & (pos_norm <= range_pos[1])
              & (vel_norm >= range_vel[0]) & (vel_norm <= range_vel[1]))
    return x_sat[:, x_mask], x_mask
def verify_sat_observer(x_sat: np.ndarray, x_obs: np.ndarray, range_range: np.ndarray):
    """ Verifies whether the satellite is within the valid range from the observer.
    This function is used to eliminate possible states that violate
    satellite-observer constraints (range bounds, inclusive).
    Args:
        x_sat (np.ndarray): set of satellite states.
        x_obs (np.ndarray): set of observer states.
        range_range (np.ndarray): (min, max) allowed observer-satellite range.
    Returns:
        x_sat_ok (np.ndarray): satellite states within the allowed range.
        x_mask (np.ndarray): boolean validity mask per state.
    """
    rng, _ = range_range_rate(x_sat, x_obs)
    x_mask = (rng >= range_range[0]) & (rng <= range_range[1])
    return x_sat[:, x_mask], x_mask
def herrick_gibbs(p_sat:np.ndarray, t:np.ndarray, angle_checks=True):
    """ Herrick-Gibbs Initial Orbit Determination Method. Takes three positional observations and corresponding
    timesteps and outputs full state vector estimate (position and velocity) for the middle measurement.
    Reference: Vallado - Fundamentals of Astrodynamics and Applications, 4th ed., p.461, 7.5.2 Herrick-Gibbs
    Args:
        p_sat (np.ndarray): set of satellite positions, shape (3, 3) - one column
            per observation. Three close positions are required for the method to work.
        t (np.ndarray): observation times (same order as the position columns)
        angle_checks (bool): flag whether on not to perform angle checks between position vectors
    Returns:
        x_sat_1 (np.ndarray): estimated satellite state (position + velocity) for the middle observation
        error (str or None): description of the failed sanity check, or None
    """
    #print(f"Herrick-Gibbs")
    error = None
    tolerance_angle = 10.0/180.0*np.pi  # 10 degrees, in radians
    r = np.linalg.norm(p_sat, axis=0) # Magnitude of the observed positions
    # Sanity checks
    #angle_checks = True
    if angle_checks:
        p = np.cross(p_sat[:,1], p_sat[:,2])
        p_n = p / np.linalg.norm(p)     # unit normal of the orbit plane (obs 1 x obs 2)
        x_sat_1n = p_sat[:,0] / r[0]    # unit vector of the first observation
        #copa = np.arcsin(np.dot(p_n, x_sat_1n)) # Variable unused in original code
        # Check whether the vectors are coplanar
        # NOTE(review): this compares a sine of the out-of-plane angle against an
        # angle tolerance (they agree only for small angles) -- confirm intent.
        if np.abs(np.dot(x_sat_1n, p_n)) > tolerance_angle:
            error = f"Error: not coplanar {np.abs(np.dot(x_sat_1n, p_n))} > {tolerance_angle}"
        # Calculate angle between vectors
        theta_01 = np.arccos(np.dot(p_sat[:,0], p_sat[:,1]) / (np.linalg.norm(p_sat[:,0])*np.linalg.norm(p_sat[:,1])))
        theta_12 = np.arccos(np.dot(p_sat[:,1], p_sat[:,2]) / (np.linalg.norm(p_sat[:,1])*np.linalg.norm(p_sat[:,2])))
        # Herrick-Gibbs assumes closely spaced observations; large separation
        # angles invalidate the Taylor-series velocity approximation.
        if min(theta_01, theta_12) > tolerance_angle:
            error = f"Error: angles {min(theta_01, theta_12)} > {tolerance_angle}"
    # Herrick-Gibbs Initial Orbit Determination
    dt_10, dt_20, dt_21 = t[1]-t[0], t[2]-t[0], t[2]-t[1]
    # Taylor-series weights for the middle-epoch velocity (Vallado eq. 7-26)
    term = np.array([ -dt_21 * (1.0/(dt_10*dt_20)) + MU/(12.0*r[0]**3),
                      (dt_21-dt_10) * (1.0/(dt_10*dt_21)) + MU/(12.0*r[1]**3),
                      dt_10 * (1.0/(dt_21*dt_20)) + MU/(12.0*r[2]**3),
    ])
    #v_sat_1 = term[0]*p_sat[:,0] + term[1]*p_sat[:,1] + term[2]*p_sat[:,2]
    v_sat_1 = np.sum(term*p_sat, axis=1)  # weighted sum of the three positions
    x_sat_1 = np.concatenate([p_sat[:,1], v_sat_1])  # state at the middle epoch
    return x_sat_1, error
def batch(
        x_0: np.ndarray,
        P_bar_0: np.ndarray,
        R: np.ndarray,
        z: np.ndarray,
        t: np.ndarray,
        x_obs: np.ndarray,
        f_obs,
        tolerance: float = 1e-8,
        max_iterations: int = 1000
):
    """ Batch least-squares estimation algorithm.
    Reference: Tapley, Schutz, Born - Statistical Orbit Determination,
    Chapter 4.6, p. 196-197 - Computational Algorithm for the Batch Processor.
    Args:
        x_0 (np.ndarray): Initial state vector, shape (x_dim, 1).
        P_bar_0 (np.ndarray): Initial uncertainty, shape (x_dim, x_dim).
            An all-zero matrix means "no a priori information".
        R (np.ndarray): Measurement uncertainty, shape (z_dim, z_dim).
        z (np.ndarray): Array of measurements, shape (z_dim, n).
        t (np.ndarray): Array of time deltas, shape (n,).
        x_obs (np.ndarray): Array of observer positions (x_dim, n).
        f_obs (callable): observation function returning (y, H_tilde).
        tolerance (float): convergence tolerance on the change of the update norm.
        max_iterations (int): iteration cap.
    Return:
        x_0 (np.ndarray): new estimate for the initial state vector.
        output (dict): iteration count ('num_it') and singularity flag ('singular').
    """
    n = z.shape[1]
    x_dim = x_0.shape[0]
    Phi_0 = np.eye(x_dim)              # Initial State Transition Matrix
    x_hat_0 = np.zeros(x_0.shape)      # Nominal trajectory update
    x_bar_0 = np.zeros(x_0.shape)      # Apriori estimate
    W = np.linalg.inv(R)               # Measurement weight matrix
    W_vec = np.repeat(np.expand_dims(W, axis=2), n, axis=2)
    error = 1
    i = 0
    singular = False
    while np.abs(error) > tolerance and i < max_iterations:
        i += 1
        # Initialize the normal equations. Without a priori covariance the
        # information matrix starts at zero.
        if np.count_nonzero(P_bar_0) == 0:
            # BUGFIX: np.zeros takes a shape *tuple*; np.zeros(a, b) would
            # interpret the second argument as a dtype and raise TypeError.
            L = np.zeros((x_dim, x_dim))
        else:
            L = np.linalg.inv(P_bar_0)
        N = L.dot(x_bar_0)
        # Propagate, flatten the stm and append to the state vector
        x_Phi = np.transpose(odeint(orbdyn_2body_stm,
            np.concatenate([x_0.squeeze(), Phi_0.flatten()]), t, args=(MU,)))
        X = x_Phi[0:6,]
        Phi = x_Phi[6:,].reshape((x_dim, x_dim, t.shape[0]))
        # Calculate projected observations (projected measurements and H_tilde)
        y, H_t = f_obs(X, x_obs)
        dy = np.expand_dims(z - y, axis=1)   # observation residuals
        # Map the observation Jacobian back to the epoch state: H = H_tilde * Phi
        H_k = np.einsum('ijl,jkl->ikl', H_t, Phi)
        H_kt = np.transpose(H_k, axes=(1, 0, 2))
        # Batch update: accumulate the normal equations over all measurements
        L += np.einsum('ijl,jkl,kml->im', H_kt, W_vec, H_k)
        N += np.einsum('ijl,jkl,kml->im', H_kt, W_vec, dy)
        temp = np.copy(x_hat_0)
        try:
            x_hat_0 = np.linalg.inv(L).dot(N)
        except np.linalg.LinAlgError:
            print("Singular matrix exception.")
            singular = True
            break
        x_0 += x_hat_0         # shift the nominal trajectory
        x_bar_0 -= x_hat_0     # shift the a priori estimate accordingly
        error = np.abs(np.linalg.norm(temp - x_hat_0))
    np.set_printoptions(precision=2)  # NOTE(review): mutates global numpy print options
    output = {'num_it': i, 'singular': singular}
    return x_0, output
openCVMiniProject.py | GroupOneSEED/github-slideshow | 0 | 12758852 | from time import sleep
from picamera import PiCamera as Camera
import cv2 as cv
import cv2.aruco as aruco
import numpy as np
# Module-level PiCamera handle shared by the functions below (full-HD @ 60 fps).
camera = Camera(resolution=(1920, 1080), framerate=60)
def cameraCalibration():
    """Freeze the camera's exposure and white balance, then capture a reference image.

    Returns:
        The frozen auto-white-balance gains, so later captures can reuse them.
    """
    camera.iso = 100
    sleep(2)  # let the sensor's auto-exposure settle before freezing it
    camera.shutter_speed = camera.exposure_speed
    camera.exposure_mode = 'off'
    gain = camera.awb_gains      # snapshot the current AWB gains...
    camera.awb_mode = 'off'
    camera.awb_gains = gain      # ...and lock them in
    # NOTE(review): captures 3 times to the same file, so only the last image
    # survives -- presumably a pipeline warm-up; confirm intent.
    for i in range(3):
        camera.capture('calibration.jpg')
    return(gain)
def detectAruco(awb_gain):
    """Capture an image, detect an ArUco marker and classify its screen quadrant.

    Args:
        awb_gain: white-balance gains previously frozen by cameraCalibration().
    Returns:
        int: 0 = top-right, 1 = top-left, 2 = bottom-left, 3 = bottom-right
        quadrant of the marker centre in the 1920x1080 frame; 4 if the centre
        lies exactly on an axis.
    """
    # BUGFIX: the parameter is named awb_gain; the original referenced the
    # undefined name `awb_gains`, raising a NameError on every call.
    camera.awb_gains = awb_gain
    camera.capture('object.jpg')
    img = cv.imread('object.jpg')
    grayImg = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    arucoDictionary = aruco.Dictionary_get(aruco.DICT_6X6_250)
    arucoParameters = aruco.DetectorParameters_create()
    corners, ids, rejectedImgPoints = aruco.detectMarkers(grayImg, arucoDictionary, parameters=arucoParameters)
    cornerList = list(corners)
    # Indices 2/3 are assumed to be the bottom-right/bottom-left corners of the
    # first detected marker -- TODO confirm against the detector's corner order.
    bottomRightX = int(cornerList[0][0][2][0])
    bottomLeftX = int(cornerList[0][0][3][0])
    bottomRightY = int(cornerList[0][0][2][1])
    bottomLeftY = int(cornerList[0][0][3][1])
    # Midpoint of the marker's bottom edge, used as the marker "centre"
    centerX = (bottomRightX + bottomLeftX) / 2
    centerY = (bottomRightY + bottomLeftY) / 2
    if centerX > 960 and centerY < 540:
        return 0
    elif centerX < 960 and centerY < 540:
        return 1
    elif centerX < 960 and centerY > 540:
        return 2
    elif centerX > 960 and centerY > 540:
        return 3
    else:
        return 4
# Script entry: calibrate once, then locate the marker using the frozen gains.
gain = cameraCalibration()
quadrant = detectAruco(gain)
| 2.71875 | 3 |
tmdb_mp4.py | MReptile/sickbeard_mp4_automator | 0 | 12758853 | import os
import sys
try:
from urllib.request import urlretrieve
except ImportError:
from urllib import urlretrieve
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import tempfile
import time
import logging
from tmdb_api import tmdb
from mutagen.mp4 import MP4, MP4Cover
from extensions import valid_output_extensions, valid_poster_extensions, tmdb_api_key
class tmdb_mp4:
    """Fetch movie metadata from TMDB and write it as iTunes-style MP4 tags."""

    def __init__(self, imdbid, tmdbid=False, original=None, language='en', logger=None):
        """Look up the movie on TMDB (via its IMDB id) and cache its metadata.

        Args:
            imdbid: IMDB id; a 'tt' prefix is added if missing.
            tmdbid: optional TMDB id (only used for logging here).
            original: path of the original source file, recorded in the tags.
            language: metadata language code.
            logger: optional logger; a module logger is used otherwise.
        """
        if logger:
            self.log = logger
        else:
            self.log = logging.getLogger(__name__)

        if tmdbid:
            self.log.debug("TMDB ID: %s." % tmdbid)
        else:
            self.log.debug("IMDB ID: %s." % imdbid)

        if tmdbid is False and imdbid.startswith('tt') is not True:
            imdbid = 'tt' + imdbid
            self.log.debug("Correcting imdbid to %s." % imdbid)
        self.imdbid = imdbid
        self.original = original

        # Retry the TMDB lookup up to 3 times. NOTE(review): if every attempt
        # fails the metadata attributes stay unset and later use raises
        # AttributeError -- confirm whether that is the intended failure mode.
        for i in range(3):
            try:
                tmdb.configure(tmdb_api_key, language=language)
                self.movie = tmdb.Movie(imdbid)
                self.HD = None
                self.title = self.movie.get_title()
                self.genre = self.movie.get_genres()
                self.shortdescription = self.movie.get_tagline()
                self.description = self.movie.get_overview()
                self.date = self.movie.get_release_date()
                # Generate XML tags for Actors/Writers/Directors/Producers
                self.xml = self.xmlTags()
                break
            except Exception:
                self.log.exception("Failed to connect to tMDB, trying again in 20 seconds.")
                time.sleep(20)

    def writeTags(self, mp4Path, artwork=True, thumbnail=False):
        """Write the cached TMDB metadata into the MP4 file at mp4Path."""
        self.log.info("Tagging file: %s." % mp4Path)
        ext = os.path.splitext(mp4Path)[1][1:]
        if ext not in valid_output_extensions:
            self.log.error("File is not the correct format.")
            sys.exit()

        video = MP4(mp4Path)
        try:
            video.delete()  # drop any pre-existing tags
        except IOError:
            self.log.debug("Unable to clear original tags, attempting to proceed.")

        video["\xa9nam"] = self.title  # Movie title
        video["desc"] = self.shortdescription  # Short description
        video["ldes"] = self.description  # Long description
        video["\xa9day"] = self.date  # Year
        video["stik"] = [9]  # Movie iTunes category
        if self.HD is not None:
            video["hdvd"] = self.HD
        if self.genre is not None:
            genre = None
            # Only the first genre is written (iTunes displays a single genre).
            for g in self.genre:
                if genre is None:
                    genre = g['name']
                    break
            video["\xa9gen"] = genre  # Genre(s)
        video["----:com.apple.iTunes:iTunMOVI"] = self.xml  # XML - see xmlTags method
        rating = self.rating()
        if rating is not None:
            video["----:com.apple.iTunes:iTunEXTC"] = rating

        if artwork:
            path = self.getArtwork(mp4Path)
            if path is not None:
                # BUGFIX: close the artwork file handle after reading.
                with open(path, 'rb') as artwork_file:
                    cover = artwork_file.read()
                if path.endswith('png'):
                    video["covr"] = [MP4Cover(cover, MP4Cover.FORMAT_PNG)]  # png poster
                else:
                    video["covr"] = [MP4Cover(cover, MP4Cover.FORMAT_JPEG)]  # jpeg poster

        # Record the provenance (original or tagged file name) in the encoder tag
        if self.original:
            video["\xa9too"] = "MDH:" + os.path.basename(self.original)
        else:
            video["\xa9too"] = "MDH:" + os.path.basename(mp4Path)

        # Saving can transiently fail (file in use); retry up to 3 times.
        for i in range(3):
            try:
                self.log.info("Trying to write tags.")
                video.save()
                self.log.info("Tags written successfully.")
                break
            except IOError as e:
                self.log.info("Exception: %s" % e)
                self.log.exception("There was a problem writing the tags. Retrying.")
                time.sleep(5)

    def rating(self):
        """Return the iTunes rating string (e.g. 'mpaa|Pg|200|') or None if unrated.

        BUGFIX: the original returned str(output), i.e. the literal string
        "None" for unknown ratings, which defeated the caller's
        `if rating is not None` check and wrote a bogus rating tag.
        """
        ratings = {'G': '100',
                   'PG': '200',
                   'PG-13': '300',
                   'R': '400',
                   'NC-17': '500'}
        mpaa = self.movie.get_mpaa_rating()
        if mpaa in ratings:
            return 'mpaa|' + mpaa.capitalize() + '|' + ratings[mpaa] + '|'
        return None

    def setHD(self, width, height):
        """Classify the video as 1080p ([2]), 720p ([1]) or SD ([0]) for the hdvd tag."""
        if width >= 1900 or height >= 1060:
            self.HD = [2]
        elif width >= 1260 or height >= 700:
            self.HD = [1]
        else:
            self.HD = [0]

    def xmlTags(self):
        """Build the iTunMOVI plist XML listing cast, writers, directors, producers."""
        # constants
        header = "<?xml version=\"1.0\" encoding=\"UTF-8\"?><!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\"><plist version=\"1.0\"><dict>\n"
        castheader = "<key>cast</key><array>\n"
        writerheader = "<key>screenwriters</key><array>\n"
        directorheader = "<key>directors</key><array>\n"
        producerheader = "<key>producers</key><array>\n"
        subfooter = "</array>\n"
        footer = "</dict></plist>\n"

        output = StringIO()
        output.write(header)

        def write_people(section_header, people):
            # Write one plist array section with at most 5 entries.
            output.write(section_header)
            for person in people[:5]:
                if person is not None:
                    # BUGFIX: encode/decode round-trip strips non-ASCII chars;
                    # writing the bare bytes object under Python 3 produced
                    # "b'Name'" in the XML.
                    name = person['name'].encode('ascii', 'ignore').decode('ascii')
                    output.write("<dict><key>name</key><string>%s</string></dict>\n" % name)
            output.write(subfooter)

        write_people(castheader, self.movie.get_cast())
        write_people(writerheader, self.movie.get_writers())
        write_people(directorheader, self.movie.get_directors())
        write_people(producerheader, self.movie.get_producers())

        # Write final footer
        output.write(footer)
        xml = output.getvalue()
        # BUGFIX: the original close() call sat after the return and never ran.
        output.close()
        return xml
    # end xmlTags

    def getArtwork(self, mp4Path, filename='cover'):
        """Return a poster image path: a local 'cover.*' next to the MP4 if
        present, otherwise a TMDB poster downloaded to the temp directory.
        Returns None if neither source yields an image."""
        # Check for local artwork in the same directory as the mp4
        extensions = valid_poster_extensions
        poster = None
        for e in extensions:
            head, tail = os.path.split(os.path.abspath(mp4Path))
            path = os.path.join(head, filename + os.extsep + e)
            if (os.path.exists(path)):
                poster = path
                self.log.info("Local artwork detected, using %s." % path)
                break
        # Pulls down all the poster metadata for the correct season and sorts them into the Poster object
        if poster is None:
            try:
                poster = urlretrieve(self.movie.get_poster("l"), os.path.join(tempfile.gettempdir(), "poster-%s.jpg" % self.imdbid))[0]
            except Exception as e:
                self.log.error("Exception while retrieving poster %s.", str(e))
                poster = None
        return poster
def main():
    """CLI entry point: tag the MP4 given as argv[1] with the IMDB id argv[2]."""
    if len(sys.argv) > 2:
        # Normalize backslashes so Windows paths survive shell escaping
        mp4_file = str(sys.argv[1]).replace("\\", "\\\\").replace("\\\\\\\\", "\\\\")
        imdb_id = str(sys.argv[2])
        tagger = tmdb_mp4(imdb_id)
        if os.path.splitext(mp4_file)[1][1:] in valid_output_extensions:
            tagger.writeTags(mp4_file)
        else:
            print("Wrong file type")
if __name__ == '__main__':
    main()
| 2.359375 | 2 |
tests/test_switch/test_switchshow.py | wardy3/mdssdk | 4 | 12758854 | import unittest
from tests.test_switch.vars import *
# Module logger; `logging` is provided by the wildcard vars import above.
log = logging.getLogger(__name__)
class TestSwitchShow(unittest.TestCase):
    """Exercises the switch `show` command wrapper (structured and raw output)."""
    def __init__(self, testName, sw):
        # Parameterized test case: the runner supplies the live switch object.
        super().__init__(testName)
        self.switch = sw
    def setUp(self) -> None:
        log.debug(self.switch.version)
        log.debug(self.switch.ipaddr)
        self.commands = "show vsan usage"  # command exercised by both tests
    def test_show(self):
        # Only logs the output; real assertions are still TODO (hence skipTest).
        log.debug("Output of show : " + str(self.commands))
        log.debug(self.switch.show(self.commands))
        self.skipTest("Needs to be fixed")
    def test_show_rawtext(self):
        # Second positional arg presumably requests raw-text output -- TODO confirm
        # against the switch.show() signature.
        log.debug("Output of show(raw text) : " + str(self.commands))
        log.debug(self.switch.show(self.commands, True))
        self.skipTest("Needs to be fixed")
    def tearDown(self) -> None:
        pass
| 2.703125 | 3 |
buildnotifylib/app_ui.py | hennr/buildnotify | 0 | 12758855 | <reponame>hennr/buildnotify
from time import strftime
from PyQt5 import QtCore
from PyQt5.QtWidgets import QWidget, QSystemTrayIcon
from buildnotifylib.app_menu import AppMenu
class AppUi(QtCore.QObject):
    """System-tray UI: owns the tray icon and its context menu."""
    # Emitted when the user requests a data refresh (relayed from the menu).
    reload_data = QtCore.pyqtSignal()
    def __init__(self, parent, conf, build_icons):
        super(AppUi, self).__init__(parent)
        self.widget = QWidget()  # invisible owner widget for the tray icon
        self.build_icons = build_icons
        # Start with the "unknown status" icon until the first update arrives
        self.tray = QSystemTrayIcon(self.build_icons.for_status(None), self.widget)
        self.tray.show()
        self.app_menu = AppMenu(self.widget, conf, self.build_icons)
        self.app_menu.reload_data.connect(self.reload_data)  # forward the menu's signal
        self.tray.setContextMenu(self.app_menu.menu)
    def update_projects(self, integration_status):
        """Refresh the tray icon, menu entries and tooltip from the latest build status."""
        count = len(integration_status.get_failing_builds())
        self.tray.setIcon(self.build_icons.for_aggregate_status(integration_status.get_build_status(), count))
        self.app_menu.update(integration_status.get_projects())
        self.tray.setToolTip("Last checked: " + strftime("%Y-%m-%d %H:%M:%S"))
| 2.296875 | 2 |
tests/sqlalchemy_test.py | Theelx/jsonpickle | 0 | 12758856 | <reponame>Theelx/jsonpickle
"""Test serializing sqlalchemy models"""
import unittest
from helper import SkippableTest
import jsonpickle
# sqlalchemy is an optional test dependency; every test skips itself when absent.
try:
    import sqlalchemy as sqa
    from sqlalchemy.ext import declarative
    from sqlalchemy.orm import Session
    HAS_SQA = True
except ImportError:
    HAS_SQA = False
if HAS_SQA:
    Base = declarative.declarative_base()
    class Table(Base):
        # Minimal mapped model used as the round-trip fixture.
        __tablename__ = 'table'
        id = sqa.Column(sqa.Integer, primary_key=True)
        name = sqa.Column(sqa.Text)
        value = sqa.Column(sqa.Float)
class SQLAlchemyTestCase(SkippableTest):
    """Round-trip tests for jsonpickle on SQLAlchemy mapped objects and tables."""
    def setUp(self):
        """Create a new sqlalchemy engine for the test"""
        if HAS_SQA:
            url = 'sqlite:///:memory:'
            self.engine = sqa.create_engine(url)
            # Recreate the schema so each test starts from an empty table
            Base.metadata.drop_all(self.engine)
            Base.metadata.create_all(self.engine)
            self.should_skip = False
        else:
            self.should_skip = True  # no sqlalchemy -> every test self-skips
    def test_sqlalchemy_roundtrip_with_detached_session(self):
        """Test cloned SQLAlchemy objects detached from any session"""
        if self.should_skip:
            return self.skip('sqlalchemy is not installed')
        expect = Table(name='coolness', value=11.0)
        session = Session(bind=self.engine, expire_on_commit=False)
        session.add(expect)
        session.commit()
        jsonstr = jsonpickle.dumps(expect)
        actual = jsonpickle.loads(jsonstr)
        # actual is a shadow object; it cannot be added to the same
        # session otherwise sqlalchemy will detect an identity conflict.
        # To make this work we use expire_on_commit=True so that sqlalchemy
        # allows us to do read-only operations detached from any session.
        self.assertEqual(expect.id, actual.id)
        self.assertEqual(expect.name, actual.name)
        self.assertEqual(expect.value, actual.value)
    def test_sqlalchemy_roundtrip_with_two_sessions(self):
        """Test cloned SQLAlchemy objects attached to a secondary session"""
        # NOTE(review): this body is identical to the detached-session test
        # above and never creates a second session -- confirm intent or
        # restore the secondary-session setup the docstring describes.
        if self.should_skip:
            return self.skip('sqlalchemy is not installed')
        expect = Table(name='coolness', value=11.0)
        session = Session(bind=self.engine, expire_on_commit=False)
        session.add(expect)
        session.commit()
        jsonstr = jsonpickle.dumps(expect)
        actual = jsonpickle.loads(jsonstr)
        # actual is a shadow object; it cannot be added to the same
        # session otherwise sqlalchemy will detect an identity conflict.
        # To make this work we use expire_on_commit=True so that sqlalchemy
        # allows us to do read-only operations detached from any session.
        self.assertEqual(expect.id, actual.id)
        self.assertEqual(expect.name, actual.name)
        self.assertEqual(expect.value, actual.value)
    def test_sqlalchemy_with_dynamic_table(self):
        """Test creating a table dynamically, per #180"""
        if self.should_skip:
            return self.skip('sqlalchemy is not installed')
        meta = sqa.MetaData()
        expect = sqa.Table(
            'test',
            meta,
            sqa.Column('id', sqa.Integer()),
            sqa.Column('text', sqa.Text()),
        )
        jsonstr = jsonpickle.dumps(expect)
        actual = jsonpickle.loads(jsonstr)
        self.assertEqual(expect.__class__, actual.__class__)
        self.assertEqual(expect.name, actual.name)
        # These must be unique instances
        self.assertNotEqual(expect.metadata, actual.metadata)
        # Columns names must exactly match
        self.assertEqual(sorted(expect.columns.keys()), sorted(actual.columns.keys()))
        # As should the types
        self.assertEqual(expect.c.id.name, actual.c.id.name)
        self.assertEqual(expect.c.id.type.__class__, actual.c.id.type.__class__)
        self.assertEqual(expect.c.text.name, actual.c.text.name)
        self.assertEqual(expect.c.text.type.__class__, actual.c.text.type.__class__)
def suite():
    """Build the test suite for this module.

    Uses TestLoader instead of unittest.makeSuite, which was deprecated and
    removed in Python 3.13; the 'test' method prefix matches the old call.
    """
    loader = unittest.TestLoader()
    loader.testMethodPrefix = 'test'
    suite = unittest.TestSuite()
    suite.addTest(loader.loadTestsFromTestCase(SQLAlchemyTestCase))
    return suite
# Allow running this module directly: python tests/sqlalchemy_test.py
if __name__ == '__main__':
    unittest.main()
| 2.640625 | 3 |
api/vm/define/vm_define_nic.py | DigitalOzUT/esdc-ce | 0 | 12758857 | <reponame>DigitalOzUT/esdc-ce
from django.db.transaction import atomic
from api import status as scode
from api.utils.db import get_listitem
from api.task.response import SuccessTaskResponse, FailureTaskResponse
from api.vm.define.utils import is_vm_operational
from api.vm.define.api_views import VmDefineBaseView
from api.vm.define.serializers import VmDefineNicSerializer
from api.vm.messages import LOG_NIC_CREATE, LOG_NIC_UPDATE, LOG_NIC_DELETE
# Valid NIC index bounds accepted by the API (ids 0..5, i.e. at most 6 NICs).
NIC_ID_MIN = 0
NIC_ID_MAX = 5
def _nic_params(fun):
    """Decorator for nic functions below.

    Resolves the requested NIC(s) from the VM json and passes (nics, nic)
    into the wrapped view method.
    """
    def wrap(view, vm, nic_id, *args, **kwargs):
        # Short-circuit: the diff view compares active vs. DB config and
        # never touches a concrete NIC.
        if nic_id is None and view.diff:
            return SuccessTaskResponse(view.request, view.get_diff(vm))
        if view.active:
            vm.revert_active(json_only=True)  # operate on the active (running) json
        if nic_id is None:
            # No id -> return the whole NIC list; serializer runs in many=True mode.
            nic = vm.json_get_nics()
            nics = None
            kwargs['many'] = True
        else:
            # Validates nic_id bounds and returns both the list and the item.
            nics, nic = get_listitem(view.request, vm.json_get_nics(), nic_id, name='VM NIC',
                                     max_value=NIC_ID_MAX, min_value=NIC_ID_MIN)
        return fun(view, vm, nic_id, nics, nic, *args, **kwargs)
    return wrap
class VmDefineNicView(VmDefineBaseView):
    """CRUD API view for a VM's NIC definitions (stored in the VM json)."""
    def get_diff(self, vm):
        """Show nic differences between active and in db json. Implies full and denies active vm_define_nic."""
        def_current = VmDefineNicSerializer(self.request, vm, vm.json_get_nics(), nic_id=None, many=True).data
        def_active = VmDefineNicSerializer(self.request, vm, vm.json_active_get_nics(), nic_id=None, many=True).data
        return self._diff_lists(def_active, def_current)
    # noinspection PyUnusedLocal
    @_nic_params
    def get(self, vm, nic_id, nics, nic, data, many=False):
        """Get VM nic definition"""
        ser = VmDefineNicSerializer(self.request, vm, nic, nic_id=nic_id, many=many)
        return SuccessTaskResponse(self.request, ser.data, vm=vm)
    # noinspection PyUnusedLocal
    @is_vm_operational
    @atomic
    @_nic_params
    def post(self, vm, nic_id, nics, nic, data):
        """Create VM nic definition"""
        ser = VmDefineNicSerializer(self.request, vm, nic_id=nic_id, data=data)
        if ser.is_valid():
            nics[nic_id] = ser.jsondata
            vm.resolvers = ser.resolvers
            vm.save_nics(nics, monitoring_ip=ser.get_monitoring_ip())
            # nic_id is 0-based internally but reported 1-based to the user
            res = SuccessTaskResponse(self.request, ser.data,
                                      status=scode.HTTP_201_CREATED, vm=vm,
                                      detail='nic_id=' + str(nic_id + 1), detail_dict=ser.detail_dict(),
                                      msg=LOG_NIC_CREATE)
            ser.save_ip(res.data.get('task_id'))  # Always save ip.vm
            return res
        return FailureTaskResponse(self.request, ser.errors, vm=vm)
    @is_vm_operational
    @atomic
    @_nic_params
    def put(self, vm, nic_id, nics, nic, data):
        """Update VM nic definition"""
        # partial=True: only the supplied fields are validated and merged below
        ser = VmDefineNicSerializer(self.request, vm, nic.copy(), nic_id=nic_id, data=data, partial=True)
        if ser.is_valid():
            nics[nic_id].update(ser.jsondata)
            vm.resolvers = ser.resolvers
            vm.save_nics(nics, monitoring_ip=ser.get_monitoring_ip())
            res = SuccessTaskResponse(self.request, ser.data, vm=vm,
                                      detail='nic_id=' + str(nic_id + 1), detail_dict=ser.detail_dict(),
                                      msg=LOG_NIC_UPDATE)
            ser.update_ip(res.data.get('task_id'))  # Always update ip.vm
            return res
        return FailureTaskResponse(self.request, ser.errors, vm=vm)
    # noinspection PyUnusedLocal
    @is_vm_operational
    @atomic
    @_nic_params
    def delete(self, vm, nic_id, nics, nic, data):
        """Delete VM nic definition"""
        ser = VmDefineNicSerializer(self.request, vm, nic)
        del nics[nic_id]
        vm.save_nics(nics, monitoring_ip=ser.get_monitoring_ip(delete=True))
        res = SuccessTaskResponse(self.request, None, vm=vm,
                                  detail='nic_id=' + str(nic_id + 1),
                                  msg=LOG_NIC_DELETE)
        ser.delete_ip(res.data.get('task_id'))  # Set ip.vm to None
        return res
| 1.859375 | 2 |
tests/whist/server/api/user/test_auth.py | Whist-Team/Whist-Server | 1 | 12758858 | <reponame>Whist-Team/Whist-Server
import unittest
from starlette.testclient import TestClient
from whist.server import app
from whist.server.database import db
class AuthTestCase(unittest.TestCase):
    """Tests for the /user/auth endpoint (login form -> bearer token)."""
    def setUp(self) -> None:
        self.client = TestClient(app)
        # NOTE(review): '<PASSWORD>' looks like a redaction placeholder left by
        # an anonymization pass -- restore a real fixture password.
        self.login_creds = {'username': 'marcel', 'password': '<PASSWORD>'}
        _ = self.client.post(url='/user/create', json=self.login_creds)
    def tearDown(self) -> None:
        db.user.drop()  # drop the users collection so tests stay independent
    def test_auth_user(self):
        # Valid credentials: expect 200 plus a token in the payload.
        response = self.client.post(url='/user/auth/', data=self.login_creds)
        self.assertEqual(response.status_code, 200, msg=response.content)
        self.assertTrue('token' in response.json())
    def test_wrong_password(self):
        # NOTE(review): the "wrong" password is also the '<PASSWORD>' placeholder,
        # so it may accidentally equal the real one -- verify after restoring.
        response = self.client.post(url='/user/auth/',
                                    data={'username': 'marcel', 'password': '<PASSWORD>'})
        self.assertEqual(response.status_code, 401, msg=response.content)
        self.assertFalse('token' in response.json())
    def test_no_password(self):
        # Missing form field -> framework validation error (422).
        response = self.client.post(url='/user/auth/', data={'username': 'marcel'})
        self.assertEqual(response.status_code, 422, msg=response.content)
        self.assertFalse('token' in response.json())
    def test_no_username(self):
        response = self.client.post(url='/user/auth/', data={'password': '<PASSWORD>'})
        self.assertEqual(response.status_code, 422, msg=response.content)
        self.assertFalse('token' in response.json())
| 2.625 | 3 |
tweetme.py | storymode7/tweet-me | 1 | 12758859 | import os
import json
import logging
from birdy.twitter import UserClient
logging.basicConfig(filename='tweetme.log', format='%(asctime)s %(message)s', level=logging.DEBUG)
def get_config_from_file(filename="config.json"):
    """Load Twitter API credentials from *filename*.

    If the file does not exist, a template with every value set to the
    placeholder ``0`` is created so the user can fill it in.

    Returns:
        tuple: ``(consumer_key, consumer_secret, access_token,
        access_token_secret)`` when every value has been filled in.
        False: when the file was just created or still contains the
        placeholder value ``0``.
    """
    # os.path.exists() instead of `filename not in os.listdir()`: the
    # original only worked for bare filenames in the current directory and
    # broke for any path containing a directory component.
    if not os.path.exists(filename):
        # First run: write a template the user must edit.
        with open(filename, mode='w') as f:
            json.dump({
                'consumer_key': 0,
                'consumer_secret': 0,
                'access_token': 0,
                'access_token_secret': 0,
            }, f)
        return False
    with open(filename, mode='r') as f:
        config = json.load(f)
    if 0 in config.values():
        # At least one credential is still the placeholder.
        return False
    return (
        config["consumer_key"],
        config["consumer_secret"],
        config["access_token"],
        config["access_token_secret"],
    )
def get_next_tweet_from_file(tweets_file='tweets.txt', turn_file='next_tweet_index.txt'):
    """
    This function reads Tweets file and Turn file and gets the next tweet.

    Returns False: if tweets.txt is not present
    Returns next Tweet: if valid tweets.txt is present — as the raw line
    split on "::" (i.e. [text, url], surrounding whitespace included)
    """
    # NOTE(review): both existence checks look in the current working
    # directory only — presumably the script is always run from the project
    # directory; confirm before passing paths containing directories.
    if tweets_file not in os.listdir():
        """When tweets.txt is not present"""
        # Seed a template tweets file and reset the index to 0.
        with open(tweets_file, mode='w') as f:
            f.write('Tweet :: URL\n')
        with open(turn_file, mode='w') as f:
            f.write('0')
        return False
    elif turn_file not in os.listdir():
        """When next_tweet_index.txt is not present, creates a new next_tweet_index.txt and writes 1 in it
        and return the first tweet from tweets.txt"""
        with open(turn_file, mode='w') as f:
            f.write('1')
        with open(tweets_file, mode='r') as f:
            tweet_text = f.readline()
        # NOTE(review): if tweets.txt still holds only the seeded
        # 'Tweet :: URL' header, that header line is what gets returned here.
        return tweet_text.split("::")
    else:
        """When both files are present, check next_tweet_index.txt and use it's value as index to
        find the next tweet from tweets.txt and write index + 1 in next_tweet_index.txt"""
        with open(turn_file, mode='r') as f:
            turn = int(f.readline())
        with open(tweets_file, mode='r') as f:
            tweets = f.readlines()
        if len(tweets) <= turn:
            # Wrap around to the first tweet once the end is reached.
            turn = 0
        with open(turn_file, mode='w') as f:
            f.write(str(turn + 1))
        return tweets[turn].split("::")
def manage_twitter_client():
    """
    This function will create twitter client using configurations and send tweet.
    """
    configError = (
        "Please open config.json file located in the project directory and"
        "replace the value '0' of all the tokens and keys in order to make "
        "this bot work. Visit https://apps.twitter.com/ in order to get your "
        "tokens and keys."
    )
    keys = get_config_from_file()
    if not keys:
        # Credentials missing or still placeholders — log an error and stop.
        logging.error(configError)
    else:
        tweet = get_next_tweet_from_file()
        if tweet:
            # tweet is the next line split on '::', i.e. [text, url].
            # NOTE(review): a tweets.txt line without '::' yields a
            # single-element list and tweet[1] would raise IndexError —
            # confirm the file format is always 'text :: url'.
            client = UserClient(*keys)
            response = client.api.statuses.update.post(status='{} {}'.format(tweet[0], tweet[1]))
            # NOTE(review): the log message below has a typo ('You tweet')
            # and no space between the two concatenated sentences; left
            # unchanged here since it is runtime output.
            logging.info(
                'You tweet is out in the world.'
                'Check it out https://twitter.com/{}/status/{}'.format(
                    response.data["user"]["screen_name"],
                    response.data["id_str"]
                )
            )
if __name__ == '__main__':
manage_twitter_client()
| 3.03125 | 3 |
feature_raster/Transformers/CommonIndex.py | hectorpatino/feature_raster | 0 | 12758860 | <gh_stars>0
from sklearn.base import BaseEstimator, TransformerMixin
import numpy as np
# to avoid the warning o generating a RuntimeWarning
np.seterr(divide='ignore', invalid='ignore')
def normalize_difference_indexes_minus_plus(band_1, band_2):
    """Compute a generic normalized-difference index.

    Evaluates ``(band_2 - band_1) / (band_2 + band_1)`` element-wise in
    float64, returning 0 wherever the denominator is zero (instead of
    NaN/inf).

    Parameters
    __________
    band_1: numpy array
    band_2: numpy array

    Returns
    _______
    numpy.ndarray of float64
    """
    numerator = np.subtract(band_2, band_1, dtype=np.float64)
    denominator = np.add(band_1, band_2, dtype=np.float64)
    # np.divide with `where=` skips the zero-denominator elements entirely,
    # so this no longer relies on the module-level np.seterr() call to
    # silence divide-by-zero warnings (the original computed the invalid
    # divisions first and patched them up with np.where afterwards).
    index = np.divide(numerator, denominator,
                      out=np.zeros_like(numerator),
                      where=denominator != 0.)
    return index
class CommonIndex(BaseEstimator, TransformerMixin):
    """Identity transformer placeholder for common spectral indexes.

    Follows the scikit-learn transformer API so it can participate in a
    Pipeline.
    """

    def __init__(self):
        pass

    def fit(self, x=None, y=None):
        """No-op fit; returns self as the sklearn API requires.

        ``x`` and ``y`` are accepted (and ignored) because
        ``TransformerMixin.fit_transform`` calls ``fit(X, y)``; the original
        signature ``fit(self)`` made ``fit_transform`` raise TypeError.
        Defaults keep the old no-argument ``fit()`` call working.
        """
        return self

    def transform(self, x):
        # TODO x must be a dataframe
        return x
| 2.6875 | 3 |
src/bpp/models/sloty/wydawnictwo_ciagle.py | iplweb/django-bpp | 1 | 12758861 | from decimal import Decimal
from .common import SlotMixin
class SlotKalkulator_Wydawnictwo_Ciagle_Prog1(SlotMixin):
    """Journal article from the ministerial list.

    For the years 2017, 2018: KBN points >= 30.
    """

    def punkty_pkd(self, dyscyplina):
        """PKD points: the article's full KBN points when the discipline matches."""
        if not self.ma_dyscypline(dyscyplina):
            return None
        return self.original.punkty_kbn

    def slot_dla_autora_z_dyscypliny(self, dyscyplina):
        """Per-author slot: 1/N over the N authors in the discipline."""
        author_count = len(self.autorzy_z_dyscypliny(dyscyplina))
        if author_count:
            return Decimal("1") / author_count
        return None

    def slot_dla_dyscypliny(self, dyscyplina):
        """Whole-publication slot for the discipline: 1 when it matches."""
        return Decimal("1") if self.ma_dyscypline(dyscyplina) else None
class SlotKalkulator_Wydawnictwo_Ciagle_Prog2(SlotMixin):
    """
    Journal article from the ministerial list.
    For the years 2017-2018: KBN points equal to 20 or 25.
    """
    def punkty_pkd(self, dyscyplina):
        # PKD points: KBN points scaled by pierwiastek_k_przez_m (Polish:
        # "square root of k over m" — exact semantics defined in SlotMixin),
        # floored at 0.1; 0 if the discipline has no counted authors.
        if self.ma_dyscypline(dyscyplina):
            pierwiastek = self.pierwiastek_k_przez_m(dyscyplina)
            if pierwiastek is None:
                return None
            if self.liczba_k(dyscyplina) == 0:
                return 0
            return self.original.punkty_kbn * max(pierwiastek, Decimal("0.1"))
    def slot_dla_autora_z_dyscypliny(self, dyscyplina):
        # Per-author slot: the sqrt(k/m) factor split evenly over the
        # discipline's authors; implicitly None if there are none.
        if not self.ma_dyscypline(dyscyplina):
            return
        azd = len(self.autorzy_z_dyscypliny(dyscyplina))
        if azd > 0:
            return self.pierwiastek_k_przez_m(dyscyplina) * 1 / azd
    def slot_dla_dyscypliny(self, dyscyplina):
        # Whole-publication slot for the discipline: the sqrt(k/m) factor.
        if not self.ma_dyscypline(dyscyplina):
            return
        return self.pierwiastek_k_przez_m(dyscyplina)
class SlotKalkulator_Wydawnictwo_Ciagle_Prog3(SlotMixin):
    """
    Journal article from the ministerial list.
    For the years 2017-2018: KBN points below 20, or 5.
    """
    def punkty_pkd(self, dyscyplina):
        # PKD points: KBN points scaled by k_przez_m (Polish: "k over m" —
        # exact semantics defined in SlotMixin), floored at 0.1; 0 if the
        # discipline has no counted authors.
        if self.ma_dyscypline(dyscyplina):
            k_przez_m = self.k_przez_m(dyscyplina)
            if k_przez_m is None:
                return
            if self.liczba_k(dyscyplina) == 0:
                return 0
            return self.original.punkty_kbn * max(k_przez_m, Decimal("0.1"))
    def slot_dla_autora_z_dyscypliny(self, dyscyplina):
        # Per-author slot: 1 over the total author count (SlotMixin helper).
        if not self.ma_dyscypline(dyscyplina):
            return
        return self.jeden_przez_wszyscy()
    def slot_dla_dyscypliny(self, dyscyplina):
        # Whole-publication slot: per-author share times the number of
        # authors in the discipline.
        if not self.ma_dyscypline(dyscyplina):
            return
        return self.jeden_przez_wszyscy() * len(self.autorzy_z_dyscypliny(dyscyplina))
| 2.9375 | 3 |
tests/test_xopen.py | fanninpm/xopen | 0 | 12758862 | """
Tests for the xopen.xopen function
"""
import bz2
from contextlib import contextmanager
import functools
import gzip
import io
import itertools
import lzma
import os
from pathlib import Path
import shutil
import pytest
from xopen import xopen
# TODO this is duplicated in test_piped.py
TEST_DIR = Path(__file__).parent
CONTENT_LINES = ["Testing, testing ...\n", "The second line.\n"]
CONTENT = "".join(CONTENT_LINES)
extensions = ["", ".gz", ".bz2", ".xz"]
base = os.path.join(os.path.dirname(__file__), "file.txt")
files = [base + ext for ext in extensions]
@contextmanager
def disable_binary(tmp_path, binary_name):
    """
    Find the location of the binary by its name, then set PATH to a directory that contains
    the binary with permissions set to 000. If no suitable binary could be found,
    PATH is set to an empty directory.

    PATH is always restored on exit, even if the body raises.
    """
    # Capture PATH before anything that can raise: the original read it
    # after shutil.copy()/os.chmod(), so a failure there left `path` unbound
    # and the finally clause raised UnboundLocalError, masking the real error.
    path = os.environ["PATH"]
    try:
        binary_path = shutil.which(binary_name)
        if binary_path:
            shutil.copy(binary_path, tmp_path)
            os.chmod(tmp_path / binary_name, 0)
        os.environ["PATH"] = str(tmp_path)
        yield
    finally:
        os.environ["PATH"] = path
@pytest.fixture(params=extensions)
def ext(request):
return request.param
@pytest.fixture(params=files)
def fname(request):
return request.param
@pytest.fixture
def lacking_pigz_permissions(tmp_path):
with disable_binary(tmp_path, "pigz"):
yield
@pytest.fixture
def lacking_pbzip2_permissions(tmp_path):
with disable_binary(tmp_path, "pbzip2"):
yield
@pytest.fixture
def xopen_without_igzip(monkeypatch):
import xopen # xopen local overrides xopen global variable
monkeypatch.setattr(xopen, "igzip", None)
return xopen.xopen
def test_text(fname):
with xopen(fname, "rt") as f:
lines = list(f)
assert len(lines) == 2
assert lines[1] == "The second line.\n", fname
def test_binary(fname):
with xopen(fname, "rb") as f:
lines = list(f)
assert len(lines) == 2
assert lines[1] == b"The second line.\n", fname
def test_binary_no_isal_no_threads(fname, xopen_without_igzip):
with xopen_without_igzip(fname, "rb", threads=0) as f:
lines = list(f)
assert len(lines) == 2
assert lines[1] == b"The second line.\n", fname
def test_binary_no_isal(fname, xopen_without_igzip):
with xopen_without_igzip(fname, "rb", threads=1) as f:
lines = list(f)
assert len(lines) == 2
assert lines[1] == b"The second line.\n", fname
def test_no_context_manager_text(fname):
f = xopen(fname, "rt")
lines = list(f)
assert len(lines) == 2
assert lines[1] == "The second line.\n", fname
f.close()
assert f.closed
def test_no_context_manager_binary(fname):
f = xopen(fname, "rb")
lines = list(f)
assert len(lines) == 2
assert lines[1] == b"The second line.\n", fname
f.close()
assert f.closed
def test_bytes_path(fname):
path = fname.encode("utf-8")
with xopen(path, "rt") as f:
lines = list(f)
assert len(lines) == 2
assert lines[1] == "The second line.\n", fname
def test_readinto(fname):
content = CONTENT.encode("utf-8")
with xopen(fname, "rb") as f:
b = bytearray(len(content) + 100)
length = f.readinto(b)
assert length == len(content)
assert b[:length] == content
def test_detect_file_format_from_content(ext, tmp_path):
path = tmp_path / f"file.txt{ext}.test"
shutil.copy(TEST_DIR / f"file.txt{ext}", path)
with xopen(path, "rb") as fh:
assert fh.readline() == CONTENT_LINES[0].encode("utf-8")
def test_readline(fname):
first_line = CONTENT_LINES[0].encode("utf-8")
with xopen(fname, "rb") as f:
assert f.readline() == first_line
def test_readline_text(fname):
with xopen(fname, "r") as f:
assert f.readline() == CONTENT_LINES[0]
def test_next(fname):
with xopen(fname, "rt") as f:
_ = next(f)
line2 = next(f)
assert line2 == "The second line.\n", fname
def test_has_iter_method(ext, tmp_path):
path = tmp_path / f"out{ext}"
with xopen(path, mode="w") as f:
# Writing anything isn’t strictly necessary, but if we don’t, then
# pbzip2 causes a delay of one second
f.write("hello")
assert hasattr(f, "__iter__")
def test_iter_without_with(fname):
f = xopen(fname, "rt")
it = iter(f)
assert CONTENT_LINES[0] == next(it)
f.close()
@pytest.mark.parametrize("extension", [".gz", ".bz2"])
def test_partial_iteration_closes_correctly(extension, create_large_file):
class LineReader:
def __init__(self, file):
self.file = xopen(file, "rb")
def __iter__(self):
wrapper = io.TextIOWrapper(self.file)
yield from wrapper
large_file = create_large_file(extension)
f = LineReader(large_file)
next(iter(f))
f.file.close()
def test_nonexisting_file(ext):
with pytest.raises(IOError):
with xopen("this-file-does-not-exist" + ext):
pass # pragma: no cover
def test_write_to_nonexisting_dir(ext):
with pytest.raises(IOError):
with xopen("this/path/does/not/exist/file.txt" + ext, "w"):
pass # pragma: no cover
def test_invalid_mode(ext):
with pytest.raises(ValueError):
with xopen(TEST_DIR / f"file.txt.{ext}", mode="hallo"):
pass # pragma: no cover
def test_filename_not_a_string():
with pytest.raises(TypeError):
with xopen(123, mode="r"):
pass # pragma: no cover
def test_invalid_compression_level(tmp_path):
with pytest.raises(ValueError) as e:
with xopen(tmp_path / "out.gz", mode="w", compresslevel=17) as f:
f.write("hello") # pragma: no cover
assert "compresslevel must be" in e.value.args[0]
@pytest.mark.parametrize("ext", extensions)
def test_append(ext, tmp_path):
text = b"AB"
reference = text + text
path = tmp_path / f"the-file{ext}"
with xopen(path, "ab") as f:
f.write(text)
with xopen(path, "ab") as f:
f.write(text)
with xopen(path, "r") as f:
for appended in f:
pass
reference = reference.decode("utf-8")
assert appended == reference
@pytest.mark.parametrize("ext", extensions)
def test_append_text(ext, tmp_path):
text = "AB"
reference = text + text
path = tmp_path / f"the-file{ext}"
with xopen(path, "at") as f:
f.write(text)
with xopen(path, "at") as f:
f.write(text)
with xopen(path, "rt") as f:
for appended in f:
pass
assert appended == reference
@pytest.mark.timeout(5)
@pytest.mark.parametrize("extension", [".gz", ".bz2", ".xz"])
def test_truncated_file(extension, create_truncated_file):
truncated_file = create_truncated_file(extension)
with pytest.raises((EOFError, IOError)):
f = xopen(truncated_file, "r")
f.read()
f.close() # pragma: no cover
@pytest.mark.timeout(5)
@pytest.mark.parametrize("extension", [".gz", ".bz2", ".xz"])
def test_truncated_iter(extension, create_truncated_file):
truncated_file = create_truncated_file(extension)
with pytest.raises((EOFError, IOError)):
f = xopen(truncated_file, "r")
for line in f:
pass
f.close() # pragma: no cover
@pytest.mark.timeout(5)
@pytest.mark.parametrize("extension", [".gz", ".bz2", ".xz"])
def test_truncated_with(extension, create_truncated_file):
truncated_file = create_truncated_file(extension)
with pytest.raises((EOFError, IOError)):
with xopen(truncated_file, "r") as f:
f.read()
@pytest.mark.timeout(5)
@pytest.mark.parametrize("extension", [".gz", ".bz2", ".xz"])
def test_truncated_iter_with(extension, create_truncated_file):
truncated_file = create_truncated_file(extension)
with pytest.raises((EOFError, IOError)):
with xopen(truncated_file, "r") as f:
for line in f:
pass
def test_bare_read_from_gz():
hello_file = TEST_DIR / "hello.gz"
with xopen(hello_file, "rt") as f:
assert f.read() == "hello"
def test_read_no_threads(ext):
klasses = {
".bz2": bz2.BZ2File,
".gz": gzip.GzipFile,
".xz": lzma.LZMAFile,
"": io.BufferedReader,
}
klass = klasses[ext]
with xopen(TEST_DIR / f"file.txt{ext}", "rb", threads=0) as f:
assert isinstance(f, klass), f
def test_write_threads(tmp_path, ext):
path = tmp_path / f"out.{ext}"
with xopen(path, mode="w", threads=3) as f:
f.write("hello")
with xopen(path) as f:
assert f.read() == "hello"
def test_write_pigz_threads_no_isal(tmp_path, xopen_without_igzip):
path = tmp_path / "out.gz"
with xopen_without_igzip(path, mode="w", threads=3) as f:
f.write("hello")
with xopen_without_igzip(path) as f:
assert f.read() == "hello"
def test_write_no_threads(tmp_path, ext):
klasses = {
".bz2": bz2.BZ2File,
".gz": gzip.GzipFile,
".xz": lzma.LZMAFile,
"": io.BufferedWriter,
}
klass = klasses[ext]
with xopen(tmp_path / f"out.{ext}", "wb", threads=0) as f:
assert isinstance(f, io.BufferedWriter)
if ext:
assert isinstance(f.raw, klass), f
def test_write_gzip_no_threads_no_isal(tmp_path, xopen_without_igzip):
import gzip
with xopen_without_igzip(tmp_path / "out.gz", "wb", threads=0) as f:
assert isinstance(f.raw, gzip.GzipFile), f
def test_write_stdout():
f = xopen("-", mode="w")
print("Hello", file=f)
f.close()
# ensure stdout is not closed
print("Still there?")
def test_write_stdout_contextmanager():
# Do not close stdout
with xopen("-", mode="w") as f:
print("Hello", file=f)
# ensure stdout is not closed
print("Still there?")
def test_read_pathlib(fname):
path = Path(fname)
with xopen(path, mode="rt") as f:
assert f.read() == CONTENT
def test_read_pathlib_binary(fname):
path = Path(fname)
with xopen(path, mode="rb") as f:
assert f.read() == bytes(CONTENT, "ascii")
def test_write_pathlib(ext, tmp_path):
path = tmp_path / f"hello.txt{ext}"
with xopen(path, mode="wt") as f:
f.write("hello")
with xopen(path, mode="rt") as f:
assert f.read() == "hello"
def test_write_pathlib_binary(ext, tmp_path):
path = tmp_path / f"hello.txt{ext}"
with xopen(path, mode="wb") as f:
f.write(b"hello")
with xopen(path, mode="rb") as f:
assert f.read() == b"hello"
def test_falls_back_to_gzip_open(lacking_pigz_permissions):
with xopen(TEST_DIR / "file.txt.gz", "rb") as f:
assert f.readline() == CONTENT_LINES[0].encode("utf-8")
def test_falls_back_to_gzip_open_no_isal(lacking_pigz_permissions, xopen_without_igzip):
with xopen_without_igzip(TEST_DIR / "file.txt.gz", "rb") as f:
assert f.readline() == CONTENT_LINES[0].encode("utf-8")
def test_fals_back_to_gzip_open_write_no_isal(
lacking_pigz_permissions, xopen_without_igzip, tmp_path
):
tmp = tmp_path / "test.gz"
with xopen_without_igzip(tmp, "wb") as f:
f.write(b"hello")
assert gzip.decompress(tmp.read_bytes()) == b"hello"
def test_falls_back_to_bzip2_open(lacking_pbzip2_permissions):
with xopen(TEST_DIR / "file.txt.bz2", "rb") as f:
assert f.readline() == CONTENT_LINES[0].encode("utf-8")
def test_open_many_writers(tmp_path, ext):
files = []
# Because lzma.open allocates a lot of memory,
# open fewer files to avoid MemoryError on 32-bit architectures
n = 21 if ext == ".xz" else 61
for i in range(1, n):
path = tmp_path / f"{i:03d}.txt{ext}"
f = xopen(path, "wb", threads=2)
f.write(b"hello")
files.append(f)
for f in files:
f.close()
def test_override_output_format(tmp_path):
path = tmp_path / "test_gzip_compressed"
with xopen(path, mode="wb", format="gz") as f:
f.write(b"test")
test_contents = path.read_bytes()
assert test_contents.startswith(b"\x1f\x8b") # Gzip magic
assert gzip.decompress(test_contents) == b"test"
def test_override_output_format_unsupported_format(tmp_path):
path = tmp_path / "test_fairy_format_compressed"
with pytest.raises(ValueError) as error:
xopen(path, mode="wb", format="fairy")
error.match("not supported")
error.match("fairy")
def test_override_output_format_wrong_format(tmp_path):
path = tmp_path / "not_compressed"
path.write_text("I am not compressed.")
with pytest.raises(OSError): # BadGzipFile is a subclass of OSError
with xopen(path, "rt", format="gz") as opened_file:
opened_file.read()
# Test for threaded and non-threaded.
OPENERS = (xopen, functools.partial(xopen, threads=0))
@pytest.mark.parametrize(
["opener", "extension"], itertools.product(OPENERS, extensions)
)
def test_text_encoding_newline_passtrough(opener, extension, tmp_path):
# "Eén ree\nTwee reeën\n" latin-1 encoded with \r for as line separator.
encoded_text = b"E\xe9n ree\rTwee ree\xebn\r"
path = tmp_path / f"test.txt{extension}"
with opener(path, "wb") as f:
f.write(encoded_text)
with opener(path, "rt", encoding="latin-1", newline="\r") as f:
result = f.read()
assert result == "Eén ree\rTwee reeën\r"
@pytest.mark.parametrize(
["opener", "extension"], itertools.product(OPENERS, extensions)
)
def test_text_encoding_errors(opener, extension, tmp_path):
# "Eén ree\nTwee reeën\n" latin-1 encoded. This is not valid ascii.
encoded_text = b"E\xe9n ree\nTwee ree\xebn\n"
path = tmp_path / f"test.txt{extension}"
with opener(path, "wb") as f:
f.write(encoded_text)
with opener(path, "rt", encoding="ascii", errors="replace") as f:
result = f.read()
assert result == "E�n ree\nTwee ree�n\n"
| 2.484375 | 2 |
rcm/rcm_merge.py | breadcrumbbuilds/bcws-psu-research | 0 | 12758863 | # the -n is important on gdal_merge otherwise data gets stomped!
import os
sep = os.path.sep
s = ['5MCP19/1/20210710/rgb.bin',
'5MCP19/1/20210722/rgb.bin',
'5MCP19/1/20210714/rgb.bin',
'5MCP19/2/20210710/rgb.bin',
'5MCP19/2/20210722/rgb.bin',
'5MCP19/2/20210714/rgb.bin',
'5MCP18/1/20210718/rgb.bin',
'5MCP18/2/20210718/rgb.bin',
'5MCP13/1/20210715/rgb.bin',
'5MCP13/1/20210719/rgb.bin',
'5MCP13/2/20210715/rgb.bin',
'5MCP13/2/20210719/rgb.bin',
'5MCP7/1/20210716/rgb.bin',
'5MCP7/1/20210720/rgb.bin',
'5MCP7/1/20210708/rgb.bin',
'5MCP7/2/20210716/rgb.bin',
'5MCP7/2/20210720/rgb.bin',
'5MCP7/2/20210708/rgb.bin']
d = {}
# identify pairs to merge
for i in s:
w = i.split(sep)
beam = w[0] # beam mode
st = w[1] # dataset number
date = w[2] # date
key = beam + '_' + date# should be two sets per beam_date
if key not in d: d[key] = []
d[key].append(i)
c = []
for k in d:
print(k, d[k])
c += ['gdal_merge.py -n -o ' + k + '.bin -of ENVI -ot Float32 ' + (' '.join(d[k]))]
import multiprocessing as mp
def run(c):
    """Execute shell command *c* via os.system and return its exit status."""
    status = os.system(c)
    return status
def parfor(my_function, my_inputs, n_thread=mp.cpu_count()):  # eval fxn in parallel, collect
    """Evaluate my_function over my_inputs in a process pool; results in order.

    n_thread defaults to the CPU count (evaluated once at import time).
    """
    # The `with` block closes and joins the pool; the original created the
    # Pool and never called close()/join(), leaking worker processes.
    with mp.Pool(n_thread) as pool:
        return pool.map(my_function, my_inputs)
parfor(run, c, 4)
| 1.90625 | 2 |
flake8_pie/tests/test_pie784_celery_crontab_args.py | sbdchd/flake8-pie | 23 | 12758864 | from __future__ import annotations
import ast
import pytest
from flake8_pie import Flake8PieCheck
from flake8_pie.base import Error
from flake8_pie.pie784_celery_crontab_args import PIE784, _is_invalid_celery_crontab
from flake8_pie.tests.utils import to_errors
@pytest.mark.parametrize(
"code,expected",
[
(
"""
crontab(hour="0,12")
""",
PIE784(lineno=2, col_offset=0),
),
(
"""
crontab(hour="0,12", minute="*")
""",
None,
),
(
"""
crontab(hour="0,12", minute="*"),
""",
None,
),
(
"""
crontab(day_of_month="*", hour="0,12"),
""",
PIE784(lineno=2, col_offset=0),
),
(
"""
crontab(day_of_week="*", minute="*"),
""",
PIE784(lineno=2, col_offset=0),
),
(
"""
crontab(month_of_year="*", day_of_month="*", hour="0,12", minute="*"),
""",
PIE784(lineno=2, col_offset=0),
),
(
"""
crontab(),
""",
PIE784(lineno=2, col_offset=0),
),
(
"""
crontab(minute="*/5")
""",
None,
),
],
)
def test_celery_crontab_named_args(code: str, expected: Error | None) -> None:
"""
ensure we pass a explicit params to celery's crontab
see: https://github.com/celery/celery/blob/0736cff9d908c0519e07babe4de9c399c87cb32b/celery/schedules.py#L403
You must pass all the params below the level you are creating.
So if you pass hour, then you must pass minutes.
If you pass the day arg then you must provide hours and minutes, etc.
params: minute, hour, day_of_week, day_of_month, month_of_year
"""
node = ast.parse(code)
assert isinstance(node, ast.Module)
expected_errors = [expected] if expected else []
assert (
to_errors(Flake8PieCheck(node, filename="foo.py").run())
) == expected_errors, "missing a required argument"
@pytest.mark.parametrize(
"args,expected",
[
({"minute", "hour"}, False),
({"hour"}, True),
({"hour", "day_of_week"}, True),
({"minute", "hour", "day_of_week"}, False),
(
{
"minute",
"hour",
"day_of_week",
"day_of_month",
"month_of_year",
"another_random_arg",
},
False,
),
({"minute", "hour", "day_of_week", "day_of_month", "month_of_year"}, False),
],
)
def test_invalid_celery_crontab_kwargs(args: list[str], expected: bool) -> None:
kwargs = [ast.keyword(arg=arg, value=ast.Str(s="0,1")) for arg in args]
assert _is_invalid_celery_crontab(kwargs=kwargs) == expected
| 2.1875 | 2 |
src/infi/recipe/application_packager/installer/__init__.py | Infinidat/infi.recipe.application_packager | 2 | 12758865 | from __future__ import print_function
from infi.execute import execute
import os
import glob
import logging
import shutil
import platform
import hashlib
import stat
from contextlib import contextmanager
from six.moves.configparser import ConfigParser, NoOptionError
from tempfile import NamedTemporaryFile
log = logging.getLogger(__name__)
INSTALLER_USERDATA = os.path.join('SOFTWARE', 'Microsoft', 'Windows', 'CurrentVersion', 'Installer', 'UserData')
PYPI_HOSTS = ["127.0.0.1 pypi.infinidat.com",
"127.0.0.1 pypi",
"127.0.0.1 pypi.python.org", ]
HOSTS_FILE = os.path.join('/', 'etc', 'hosts') if os.name != 'nt' else \
os.path.join(os.environ.get("SystemRoot", r"C:\Windows"), "System32", "Drivers", "etc", "hosts")
CHMOD_755 = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH
from infi.recipe.application_packager.utils.execute import execute_assert_success
def get_pypi_addresses():
    """:returns: a list of hosts-file lines mapping the known PyPI hostnames
    (as looked up by easy_install and buildout) to 127.0.0.1"""
    # TODO get the real list from pydistutils.cfg and buildout/default.cfg
    return PYPI_HOSTS
@contextmanager
def prevent_access_to_pypi_servers():
    """Context manager that blocks resolution of the known PyPI hostnames.

    Appends 127.0.0.1 entries for every PyPI hostname to the system hosts
    file on entry, and restores the original hosts file content on exit
    (even if the body raises).
    """
    with open(HOSTS_FILE, 'r') as fd:
        content = fd.read()
    try:
        new_content = '\n'.join([content] + get_pypi_addresses())
        with open(HOSTS_FILE, 'w') as fd:
            fd.write(new_content)
        log.info("Preventing access to pypi servers")
        log.debug("Wrote {!r} to hosts file {}".format(new_content, HOSTS_FILE))
        yield
    finally:
        with open(HOSTS_FILE, 'w') as fd:
            fd.write(content)
        log.info("Restoring access to pypi servers")
        # Bug fix: the format string was missing the second placeholder, so
        # the HOSTS_FILE argument was silently dropped from the log line.
        log.debug("Wrote original content {!r} for hosts file {}".format(content, HOSTS_FILE))
@contextmanager
def prevent_access_to_gcc():
    """Temporarily shadow gcc with a stub that always fails (exit 1).

    Puts a directory containing the fake gcc at the front of PATH so any
    attempt to compile C extensions fails fast. PATH is restored and the
    temporary directory removed on exit.
    """
    from tempfile import mkdtemp
    original_path = os.environ['PATH']
    tempdir = mkdtemp('gcc')
    try:
        fake_gcc = os.path.join(tempdir, 'gcc')
        with open(fake_gcc, 'w') as fd:
            fd.write("#!/bin/sh\nexit 1")
        os.chmod(fake_gcc, CHMOD_755)
        # tempdir is prepended so the stub wins the lookup; it is appended
        # as well to preserve the original PATH layout exactly.
        os.environ['PATH'] = os.path.pathsep.join([tempdir, original_path, tempdir])
        log.info("Preventing access to gcc")
        log.debug("Setting PATH to {}".format(os.environ['PATH']))
        yield
    finally:
        os.environ['PATH'] = original_path
        log.debug("Restored PATH to {}".format(os.environ['PATH']))
        # Bug fix: the original never deleted the mkdtemp directory, leaking
        # one temp dir (and stub binary) per use.
        shutil.rmtree(tempdir, ignore_errors=True)
class Installer(object):
package_extension = None
targetdir = None
executable_extension = None
def __init__(self, buildout_path='buildout.cfg'):
super(Installer, self).__init__()
self._buildout_path = os.path.abspath(buildout_path)
self._project_dir = os.path.dirname(self._buildout_path)
self._parser = ConfigParser()
self._parser.read(self._buildout_path)
@property
def product_name(self):
try:
return self._parser.get('project', 'product_name')
except NoOptionError:
return self._parser.get('project', 'name')
@property
def project_name(self):
return self.product_name.replace(' ', '-').replace('_', '-').lower()
@property
def package_name(self):
return self.project_name
@property
def company(self):
try:
return self._parser.get('project', 'company')
except NoOptionError:
return self._parser.get('project', 'None')
@property
def targetdir(self):
if os.name == 'nt':
return os.path.join(r'C:\Program Files', self.company, self.product_name)
return os.path.join(os.path.sep, 'opt', self.company.lower(), self.project_name)
def _format_executable(self, executable):
return "{}.{}".format(executable, self.executable_extension) if self.executable_extension else executable
def has_bootstrap_ocurred(self):
buildout_path = os.path.join(self.targetdir, 'bin', self._format_executable('buildout'))
buildout_exists = os.path.exists(buildout_path)
log.debug("{!r} exists: {}".format(buildout_path, buildout_exists))
return buildout_exists
def are_there_remainings_of_previous_installations(self):
filepaths = []
if os.path.exists(self.targetdir):
for root, dirs, files in os.walk(self.targetdir):
basedir = os.path.relpath(root, self.targetdir)
filepaths += [basedir + os.path.sep]
filepaths += [os.path.join(basedir, file) for file in files]
log.info("Files and directories under {!r}: {!r}".format(self.targetdir, filepaths))
return os.path.exists(self.targetdir)
def is_package_exists(self):
return len(self._get_packages()) > 0
def _get_packages(self):
packages = glob.glob(os.path.join(self._project_dir, 'parts', '*.{}'.format(self.package_extension)))
log.info("Found the following packages: {!r}".format(packages))
return packages
def get_package(self):
return self._get_packages()[0]
def create_package(self):
from ..utils import chdir
with chdir(os.path.dirname(self._buildout_path)):
python = os.path.join('bin', 'python{}'.format('.exe' if os.name == 'nt' else ''))
buildout_script = os.path.join('bin', 'buildout{}'.format('-script.py' if os.name == 'nt' else ''))
stdout = execute_assert_success([python, buildout_script, '-v', 'install', 'pack']).get_stdout()
log.debug('package created, stdout: {}'.format(stdout))
def is_product_installed(self):
raise NotImplementedError()
def install_package(self, with_custom_actions=True):
raise NotImplementedError()
def uninstall_package(self, with_custom_actions=True):
raise NotImplementedError()
class MsiInstaller(Installer):
package_extension = 'msi'
executable_extension = 'exe'
@property
def package_code(self):
return self._parser.get('project', 'upgrade_code')
@property
def package_code_formatted(self):
return self.package_code.strip('{}').replace('-', '').upper()
def _get_installed_product_from_registry(self):
from infi.registry import LocalComputer
registry = LocalComputer()
userdata = registry.local_machine[INSTALLER_USERDATA]
for user in [user for user in userdata.values() if os.path.join('Products') in user]:
for product in user['Products'].values():
display_name = product['InstallProperties'].values_store['DisplayName'].to_python_object()
log.debug("product found: {!r}".format(display_name))
if display_name == self.product_name:
log.debug("Product is indeed installed")
return product
log.debug("Product is not installed")
return None
def is_product_installed(self):
return self._get_installed_product_from_registry() is not None
def install_package(self, with_custom_actions=True):
import io
logfile = self.get_package() + '.install.log'
with open(logfile, 'w'):
pass
args = ['msiexec', '/i', self.get_package(), '/passive', '/l*vx', logfile]
if not with_custom_actions:
args.append("NO_CUSTOM_ACTIONS=1")
with prevent_access_to_pypi_servers():
try:
execute_assert_success(args)
finally:
with io.open(logfile, encoding='utf-16') as fd:
print(fd.read())
def uninstall_package(self, with_custom_actions=True):
import io
logfile = self.get_package() + '.uninstall.log'
with open(logfile, 'w'):
pass
properties = self._get_installed_product_from_registry()['InstallProperties'].values_store
uninstall_string = properties['UninstallString'].to_python_object()
args = uninstall_string.split() + ['/passive', '/l*vx', logfile]
if not with_custom_actions:
args.append("NO_CUSTOM_ACTIONS=1")
try:
execute_assert_success(args)
finally:
with io.open(logfile, encoding='utf-16') as fd:
print(fd.read())
class RpmInstaller(Installer):
    """Installer backend for RPM-based platforms, driven via the `rpm` CLI."""
    package_extension = 'rpm'
    def is_product_installed(self):
        """Return True if the package is registered in the RPM database."""
        # `rpm -q` exits 1 and prints '... is not installed' for absent
        # packages; both streams are checked since the message's destination
        # varies between rpm versions.
        pid = execute_assert_success(['rpm', '-q', self.package_name], allowed_return_codes=[0, 1])
        output = pid.get_stderr() + pid.get_stdout()
        return b'not installed' not in output
    def install_package(self, with_custom_actions=True):
        """Install/upgrade the built .rpm with `rpm -Uvh`.

        PyPI and gcc access are blocked for the duration so the install must
        not try to download or compile anything.
        """
        env = os.environ.copy()
        if not with_custom_actions:
            env['NO_CUSTOM_ACTIONS'] = '1'
        env['LIBPATH'] = ''  # On AIX we don't want Python's LIBPATH for rpm
        with prevent_access_to_pypi_servers(), prevent_access_to_gcc():
            execute_assert_success(['rpm', '-Uvh', self.get_package()], env=env)
    def uninstall_package(self, with_custom_actions=True):
        """Remove the installed package with `rpm -e`."""
        env = os.environ.copy()
        if not with_custom_actions:
            env['NO_CUSTOM_ACTIONS'] = '1'
        env['LIBPATH'] = ''  # On AIX we don't want Python's LIBPATH for rpm
        execute_assert_success(['rpm', '-e', self.package_name], env=env)
class DebInstaller(Installer):
    """Installer backend for Debian-based platforms, driven via `dpkg`."""
    package_extension = 'deb'
    def is_product_installed(self):
        """Return True if dpkg lists the package in 'ii' (installed) state."""
        output = execute_assert_success(["dpkg", "--list", self.package_name], allowed_return_codes=[0, 1]).get_stdout().splitlines()
        return any([line.startswith(b'ii') and self.package_name in line.decode('ascii') for line in output])
    def install_package(self, with_custom_actions=True):
        """Install the built .deb with `dpkg -i`.

        PyPI and gcc access are blocked for the duration so the install must
        not try to download or compile anything.
        """
        env = os.environ.copy()
        if not with_custom_actions:
            env['NO_CUSTOM_ACTIONS'] = '1'
        with prevent_access_to_pypi_servers(), prevent_access_to_gcc():
            execute_assert_success(['dpkg', '-i', self.get_package()], env=env)
    def uninstall_package(self, with_custom_actions=True):
        """Remove the installed package with `dpkg -r`."""
        env = os.environ.copy()
        if not with_custom_actions:
            env['NO_CUSTOM_ACTIONS'] = '1'
        execute_assert_success(['dpkg', '-r', self.package_name], env=env)
class PkgInstaller(Installer):
    """Installer backend for Solaris SVR4 packages shipped as gzipped ``.pkg.gz`` files."""

    package_extension = 'pkg.gz'

    def __init__(self, *args, **kwargs):
        super(PkgInstaller, self).__init__(*args, **kwargs)
        # pkgadd/pkgrm "admin file": disable every interactive prompt/check so
        # the tools can run unattended.  (Note: 'partial=nocheck' appears twice.)
        admin_file_content = '\n'.join(['partial=nocheck',
                                        'runlevel=nocheck',
                                        'idepend=nocheck',
                                        'rdepend=nocheck',
                                        'setuid=nocheck',
                                        'action=nocheck',
                                        'partial=nocheck',
                                        'conflict=nocheck',
                                        'authentication=quit',
                                        'instance=overwrite',
                                        'basedir=default'])
        # Kept on self so the NamedTemporaryFile (and the file on disk) stays
        # alive for the whole lifetime of the installer object.
        self.admin_file = NamedTemporaryFile(mode='w')
        self.admin_file.write(admin_file_content)
        self.admin_file.flush()
        os.fsync(self.admin_file.fileno())  # make sure pkgadd/pkgrm see the content

    def is_product_installed(self):
        """Return True when ``pkginfo`` exits 0 for the package name."""
        return 0 == execute(["pkginfo", self.package_name]).get_returncode()

    def install_package(self, with_custom_actions=True):
        """Unzip the ``.pkg.gz`` and install it with an unattended ``pkgadd``.

        The with_custom_actions flag is forwarded to the package scripts via a
        response file (NO_CUSTOM_ACTIONS=0/1).
        """
        response_file = NamedTemporaryFile(mode='w')
        response_file.write("NO_CUSTOM_ACTIONS={}".format(int(not with_custom_actions)))
        response_file.flush()
        os.fsync(response_file.fileno())
        with prevent_access_to_pypi_servers(), prevent_access_to_gcc():
            zipped_package_name = self.get_package()
            # strip the trailing '.gz' to get the pkg datastream filename
            unzipped_package_name = zipped_package_name[:-3]
            execute_assert_success('gunzip -c {} > {}'.format(zipped_package_name, unzipped_package_name), shell=True)
            execute_assert_success(['pkgadd',
                                    '-n',
                                    '-a', self.admin_file.name,
                                    '-r', response_file.name,
                                    '-d', unzipped_package_name,
                                    self.package_name])

    def uninstall_package(self, with_custom_actions=True):
        # with_custom_actions is actually ignored here. This flag is passed to the installer through the response file.
        # Luckily, the preremove scripts also gets this info (it's saved somwhere in the os until the removal)
        execute_assert_success(['pkgrm', '-n', '-a', self.admin_file.name, self.package_name], allowed_return_codes=[0,])
| 1.90625 | 2 |
regtests/typed/float32vec.py | ahakingdom/Rusthon | 622 | 12758866 | """simd float32vec"""
def get_data():
    """Return the eight float literals used to exercise the float32vec path."""
    values = [1.9, 1.8, 1.7, 0.6, 0.99, 0.88, 0.77, 0.66]
    return values
def main():
    # NOTE(review): `numpy` and `TestError` are not imported here -- they are
    # injected by the Rusthon regression-test harness; this file is translator
    # input, not a standalone Python script.
    ## the translator knows this is a float32vec because there are more than 4 elements
    # NOTE(review): under Python 2 semantics 22/7 is integer division (== 3);
    # confirm whether 22.0/7 was intended.
    x = y = z = w = 22/7
    a = numpy.array( [1.1, 1.2, 1.3, 0.4, x,y,z,w], dtype=numpy.float32 )
    ## in this case the translator is not sure what the length of `u` is, so it defaults
    ## to using a float32vec.
    u = get_data()
    b = numpy.array( u, dtype=numpy.float32 )
    c = a + b
    print(c)
    # Only the first four lanes are checked: 1.1+1.9, 1.2+1.8, 1.3+1.7, 0.4+0.6.
    TestError( c[0]==3.0 )
    TestError( c[1]==3.0 )
    TestError( c[2]==3.0 )
    TestError( c[3]==1.0 )
| 3.265625 | 3 |
setup.py | NIVANorge/pyniva | 0 | 12758867 | # -*- coding: utf-8 -*-
"""A setuptools based module for the NIVA tsb module/application.
"""
from os import path
from setuptools import setup, find_packages
# Directory containing this setup.py; used to resolve files relative to the repo root.
here = path.abspath(path.dirname(__file__))

# get the version from the __version__.py file
# (single source of truth: pyniva/__version__.py defines __version__)
version_dict = {}
with open(path.join(here, 'pyniva', '__version__.py')) as f:
    exec(f.read(), version_dict)

# README.md becomes the long description shown on PyPI.
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='pyniva',
    # Versions should comply with PEP440. For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/single_source_version.html
    version=version_dict['__version__'],
    description="Python wrapper/API for interacting with NIVA's data platform",
    long_description=long_description,
    long_description_content_type='text/markdown',
    # The project's main homepage.
    url='https://github.com/NIVANorge/pyniva',
    # Author details
    author='<NAME>',
    author_email='<EMAIL>',
    # Choose your license
    license='MIT license',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 3 - Alpha',
        # Indicate who your project is intended for
        'Intended Audience :: Developers',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
    keywords='metadata timeseries data',
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),
    install_requires=['pandas>=1.1,<2.0', 'numpy>=1.16,<2.0', 'requests>=2.20,<3.0',
                      'pyjwt>=1.7,<2.0', 'cryptography>=2.5,<3.0'],
    test_suite='tests',
)
setup.py | petrieh/crl-examplelib | 0 | 12758868 | __copyright__ = 'Copyright (C) 2019, Nokia'
import os
import imp
from setuptools import setup, find_packages
# Absolute path to the package's _version.py, loaded at build time so the
# distribution version has a single source of truth.
VERSIONFILE = os.path.join(
    os.path.dirname(os.path.abspath(__file__)),
    'src', 'crl', 'examplelib', '_version.py')
def get_version():
    """Load ``_version.py`` as a module and return its ``get_version()`` result."""
    version_module = imp.load_source('_version', VERSIONFILE)
    return version_module.get_version()
def read(fname):
    """Return the text content of *fname*, resolved relative to this file's directory."""
    full_path = os.path.join(os.path.dirname(__file__), fname)
    with open(full_path) as f:
        return f.read()
# Package metadata for crl.examplelib (a Common Robot Libraries example).
setup(
    name='crl.examplelib',
    version=get_version(),
    author='<NAME>',
    author_email='<EMAIL>',
    description='Example of Common Robot Library',
    install_requires=[],
    long_description=read('README.rst'),
    license='BSD-3-Clause',
    keywords='robotframework, example',
    url='https://github.com/nokia/crl-examplelib',
    # src/ layout with the 'crl' namespace package shared across CRL projects.
    packages=find_packages('src'),
    package_dir={'': 'src'},
    namespace_packages=['crl'],
    # Exposes the robotdocs configuration to the robotdocsconf plugin system.
    entry_points={'robotdocsconf': [
        'robotdocsconf = crl.examplelib.robotdocsconf:robotdocs']},
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Topic :: Software Development',
    ],
)
| 1.609375 | 2 |
LetterCount/LetterCount.py | SACHIN1625/LetterCount | 0 | 12758869 | <reponame>SACHIN1625/LetterCount
def repetition(a, b):
    """Count how many elements of sequence `a` are equal to `b`.

    For a string `a` this counts occurrences of the single character `b`.
    Works for any sequence (str, list, tuple).

    Replaces the original index-based loop (``for i in range(len(a))`` with a
    manual counter and a stray semicolon) with an idiomatic generator sum;
    behavior is unchanged.
    """
    return sum(1 for item in a if item == b)
| 3.328125 | 3 |
nipy/core/image/image_list.py | yarikoptic/NiPy-OLD | 1 | 12758870 | from copy import copy
import numpy as np
from nipy.core.image.image import Image
class ImageList(object):
    ''' Class to contain ND image as list of (N-1)D images '''
    def __init__(self, images=None):
        """
        A lightweight implementation of a list of images.

        Parameters
        ----------
        images : iterable
           a iterable and sliceale object whose items are meant to be
           images, this is checked by asserting that each has a
           `coordmap` attribute

        >>> import numpy as np
        >>> from nipy.testing import funcfile
        >>> from nipy.core.api import Image, ImageList
        >>> from nipy.io.api import load_image
        >>> funcim = load_image(funcfile)
        >>> ilist = ImageList(funcim)
        >>> sublist = ilist[2:5]

        Slicing an ImageList returns a new ImageList

        >>> isinstance(sublist, ImageList)
        True

        Indexing an ImageList returns a new Image

        >>> newimg = ilist[2]
        >>> isinstance(newimg, Image)
        True
        >>> isinstance(newimg, ImageList)
        False

        >>> np.asarray(sublist).shape
        (3, 2, 20, 20)

        >>> np.asarray(newimg).shape
        (2, 20, 20)
        """
        if images is None:
            self.list = []
            return
        # Validate up front that every element looks like an image.
        for im in images:
            if not hasattr(im, "coordmap"):
                raise ValueError("expecting each element of images "
                                 " to have a 'coordmap' attribute")
        self.list = images

    @classmethod
    def from_image(klass, image, axis=-1):
        """Build an ImageList by slicing `image` along `axis`.

        The chosen axis is rolled to the front and each slice becomes an
        Image sharing (a copy of) the original coordmap.
        """
        if axis is None:
            raise ValueError('axis must be array axis no or -1')
        imlist = []
        coordmap = image.coordmap
        data = np.asarray(image)
        data = np.rollaxis(data, axis)
        imlist = [Image(dataslice, copy(coordmap))
                  for dataslice in data]
        return klass(imlist)

    def __setitem__(self, index, value):
        """
        self.list[index] = value
        """
        self.list[index] = value

    def __getitem__(self, index):
        """
        self.list[index]
        """
        # Integer index -> a single image; any other index (e.g. a slice)
        # -> a new ImageList.  Idiom fix: use isinstance() rather than the
        # original `type(index) is type(1)` comparison.
        if isinstance(index, int):
            return self.list[index]
        else:
            return ImageList(images=self.list[index])

    def __getslice__(self, i, j):
        """
        Return another ImageList instance consisting with
        images self.list[i:j]

        (Python 2 only; Python 3 routes slices through __getitem__.)
        """
        return ImageList(images=self.list[i:j])

    def __array__(self):
        """Return data in ndarray.  Called through numpy.array.

        Examples
        --------
        >>> import numpy as np
        >>> from nipy.testing import funcfile
        >>> from nipy.core.api import ImageList
        >>> from nipy.io.api import load_image
        >>> funcim = load_image(funcfile)
        >>> ilist = ImageList(funcim)
        >>> np.asarray(ilist).shape
        (20, 2, 20, 20)
        """
        return np.asarray([np.asarray(im) for im in self.list])

    def __iter__(self):
        # NOTE: Python 2-style iterator protocol -- the list itself acts as
        # the iterator, so nested iteration over the same instance would
        # share state.  Kept for backward compatibility.
        self._iter = iter(self.list)
        return self

    def next(self):
        # Python 2 iterator step (would be __next__ in Python 3).
        return self._iter.next()
| 3.28125 | 3 |
babynames/regular_expressions.py | ubicu/google-python-exercises | 0 | 12758871 | # Regular expression exercises from Google Python class
import re
# Example 1: re.search returns a Match object when the pattern is found.
match = re.search('iig','called piiig')
print(match)
print(match.group())

# Example 2: re.search returns None when there is no match.
match = re.search('igs','called piiig')
print(match)
def Find(pat, txt):
    """Search `txt` for regex `pat`; print the matched text or 'Not found'.

    Returns the matched text on success, or None when there is no match
    (backward-compatible: the original always returned None implicitly).
    """
    match = re.search(pat, txt)
    if match:
        print(match.group())
        return match.group()
    print('Not found')
    return None
# Example 3: found vs. not found via the Find helper
Find('iig','called piiig')
Find('igs','called piiig')

# Additional notes
# .(dot) any char
# \w word char
# \d digit
# \s whitespace \S non-whitespace
# + 1 or more
# * 0 or more

# Example 4: '.' matches any character
Find('...g','called piiig')

# Example 5: no match ('gs' does not occur)
Find('..gs','called piiig')

# Example 6: search stops at the leftmost match
Find('..g','called piiig Another match xxxg')

# Example 7: anchoring on a literal 'x' moves the match to the end
Find('x..g','called piiig Another match xxxg')

# Example 8: backslash-escaped dot matches a literal '.'
Find('c\.call','c.called piiig Another match xxxg')

# Example 9: same as above with a raw string (preferred for regexes)
Find(r'c\.call','c.called piiig Another match xxxg')

# Example 10: \w\w\w -- three word characters
Find(r':\w\w\w','blah :cat blah blah')

# Example 11: \d\d\d -- three digits after a colon
Find(r':\d\d\d','blah :cat :123 blah blah')

# Example 12: digits without the leading colon
Find(r'\d\d\d','blah :cat :123 blah blah')

# Example 13: \s+ -- one or more whitespace characters between digits
Find(r'\d\s+\d\s+\d','blah :cat :1 2 3 blah blah')

# Example 14: \w+ -- one or more word characters (greedy)
Find(r':\w+','blah :kitten :1 2 3 blah blah')

# Example 15: .+ is greedy -- matches to the end of the string
Find(r':.+','blah :kitten :1 2 3 blah blah')

# Example 16: \w+ stops at the first non-word character
Find(r':\w+','blah :kitten123%&fd*^ :1 2 3 blah blah')

# Example 17: \S+ -- one or more non-whitespace characters
Find(r':\S+','blah :kitten123%&fd*^ :1 2 3 blah blah')

# Example 18: naive email pattern -- \w does not match '.' or '-'
Find(r'\w+@\w+','My email <EMAIL> blah @')

# Example 19: character class [\w.] allows word chars AND literal dots
Find(r'[\w.]+@[\w.]+','My email <EMAIL> blah @') # inside [ ] the set of allowed characters is listed; '.' is literal there

# Example 20: same pattern on another input
Find(r'[\w.]+@[\w.]+','My email <EMAIL> blah @')

# Example 21: require the username to start with a word character
Find(r'\w[\w.]+@[\w.]+','My email <EMAIL> blah @')

# Example 22 - Get the username and hostname via groups ( )
m = re.search(r'(\w[\w.]+)@([\w.]+)', 'My email <EMAIL> blah @')
print(m)
print(m.group())
print(m.group(1))
print(m.group(2))

# Example 23: findall returns every non-overlapping match
out = re.findall(r'\w[\w.]+@[\w.]+','My email .<EMAIL> blah @ <EMAIL>')
print(out)

# Example 24: with groups, findall returns a list of tuples
out = re.findall(r'(\w[\w.]+)@([\w.]+)','My email <EMAIL> blah @ <EMAIL>')
print(out)

# Example 25: flags argument (re.IGNORECASE)
out = re.findall(r'(\w[\w.]+)@([\w.]+)','My email .<EMAIL> blah @ <EMAIL>', re.IGNORECASE)
print(out)
| 3.75 | 4 |
Generic Bacteria Simulation on Human Body/TestBacteriaCellCluster.py | dem123456789/Computer-Simulation | 2 | 12758872 | <filename>Generic Bacteria Simulation on Human Body/TestBacteriaCellCluster.py
from AbstractBacteriaCellCluster import *
from AbstractHost import *
from Point import *
from sequences import bacteriaClusterSq
import math
import parameters as p
from globals import *
class TestBacteriaCellCluster(AbstractBacteriaCellCluster):
    """Concrete bacteria-cluster implementation used by the simulation.

    Tracks a cell count that grows each time step and decays with age; the
    cluster lives inside an AbstractHost at a relative Point location.
    """

    def __init__(self, cellCount):
        self.name = "<NAME>"  # placeholder (name anonymized in this source)
        self.id = bacteriaClusterSq.getNextVal()  # unique id from shared sequence
        self.isDead = False
        self.location = None  # relative Point inside the host (set later)
        self.host = None      # AbstractHost this cluster currently occupies
        self.cellCount = cellCount
        self.lifespan = p.parameters.bacteria_lifespan
        self.born = globals.time  # simulation tick at creation

    def __lt__(self,other):
        # Always True: provides an arbitrary but usable ordering so clusters
        # can be placed in ordered containers (e.g. heaps) without raising.
        return True

    def getCellCount(self):
        return int(self.cellCount)

    def enterHost(self, host):
        """Attach this cluster to `host`, clearing any relative location."""
        assert(isinstance(host, AbstractHost))
        self.location = None
        self.host = host

    def canExitHost(self):
        return True

    def exitHost(self):
        self.host = None

    def getName(self):
        return self.name

    def setRelativeLocation(self, point):
        assert(isinstance(point, Point))
        self.location = point

    def getRelativeLocation(self):
        return self.location

    def _reproduce(self): #private method
        # Grow by the configured reproduction rate, rounding up.
        self.cellCount += math.ceil(self.cellCount * p.parameters.bacteria_reproduction_rate)

    def death(self):
        self.isDead = True

    def _age(self):
        # NOTE(review): this condition looks inverted -- it decrements the
        # population while (born + lifespan/delta_t) is still >= the current
        # tick, i.e. while the lifespan has NOT yet elapsed.  Confirm the
        # intended aging semantics.
        if (self.born + p.parameters.bacteria_lifespan / p.parameters.delta_t) >= globals.time:
            self.cellCount -= 1
            if(self.cellCount <= 0):
                self.isDead = True

    def getMoveSpeed(self):
        return 0.01

    def inContact(self, cluster):
        """Merge with another TestBacteriaCellCluster on contact."""
        if self.isDead:
            return
        if(isinstance(cluster, TestBacteriaCellCluster)):
            cluster.death()
            #merge clusters
            self.cellCount += cluster.getCellCount()

    def timeStep(self):
        """Advance one simulation tick: reproduce, then age."""
        assert(self.host is not None)
        self._reproduce()
        self._age()

    def beDisrupted(self, count): #Return new bacteria count
        # NOTE(review): despite the comment above, nothing is returned, and
        # `int(count/self.cellCount)` reduces the population by a *ratio*
        # rather than by `count` cells -- confirm whether this is intentional.
        self.cellCount -= int(count/self.cellCount)

    def __repr__(self):
        return self.name + "\n id: " + str(self.id) + str(self.location)
prepare.py | barbmarques/classification-exercises | 1 | 12758873 | <gh_stars>1-10
import pandas as pd
from pandas import DataFrame
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
def clean_titanic(df):
    '''
    clean_titanic will take a dataframe acquired as df and:
    - remove duplicate rows,
    - fill missing embark_town values with 'Southampton',
    - one-hot encode embark_town and sex (dropping the first level of each),
    - drop columns that are duplicated elsewhere or mostly null
      (deck, class, embarked, sex, embark_town).
    return: single cleaned dataframe

    Bug fix: the original wrote ``df.drop_duplicates`` without calling it,
    which was a no-op statement; duplicates are now actually dropped.
    Note: because drop_duplicates returns a new frame, the caller's dataframe
    is no longer mutated in place.
    '''
    df = df.drop_duplicates()
    df = df.assign(embark_town=df['embark_town'].fillna('Southampton'))
    dummies = pd.get_dummies(df[['embark_town', 'sex']], drop_first=True)
    dropcols = ['deck', 'class', 'embarked', 'sex', 'embark_town']
    df = df.drop(columns=dropcols)
    return pd.concat([df, dummies], axis=1)
def handle_missing_values(df):
    """Fill missing embarkation info: town -> 'Southampton', port code -> 'O'.

    Returns a new dataframe; the input is not modified.
    """
    filled_town = df.embark_town.fillna('Southampton')
    filled_port = df.embarked.fillna('O')
    return df.assign(embark_town=filled_town, embarked=filled_port)
def remove_columns(df):
    """Drop the mostly-null ``deck`` column and return the resulting frame."""
    columns_to_drop = ['deck']
    return df.drop(columns=columns_to_drop)
def encode_embarked(df):
    """Add an ``embarked_encode`` column holding integer labels for ``embarked``."""
    label_encoder = LabelEncoder()
    label_encoder.fit(df.embarked)
    codes = label_encoder.transform(df.embarked)
    return df.assign(embarked_encode=codes)
def prep_titanic_data(df):
    """Run the titanic cleaning pipeline: fill missing values, drop ``deck``,
    then label-encode ``embarked``.  Returns the prepared dataframe."""
    prepared = handle_missing_values(df)
    prepared = remove_columns(prepared)
    prepared = encode_embarked(prepared)
    return prepared
import pandas as pd
def clean_iris(df):
    """
    clean_iris will take an acquired df and
    remove `species_id` and `measurement_id` columns and
    rename `species_name` column to just `species` and
    encode 'species_name' column into TWO new columns

    return: single cleaned dataframe
    """
    renamed = (df.drop(columns=['species_id', 'measurement_id'])
                 .rename(columns={'species_name': 'species'}))
    species_dummies = pd.get_dummies(renamed[['species']], drop_first=True)
    return pd.concat([renamed, species_dummies], axis=1)
def prep_iris(df):
    """
    prep_iris will take one argument(df) and
    run clean_iris to remove/rename/encode columns
    then split our data into 20/80,
    then split the 80% into 30/70

    perform a train, validate, test split
    return: the three split pandas dataframes-train/validate/test
    """
    cleaned = clean_iris(df)
    train_validate, test = train_test_split(
        cleaned, test_size=0.2, random_state=3210, stratify=cleaned.species)
    train, validate = train_test_split(
        train_validate, train_size=0.7, random_state=3210,
        stratify=train_validate.species)
    return train, validate, test
def train_validate_test_split(df, seed=123):
    """Split *df* into train/validate/test (56%/24%/20%) with a fixed seed.

    Bug fix: the original passed ``stratify=train_and_validate`` (the entire
    DataFrame) to the second split, which scikit-learn cannot stratify on and
    which was inconsistent with the first split's ``stratify=None``.  Both
    splits are now unstratified; callers that need stratification should pass
    the label column explicitly (as prep_iris does).
    """
    train_and_validate, test = train_test_split(
        df, test_size=0.2, random_state=seed, stratify=None
    )
    train, validate = train_test_split(
        train_and_validate,
        test_size=0.3,
        random_state=seed,
        stratify=None,  # was: stratify=train_and_validate (the whole frame)
    )
    return train, validate, test
| 3.3125 | 3 |
sstcam_sandbox/d190507_check_amplitude_calib/extract.py | watsonjj/CHECLabPySB | 0 | 12758874 | <reponame>watsonjj/CHECLabPySB<gh_stars>0
import pandas as pd
from glob import glob
from sstcam_sandbox import get_astri_2019, get_data
from CHECLabPy.core.io import TIOReader, HDF5Writer
from CHECOnsky.calib import get_nudge_and_temperature_from_reader
import re
def main():
    """Collect run metadata for every ASTRI 2019 r1 file and store it as HDF5.

    For each Run*_r1.tio file: read the first event to get its CPU timestamp,
    query the DAC nudge and temperature from the reader, and record the
    investigation name and run id parsed from the path.
    """
    paths = glob(get_astri_2019("*/*_r1.tio"))
    # Captures the investigation directory (d<...>) and the run number.
    pattern = re.compile(r"(?:.+?)/d(.+?)/Run(\d+?)_r1.tio")

    d_list = []

    for path in paths:
        reader = TIOReader(path)
        wfs = reader[0]  # first event; provides the CPU timestamp
        nudge, temperature = get_nudge_and_temperature_from_reader(reader)

        regexp = re.search(pattern, path)
        investigation = regexp.group(1)
        run_id = regexp.group(2)

        d_list.append(dict(
            investigation=investigation,
            run_id=run_id,
            t_cpu=wfs.t_cpu,
            nudge=nudge,
            temperature=temperature
        ))

    df = pd.DataFrame(d_list)
    with HDF5Writer(get_data("d190507_check_amplitude_calib/data.h5")) as w:
        w.write(data=df)


if __name__ == '__main__':
    main()
| 2.046875 | 2 |
WeChatPython/wxmsg.py | snowyxx/WechatScripts | 3 | 12758875 | <filename>WeChatPython/wxmsg.py<gh_stars>1-10
#!/usr/bin/evn python
#coding:utf-8
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
from Config import wxLogger
from Config import CORPID
from Config import SECRET
from Config import TOUSER
from Config import PARTYID
from Config import TAGID
from Config import AGENTID
from Config import ACCOUNTTYPE
from Config import AppID
from Config import AppSecret
from Config import ToTags
from Config import MsgTemplateId
import urllib2
import json
import ast
import time
tokenurl_ent = "https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid={0}&corpsecret={1}"
mesgeurl_ent = "https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token="
tokenurl_sub = "https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid={0}&secret={1}"
# mesgeurl_sub =
class MXMessager():
    """Send alert messages through the WeChat (WeiXin) HTTP APIs.

    Supports both enterprise accounts and service/subscription accounts;
    which API family is used is controlled by the module-level ACCOUNTTYPE.
    (Python 2 code: uses urllib2 and `except Exception, e` syntax.)
    """

    def getAccessToken(self):
        """Return a (possibly cached) API access token, or exit on failure."""
        # Reuse the token cached in at.json when it belongs to the configured
        # account type and has at least 10 minutes (600 s) of validity left.
        try:
            with open('at.json') as f:
                resDir = ast.literal_eval(f.read())
            if resDir.has_key('expires_in') and resDir['accountType'] == ACCOUNTTYPE:
                if time.time() - resDir['time'] < resDir['expires_in'] - 600:
                    return resDir['access_token']
        except Exception, e:
            # Cache missing/corrupt -- fall through and fetch a fresh token.
            # wxLogger.info(e)
            pass
        # Build the token-request URL for the configured account type.
        if ACCOUNTTYPE == 'enterprise' and CORPID and SECRET:
            url = tokenurl_ent.format(CORPID, SECRET)
        elif ACCOUNTTYPE in ['service', 'subscribe'] and AppID and AppSecret:
            url = tokenurl_sub.format(AppID, AppSecret)
        try:
            response = urllib2.urlopen(url).read().decode('utf-8')
            resDir = json.loads(response)
            # Stamp the cache with fetch time and account type for reuse checks.
            resDir['time'] = time.time()
            resDir['accountType'] = ACCOUNTTYPE
            json.dump(resDir, open('at.json', 'w'))
        except Exception, e:
            resDir = {}
            wxLogger.info('[!]EXCEPTION -- %s' % e)
        if resDir.has_key('access_token'):
            return resDir['access_token']
        else:
            wxLogger.info(resDir)
            wxLogger.info('[!]Can not get ACCESS_TOKEN, Going to exit.')
            sys.exit()

    def sendMesg_ent(self, access_token, content):
        '''
        Send message to Enterprise account
        '''
        url = mesgeurl_ent+access_token
        # Enterprise "text" message addressed by user/party/tag ids from config.
        postdata = {
            "touser": TOUSER,
            "toparty": PARTYID,
            "totag": TAGID,
            "msgtype": "text",
            "agentid": AGENTID,
            "text": {
                "content": content
            },
            "safe":0
        }
        req = urllib2.Request(url)
        req.add_header('Content-Type','application/json;charset=utf-8')
        data = json.dumps(postdata, ensure_ascii=False)
        wxLogger.info('SEND MESSAGE -- %s' % data)
        response = urllib2.urlopen(req,data)
        return response.read()

    def sendMesg_sub(self, access_token, msgurl, severity, alertType, alertDate, device, monitorGroup,rcaMessage ):
        '''
        Send message to Enterprise account. Using template OPENTM207112010
        Used API: https://mp.weixin.qq.com/wiki?t=resource/res_main&id=mp1433751277&token=&lang=zh_CN
        Used Message Template: https://mp.weixin.qq.com/advanced/tmplmsg?action=tmpl_preview&t=tmplmsg/preview&id=OPENTM207112010&token=<PASSWORD>&lang=zh_CN
        '''
        # NOTE(review): `ids` is passed to getSubTagedUsers, but that method
        # recomputes the tag ids from ToTags and ignores its argument --
        # confirm whether the parameter was meant to be used.
        ids = self.getTagIds(access_token, ToTags)
        users = self.getSubTagedUsers(access_token, ids)
        title = "--- 来自ME产品的运维告警 ---"
        url = 'https://api.weixin.qq.com/cgi-bin/message/template/send?access_token={0}'.format(access_token)
        # Red for severe alert levels (matched case-insensitively), blue otherwise.
        if severity.lower() in ['critical', 'error', 'down', '严重', '严重的', '停止', '服务停止', '错误']:
            severitycolor="#FF0000"
        else:
            severitycolor="#173177"
        for user in users:
            try:
                color="#173177"
                data={"first":{"value":title, "color":"#673ab7"},"keyword1":{"value":severity,"color":severitycolor},"keyword2":{"value":alertType,"color":color},"keyword3":{"value":alertDate,"color":color},"keyword4":{"value":device,"color":color},"keyword5":{"value":monitorGroup,"color":color},"remark":{"value":rcaMessage}}
                dict_arr = {'touser': user, 'template_id':MsgTemplateId, 'url':msgurl, 'topcolor':color,'data':data}
                json_template = json.dumps(dict_arr)
                wxLogger.info('SEND MESSAGE -- %s' % json_template)
                response = urllib2.urlopen(url,json_template)
                wxLogger.info(response.read())
            except Exception, e:
                print ' ---- %r' % e
                wxLogger.info('[!] Could not send message to user %r.' % user)
                raise

    def getSubTagedUsers(self, access_token, tagname):
        '''
        Get user id list of the users who taged by the 'tagname'
        Used API: https://mp.weixin.qq.com/wiki?t=resource/res_main&id=mp1421140837&token=&lang=zh_CN
        tagname: is a list
        '''
        # NOTE(review): the `tagname` argument is ignored; tag ids are always
        # recomputed from the module-level ToTags configuration.
        ids = self.getTagIds(access_token, ToTags)
        if not ids:
            wxLogger.info('[!] Could not get tags id.')
            return None
        url = 'https://api.weixin.qq.com/cgi-bin/user/tag/get?access_token={0}'.format(access_token)
        users =[]
        for id in ids:
            try:
                postdata = {
                    "tagid" : id,
                    "next_openid":""
                    #TOFIX: if send too many tags realistically, we should pass next_openid
                }
                req = urllib2.Request(url)
                req.add_header('Content-Type','application/json;charset=utf-8')
                data = json.dumps(postdata)
                response = urllib2.urlopen(req,data).read()
                # {"count":2,"data":{"openid":["osQhiuOAGCM96q6e8gAkTFpHf_60","osQhiuHgVRrK5ciDxVvk17OEU66Q"]},"next_openid":"osQhiuHgVRrK5ciDxVvk17OEU66Q"}
                resDir = json.loads(response)
                users += resDir['data']['openid']
            except Exception, e:
                wxLogger.info('[!] Could not get user id.')
        # Deduplicate users that carry more than one of the configured tags.
        return list(set(users))

    def getTagIds(self, access_token, namelist):
        '''
        Fetch tag id(s) of tag name(s)
        Used API: https://mp.weixin.qq.com/wiki?t=resource/res_main&id=mp1421140837&token=&lang=zh_CN
        '''
        try:
            url = 'https://api.weixin.qq.com/cgi-bin/tags/get?access_token={0}'.format(access_token)
            response = urllib2.urlopen(url).read().decode('utf-8')
            # {"tags":[{"id":2,"name":"星标组","count":0},{"id":100,"name":"ME测试","count":1},{"id":102,"name":"ME支持","count":0}]}
            resDir = json.loads(response)
        except Exception, e:
            return None
        taglist = resDir['tags']
        ids = [x['id'] for x in taglist if x['name'] in namelist]
        return ids
if __name__ == '__main__':
    # CLI entry point: argument layout depends on the configured account type.
    try:
        if sys.platform == 'win32':
            # Windows command-line args arrive GBK-encoded; normalize to UTF-8.
            args = [x.decode('gbk').encode('utf-8') for x in sys.argv]
        else:
            args = sys.argv
        if ACCOUNTTYPE in ["service", "subscribe"]:
            # Template-message fields: url, severity, type, date, device,
            # monitor group, then the remainder is the free-text RCA message.
            msgurl = args[1]
            severity = args[2]
            alertType = args[3]
            alertDate = args[4]
            device = args[5]
            monitorGroup = args[6]
            rcaMessage = ' '.join(args[7:])
            messager = MXMessager()
            access_token = messager.getAccessToken()
            result = messager.sendMesg_sub(access_token, msgurl, severity, alertType, alertDate, device, monitorGroup,rcaMessage )
            wxLogger.info(result)
        elif ACCOUNTTYPE == 'enterprise':
            # Enterprise accounts take the whole argument list as one text message.
            msg = ' '.join(args[1:])
            messager = MXMessager()
            access_token = messager.getAccessToken()
            result = messager.sendMesg_ent(access_token, msg)
            wxLogger.info(result)
        else:
            print 'Please setup correct account type.'
            wxLogger.info('[!] Please input correct account type.')
    except Exception, e:
        print 'Please input correct parameters.'
        wxLogger.info(e)
test/onnx/test_pytorch_onnx_onnxruntime.py | jsun94/nimble | 206 | 12758876 | <gh_stars>100-1000
import unittest
import onnxruntime # noqa
import torch
import numpy as np
import io
import itertools
import copy
from torch.nn.utils import rnn as rnn_utils
from model_defs.lstm_flattening_result import LstmFlatteningResult
from model_defs.rnn_model_with_packed_sequence import RnnModelWithPackedSequence
from test_pytorch_common import (skipIfUnsupportedMinOpsetVersion, disableScriptTest,
skipIfUnsupportedOpsetVersion, skipIfNoLapack,
skipIfUnsupportedMaxOpsetVersion, skipIfONNXShapeInference)
from test_pytorch_common import BATCH_SIZE
from test_pytorch_common import RNN_BATCH_SIZE, RNN_SEQUENCE_LENGTH, RNN_INPUT_SIZE, RNN_HIDDEN_SIZE
from typing import List
import model_defs.word_language_model as word_language_model
import torchvision
import onnx
def to_numpy(tensor):
    """Convert a torch tensor to a NumPy array, detaching from autograd first
    when the tensor requires gradients."""
    source = tensor.detach() if tensor.requires_grad else tensor
    return source.cpu().numpy()
def convert_to_onnx(model, input=None, opset_version=9, example_outputs=None,
                    do_constant_folding=True, keep_initializers_as_inputs=True,
                    dynamic_axes=None, input_names=None, output_names=None,
                    fixed_batch_size=False, training=None,
                    onnx_shape_inference=False,
                    use_new_jit_passes=False):
    """Export `model` to ONNX in memory and return an ONNX Runtime session.

    The input is deep-copied before export because tracing runs the model,
    and in-place ops would otherwise mutate the caller's tensors.  All other
    keyword arguments are forwarded to ``torch.onnx._export``.
    """
    # export the model to ONNX
    f = io.BytesIO()
    input_copy = copy.deepcopy(input)
    torch.onnx._export(model, input_copy, f,
                       opset_version=opset_version,
                       example_outputs=example_outputs,
                       do_constant_folding=do_constant_folding,
                       keep_initializers_as_inputs=keep_initializers_as_inputs,
                       dynamic_axes=dynamic_axes,
                       input_names=input_names, output_names=output_names,
                       fixed_batch_size=fixed_batch_size, training=training,
                       onnx_shape_inference=onnx_shape_inference,
                       use_new_jit_passes=use_new_jit_passes)

    # compute onnxruntime output prediction
    ort_sess = onnxruntime.InferenceSession(f.getvalue())
    return ort_sess
def run_ort(ort_sess, input):
    """Run an ONNX Runtime session on (deep copies of) the given PyTorch inputs.

    Inputs are flattened, converted to NumPy, and bound to the session's
    declared input names positionally.  Returns the list of ORT outputs.
    """
    flat_inputs, _ = torch.jit._flatten(copy.deepcopy(input))
    numpy_inputs = [to_numpy(tensor) for tensor in flat_inputs]
    ort_inputs = {
        meta.name: value
        for meta, value in zip(ort_sess.get_inputs(), numpy_inputs)
    }
    return ort_sess.run(None, ort_inputs)
def ort_compare_with_pytorch(ort_outs, output, rtol, atol):
    """Assert that ONNX Runtime outputs match the PyTorch outputs element-wise.

    `output` (a possibly nested PyTorch result) is flattened and converted to
    NumPy, then compared pairwise against `ort_outs` with the given
    relative/absolute tolerances.  Raises AssertionError on any mismatch.

    Idiom fix: the original used a list comprehension purely for its
    side effects (and carried a duplicated comment line); a plain loop is
    used instead.  Behavior is unchanged.
    """
    flat_output, _ = torch.jit._flatten(output)
    outputs = [to_numpy(tensor) for tensor in flat_output]

    assert len(outputs) == len(ort_outs), "number of outputs differ"

    # compare onnxruntime and PyTorch results
    for expected, actual in zip(outputs, ort_outs):
        np.testing.assert_allclose(expected, actual, rtol=rtol, atol=atol)
def run_model_test(self, model, batch_size=2, state_dict=None,
                   input=None, use_gpu=True, rtol=0.001, atol=1e-7,
                   example_outputs=None, do_constant_folding=True,
                   dynamic_axes=None, test_with_inputs=None,
                   input_names=None, output_names=None,
                   fixed_batch_size=False):
    """End-to-end check: run `model` in PyTorch, export it to ONNX, run the
    exported model under ONNX Runtime, and assert the outputs agree within
    (rtol, atol).  Extra `test_with_inputs` are replayed against the same
    exported session to exercise dynamic shapes.

    `self` is the calling TestONNXRuntime instance; its opset/flags configure
    the export.  When `input` is None a default image-shaped batch is used.
    """
    model.eval()

    if input is None:
        input = torch.randn(batch_size, 3, 224, 224, requires_grad=True)

    with torch.no_grad():
        if isinstance(input, torch.Tensor):
            input = (input,)
        # In-place operators will update input tensor data as well.
        # Thus inputs are replicated before every forward call.
        input_copy = copy.deepcopy(input)
        output = model(*input_copy)
        if isinstance(output, torch.Tensor):
            output = (output,)

        ort_sess = convert_to_onnx(model, input=input, opset_version=self.opset_version,
                                   example_outputs=output, do_constant_folding=do_constant_folding,
                                   keep_initializers_as_inputs=self.keep_initializers_as_inputs,
                                   dynamic_axes=dynamic_axes, input_names=input_names,
                                   output_names=output_names, fixed_batch_size=fixed_batch_size, training=None,
                                   onnx_shape_inference=self.onnx_shape_inference,
                                   use_new_jit_passes=self.use_new_jit_passes)
        ort_outs = run_ort(ort_sess, input)
        ort_compare_with_pytorch(ort_outs, output, rtol, atol)

        # if additional test inputs are provided run the onnx
        # model with these inputs and check the outputs
        if test_with_inputs is not None:
            for test_input in test_with_inputs:
                if isinstance(test_input, torch.Tensor):
                    test_input = (test_input,)
                test_input_copy = copy.deepcopy(test_input)
                output = model(*test_input_copy)
                if isinstance(output, torch.Tensor):
                    output = (output,)
                ort_outs = run_ort(ort_sess, test_input)
                ort_compare_with_pytorch(ort_outs, output, rtol, atol)
class TestONNXRuntime(unittest.TestCase):
from torch.onnx.symbolic_helper import _export_onnx_opset_version
opset_version = _export_onnx_opset_version
keep_initializers_as_inputs = True # For IR version 3 type export.
use_new_jit_passes = False # For testing main code-path
onnx_shape_inference = False
def setUp(self):
torch.manual_seed(0)
onnxruntime.set_seed(0)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0)
np.random.seed(seed=0)
self.is_script_test_enabled = True
def run_test(self, model, input, rtol=1e-3, atol=1e-7, do_constant_folding=True,
batch_size=2, use_gpu=True, dynamic_axes=None, test_with_inputs=None,
input_names=None, output_names=None, fixed_batch_size=False):
def _run_test(m):
return run_model_test(self, m, batch_size=batch_size,
input=input, use_gpu=use_gpu, rtol=rtol, atol=atol,
do_constant_folding=do_constant_folding,
dynamic_axes=dynamic_axes, test_with_inputs=test_with_inputs,
input_names=input_names, output_names=output_names,
fixed_batch_size=fixed_batch_size)
if self.is_script_test_enabled and self.use_new_jit_passes:
script_model = torch.jit.script(model)
_run_test(script_model)
_run_test(model)
def run_model_test_with_external_data(self, model, input, rtol=0.001, atol=1e-7,
example_outputs=None, do_constant_folding=True,
dynamic_axes=None, input_names=None, output_names=None,
ort_optim_on=True):
import os
import tempfile
model.eval()
with torch.no_grad():
if isinstance(input, torch.Tensor):
input = (input,)
# In-place operators will update input tensor data as well.
# Thus inputs are replicated before every forward call.
input_copy = copy.deepcopy(input)
output = model(*input_copy)
if isinstance(output, torch.Tensor):
output = (output,)
# export the model to ONNX
with tempfile.TemporaryDirectory() as tmpdirname:
model_file_name = os.path.join(tmpdirname, 'model.onnx')
input_copy = copy.deepcopy(input)
torch.onnx.export(model, input_copy, model_file_name,
opset_version=self.opset_version,
example_outputs=output,
verbose=False,
do_constant_folding=do_constant_folding,
keep_initializers_as_inputs=self.keep_initializers_as_inputs,
dynamic_axes=dynamic_axes,
input_names=input_names, output_names=output_names,
use_external_data_format=True)
# compute onnxruntime output prediction
ort_sess_opt = onnxruntime.SessionOptions()
ort_sess_opt.graph_optimization_level = \
onnxruntime.GraphOptimizationLevel.ORT_ENABLE_EXTENDED if ort_optim_on else \
onnxruntime.GraphOptimizationLevel.ORT_DISABLE_ALL
ort_sess = onnxruntime.InferenceSession(model_file_name, sess_options=ort_sess_opt)
input_copy = copy.deepcopy(input)
ort_outs = run_ort(ort_sess, input_copy)
ort_compare_with_pytorch(ort_outs, output, rtol, atol)
@skipIfUnsupportedMinOpsetVersion(9) # Because external data format was released with Opset 9.
def test_embedding_model_with_external_data(self):
class LargeModel(torch.nn.Module):
def __init__(self):
super(LargeModel, self).__init__()
dim = 15
n = 4 * 100
self.emb = torch.nn.Embedding(n, dim)
self.lin1 = torch.nn.Linear(dim, 1)
self.seq = torch.nn.Sequential(
self.emb,
self.lin1,
)
def forward(self, input):
return self.seq(input)
model = LargeModel()
x = torch.tensor([2], dtype=torch.long)
self.run_model_test_with_external_data(model, x)
@skipIfUnsupportedMinOpsetVersion(9) # Because external data format was released with Opset 9.
def test_mobilenet_v2_with_external_data(self):
model = torchvision.models.mobilenet_v2(pretrained=True)
x = torch.randn(2, 3, 224, 224, requires_grad=True)
# We are turning off Onnx Runtime optimization off in this test,
# because external data format is not supported to in ORT optimizer.
# Once that support is added, we can set ort_optim_on=True (default).
self.run_model_test_with_external_data(model, x, rtol=1e-3, atol=1e-5,
ort_optim_on=False)
@skipIfUnsupportedMinOpsetVersion(9) # Because external data format was released with Opset 9.
def test_attribute_with_external_data(self):
class LargeModel(torch.nn.Module):
def forward(self, x):
return x + torch.ones(2, 1024)
x = torch.randn(2, 1)
self.run_model_test_with_external_data(LargeModel(), x)
@skipIfUnsupportedMinOpsetVersion(9) # Because external data format was released with Opset 9.
@unittest.skip("Enable this once large model with subgraph is supported in ORT")
def test_subgraph_with_external_data(self):
class LargeModel(torch.nn.Module):
def forward(self, x):
for i in range(x.size(0)):
x = x + torch.ones(2, 1024)
return x
x = torch.randn(2, 1)
self.run_model_test_with_external_data(torch.jit.script(LargeModel()), x)
def test_fuse_conv_bn1d(self):
class Fuse(torch.nn.Module):
def __init__(self):
super(Fuse, self).__init__()
self.conv = torch.nn.Conv1d(16, 33, 3, stride=2)
self.bn = torch.nn.BatchNorm1d(33)
def forward(self, x):
out = self.conv(x)
return self.bn(out)
model = Fuse()
x = torch.randn(20, 16, 50, requires_grad=True)
self.run_test(model, (x,))
def test_fuse_conv_bn2d(self):
class Fuse(torch.nn.Module):
def __init__(self):
super(Fuse, self).__init__()
self.conv = torch.nn.Conv2d(3, 2, kernel_size=1, stride=2, padding=3, bias=False)
self.bn = torch.nn.BatchNorm2d(2)
def forward(self, x):
out = self.conv(x)
return self.bn(out)
model = Fuse()
x = torch.randn(2, 3, 2, 2, requires_grad=True)
self.run_test(model, (x,))
def test_fuse_conv_bn3d(self):
class Fuse(torch.nn.Module):
def __init__(self):
super(Fuse, self).__init__()
self.conv = torch.nn.Conv3d(3, 2, (3, 5, 2), stride=(2, 1, 1), padding=(3, 2, 0), bias=False)
self.bn = torch.nn.BatchNorm3d(2)
def forward(self, x):
out = self.conv(x)
return self.bn(out)
model = Fuse()
x = torch.randn(2, 3, 10, 50, 100, requires_grad=True)
self.run_test(model, (x,), rtol=1e-3, atol=1e-6)
def test_reshape_constant_fold(self):
    """Reshape of a registered buffer should be constant-folded at export time."""
    class Reshape(torch.nn.Module):
        def __init__(self, ):
            super(Reshape, self).__init__()
            self.register_buffer("weight", torch.ones(5))
        def forward(self, x):
            # Buffer + reshape are both known at export: foldable to a constant.
            scale_1 = self.weight.reshape(1, -1, 1, 1)
            return x * scale_1
    x = torch.randn(4, 5)
    self.run_test(Reshape(), (x,), rtol=1e-3, atol=1e-5)
def run_word_language_model(self, model_name):
    """Build a small word-language RNN of the given cell type and run the
    export/parity check on it.

    Args:
        model_name: one of the RNNModel variants, e.g. "RNN_TANH", "RNN_RELU",
            "LSTM", "GRU".
    """
    ntokens = 50
    emsize = 5
    nhid = 5
    nlayers = 5
    dropout = 0.2
    tied = False
    batchsize = 5
    model = word_language_model.RNNModel(model_name, ntokens, emsize,
                                         nhid, nlayers, dropout, tied,
                                         batchsize)
    x = torch.arange(0, ntokens).long().view(-1, batchsize)
    # Only support CPU version, since tracer is not working in GPU RNN.
    self.run_test(model, (x, model.hidden))
@skipIfUnsupportedMinOpsetVersion(11)
@disableScriptTest()  # Faster RCNN model is not scriptable
def test_faster_rcnn(self):
    """End-to-end export of torchvision's Faster R-CNN (small min/max size to keep
    the test fast) and parity check against ONNX Runtime."""
    model = torchvision.models.detection.faster_rcnn.fasterrcnn_resnet50_fpn(pretrained=True, min_size=200,
                                                                             max_size=300)
    model.eval()
    x = torch.randn(2, 3, 200, 300, requires_grad=True)
    self.run_test(model, (x,), rtol=1e-3, atol=1e-5)
def get_image_from_url(self, url):
    """Download the image at *url*, cache it next to this test file, and return
    it as a float tensor (RGB, resized to 300x200 with bilinear filtering).

    Args:
        url: HTTP(S) URL of the image to fetch.

    Returns:
        torch.Tensor of shape (3, 200, 300) produced by torchvision's ToTensor.
    """
    import os
    from urllib.parse import urlsplit
    from urllib import request
    from PIL import Image
    from torchvision import transforms
    from torch._utils_internal import get_writable_path
    filename = os.path.basename(urlsplit(url)[2])
    data_dir = get_writable_path(os.path.join(os.path.dirname(__file__)))
    path = os.path.join(data_dir, filename)
    # Close the HTTP response deterministically; the original code leaked the
    # connection until garbage collection.
    with request.urlopen(url, timeout=15) as response:
        data = response.read()
    with open(path, 'wb') as f:
        f.write(data)
    image = Image.open(path).convert("RGB")
    image = image.resize((300, 200), Image.BILINEAR)
    to_tensor = transforms.ToTensor()
    return to_tensor(image)
def get_test_images(self):
    """Return a one-element list with a sample Flickr image tensor, suitable as
    input for the torchvision detection models."""
    image_url = "http://farm3.staticflickr.com/2469/3915380994_2e611b1779_z.jpg"
    image = self.get_image_from_url(url=image_url)
    images = [image]
    return images
@skipIfUnsupportedMinOpsetVersion(11)
@disableScriptTest()
def test_mask_rcnn(self):
    """Export torchvision's Mask R-CNN on a real image and check ORT parity."""
    model = torchvision.models.detection.mask_rcnn.maskrcnn_resnet50_fpn(pretrained=True, min_size=200,
                                                                         max_size=300)
    images = self.get_test_images()
    self.run_test(model, (images,), rtol=1e-3, atol=1e-5)
@skipIfUnsupportedMinOpsetVersion(11)
@disableScriptTest()
def test_keypoint_rcnn(self):
    """Export torchvision's Keypoint R-CNN on a real image and check ORT parity."""
    model = torchvision.models.detection.keypoint_rcnn.keypointrcnn_resnet50_fpn(pretrained=True, min_size=200,
                                                                                 max_size=300)
    images = self.get_test_images()
    self.run_test(model, (images,), rtol=1e-3, atol=1e-5)
@disableScriptTest()
def test_word_language_model_RNN_TANH(self):
    # Tanh-cell vanilla RNN variant of the word-language-model export test.
    self.run_word_language_model("RNN_TANH")
@disableScriptTest()
def test_word_language_model_RNN_RELU(self):
    # ReLU-cell vanilla RNN variant of the word-language-model export test.
    self.run_word_language_model("RNN_RELU")
@disableScriptTest()
def test_word_language_model_LSTM(self):
    # LSTM variant of the word-language-model export test.
    self.run_word_language_model("LSTM")
@disableScriptTest()
def test_word_language_model_GRU(self):
    # GRU variant of the word-language-model export test.
    self.run_word_language_model("GRU")
def test_index_1d(self):
    """Integer indexing on the leading dimension (input[0])."""
    class MyModel(torch.nn.Module):
        def forward(self, input):
            return input[0]
    m1 = torch.randn(3, 4, 5, 6, 7)
    self.run_test(MyModel(), m1)
def test_index_2d_1dimslice(self):
    """Slice on dim 0 combined with a full-slice on dim 1 (input[0:1, :])."""
    class MyModel(torch.nn.Module):
        def forward(self, input):
            return input[0:1, :]
    m1 = torch.randn(3, 4, 5, 6, 7)
    self.run_test(MyModel(), m1)
def test_index_2d_sliceint(self):
    """Integer index on dim 0 plus full-slice on dim 1 (input[1, :])."""
    class MyModel(torch.nn.Module):
        def forward(self, input):
            return input[1, :]
    m1 = torch.randn(3, 4, 5, 6, 7)
    self.run_test(MyModel(), m1)
def test_index_2d_neg_slice(self):
    """Slice with a negative end index (input[0:-1, :])."""
    class MyModel(torch.nn.Module):
        def forward(self, input):
            return input[0:-1, :]
    m1 = torch.randn(3, 4, 5, 6, 7)
    self.run_test(MyModel(), m1)
@skipIfUnsupportedMinOpsetVersion(9)
def test_index_mask(self):
    """Boolean-mask indexing, exercised with both uint8 and bool mask dtypes."""
    class MyModel(torch.nn.Module):
        def forward(self, input):
            # uint8 masks are the legacy boolean-mask representation.
            return input[torch.tensor([0, 1, 0], dtype=torch.uint8)]
    m1 = torch.randn(3, 4, 5, 6, 7)
    self.run_test(MyModel(), m1)
    class MyModel(torch.nn.Module):
        def forward(self, input):
            return input[torch.tensor([0, 1, 0], dtype=torch.bool)]
    m1 = torch.randn(3, 4, 5, 6, 7)
    self.run_test(MyModel(), m1)
@disableScriptTest()
def test_dict(self):
    """Dict input/output with a tensor key; the model adds the key to its value."""
    class MyModel(torch.nn.Module):
        def forward(self, x_in):
            x_out = {}
            x_out["test_key_out"] = torch.add(x_in[list(x_in.keys())[0]], list(x_in.keys())[0])
            return x_out
    x = {torch.tensor(1.): torch.randn(1, 2, 3)}
    self.run_test(MyModel(), (x,))
@disableScriptTest()
def test_dict_str(self):
    """Dict input/output with string keys."""
    class MyModel(torch.nn.Module):
        def forward(self, x_in):
            x_out = {}
            x_out["test_key_out"] = torch.add(x_in["test_key_in"], 2.)
            return x_out
    x = {"test_key_in": torch.randn(1, 2, 3)}
    self.run_test(MyModel(), (x,))
@skipIfUnsupportedMinOpsetVersion(9)
def test_cste_script(self):
    """Scripted zeros/ones with dynamic, input-dependent shapes."""
    class MyModel(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, x):
            return torch.zeros(x.size(0)), torch.ones((x.size(1), x.size(0)), dtype=torch.int64)
    x = torch.randn(3, 4)
    self.run_test(MyModel(), x)
def test_scalar_tensor(self):
    """torch.scalar_tensor from dynamic sizes, with and without explicit dtype;
    re-run with a differently-shaped second input to exercise dynamic axes."""
    class test(torch.nn.Module):
        def forward(self, input):
            return torch.scalar_tensor(input.size(0)), \
                torch.scalar_tensor(input.size(1), dtype=torch.int64)
    x = torch.randn(2, 3, 4)
    y = torch.randn(7, 8, 9)
    model = test()
    self.run_test(model, x, test_with_inputs=[y],
                  input_names=['input_1'],
                  dynamic_axes={'input_1': [0, 1, 2]})
def test_tensor(self):
    """torch.tensor(...) construction inside scripted modules: from a scalar
    shape element, a list of shape elements, a float cast, an explicit dtype,
    and a mixed shape/int list."""
    class ScalarInputModel(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, input):
            return torch.tensor(input.shape[1])
    x = torch.randn(3, 4)
    self.run_test(ScalarInputModel(), x)
    class TensorInputModel(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, input):
            return torch.tensor([input.shape[0], input.shape[1]])
    x = torch.randn(3, 4)
    self.run_test(TensorInputModel(), x)
    class FloatInputModel(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, input):
            return torch.tensor([float(input)])
    x = torch.randn(1)
    self.run_test(FloatInputModel(), x)
    class InputWithDtypeModel(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, input):
            return torch.tensor(input.shape[1], dtype=torch.long)
    x = torch.randn(3, 4)
    self.run_test(InputWithDtypeModel(), x)
    class MixedInputModel(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, input):
            return torch.tensor([input.shape[0], int(input)])
    x = torch.randn(1)
    self.run_test(MixedInputModel(), x)
def test_hardtanh(self):
    """nn.Hardtanh with non-default clamp bounds (-1.5, 2.5)."""
    model = torch.nn.Hardtanh(-1.5, 2.5)
    x = torch.arange(-5, 5).to(dtype=torch.float32)
    self.run_test(model, x)
def test_hardtanh_script_with_default_values(self):
    """Scripted functional hardtanh relying on its default min/max values."""
    class MyModel(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, x):
            return torch.nn.functional.hardtanh(x)
    x = torch.arange(-5, 5).to(dtype=torch.float32)
    self.run_test(MyModel(), x)
def test_clamp(self):
    """Tensor.clamp with both bounds, min-only, and max-only."""
    class ClampModel(torch.nn.Module):
        def forward(self, x):
            return x.clamp(-0.5, 0.5)
    x = torch.randn(3, 4)
    self.run_test(ClampModel(), x)
    class ClampMinModel(torch.nn.Module):
        def forward(self, x):
            return x.clamp(min=-0.5)
    x = torch.randn(3, 4)
    self.run_test(ClampMinModel(), x)
    class ClampMaxModel(torch.nn.Module):
        def forward(self, x):
            return x.clamp(max=0.5)
    x = torch.randn(3, 4)
    self.run_test(ClampMaxModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_clamp_dyn(self):
    """clamp with bounds computed at runtime from the input's shape
    (max-only, min-only, and both), in scripted modules."""
    class ClampMaxModel(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, x):
            return x.clamp(None, x.size(0))
    x = torch.arange(16).view(4, 4).float()
    self.run_test(ClampMaxModel(), x)
    class ClampMinModel(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, x):
            return x.clamp(x.size(0), None)
    x = torch.arange(16).view(4, 4).float()
    self.run_test(ClampMinModel(), x)
    class ClampMinMaxModel(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, x):
            return x.clamp(x.size(0), x.size(1))
    x = torch.arange(16).view(2, 8).float()
    self.run_test(ClampMinMaxModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_full_trace(self):
    """torch.full with a tensor fill value and explicit dtype, traced path."""
    class FullModel(torch.nn.Module):
        def forward(self, x):
            return torch.full((3, 4), x, dtype=torch.long)
    x = torch.tensor(12)
    self.run_test(FullModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_full_script(self):
    """torch.full with a tensor fill value and explicit dtype, scripted path."""
    class FullModelScripting(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, x):
            return torch.full((3, 4), x, dtype=torch.long)
    x = torch.tensor(12)
    self.run_test(FullModelScripting(), x)
def test_fuse_addmm(self):
    """mm + add pattern, which the exporter should fuse into a single Gemm/addmm."""
    class AddmmModel(torch.nn.Module):
        def forward(self, x):
            return torch.mm(x, x) + x
    x = torch.ones(3, 3)
    self.run_test(AddmmModel(), x)
def test_maxpool(self):
    """Basic MaxPool1d with explicit stride."""
    model = torch.nn.MaxPool1d(2, stride=1)
    x = torch.randn(20, 16, 50)
    self.run_test(model, x)
def test_conv(self):
    """Conv1d/2d/3d with assorted stride/padding/dilation, exported via both
    tracing and scripting."""
    class TraceModel(torch.nn.Module):
        def __init__(self):
            super(TraceModel, self).__init__()
            self.conv1 = torch.nn.Conv1d(16, 33, 3, stride=2)
            self.conv2 = torch.nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1))
            self.conv3 = torch.nn.Conv3d(16, 33, (3, 5, 2), stride=(2, 1, 1), padding=(4, 2, 0))
        def forward(self, input1, input2, input3):
            return self.conv1(input1), self.conv2(input2), self.conv3(input3)
    class ScriptModel(torch.jit.ScriptModule):
        def __init__(self):
            super(ScriptModel, self).__init__()
            self.conv1 = torch.nn.Conv1d(16, 33, 3, stride=2)
            self.conv2 = torch.nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1))
            self.conv3 = torch.nn.Conv3d(16, 33, (3, 5, 2), stride=(2, 1, 1), padding=(4, 2, 0))
        @torch.jit.script_method
        def forward(self, input1, input2, input3):
            return self.conv1(input1), self.conv2(input2), self.conv3(input3)
    x1 = torch.randn(20, 16, 50)
    x2 = torch.randn(20, 16, 50, 100)
    x3 = torch.randn(20, 16, 10, 50, 100)
    self.run_test(TraceModel(), (x1, x2, x3), atol=10e-5)
    self.run_test(ScriptModel(), (x1, x2, x3), atol=10e-5)
def test_conv_shape_inference(self):
    """Conv2d followed by an add, with a dynamic batch axis — exercises ONNX
    shape inference through the Conv node."""
    class Model(torch.nn.Module):
        def __init__(self):
            super(Model, self).__init__()
            self.conv2 = torch.nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1))
        def forward(self, input):
            return self.conv2(input) + 2
    x = torch.randn(20, 16, 50, 100)
    self.run_test(Model(), x, atol=10e-5,
                  input_names=['x'],
                  dynamic_axes={'x': [0]})
def test_conv_transpose(self):
    """ConvTranspose1d/2d/3d with assorted stride/padding/dilation, exported via
    both tracing and scripting."""
    class TraceModel(torch.nn.Module):
        def __init__(self):
            super(TraceModel, self).__init__()
            self.conv1 = torch.nn.ConvTranspose1d(16, 33, 3, stride=2)
            self.conv2 = torch.nn.ConvTranspose2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1))
            self.conv3 = torch.nn.ConvTranspose3d(16, 33, (3, 5, 2), stride=(2, 1, 1), padding=(4, 2, 0))
        def forward(self, input1, input2, input3):
            return self.conv1(input1), self.conv2(input2), self.conv3(input3)
    class ScriptModel(torch.jit.ScriptModule):
        def __init__(self):
            super(ScriptModel, self).__init__()
            self.conv1 = torch.nn.ConvTranspose1d(16, 33, 3, stride=2)
            self.conv2 = torch.nn.ConvTranspose2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1))
            self.conv3 = torch.nn.ConvTranspose3d(16, 33, (3, 5, 2), stride=(2, 1, 1), padding=(4, 2, 0))
        @torch.jit.script_method
        def forward(self, input1, input2, input3):
            return self.conv1(input1), self.conv2(input2), self.conv3(input3)
    x1 = torch.randn(20, 16, 50)
    x2 = torch.randn(20, 16, 50, 100)
    x3 = torch.randn(20, 16, 10, 50, 100)
    self.run_test(TraceModel(), (x1, x2, x3), atol=10e-5)
    self.run_test(ScriptModel(), (x1, x2, x3), atol=10e-5)
# Conversion of Transpose depends on input shape to be known.
# The following test only works when onnx shape inference is enabled.
@skipIfONNXShapeInference(False)
def test_transpose_infer_shape(self):
    """Transpose whose input shape is only known after shape inference runs
    through the preceding Conv2d."""
    class TransposeModule(torch.jit.ScriptModule):
        def __init__(self):
            super(TransposeModule, self).__init__()
            self.conv = torch.nn.Conv2d(3, 1, 3, stride=2)
        @torch.jit.script_method
        def forward(self, x):
            x = self.conv(x)
            return x.transpose(0, 1)
    x = torch.randn(32, 3, 64, 64)
    self.run_test(TransposeModule(), x)
def squeeze_model_tests(self, d, x1, x2):
    """Shared driver for the squeeze tests.

    Args:
        d: dim to squeeze, or None to squeeze all size-1 dims.
        x1: primary input tensor.
        x2: optional second input (different shape) run against the same
            exported graph, or None to skip the re-run.
    """
    class Squeeze(torch.nn.Module):
        def __init__(self, d):
            super(Squeeze, self).__init__()
            self.d = d
        def forward(self, x):
            if self.d is not None:
                return torch.squeeze(x, dim=self.d)
            else:
                return torch.squeeze(x)
    x2 = [] if x2 is None else [x2]
    self.run_test(Squeeze(d), x1, input_names=['input'], dynamic_axes={'input': {0: '0', 1: '1', 2: '2'}}, test_with_inputs=x2)
def test_squeeze_without_no_op(self):
    # Squeeze of a genuinely size-1 dim; no alternate-shape re-run.
    x = torch.randn(2, 1, 4)
    self.squeeze_model_tests(1, x, None)
@skipIfUnsupportedMinOpsetVersion(11)
def test_squeeze(self):
    # Same graph must handle both a squeezable and a no-op shape at runtime.
    x_squeeze = torch.randn(2, 1, 4)
    x_noop = torch.randn(2, 2, 3)
    self.squeeze_model_tests(1, x_squeeze, x_noop)
def test_squeeze_neg_without_no_op(self):
    # Negative dim index (-2) resolving to the size-1 dim.
    x = torch.randn(2, 1, 4)
    self.squeeze_model_tests(-2, x, None)
@skipIfUnsupportedMinOpsetVersion(11)
def test_squeeze_neg(self):
    # Negative dim index with a no-op second shape at runtime.
    x_squeeze = torch.randn(2, 1, 4)
    x_noop = torch.randn(2, 2, 3)
    self.squeeze_model_tests(-2, x_squeeze, x_noop)
def test_squeeze_all_dims(self):
    # d=None squeezes every size-1 dim.
    x_squeeze = torch.randn(2, 1, 4)
    x_noop = torch.randn(2, 2, 3)
    self.squeeze_model_tests(None, x_squeeze, x_noop)
@skipIfUnsupportedMinOpsetVersion(11)
def test_squeeze_no_op(self):
    # Primary input makes the squeeze a no-op; second input actually squeezes.
    x_noop = torch.randn(2, 1, 4)
    x_squeeze = torch.randn(2, 2, 1)
    self.squeeze_model_tests(2, x_noop, x_squeeze)
def test_squeeze_no_op_without_additional_inputs(self):
    # Squeeze on a non-size-1 dim is a no-op; no alternate-shape re-run.
    x_noop = torch.randn(2, 1, 4)
    self.squeeze_model_tests(2, x_noop, None)
@skipIfUnsupportedMinOpsetVersion(11)
def test_squeeze_runtime_dim(self):
    """squeeze(0) on a tensor whose dim-0 size is only known at runtime: the
    same graph must work whether dim 0 happens to be 1 or not."""
    class Squeeze(torch.nn.Module):
        def forward(self, d1, d2):
            t = torch.zeros(d1[0], d2[0])
            return t.squeeze(0)
    d1 = torch.tensor([1])
    d3 = torch.tensor([3])
    d4 = torch.tensor([4])
    self.run_test(Squeeze(), (d1, d4), test_with_inputs=[(d3, d4)])
    self.run_test(Squeeze(), (d3, d4), test_with_inputs=[(d1, d3)])
def test_unsqueeze(self):
    """torch.unsqueeze with a negative dim."""
    class Unsqueeze(torch.nn.Module):
        def forward(self, x):
            return torch.unsqueeze(x, dim=-2)
    x = torch.randn(2, 3, 4)
    self.run_test(Unsqueeze(), x)
def test_maxpool_default_stride(self):
    """Functional max_pool2d with stride defaulting to the kernel size."""
    class MaxPoolModel(torch.nn.Module):
        def forward(self, x):
            return torch.nn.functional.max_pool2d(x, 2)
    model = MaxPoolModel()
    x = torch.randn(10, 20, 16, 50)
    self.run_test(model, x)
@skipIfUnsupportedMinOpsetVersion(8)
def test_maxpool_adaptive(self):
    """AdaptiveMaxPool1d with a fixed output size (no indices)."""
    model = torch.nn.AdaptiveMaxPool1d((5), return_indices=False)
    x = torch.randn(20, 16, 50, requires_grad=True)
    self.run_test(model, x)
def test_maxpool_2d(self):
    """MaxPool2d with asymmetric padding."""
    model = torch.nn.MaxPool2d(5, padding=(1, 2))
    x = torch.randn(1, 20, 16, 50, requires_grad=True)
    self.run_test(model, x)
def test_maxpool_1d_ceil(self):
    """MaxPool1d with ceil_mode=True (ceil-rounded output length)."""
    model = torch.nn.MaxPool1d(3, 2, ceil_mode=True)
    x = torch.randn(20, 16, 50)
    self.run_test(model, x)
def test_maxpool_2d_ceil(self):
    """MaxPool2d with ceil_mode=True."""
    model = torch.nn.MaxPool2d(3, 2, ceil_mode=True)
    x = torch.randn(20, 16, 50, 32)
    self.run_test(model, x)
def test_maxpool_3d_ceil(self):
    """MaxPool3d with ceil_mode=True."""
    model = torch.nn.MaxPool3d(3, 2, ceil_mode=True)
    x = torch.randn(20, 16, 50, 44, 31)
    self.run_test(model, x)
@skipIfUnsupportedMinOpsetVersion(8)
@disableScriptTest()  # Functional module not scriptable
def test_maxpool_with_indices(self):
    """MaxPool1d returning both values and argmax indices."""
    model = torch.nn.MaxPool1d(2, stride=1, return_indices=True)
    x = torch.randn(20, 16, 50)
    self.run_test(model, x)
@skipIfUnsupportedMinOpsetVersion(10)
def test_maxpool_dilation(self):
    """MaxPool1d with dilation (dilated pooling needs opset >= 10)."""
    model = torch.nn.MaxPool1d(2, stride=1, dilation=2)
    x = torch.randn(20, 16, 50)
    self.run_test(model, x)
def test_avgpool_default_stride(self):
    """Functional avg_pool2d with stride defaulting to the kernel size."""
    class AvgPoolModel(torch.nn.Module):
        def forward(self, x):
            return torch.nn.functional.avg_pool2d(x, 2)
    model = AvgPoolModel()
    x = torch.randn(10, 20, 16, 50)
    self.run_test(model, x)
def test_avgpool(self):
    """Basic AvgPool1d with explicit stride."""
    model = torch.nn.AvgPool1d(2, stride=1)
    x = torch.randn(20, 16, 50)
    self.run_test(model, x)
def test_avgpool_1d_ceil(self):
    """AvgPool1d with ceil_mode=True."""
    model = torch.nn.AvgPool1d(3, 2, ceil_mode=True)
    x = torch.randn(1, 1, 7)
    self.run_test(model, x)
def test_avgpool_2d_ceil(self):
    """AvgPool2d with ceil_mode=True."""
    model = torch.nn.AvgPool2d(3, 2, ceil_mode=True)
    x = torch.randn(20, 16, 50, 32)
    self.run_test(model, x)
def test_avgpool_3d_ceil(self):
    """AvgPool3d with ceil_mode=True."""
    model = torch.nn.AvgPool3d(3, 2, ceil_mode=True)
    x = torch.randn(20, 16, 50, 44, 31)
    self.run_test(model, x)
def test_arithmetic(self):
    """Chained scalar add/sub/mul/div on a float tensor."""
    class ArithmeticModule(torch.nn.Module):
        def forward(self, x):
            x = x + 2
            x = x - 4
            x = x * 6
            x = x / 8
            return x
    x = torch.randn(2, 3, 4)
    self.run_test(ArithmeticModule(), x)
# In scripting the first transpose node do not carry shape and dtype info.
# The following test only works when onnx shape inference is enabled.
@skipIfONNXShapeInference(False)
def test_arithmetic_infer_dtype(self):
    """Scalar arithmetic after a transpose whose output dtype must be recovered
    by ONNX shape/type inference (scripting drops it)."""
    class ArithmeticModule(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, x):
            # t() hides shape/dtype from the scripted graph on purpose.
            x = x.t()
            x = x + 2
            x = x - 4
            x = x * 6
            x = x / 8
            return x
    x = torch.randn(2, 3)
    self.run_test(ArithmeticModule(), x)
def test_floor_div(self):
    """Floor division across many dtype combinations (float/double/int64
    dividends against int and float scalar and tensor divisors)."""
    class FloorDivModule(torch.nn.Module):
        def forward(self, x, y):
            return x // 3, x // 2., \
                x.to(dtype=torch.float64) // 3, x.to(dtype=torch.float64) // 2., \
                x.to(dtype=torch.int64) // 3, x.to(dtype=torch.int64) // 2., \
                x // (y + 1.).to(dtype=torch.int64), x // y, \
                x.to(dtype=torch.float64) // y.to(dtype=torch.int64), x.to(dtype=torch.float64) // y.to(dtype=torch.float64), \
                x.to(dtype=torch.int64) // y.to(dtype=torch.int64), x.to(dtype=torch.int64) // y
    x = torch.randn(2, 3, 4)
    # y is strictly positive so no division-by-zero is possible.
    y = torch.arange(1, 2 * 3 * 4 + 1).reshape(2, 3, 4)
    self.run_test(FloorDivModule(), (x, y))
def test_floor_div_script(self):
    """Floor division in a scripted module (scalar int, scalar float, tensor)."""
    class FloorDivModule(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, x, y):
            return x // 3, x // 2., x // y
    x = torch.randn(2, 3, 4)
    y = torch.randn(2, 3, 4)
    self.run_test(FloorDivModule(), (x, y))
@skipIfUnsupportedMinOpsetVersion(9)
def test_floordiv(self):
    """Floor division of two runtime sizes used as a new tensor's shape."""
    class FloordivModule(torch.nn.Module):
        def forward(self, x):
            return x.new_zeros(x.size(2) // x.size(1))
    x = torch.randn(2, 3, 4)
    self.run_test(FloordivModule(), (x,))
def test_div(self):
    """True division (`/` and torch.true_divide) on int and float tensors."""
    class DivModule(torch.nn.Module):
        def forward(self, x, y):
            return x / y, torch.true_divide(x, y)
    x = torch.randn(2, 3, 4).to(torch.int)
    # Divisor values start at 1, so there is no zero division.
    y = torch.arange(1, 2 * 3 * 4 + 1).reshape(2, 3, 4).to(torch.int)
    self.run_test(DivModule(), (x, y))
    self.run_test(DivModule(), (x.float(), y.float()))
# Note: div cannot (generally) be exported via scripting
# since its type promotion logic is dependent on knowing the scalar types
# of the input tensors. That is, the ONNX graph is dependent on the
# data type of the inputs. This makes it appropriate for tracing only.
def test_div_promotion_trace(self):
    """int/int true division under tracing, with the default dtype switched to
    float and then double — the traced graph must promote accordingly."""
    class DivModule(torch.nn.Module):
        def forward(self, x, y):
            return x / y, torch.true_divide(x, y)
    x = torch.randn(2, 3, 4).to(torch.int)
    y = torch.arange(1, 2 * 3 * 4 + 1).reshape(2, 3, 4).to(torch.int)
    # Save and restore the process-wide default dtype around the test.
    prev_default = torch.get_default_dtype()
    torch.set_default_dtype(torch.float)
    self.run_test(torch.jit.trace(DivModule(), (x, y)), (x, y))
    torch.set_default_dtype(torch.double)
    self.run_test(torch.jit.trace(DivModule(), (x, y)), (x, y))
    torch.set_default_dtype(prev_default)
# In scripting x, y do not carry shape and dtype info.
# The following test only works when onnx shape inference is enabled.
@skipIfONNXShapeInference(False)
def test_div_promotion_script(self):
    """Division type-promotion cases under scripting, where input dtypes are
    hidden behind transposes and must be recovered via shape inference."""
    class DivModule(torch.nn.Module):
        def forward(self, x, y):
            # Add transpose to hide shape/type information
            # Otherwise shape and type are still avaiable from input.
            x = x.transpose(1, 2)
            y = y.transpose(1, 2)
            return x / y, torch.true_divide(x, y)
    x = torch.randn(2, 3, 4).to(torch.int)
    y = torch.arange(1, 2 * 3 * 4 + 1).reshape(2, 3, 4).to(torch.int)
    prev_default = torch.get_default_dtype()
    # 1. x,y are int, and output is float.
    # This can be handled by the default case, where both are cast to float.
    # It works even if type of x, y are unknown.
    torch.set_default_dtype(torch.float)
    self.run_test(torch.jit.script(DivModule()), (x, y))
    # 2. x,y are int, and output is double.
    # This can be handled by the default case, where both are cast to double.
    # It works even if type of x, y are unknown.
    torch.set_default_dtype(torch.double)
    self.run_test(torch.jit.script(DivModule()), (x, y))
    # 3. x is int, y is double, and output is double.
    # This can only be handled when both type of x and y are known.
    torch.set_default_dtype(prev_default)
    x = torch.randn(2, 3, 4).to(torch.int)
    y = torch.arange(1, 2 * 3 * 4 + 1).reshape(2, 3, 4).to(torch.double)
    self.run_test(torch.jit.script(DivModule()), (x, y))
def test_slice_trace(self):
    """Plain positive slice on a 1-D tensor."""
    class MyModule(torch.nn.Module):
        def forward(self, x):
            return x[0:1]
    x = torch.randn(3)
    self.run_test(MyModule(), x)
def test_slice_neg(self):
    """Slice with a negative start index (x[-1:])."""
    class NegSlice(torch.nn.Module):
        def forward(self, x):
            return x[-1:]
    x = torch.randn(3, 4, 5)
    self.run_test(NegSlice(), x)
def test_slice_neg_large(self):
    """Multi-dim indexing mixing full slices, a negative slice, and a negative
    integer index."""
    class NegSlice(torch.nn.Module):
        def forward(self, x):
            return x[:, :, -3:-1, :, -1]
    x = torch.randn(3, 4, 5, 6, 7)
    self.run_test(NegSlice(), x)
def test_slice_neg_large_negone(self):
    """Full slices on every dim except a final -1 integer index."""
    class NegSlice(torch.nn.Module):
        def forward(self, x):
            return x[:, :, :, :, -1]
    x = torch.randn(3, 4, 5, 6, 7)
    self.run_test(NegSlice(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_slice_with_input_index(self):
    """In-place slice assignment whose extent depends on another input's size."""
    class InputIndexSlice(torch.nn.Module):
        def forward(self, x, y):
            x[:y.size(0), 0, :] = y
            return x
    x = torch.zeros((56, 6, 256))
    y = torch.rand((22, 256))
    self.run_test(InputIndexSlice(), (x, y))
@skipIfUnsupportedMinOpsetVersion(10)
@disableScriptTest()  # scripting tuple/list append
def test_slice_dynamic(self):
    """Slices whose bounds depend on runtime sizes, re-run against a second,
    differently-shaped input via fully dynamic axes."""
    class DynamicSliceExportMod(torch.nn.Module):
        def forward(self, x):
            results = []
            for i in range(4):
                results.append(x[:x.size(0) - i, i:x.size(2), i:3])
            return tuple(results)
    x = torch.rand(5, 5, 5)
    y = torch.randn(6, 7, 8)
    self.run_test(DynamicSliceExportMod(), x, test_with_inputs=[y],
                  input_names=['input_1'],
                  output_names=['output_1'],
                  dynamic_axes={'input_1': [0, 1, 2],
                                'output_1': [0, 1, 2]})
@skipIfUnsupportedMinOpsetVersion(10)
def test_slice_dynamic_script(self):
    """Scripted slice whose end bound is a runtime size (x[1:x.size(1)])."""
    class DynamicSliceModel(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, x):
            return x[1:x.size(1)]
    x = torch.rand(1, 2)
    self.run_test(DynamicSliceModel(), x)
@skipIfUnsupportedMinOpsetVersion(10)
def test_slice_dynamic_shape_script(self):
    """Slicing the shape tuple itself with a runtime bound, then using the
    result as a new tensor's shape."""
    class DynamicSliceModel(torch.nn.Module):
        def forward(self, x):
            return x.new_zeros(x.shape[1:x.size(2)])
    x = torch.rand(1, 2, 3, 4)
    self.run_test(DynamicSliceModel(), x)
@skipIfUnsupportedMinOpsetVersion(10)
@disableScriptTest()  # scripting tuple/list append
def test_slice_dynamic_to_end(self):
    """Open-ended slices (i:) combined with a runtime-size-derived index."""
    class DynamicSliceExportMod(torch.nn.Module):
        def forward(self, x):
            results = []
            for i in range(4):
                results.append(x[:, i:, x.size(2) - 5])
            return tuple(results)
    x = torch.rand(5, 5, 5)
    self.run_test(DynamicSliceExportMod(), x,
                  dynamic_axes={'input_1': [0, 1, 2],
                                'output_1': [0, 1, 2]})
@skipIfUnsupportedMinOpsetVersion(9)
def test_arange_dynamic(self):
    """torch.arange with runtime-size bounds plus a constant arange, exported
    via tracing and scripting, with a dynamic batch axis."""
    class ArangeModel(torch.nn.Module):
        def forward(self, input):
            return torch.arange(input.shape[0]), \
                torch.arange(12), \
                torch.arange(start=input.shape[0], end=input.shape[0] + 5)
    x = torch.randn(5, 3, 2)
    y = torch.randn(8, 3, 2)
    self.run_test(ArangeModel(), x, test_with_inputs=[y],
                  input_names=['input_1'],
                  output_names=['output_1', 'output_2', 'output_3'],
                  dynamic_axes={'input_1': [0],
                                'output_1': [0]})
    self.run_test(torch.jit.script(ArangeModel()), x,
                  test_with_inputs=[y], input_names=['input_1'],
                  output_names=['output_1', 'output_2', 'output_3'],
                  dynamic_axes={'input_1': [0],
                                'output_1': [0]})
@skipIfUnsupportedMinOpsetVersion(9)
def test_dynamic_arange_out(self):
    """arange with an `out=` tensor and a runtime scalar end value."""
    class ArangeOutModel(torch.nn.Module):
        def forward(self, end):
            # out_t's int64 dtype determines the result dtype.
            out_t = torch.tensor([1], dtype=torch.int64)
            return torch.arange(end, out=out_t)
    x = torch.tensor(8)
    self.run_test(ArangeOutModel(), (x))
@skipIfUnsupportedMinOpsetVersion(9)
def test_dynamic_arange_start_out(self):
    """arange(start, end, out=...) where start is a runtime size."""
    class ArangeStartOutModel(torch.nn.Module):
        def forward(self, start, end):
            out_t = torch.tensor([1], dtype=torch.int64)
            return torch.arange(start.size(0), end, out=out_t)
    x = torch.randn(2, 3, 4)
    y = torch.tensor(8)
    self.run_test(ArangeStartOutModel(), (x, y))
@skipIfUnsupportedMinOpsetVersion(11)
def test_arange(self):
    """arange with a non-integer step (1.5) and an explicit int64 dtype."""
    class ArangeModel(torch.nn.Module):
        def forward(self, start, end):
            return torch.arange(start.size(0), end, 1.5, dtype=torch.int64)
    x = torch.randn(2, 3, 4)
    y = torch.tensor(8.5, dtype=torch.float)
    self.run_test(ArangeModel(), (x, y))
@skipIfUnsupportedMinOpsetVersion(11)
def test_arange_out(self):
    """arange with a float `out=` tensor and a runtime float end value."""
    class ArangeOutModel(torch.nn.Module):
        def forward(self, end):
            out_t = torch.tensor([1], dtype=torch.float)
            return torch.arange(end, out=out_t)
    x = torch.tensor(8.5, dtype=torch.float)
    self.run_test(ArangeOutModel(), (x))
@skipIfUnsupportedMinOpsetVersion(11)
def test_arange_start_out(self):
    """arange(start, end, out=...) with a float out tensor and runtime bounds."""
    class ArangeStartOutModel(torch.nn.Module):
        def forward(self, start, end):
            out_t = torch.tensor([1], dtype=torch.float)
            return torch.arange(start.size(0), end, out=out_t)
    x = torch.randn(2, 3, 4)
    y = torch.tensor(8.5, dtype=torch.float)
    self.run_test(ArangeStartOutModel(), (x, y))
@skipIfUnsupportedMinOpsetVersion(11)
def test_arange_no_type(self):
    """arange without an explicit dtype; the result type follows the inputs."""
    class ArangeModel(torch.nn.Module):
        def forward(self, end):
            return torch.arange(end), \
                torch.arange(0, end)
    x = torch.tensor(6.2, dtype=torch.float)
    self.run_test(ArangeModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_size(self):
    """size()/shape accessed with positive and negative dim indices and as a
    whole-shape argument to torch.ones."""
    class SizeModel(torch.nn.Module):
        def forward(self, input):
            return torch.arange(input.size(0)), torch.arange(input.size(-1)), torch.ones(input.shape)
    x = torch.randn(5, 3, 2)
    self.run_test(SizeModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
@disableScriptTest()  # x.stride() not scriptable
def test_as_strided(self):
    """as_strided with constant size/stride/offset and with size/stride derived
    from the input's own size() and stride()."""
    class Model(torch.nn.Module):
        def forward(self, x):
            chunk_size = list(x.size())
            chunk_size[1] = chunk_size[1] * 2 - 1
            chunk_stride = list(x.stride())
            chunk_stride[1] = chunk_stride[1] // 2
            return x.as_strided((3, 3, 3), (1, 4, 2), storage_offset=2), x.as_strided(chunk_size, chunk_stride)
    x = torch.randn(5, 8, 7)
    self.run_test(Model(), x)
@disableScriptTest()  # Ellipses followed by tensor indexing not scriptable
def test_tensor_index_advanced_indexing_ellipsis(self):
    """Advanced (tensor) indexing after an ellipsis."""
    class MyModel(torch.nn.Module):
        def forward(self, input):
            return input[..., torch.tensor([2, 1]), torch.tensor([0, 3])]
    m1 = torch.randn(3, 4, 5, 6, 7)
    self.run_test(MyModel(), (m1,))
def test_tensor_index_advanced_indexing(self):
    """Advanced indexing mixing tensor indices, slices, None (new axis), and
    broadcasting index tensors of different shapes."""
    class MyModel(torch.nn.Module):
        def forward(self, input):
            return input[:, torch.tensor([[0, 2], [1, 1]]), :, torch.tensor([2, 1]), torch.tensor([0, 3])]
    m1 = torch.randn(3, 4, 5, 6, 7)
    self.run_test(MyModel(), (m1,))
    class MyModel(torch.nn.Module):
        def forward(self, input):
            return input[:, torch.tensor([0, 2]), None, 2:4, torch.tensor([[1, 3], [4, 0]])]
    self.run_test(MyModel(), (m1,))
    class MyModel(torch.nn.Module):
        def forward(self, input):
            return input[:, torch.tensor([0, 2]), torch.tensor([1]), 2:4, torch.tensor([[1], [4]])]
    self.run_test(MyModel(), (m1,))
def test_tensor_index_advanced_indexing_consecutive(self):
    """Consecutive tensor indices followed by a None (new axis)."""
    class MyModel(torch.nn.Module):
        def forward(self, input):
            return input[:, torch.tensor([0, 2]), torch.tensor([[1, 3], [4, 0]]), None]
    m1 = torch.randn(3, 4, 5, 6, 7)
    self.run_test(MyModel(), (m1,))
@skipIfUnsupportedMinOpsetVersion(11)
def test_index_put(self):
    """In-place index assignment x[ind] = update."""
    class IndexPutModel(torch.nn.Module):
        def forward(self, x, ind, update):
            x[ind] = update
            return x
    x = torch.randn(3, 4)
    ind = torch.tensor([1], dtype=torch.long)
    update = torch.ones(4)
    self.run_test(IndexPutModel(), (x, ind, update))
@skipIfUnsupportedMinOpsetVersion(11)
def test_index_put_accumulate(self):
    """index_put with accumulate=True (scatter-add semantics)."""
    class IndexPutModel(torch.nn.Module):
        def forward(self, x, ind, update):
            return x.index_put((ind, ), update, accumulate=True)
    x = torch.randn(3, 4)
    ind = torch.tensor([2], dtype=torch.long)
    update = torch.ones(4)
    self.run_test(IndexPutModel(), (x, ind, update))
@skipIfUnsupportedMinOpsetVersion(11)
def test_index_put_slice_index(self):
    """In-place += / = through many combinations of slices, integer indices and
    tensor indices (eight model variants)."""
    class IndexPutModel(torch.nn.Module):
        def forward(self, x, update):
            # slice + slice + tensor index
            x[1:2, 1:3, torch.tensor([1])] += update
            return x
    x = torch.randn(3, 4, 5)
    update = torch.tensor([10, 15]).view(1, 2, 1)
    self.run_test(IndexPutModel(), (x, update))
    class IndexPutModel2(torch.nn.Module):
        def forward(self, x, update):
            # two aligned tensor indices
            x[torch.tensor([0, 2]), torch.tensor([1, 2])] += update
            return x
    x = torch.randn(3, 4, 5)
    update = torch.randn(2, 5)
    self.run_test(IndexPutModel2(), (x, update))
    class IndexPutModel3(torch.nn.Module):
        def forward(self, x, update):
            # tensor index + slice
            x[torch.tensor([0, 2]), 1:2] += update
            return x
    x = torch.randn(3, 4, 5)
    update = torch.tensor([10, 15]).view(2, 1, 1)
    self.run_test(IndexPutModel3(), (x, update))
    class IndexPutModel4(torch.nn.Module):
        def forward(self, x, update):
            # tensor index + integer index
            x[torch.tensor([0, 2]), 2] += update
            return x
    x = torch.randn(3, 4, 5)
    update = torch.tensor([10, 15]).view(2, 1)
    self.run_test(IndexPutModel4(), (x, update))
    class IndexPutModel5(torch.nn.Module):
        def forward(self, x, update):
            # slice + tensor index + integer index
            x[1:3, torch.tensor([0, 2]), 2] += update
            return x
    x = torch.randn(3, 4, 5)
    update = torch.tensor([10, 15]).view(2, 1)
    self.run_test(IndexPutModel5(), (x, update))
    class IndexPutModel6(torch.nn.Module):
        def forward(self, x, update):
            # bounded slice + integer index, plain assignment
            x[1:3, 0] = update
            return x
    x = torch.randn(3, 4, 5)
    update = torch.arange(2 * 5).to(torch.float).view(2, 5)
    self.run_test(IndexPutModel6(), (x, update))
    class IndexPutModel7(torch.nn.Module):
        def forward(self, x, update):
            # open-ended slice start
            x[1:, 0] = update
            return x
    x = torch.randn(3, 4, 5)
    update = torch.arange(2 * 5).to(torch.float).view(2, 5)
    self.run_test(IndexPutModel7(), (x, update))
    class IndexPutModel8(torch.nn.Module):
        def forward(self, x, update):
            # open-ended slice end
            x[:3, 0] = update
            return x
    x = torch.randn(3, 4, 5)
    update = torch.arange(3 * 5).to(torch.float).view(3, 5)
    self.run_test(IndexPutModel8(), (x, update))
@skipIfUnsupportedMinOpsetVersion(11)
@disableScriptTest()  # Ellipses followed by tensor indexing not scriptable
def test_index_put_ellipsis(self):
    """In-place += through an ellipsis combined with tensor and slice indices."""
    class IndexPutModel(torch.nn.Module):
        def forward(self, x, update):
            x[..., torch.tensor([2, 1, 3]), 2:4] += update
            return x
    x = torch.randn(3, 4, 5, 6, 7)
    update = torch.randn(3, 1, 1, 3, 2)
    self.run_test(IndexPutModel(), (x, update))
    class IndexPutModel2(torch.nn.Module):
        def forward(self, x, update):
            # integer index before the ellipsis
            x[2, ..., torch.tensor([2, 1, 3]), 2:4] += update
            return x
    x = torch.randn(3, 4, 5, 6, 7)
    update = torch.randn(4, 1, 3, 2)
    self.run_test(IndexPutModel2(), (x, update))
@skipIfUnsupportedMinOpsetVersion(11)
def test_copy_(self):
    """In-place copy via slice/select assignment, including broadcasted updates
    and a runtime tensor index."""
    class CopyModel(torch.nn.Module):
        def forward(self, x, data):
            x[1:3] = data
            return x
    x = torch.randn(3, 4)
    update = torch.randn(2, 4)
    self.run_test(CopyModel(), (x, update))
    # mixed slice and select
    class CopyModel2(torch.nn.Module):
        def forward(self, x, data):
            x[1:3, 0] = data
            return x
    x = torch.randn(3, 4)
    # scalar update (broadcast), exact-size update, and 1-D update
    update = torch.tensor([0], dtype=torch.float32)
    self.run_test(CopyModel2(), (x, update))
    update = torch.tensor([2, 3], dtype=torch.float32)
    self.run_test(CopyModel2(), (x, update))
    update = torch.randn(2)
    self.run_test(CopyModel2(), (x, update))
    class CopyModel3(torch.nn.Module):
        def forward(self, x, data):
            x[1, 1:3] = data
            return x
    x = torch.randn(3, 4)
    update = torch.tensor([0], dtype=torch.float32)
    self.run_test(CopyModel3(), (x, update))
    update = torch.tensor([2, 3], dtype=torch.float32)
    self.run_test(CopyModel3(), (x, update))
    update = torch.randn(2)
    self.run_test(CopyModel3(), (x, update))
    class CopyModel4(torch.nn.Module):
        def forward(self, x, ind, data):
            # index supplied as a runtime input
            x[ind] = data
            return x
    x = torch.randn(3, 4)
    ind = torch.tensor(2)
    data = torch.randn(4)
    self.run_test(CopyModel4(), (x, ind, data))
@skipIfUnsupportedMinOpsetVersion(11)
@disableScriptTest()  # Model not scriptable (output with shape doesn't match the broadcast shape)
def test_copy_tracing(self):
    """Slice assignment whose update shape only matches via broadcast — traced
    export only."""
    class CopyModel(torch.nn.Module):
        def forward(self, x, data):
            x[1, 1:3] = data
            return x
    x = torch.randn(3, 4)
    update = torch.randn(1, 2)
    self.run_test(CopyModel(), (x, update))
@skipIfUnsupportedMinOpsetVersion(11)
def test_copy_ellipsis(self):
    """Assignment through an ellipsis (x[..., 1] = update) at two ranks."""
    class CopyModel(torch.nn.Module):
        def forward(self, x, update):
            x[..., 1] = update
            return x
    x = torch.randn(2, 3, 4)
    update = torch.ones(1)
    self.run_test(CopyModel(), (x, update))
    x = torch.randn(2, 3, 4, 5, 6)
    update = torch.ones(1)
    self.run_test(CopyModel(), (x, update))
@skipIfUnsupportedMinOpsetVersion(11)
@disableScriptTest()  # Missing input size (with ellipsis indexing)
def test_copy_ellipsis_tracing(self):
    """Assignment with an integer index before the ellipsis — traced only."""
    class CopyModel(torch.nn.Module):
        def forward(self, x, update):
            x[2, ..., 1:3] = update
            return x
    x = torch.randn(3, 4, 5, 6)
    update = torch.ones(1)
    self.run_test(CopyModel(), (x, update))
@skipIfUnsupportedMinOpsetVersion(10)
def test_flip(self):
    """torch.flip along dim 0 (needs Slice with negative steps, opset >= 10)."""
    class MyModule(torch.nn.Module):
        def forward(self, x):
            return torch.flip(x, dims=[0])
    x = torch.tensor(np.arange(6.0).reshape(2, 3))
    self.run_test(MyModule(), x)
def test_random(self):
class RandN(torch.nn.Module):
def forward(self, x):
return torch.mul(x, (torch.randn(2, 3, 4) + x).size(0))
x = torch.randn(2, 3, 4)
self.run_test(RandN(), x)
class Rand(torch.nn.Module):
def forward(self, x):
return torch.mul(x, (torch.rand(2, 3, 4) + x).size(0))
x = torch.randn(2, 3, 4)
self.run_test(Rand(), x)
@skipIfUnsupportedMinOpsetVersion(9)
@disableScriptTest() # symbolic update for randn
def test_random_dynamic_size(self):
class RandN(torch.nn.Module):
def forward(self, x):
return torch.mul(x, torch.randn(x.size()).size(1))
x = torch.randn(2, 3, 4)
self.run_test(RandN(), x)
class Rand(torch.nn.Module):
def forward(self, x):
return torch.mul(x, torch.rand(x.size()).size(1))
x = torch.randn(2, 3, 4)
self.run_test(Rand(), x)
def test_random_like(self):
class RandNLike(torch.nn.Module):
def forward(self, x):
return torch.mul(x, torch.randn_like(x).size(0))
x = torch.randn(2, 3, 4)
self.run_test(RandNLike(), x)
self.run_test(torch.jit.script(RandNLike()), x)
class RandLike(torch.nn.Module):
def forward(self, x):
return torch.mul(x, torch.rand_like(x).size(0))
x = torch.randn(2, 3, 4)
self.run_test(RandLike(), x)
self.run_test(torch.jit.script(RandLike()), x)
def test_random_like_dtype(self):
class RandNLike(torch.nn.Module):
def forward(self, x):
return torch.mul(x.to(torch.double), torch.randn_like(x, dtype=torch.double).size(0))
x = torch.randn(2, 3, 4)
self.run_test(RandNLike(), x)
class RandLike(torch.nn.Module):
def forward(self, x):
return torch.mul(x.to(torch.double), torch.rand_like(x, dtype=torch.double).size(0))
x = torch.randn(2, 3, 4)
self.run_test(RandLike(), x)
def _interpolate(self, x, mode, use_size, is_upsample, align_corners=False):
    """Helper: export and run an eager interpolate model.

    Exercises both size-based (use_size=True) and scale-factor-based resizing,
    each with a scalar variant and a per-dimension list variant, optionally
    with align_corners=True.
    """
    class MyModel(torch.nn.Module):
        def forward(self, x):
            scale = 2.3 if is_upsample else 0.5
            # Per-spatial-dimension scale factors chosen by input rank (3D/4D/5D).
            if len(x.size()) == 3:
                scale_array = 2.3
            if len(x.size()) == 4:
                scale_array = [2.3, 5.1]
            if len(x.size()) == 5:
                scale_array = [3.3, 2.3, 5.1]
            if use_size:
                # Target sizes derived from the spatial dims (everything past N, C).
                size_array = [int(float(v) * scale) for v in x.size()[2:]]
                if align_corners:
                    return torch.nn.functional.interpolate(x, mode=mode, size=size_array[0], align_corners=True), \
                        torch.nn.functional.interpolate(x, mode=mode, size=size_array, align_corners=True)
                return torch.nn.functional.interpolate(x, mode=mode, size=size_array[0]), \
                    torch.nn.functional.interpolate(x, mode=mode, size=size_array)
            if align_corners:
                return torch.nn.functional.interpolate(x, mode=mode, scale_factor=scale,
                                                       align_corners=True, recompute_scale_factor=False), \
                    torch.nn.functional.interpolate(x, mode=mode, scale_factor=scale_array,
                                                    align_corners=True, recompute_scale_factor=False)
            return torch.nn.functional.interpolate(x, mode=mode,
                                                   scale_factor=scale, recompute_scale_factor=False), \
                torch.nn.functional.interpolate(x, mode=mode,
                                                scale_factor=scale_array, recompute_scale_factor=False)

    self.run_test(MyModel(), x)
def _interpolate_script(self, x, mode, use_size, is_upsample, align_corners=False):
    """Helper: scripted counterpart of _interpolate.

    All knobs are baked in as ScriptModule constants so TorchScript can
    specialize the forward; size/scale arrays are picked by input rank.
    """
    class MyModel(torch.jit.ScriptModule):
        __constants__ = ['mode', 'use_size', 'is_upsample', 'size', 'scale', 'size_array', 'scale_array', 'align_corners']

        def __init__(self, mode, use_size, is_upsample, align_corners):
            super(MyModel, self).__init__()
            self.mode = mode
            self.use_size = use_size
            self.is_upsample = is_upsample
            self.align_corners = align_corners
            self.scale = 2.0 if self.is_upsample else 0.5
            self.size = 24 if self.is_upsample else 2
            # Rank-dependent per-dimension scale factors and target sizes.
            if x.dim() == 3:
                self.scale_array = [2.3]
                self.size_array = [16]
            elif x.dim() == 4:
                self.scale_array = [2.3, 3.1]
                self.size_array = [16, 32]
            else:
                self.scale_array = [2.3, 3.1, 4.6]
                self.size_array = [16, 32, 64]

        @torch.jit.script_method
        def forward(self, x):
            if self.use_size:
                if self.align_corners:
                    return torch.nn.functional.interpolate(x, mode=self.mode, size=self.size, align_corners=True), \
                        torch.nn.functional.interpolate(x, mode=self.mode, size=self.size_array, align_corners=True)
                return torch.nn.functional.interpolate(x, mode=self.mode, size=self.size), \
                    torch.nn.functional.interpolate(x, mode=self.mode, size=self.size_array)
            if self.align_corners:
                return torch.nn.functional.interpolate(x, mode=self.mode,
                                                       scale_factor=self.scale, recompute_scale_factor=False), \
                    torch.nn.functional.interpolate(x, mode=self.mode,
                                                    scale_factor=self.scale_array, recompute_scale_factor=False)
            return torch.nn.functional.interpolate(x, mode=self.mode,
                                                   scale_factor=self.scale, recompute_scale_factor=False), \
                torch.nn.functional.interpolate(x, mode=self.mode,
                                                scale_factor=self.scale_array, recompute_scale_factor=False)

    model = MyModel(mode, use_size, is_upsample, align_corners)
    self.run_test(model, x, atol=1e-6)
def _interpolate_tests(self, is_upsample):
    """Drive _interpolate/_interpolate_script across modes, ranks and options."""
    # - cubic mode is not supported for opsets below 11;
    # - linear mode does not match for opsets below 11;
    modes = ["nearest", "linear", "bicubic"]
    if self.opset_version < 11:
        modes = ["nearest"]
    x = [torch.randn(1, 2, 6, requires_grad=True),
         torch.randn(1, 2, 4, 6, requires_grad=True),
         torch.randn(1, 2, 4, 4, 6, requires_grad=True)]
    for mode in modes:
        for xi in x:
            mode_i = mode
            # TODO: enable bicubic downsample when ORT precision loss fixed
            if mode == "bicubic" and xi.dim() != 4:
                continue
            elif mode == "linear":
                if xi.dim() == 3:
                    # TODO : enable when linear mode is implemented for 1d inputs in ORT
                    continue
                elif xi.dim() == 4:
                    mode_i = "bilinear"
                elif xi.dim() == 5:
                    # TODO : enable when linear mode is implemented for 3d inputs in ORT
                    mode_i = "trilinear"
                    continue
            self._interpolate(xi, mode_i, True, is_upsample)
            # test with align_corners if supported
            if mode != 'nearest':
                self._interpolate(xi, mode_i, True, is_upsample, True)
                self._interpolate_script(xi, mode_i, True, is_upsample, True)
            # the following cases require dynamic sizes/scales,
            # which is not supported for opset_version < 9
            if self.opset_version >= 9:
                self._interpolate_script(xi, mode_i, True, is_upsample)
                self._interpolate(xi, mode_i, False, is_upsample)
                # test with align_corners if supported
                if mode != 'nearest':
                    self._interpolate(xi, mode_i, False, is_upsample, True)
                    self._interpolate_script(xi, mode_i, False, is_upsample, True)
                self._interpolate_script(xi, mode_i, False, is_upsample)
@disableScriptTest()
def test_interpolate_upsample(self):
    # Upsampling path of the shared interpolate matrix.
    self._interpolate_tests(True)

@disableScriptTest()
@skipIfUnsupportedMinOpsetVersion(9)
def test_interpolate_function_substitution(self):
    # interpolate called from a scripted submodule and from a @torch.jit.script
    # free function invoked inside a traced module.
    class ScriptModel(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, x):
            return torch.nn.functional.interpolate(x, mode="nearest", scale_factor=2.)

    class ScriptModule(torch.jit.ScriptModule):
        def __init__(self):
            super(ScriptModule, self).__init__()
            self.submodule = ScriptModel()

        @torch.jit.script_method
        def forward(self, input):
            return self.submodule(input)

    x = torch.randn(1, 2, 4, 4, 6)
    self.run_test(ScriptModule(), (x,))

    @torch.jit.script
    def script_method(x):
        return torch.nn.functional.interpolate(x, mode="nearest", scale_factor=2.)

    class TracingModule(torch.nn.Module):
        def forward(self, x):
            return script_method(x)

    self.run_test(TracingModule(), (x,))

@skipIfUnsupportedMinOpsetVersion(10)
@disableScriptTest()
def test_interpolate_downsample(self):
    # Downsampling path of the shared interpolate matrix.
    self._interpolate_tests(False)

@skipIfUnsupportedMinOpsetVersion(11)
@disableScriptTest()
def test_interpolate_no_shape(self):
    # One static-size interpolate and one whose size comes from another input.
    class MyModel(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, x, y):
            x = torch.add(x, x)
            out1 = torch.nn.functional.interpolate(x, mode="bilinear", size=(16, 16), align_corners=False)
            out2 = torch.nn.functional.interpolate(x, mode="nearest", size=(int(y.size(0)), int(y.size(1))))
            return out1, out2

    x = torch.randn(1, 2, 4, 4, requires_grad=True)
    y = torch.randn(16, 16, requires_grad=True)
    self.run_test(MyModel(), (x, y))
def test_interpolate_adaptive_pooling_error(self):
    """Exporting interpolate in 'area' mode (adaptive pooling) must raise RuntimeError."""
    x = torch.randn(1, 2, 6, requires_grad=True)
    # Both the size-based (use_size=True) and scale-based paths must fail.
    # Note: the original bound the context manager to an unused `cm` variable;
    # dropped, since the captured exception is never inspected.
    with self.assertRaises(RuntimeError):
        self._interpolate(x, "area", True, True)
    with self.assertRaises(RuntimeError):
        self._interpolate(x, "area", False, True)
def test_groupnorm(self):
    # GroupNorm with several group counts (multi-group, single-group, per-channel).
    model = torch.nn.GroupNorm(3, 6, 0.002)
    x = torch.randn(4, 6, 180, 180, 180)
    self.run_test(model, x)

    model = torch.nn.GroupNorm(1, 6, 0.002)
    x = torch.randn(4, 6, 180, 180)
    self.run_test(model, x)

    model = torch.nn.GroupNorm(6, 6, 0.002)
    x = torch.randn(4, 6, 180, 180)
    self.run_test(model, x)

@disableScriptTest()
def test_groupnorm_noaffine(self):
    # Same GroupNorm variants without learnable affine parameters.
    model = torch.nn.GroupNorm(4, 8, 0.002, affine=False)
    x = torch.randn(3, 8, 224, 224)
    self.run_test(model, x)

    model = torch.nn.GroupNorm(1, 6, 0.002, affine=False)
    x = torch.randn(4, 6, 180, 180)
    self.run_test(model, x)

    model = torch.nn.GroupNorm(6, 6, 0.002, affine=False)
    x = torch.randn(4, 6, 180, 180)
    self.run_test(model, x)

def test_std(self):
    # torch.std over the whole tensor, population (unbiased=False) variant.
    class StandardDeviation(torch.nn.Module):
        def forward(self, input):
            return torch.std(input, unbiased=False)

    x = torch.randn(2, 3, 4)
    model = StandardDeviation()
    self.run_test(model, x)
def test_pow(self):
    # Tensor.pow across float/float, int/int32, int/int, and float64/int operands.
    class PowModule(torch.nn.Module):
        def forward(self, x, y):
            return x.pow(y)

    x = torch.randn(2, 3, 4)
    y = torch.randn(2, 3, 4)
    self.run_test(PowModule(), (x, y))

    x = torch.randint(10, (2, 3, 4))
    y = torch.randint(10, (2, 3, 4)).to(dtype=torch.int32)
    self.run_test(PowModule(), (x, y))

    x = torch.randint(10, (2, 3, 4))
    y = torch.randint(10, (2, 3, 4))
    self.run_test(PowModule(), (x, y))

    x = torch.randn(2, 3, 4).to(dtype=torch.float64)
    y = torch.randint(10, (2, 3, 4))
    self.run_test(PowModule(), (x, y))

def test_std_along_dims(self):
    # torch.std reducing over multiple dims.
    class StandardDeviation(torch.nn.Module):
        def forward(self, input):
            return torch.std(input, dim=(0, 1), unbiased=False)

    x = torch.randn(2, 3, 4)
    model = StandardDeviation()
    self.run_test(model, x)

def test_std_keepdim(self):
    # torch.std over multiple dims with keepdim=True.
    class StandardDeviation(torch.nn.Module):
        def forward(self, input):
            return torch.std(input, dim=(0, 1), unbiased=False, keepdim=True)

    x = torch.randn(2, 3, 4)
    model = StandardDeviation()
    self.run_test(model, x)
def test_bitshift(self):
    # Shift operators with scalar, float-literal and tensor shift amounts.
    class BitshiftModel(torch.nn.Module):
        def forward(self, input, input2):
            return input >> 1, input << 3.1, \
                input2 >> torch.tensor([1, 2]), input2 << 4.2

    input = torch.arange(24, dtype=torch.float32).reshape(3, 4, 2)
    input2 = torch.arange(24, dtype=torch.int64).reshape(3, 4, 2)
    self.run_test(BitshiftModel(), (input, input2))

def test_bitshift_other_fp(self):
    # int64 tensor shifted by a float-literal amount.
    class BitshiftModel(torch.nn.Module):
        def forward(self, input):
            return input << 2.4

    input = torch.arange(24, dtype=torch.int64).reshape(3, 4, 2)
    self.run_test(BitshiftModel(), input)

# uint8 not implemented in ORT for Mul used in
# exporting bitshift for opset_version < 10
@skipIfUnsupportedMinOpsetVersion(11)
def test_bitshift_uint8(self):
    # Same shift patterns as test_bitshift, all operands uint8.
    class BitshiftModel(torch.nn.Module):
        def forward(self, input, input2):
            return input >> 1, input << 3., \
                input2 >> torch.tensor([1, 2], dtype=torch.uint8), input2 << 4.

    input = torch.arange(24, dtype=torch.uint8).reshape(3, 4, 2)
    input2 = torch.arange(24, dtype=torch.uint8).reshape(3, 4, 2)
    self.run_test(BitshiftModel(), (input, input2))
def test_narrow(self):
    # torch.narrow with constant start/length.
    class NarrowModel(torch.nn.Module):
        def forward(self, input):
            return torch.narrow(input, 0, 0, 2)

    x = torch.randn(3, 3, requires_grad=True)
    self.run_test(NarrowModel(), x)

@skipIfUnsupportedMinOpsetVersion(11)
def test_narrow_dynamic(self):
    # torch.narrow with a length derived from the input's runtime shape.
    class NarrowModel(torch.nn.Module):
        def forward(self, input):
            return torch.narrow(input, 0, 0, input.shape[0] - 1)

    x = torch.randn(3, 3, requires_grad=True)
    self.run_test(NarrowModel(), x)

@skipIfUnsupportedMinOpsetVersion(9)
def test_index_fill(self):
    # Tensor.index_fill along dim 2 with a constant index tensor.
    class IndexFillModel(torch.nn.Module):
        def forward(self, input):
            index = torch.tensor([2, 0])
            return input.index_fill(2, index, -1)

    x = torch.randn(3, 4, 5, requires_grad=True)
    self.run_test(IndexFillModel(), x)

@skipIfUnsupportedMinOpsetVersion(9)
def test_index_copy(self):
    # Tensor.index_copy along dim 1 from a constant source tensor.
    class IndexCopyModel(torch.nn.Module):
        def forward(self, input):
            index = torch.tensor([2, 0])
            source = torch.ones(3, 2, 5)
            return input.index_copy(1, index, source)

    x = torch.randn(3, 4, 5, requires_grad=True)
    self.run_test(IndexCopyModel(), x)
def test_select(self):
    # Basic select indexing along dim 1.
    class Select(torch.nn.Module):
        def forward(self, x):
            return x[:, 1]

    x = torch.randn(3, 4)
    self.run_test(Select(), x)

def test_select_negative_index(self):
    # Select with a negative index.
    class Select(torch.nn.Module):
        def forward(self, x):
            return x[:, -1]

    x = torch.randn(3, 4)
    self.run_test(Select(), x)

# TODO: enable for opset 10 when ONNXRuntime version will be updated
def test_index_select_constant_scaler_index(self):
    # index_select with a constant 0-dim index tensor.
    class IndexSelectScalerIndexModel(torch.nn.Module):
        def forward(self, x):
            index = 2
            return torch.index_select(x, 1, torch.tensor(index))

    x = torch.randn(3, 4)
    self.run_test(IndexSelectScalerIndexModel(), x)

def test_index_select_scaler_index(self):
    # index_select whose scalar index is computed at runtime (base + offset).
    class IndexSelectScalerIndexModel(torch.nn.Module):
        def __init__(self, index_base):
            super(IndexSelectScalerIndexModel, self).__init__()
            self.index_base = torch.tensor(index_base)

        def forward(self, x, index_offset):
            index = self.index_base + index_offset
            return torch.index_select(x, 1, index)

    x = torch.randn(3, 4)
    offset = 2
    index_offset = torch.tensor(offset)
    base = 1
    self.run_test(IndexSelectScalerIndexModel(base), (x, index_offset))

def test_take(self):
    # torch.take: flat-index gather into a multi-dimensional input.
    class TakeModel(torch.nn.Module):
        def forward(self, x, y):
            return torch.take(x, y)

    x = torch.randn(6, 4, 3, 3)
    y = torch.tensor([4, 1, 7, 15, 63])
    self.run_test(TakeModel(), (x, y))
def test_topk(self):
    # torch.topk with a constant k.
    class MyModule(torch.nn.Module):
        def forward(self, x):
            return torch.topk(x, 3)

    x = torch.arange(1., 6., requires_grad=True)
    self.run_test(MyModule(), x)

@skipIfUnsupportedMinOpsetVersion(11)
def test_topk_smallest_unsorted(self):
    class MyModule(torch.nn.Module):
        def forward(self, x, k):
            # When sorted=False, order of elements in the output tensors
            # are not expected to match between PyTorch and ORT
            topk_unsorted = torch.topk(x, k, largest=False, sorted=False)
            topk_sorted = torch.topk(x, k, largest=False, sorted=True)
            # Sort the unsorted result so both frameworks can be compared.
            return topk_sorted, torch.sort(topk_unsorted.values).values

    x = torch.arange(1., 6., requires_grad=True)
    k = torch.tensor(3)
    self.run_test(MyModule(), (x, k))

@skipIfUnsupportedMinOpsetVersion(10)
def test_topk_script(self):
    # topk with k supplied as a runtime tensor input, under scripting.
    class MyModuleDynamic(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, x, k):
            return torch.topk(x, k)

    x = torch.arange(1., 6., requires_grad=True)
    k = torch.tensor(3)
    self.run_test(MyModuleDynamic(), [x, k])
@skipIfUnsupportedOpsetVersion([7])
def test_normalize(self):
    # torch.nn.functional.normalize with default p/dim.
    class Model(torch.nn.Module):
        def forward(self, x):
            return torch.nn.functional.normalize(x)

    x = torch.randn(3, 3)
    self.run_test(Model(), x)

def test_layer_norm(self):
    # LayerNorm over the trailing [10, 10] shape.
    model = torch.nn.LayerNorm([10, 10])
    x = torch.randn(20, 5, 10, 10)
    self.run_test(model, x)

def test_batchnorm1d(self):
    # BatchNorm1d on both (N, C) and (N, C, L) inputs.
    x = torch.randn(10, 10)
    model = torch.nn.BatchNorm1d(10, affine=True)
    self.run_test(model, x)

    x = torch.randn(10, 10, 128)
    self.run_test(model, x)

def test_batchnorm1d_noaffine(self):
    x = torch.randn(10, 10)
    model = torch.nn.BatchNorm1d(10, affine=False)
    self.run_test(model, x)

    x = torch.randn(10, 10, 128)
    self.run_test(model, x)

def test_batchnorm2d(self):
    x = torch.randn(10, 3, 128, 128)
    model = torch.nn.BatchNorm2d(3, affine=True)
    self.run_test(model, x)

def test_batchnorm2d_noaffine(self):
    x = torch.randn(10, 3, 128, 128)
    model = torch.nn.BatchNorm2d(3, affine=False)
    self.run_test(model, x)

def test_batchnorm3d(self):
    x = torch.randn(10, 3, 128, 128, 128)
    model = torch.nn.BatchNorm3d(3, affine=True)
    self.run_test(model, x)

def test_batchnorm3d_noaffine(self):
    x = torch.randn(10, 3, 128, 128, 128)
    model = torch.nn.BatchNorm3d(3, affine=False)
    self.run_test(model, x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_scatter_with_scalar(self):
    # Tensor.scatter with a Python-scalar source value.
    class ScatterModel(torch.nn.Module):
        def forward(self, input, indices):
            values = 1.0
            return input.scatter(1, indices, values)

    input = torch.tensor([[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]], dtype=torch.float64)
    indices = torch.tensor([[1, 0], [0, 1], [0, 1]], dtype=torch.int64)
    self.run_test(ScatterModel(), input=(input, indices))

@skipIfUnsupportedMinOpsetVersion(9)
def test_scatter_with_scalar_different_types(self):
    # Tests the case when scalar src (updates values) type is different
    # from self type. Happens only with scalar src - PyTorch does not
    # allow this when src is a tensor.
    class ScatterModel(torch.nn.Module):
        def forward(self, input, indices):
            values = 1.0
            return input.scatter(1, indices, values)

    input = torch.tensor([[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]], dtype=torch.float32)
    indices = torch.tensor([[1, 0], [0, 1], [0, 1]], dtype=torch.int64)
    self.run_test(ScatterModel(), input=(input, indices))

@skipIfUnsupportedMinOpsetVersion(9)
def test_scatter(self):
    # Tensor.scatter with tensor src over several shapes/index patterns.
    class ScatterModel(torch.nn.Module):
        def forward(self, input, indices, values):
            return input.scatter(1, indices, values)

    input = torch.tensor([[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]])
    indices = torch.tensor([[1, 0], [0, 1], [0, 1]], dtype=torch.int64)
    values = torch.tensor([[1.0, 1.1], [2.0, 2.1], [3.0, 3.1]])
    self.run_test(ScatterModel(), input=(input, indices, values))

    input = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
    indices = torch.tensor([[1, 0], [0, 2], [0, 1]], dtype=torch.int64)
    values = torch.tensor([[1.0, 1.1], [2.0, 2.1], [3.0, 3.1]])
    self.run_test(ScatterModel(), (input, indices, values))

    # Higher-rank case: indices broadcast-expanded over trailing dims.
    input = torch.zeros(3, 4, 5, 6)
    indices = torch.tensor([[1, 0], [0, 2], [0, 1]], dtype=torch.int64)
    indices = indices.view(3, 2, 1, 1).expand(3, 2, 5, 6)
    values = torch.arange(3 * 2 * 5 * 6, dtype=torch.float32).view(3, 2, 5, 6)
    self.run_test(ScatterModel(), (input, indices, values))

    input = torch.zeros(3, 4, 2)
    indices = torch.tensor([[[1, 0], [0, 2]], [[1, 1], [0, 1]], [[2, 1], [2, 2]]])
    values = torch.arange(3 * 2 * 2, dtype=torch.float32).view(3, 2, 2)
    self.run_test(ScatterModel(), (input, indices, values))

@skipIfUnsupportedMinOpsetVersion(9)
def test_scatter_add(self):
    # Tensor.scatter_add along dim 1.
    class ScatterModel(torch.nn.Module):
        def forward(self, input, indices, values):
            return input.scatter_add(1, indices, values)

    input = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
    indices = torch.tensor([[1, 0], [0, 1], [0, 1]], dtype=torch.int64)
    values = torch.tensor([[1.0, 1.1], [2.0, 2.1], [3.0, 3.1]])
    self.run_test(ScatterModel(), input=(input, indices, values))
@skipIfUnsupportedMinOpsetVersion(9)
def test_one_hot(self):
    # functional.one_hot with an explicit class count larger than max(x).
    class OneHot(torch.nn.Module):
        def __init__(self, num_classes):
            super().__init__()
            self.num_classes = num_classes

        def forward(self, x):
            return torch.nn.functional.one_hot(x, self.num_classes)

    x = torch.arange(10)
    self.run_test(OneHot(15), (x))

@skipIfUnsupportedMinOpsetVersion(9)
def test_gather(self):
    # Tensor.gather along dim 1.
    class GatherModel(torch.nn.Module):
        def forward(self, input, indices):
            return input.gather(1, indices)

    input = torch.tensor([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]])
    indices = torch.tensor([[1, 0], [0, 1], [0, 1]], dtype=torch.int64)
    self.run_test(GatherModel(), input=(input, indices))

@skipIfUnsupportedMinOpsetVersion(9)
def test_expand(self):
    # expand with explicit sizes and -1 (keep-dim) placeholder.
    class ExpandModel(torch.nn.Module):
        def forward(self, input):
            return input.expand(2, 3, -1)

    input = torch.randn(2, 1, 4)
    self.run_test(ExpandModel(), input=(input))

    # expand where the target size is taken from the input's own shape.
    class ExpandInferDimModel(torch.nn.Module):
        def forward(self, input):
            return input.expand(-1, input.size(0))

    input = torch.randn(3, 1)
    self.run_test(ExpandInferDimModel(), input=(input))

    # expand whose size argument arrives as a runtime tensor.
    class ExpandTensorSizeModel(torch.nn.Module):
        def forward(self, input, size):
            return input.expand(size)

    input = torch.randn(3,)
    size = torch.tensor(-1)
    self.run_test(ExpandTensorSizeModel(), input=(input, size))
def test_multinomial(self):
    # torch.multinomial with and without replacement. The weight rows are
    # one-hot-like so the sampled indices are effectively deterministic.
    class Multinomial(torch.nn.Module):
        def forward(self, weight):
            return torch.multinomial(weight, 3, replacement=True)

    class MultinomialNoReplacement(torch.nn.Module):
        def forward(self, weight):
            return torch.multinomial(weight, 1)

    weight = torch.tensor([[0, 10, 0, 0], [0, 0, 100, 0]], dtype=torch.float)
    self.run_test(Multinomial(), (weight,))
    self.run_test(MultinomialNoReplacement(), (weight,))
def _test_reduced_ops(self, op):
    """Helper: run a reduction op (sum/mean/prod) over many input dtypes.

    `op` is a torch reduction callable taking (input, dim=...). Integer dtypes
    are skipped for torch.mean, and some float dtypes for torch.prod, per the
    inline notes below.
    """
    class ReducedOpModule(torch.nn.Module):
        def forward(self, input):
            return op(input, dim=-1)

    if op != torch.mean:  # torch.mean only supports float types
        x = torch.randint(10, (4, 4), dtype=torch.uint8)
        self.run_test(ReducedOpModule(), x)

        x = torch.randint(10, (4, 4), dtype=torch.int8)
        self.run_test(ReducedOpModule(), x)

        x = torch.randint(10, (4, 4), dtype=torch.int16)
        self.run_test(ReducedOpModule(), x)

        x = torch.randint(10, (4, 4), dtype=torch.int32)
        self.run_test(ReducedOpModule(), x)

        x = torch.randint(10, (4, 4), dtype=torch.int64)
        self.run_test(ReducedOpModule(), x)

    # torch.mean only supports float types
    # ORT does not support double ReduceProd for double
    if op != torch.prod and op != torch.mean:
        x = torch.randn(4, 5, dtype=torch.double)
        self.run_test(ReducedOpModule(), x)

    if op != torch.prod:  # torch.prod not implemented for Half
        x = torch.randn(4, 4, dtype=torch.half)
        self.run_test(ReducedOpModule(), x)

    x = torch.randn(4, 5, dtype=torch.float)
    self.run_test(ReducedOpModule(), x)

def test_reduced_sum(self):
    return self._test_reduced_ops(op=torch.sum)

def test_reduced_mean(self):
    return self._test_reduced_ops(op=torch.mean)

def test_reduced_prod(self):
    return self._test_reduced_ops(op=torch.prod)

def test_reduced_min_max(self):
    # min/max with dim return (values, indices); only values are compared.
    class ReducedMinMaxModule(torch.nn.Module):
        def forward(self, input):
            return torch.min(input, dim=-1)[0], torch.max(input, dim=0)[0]

    x = torch.randint(10, (4, 4), dtype=torch.int32)
    self.run_test(ReducedMinMaxModule(), x)

    x = torch.randint(10, (4, 4), dtype=torch.int64)
    self.run_test(ReducedMinMaxModule(), x)

    x = torch.randn(4, 5, dtype=torch.float)
    self.run_test(ReducedMinMaxModule(), x)

def test_reduce_log_sum_exp(self):
    # logsumexp over one dim and over a dim tuple.
    class ReduceLogSumExpModel(torch.nn.Module):
        def forward(self, input):
            a = torch.logsumexp(input, dim=0)
            b = torch.logsumexp(input, dim=(0, 1))
            return a + b

    x = torch.randn(4, 4, requires_grad=True)
    self.run_test(ReduceLogSumExpModel(), x)
def test_softmax(self):
    # Softmax across every valid dim (negative and positive) of a 4-D input.
    for i in range(-4, 3):
        model = torch.nn.Softmax(dim=i)
        input = torch.randn(3, 4, 5, 6)
        self.run_test(model, input)

        # Scripted variant where the rank is only known after a reshape.
        class SoftmaxUnknownRank(torch.nn.Module):
            def __init__(self, i):
                super().__init__()
                self.softmax = torch.nn.Softmax(dim=i)

            def forward(self, x):
                return self.softmax(x.reshape(3, 4, 5, 6))

        model = torch.jit.script(SoftmaxUnknownRank(i))
        self.run_test(model, input)

def test_softmax_large_values(self):
    # Numerical-stability check with very large magnitude inputs.
    input = torch.tensor([[-1e12, -1e12, -1e12], [1e12, 0.0, -5.0], [3.0, 4.0, 5.0]])
    for i in range(-2, 1):
        model = torch.nn.Softmax(dim=i)
        self.run_test(model, input)

        class SoftmaxUnknownRank(torch.nn.Module):
            def __init__(self, i):
                super().__init__()
                self.softmax = torch.nn.Softmax(dim=i)

            def forward(self, x):
                return self.softmax(x.reshape(3, 3))

        model = torch.jit.script(SoftmaxUnknownRank(i))
        self.run_test(model, input)
def test_logsoftmax(self):
    """LogSoftmax over dims 1..5 of inputs with rank 2..6."""
    # Original iterated `range(7)[2:]`, which builds a range just to slice it;
    # `range(2, 7)` is the direct, idiomatic equivalent.
    for i in range(2, 7):
        model = torch.nn.LogSoftmax(dim=i - 1)
        # Rank-i input: (i - 2) leading dims of size 2 plus trailing (3, 4).
        dims = [2] * (i - 2) + [3, 4]
        input = torch.ones(*dims, requires_grad=True)
        self.run_test(model, input)
def test_logsoftmax_dim(self):
    # LogSoftmax across every valid dim (negative and positive) of a 4-D input.
    for i in range(-4, 3):
        model = torch.nn.LogSoftmax(dim=i)
        input = torch.randn(3, 4, 5, 6)
        self.run_test(model, input)
@skipIfUnsupportedMinOpsetVersion(9)
@disableScriptTest()  # scripting prim_dtype
def test_lstm_no_hidden(self):
    # LSTM invoked without an explicit (h0, c0) initial state.
    class LSTMModel(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.rnn = torch.nn.LSTM(input_size=16, hidden_size=16)

        def forward(self, x):
            return self.rnn(x)

    input = torch.randn((10, 16, 16))
    self.run_test(LSTMModel(), (input,))

@skipIfUnsupportedMinOpsetVersion(9)
@disableScriptTest()
def test_lstm(self):
    # Single-layer unidirectional LSTM with explicit initial state.
    model = torch.nn.LSTM(RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, 1, bidirectional=False)
    input = torch.randn(RNN_SEQUENCE_LENGTH, BATCH_SIZE, RNN_INPUT_SIZE)
    h0 = torch.randn(1, BATCH_SIZE, RNN_HIDDEN_SIZE)
    c0 = torch.randn(1, BATCH_SIZE, RNN_HIDDEN_SIZE)
    self.run_test(model, (input, (h0, c0)))

@skipIfUnsupportedMinOpsetVersion(9)
@disableScriptTest()
def test_lstm_default_init_state(self):
    # LSTM relying on its implicit zero initial state.
    model = torch.nn.LSTM(RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, 1, bidirectional=False)
    input = torch.randn(RNN_SEQUENCE_LENGTH, BATCH_SIZE, RNN_INPUT_SIZE)
    self.run_test(model, input)

@skipIfUnsupportedMinOpsetVersion(9)
@disableScriptTest()  # LSTMModel model not scriptable
def test_lstm_fixed_batch_size(self):
    # Initial state built inside forward from the runtime batch size;
    # export is pinned to a fixed batch size.
    class LSTMModel(torch.nn.Module):
        def __init__(self):
            super(LSTMModel, self).__init__()
            self.lstm = torch.nn.LSTM(RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, 1, bidirectional=False)

        def forward(self, input):
            batch_size = input.size()[1]
            h0_np = np.ones([1, batch_size, RNN_HIDDEN_SIZE]).astype(np.float32)
            c0_np = np.ones([1, batch_size, RNN_HIDDEN_SIZE]).astype(np.float32)
            h0 = torch.from_numpy(h0_np)
            c0 = torch.from_numpy(c0_np)
            return self.lstm(input, (h0, c0))

    input = torch.randn(RNN_SEQUENCE_LENGTH, BATCH_SIZE, RNN_INPUT_SIZE)
    # verify with different input of same batch size
    input2 = torch.randn(RNN_SEQUENCE_LENGTH, BATCH_SIZE, RNN_INPUT_SIZE)
    self.run_test(LSTMModel(), input, fixed_batch_size=True, test_with_inputs=[input2])

@skipIfUnsupportedMinOpsetVersion(9)
@disableScriptTest()
def test_lstm_post_fix_init_state(self):
    # Same construction, but exported with dynamic seq/batch axes and
    # re-tested at a different batch size.
    class LSTMModel(torch.nn.Module):
        def __init__(self):
            super(LSTMModel, self).__init__()
            self.lstm = torch.nn.LSTM(RNN_INPUT_SIZE, RNN_HIDDEN_SIZE,
                                      1, bidirectional=False)

        def forward(self, input):
            batch_size = input.size()[1]
            h0_np = np.ones([1, batch_size, RNN_HIDDEN_SIZE]).astype(np.float32)
            c0_np = np.ones([1, batch_size, RNN_HIDDEN_SIZE]).astype(np.float32)
            h0 = torch.from_numpy(h0_np)
            c0 = torch.from_numpy(c0_np)
            return self.lstm(input, (h0, c0))

    model = LSTMModel()
    input = torch.randn(RNN_SEQUENCE_LENGTH, 1, RNN_INPUT_SIZE)
    # verify with different input of different batch size
    input2 = torch.randn(RNN_SEQUENCE_LENGTH, BATCH_SIZE, RNN_INPUT_SIZE)
    self.run_test(model, input, dynamic_axes={'input' : {0 : 'seq', 1 : 'batch'}},
                  test_with_inputs=[input2])
@disableScriptTest()
def test_lstm_constant_folding(self):
    # LSTM exported with do_constant_folding=True across two configurations.
    class LstmNet(torch.nn.Module):
        def __init__(self, input_size, hidden_size, num_layers, bidirectional):
            super(LstmNet, self).__init__()
            self.lstm = torch.nn.LSTM(input_size, hidden_size, num_layers, bidirectional=bidirectional)

        def forward(self, input, initial_state):
            return self.lstm(input, initial_state)

    def get_LstmNet_model_and_inputs(input_size, hidden_size, num_layers, batch_size,
                                     seq_len, bidirectional):
        # Builds a model plus matching (input, (h0, c0)) tuple.
        num_directions = 2 if bidirectional else 1
        model = LstmNet(input_size, hidden_size, num_layers, bidirectional)
        input = torch.randn(seq_len, batch_size, input_size)
        h0 = torch.randn(num_layers * num_directions, batch_size, hidden_size)
        c0 = torch.randn(num_layers * num_directions, batch_size, hidden_size)
        return model, (input, (h0, c0))

    batch_size1 = 3
    model1, input1 = get_LstmNet_model_and_inputs(7, 3, 2, batch_size1, 5, True)
    self.run_test(model1, input1, do_constant_folding=True)

    batch_size2 = 4
    model2, input2 = get_LstmNet_model_and_inputs(5, 4, 3, batch_size2, 7, False)
    self.run_test(model2, input2, do_constant_folding=True)

@skipIfUnsupportedMinOpsetVersion(9)
@disableScriptTest()
def test_lstm_no_bias(self):
    # bias=False LSTMs across layer counts and directionality.
    class LstmNet(torch.nn.Module):
        def __init__(self, num_layers, bidirectional):
            super(LstmNet, self).__init__()
            self.lstm = torch.nn.LSTM(RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, num_layers, bias=False, bidirectional=bidirectional)

        def forward(self, input, initial_state):
            return self.lstm(input, initial_state)

    def get_LstmNet_model_and_inputs(num_layers, bidirectional):
        input = torch.randn(RNN_SEQUENCE_LENGTH, BATCH_SIZE, RNN_INPUT_SIZE)
        num_directions = 2 if bidirectional else 1
        model = LstmNet(num_layers, bidirectional)
        h0 = torch.randn(num_layers * num_directions, BATCH_SIZE, RNN_HIDDEN_SIZE)
        c0 = torch.randn(num_layers * num_directions, BATCH_SIZE, RNN_HIDDEN_SIZE)
        return model, (input, (h0, c0))

    num_layers = [1, 1, 2, 3]
    bidirectional = [True, False, True, False]
    models_and_inputs = [get_LstmNet_model_and_inputs(n, b) for n, b in zip(num_layers, bidirectional)]
    for model, input in models_and_inputs:
        self.run_test(model, input)
@disableScriptTest()
def test_rnn_no_bias(self):
    # bias=False RNNs across layer counts and packed-sequence modes
    # (0 = plain, 1 = packed, 2 = packed + batch_first).
    def make_model(layers, packed_sequence):
        batch_first = True if packed_sequence == 2 else False
        model = torch.nn.RNN(RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, layers, bidirectional=False,
                             batch_first=batch_first, bias=False)

        if packed_sequence == 1:
            model = RnnModelWithPackedSequence(model, False)
        if packed_sequence == 2:
            model = RnnModelWithPackedSequence(model, True)
        return model

    def make_input(batch_size, layers, packed_sequence):
        batch_first = True if packed_sequence == 2 else False
        # Decreasing per-sample lengths, as required by pack_padded_sequence.
        seq_lengths = np.random.randint(1, RNN_SEQUENCE_LENGTH + 1, size=batch_size)
        seq_lengths = list(reversed(sorted(map(int, seq_lengths))))
        inputs = [torch.randn(l, RNN_INPUT_SIZE) for l in seq_lengths]
        inputs = rnn_utils.pad_sequence(inputs, batch_first=batch_first)
        inputs = [inputs]

        h0 = torch.randn(layers, batch_size, RNN_HIDDEN_SIZE)
        inputs.append(h0)
        if packed_sequence != 0:
            inputs.append(torch.IntTensor(seq_lengths))
        if len(inputs) == 1:
            input = inputs[0]
        else:
            input = tuple(inputs)
        return input

    layers = [1, 3, 1, 3, 1, 3]
    packed_sequence = [0, 0, 1, 1, 2, 2]
    models = [make_model(l, p) for l, p in zip(layers, packed_sequence)]
    inputs = [make_input(RNN_BATCH_SIZE, l, p) for l, p in zip(layers, packed_sequence)]

    for model, input in zip(models, inputs):
        self.run_test(model, input, batch_size=RNN_BATCH_SIZE)
def test_gru_no_bias(self):
    # bias=False GRUs across two size/layer/direction configurations.
    class GruNet(torch.nn.Module):
        def __init__(self, input_size, hidden_size, num_layers, bidirectional):
            super(GruNet, self).__init__()
            self.mygru = torch.nn.GRU(input_size, hidden_size, num_layers, bidirectional=bidirectional, bias=False)

        def forward(self, input, initial_state):
            out = self.mygru(input, initial_state)
            return out

    def get_GruNet_model_and_inputs(input_size, hidden_size, num_layers, batch_size,
                                    seq_len, bidirectional):
        # Builds a model plus matching (input, h0) tuple.
        num_directions = 2 if bidirectional else 1
        model = GruNet(input_size, hidden_size, num_layers, bidirectional)
        input = torch.randn(seq_len, batch_size, input_size)
        h0 = torch.randn(num_layers * num_directions, batch_size, hidden_size)
        return model, (input, h0)

    input_size = [7, 5]
    hidden_size = [3, 4]
    num_layers = [2, 3]
    batch_size = [3, 4]
    seq_len = [5, 7]
    bidirectional = [True, False]
    models_and_inputs = [get_GruNet_model_and_inputs(i, h, n, b, s, bi)
                         for i, h, n, b, s, bi in zip(input_size, hidden_size, num_layers, batch_size, seq_len, bidirectional)]
    for model, input in models_and_inputs:
        self.run_test(model, input, do_constant_folding=True)

def test_gru_constant_folding(self):
    # GRU (with bias) exported under do_constant_folding=True.
    class GruNet(torch.nn.Module):
        def __init__(self, input_size, hidden_size, num_layers, bidirectional):
            super(GruNet, self).__init__()
            self.mygru = torch.nn.GRU(input_size, hidden_size, num_layers, bidirectional=bidirectional)

        def forward(self, input, initial_state):
            out = self.mygru(input, initial_state)
            return out

    def get_GruNet_model_and_inputs(input_size, hidden_size, num_layers, batch_size,
                                    seq_len, bidirectional):
        num_directions = 2 if bidirectional else 1
        model = GruNet(input_size, hidden_size, num_layers, bidirectional)
        input = torch.randn(seq_len, batch_size, input_size)
        h0 = torch.randn(num_layers * num_directions, batch_size, hidden_size)
        return model, (input, h0)

    batch_size1 = 3
    model1, input1 = get_GruNet_model_and_inputs(7, 3, 2, batch_size1, 5, True)
    self.run_test(model1, input1, do_constant_folding=True)

    batch_size2 = 4
    model2, input2 = get_GruNet_model_and_inputs(5, 4, 3, batch_size2, 7, False)
    self.run_test(model2, input2, do_constant_folding=True)
@skipIfUnsupportedMinOpsetVersion(8)
def test_max_tensors(self):
    # Element-wise torch.max of two broadcastable tensors.
    class MaxModel(torch.nn.Module):
        def forward(self, input, other):
            return torch.max(input, other)

    model = MaxModel()
    x = torch.randn(4, 4, requires_grad=True)
    y = torch.randn(4, 1, requires_grad=True)
    self.run_test(model, (x, y))

@skipIfUnsupportedMinOpsetVersion(9)
def test_arange_end(self):
    # torch.arange(end) with explicit dtype, scripted and traced.
    class ArangeScript(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, a):
            return torch.arange(a.size(0), dtype=torch.float).view(-1, 1) + a

    x = torch.randn(3, 4, requires_grad=True)
    outputs = ArangeScript()(x)
    self.run_test(ArangeScript(), x)

    class ArangeModel(torch.nn.Module):
        def forward(self, a):
            return torch.arange(a.size(0), dtype=torch.float).view(-1, 1) + a

    self.run_test(ArangeModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_arange_end_notype(self):
class ArangeScript(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, a):
return torch.arange(a.size(0))
x = torch.randn(3, 4, requires_grad=True)
outputs = ArangeScript()(x)
self.run_test(ArangeScript(), x)
class ArangeModel(torch.nn.Module):
def forward(self, a):
return torch.arange(a.size(0))
self.run_test(ArangeModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_arange_start_end(self):
class ArangeScript(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, a):
return torch.arange(2, a.size(0) + 2, dtype=torch.float).view(-1, 1) + a
x = torch.randn(3, 4, requires_grad=True)
self.run_test(ArangeScript(), x)
class ArangeModel(torch.nn.Module):
def forward(self, a):
return torch.arange(2, a.size(0) + 2, dtype=torch.float).view(-1, 1) + a
self.run_test(ArangeModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_arange_start_end_notype(self):
class ArangeScript(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, a):
return torch.arange(2.7, a.size(0) + 2).view(-1, 1) + a
x = torch.randn(3, 4, requires_grad=True)
self.run_test(ArangeScript(), x)
class ArangeModel(torch.nn.Module):
def forward(self, a):
return torch.arange(2.7, a.size(0) + 2).view(-1, 1) + a
self.run_test(ArangeModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_arange_start_end_step(self):
class ArangeScript(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, a):
return torch.arange(2, a.size(0) * a.size(1) + 2, a.size(1), dtype=torch.float).view(-1, 1) + a
x = torch.randn(3, 4, requires_grad=True)
self.run_test(ArangeScript(), x)
class ArangeModel(torch.nn.Module):
def forward(self, a):
return torch.arange(2, a.size(0) * a.size(1) + 2, a.size(1), dtype=torch.float).view(-1, 1) + a
self.run_test(ArangeModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_arange_start_end_step_notype(self):
class ArangeScript(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, a):
return torch.arange(2.7, a.size(0) * a.size(1) + 2, a.size(1)).view(-1, 1) + a
x = torch.randn(3, 4, requires_grad=True)
self.run_test(ArangeScript(), x)
class ArangeModel(torch.nn.Module):
def forward(self, a):
return torch.arange(2.7, a.size(0) * a.size(1) + 2, a.size(1)).view(-1, 1) + a
self.run_test(ArangeModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test__dim_arange(self):
class DimArange(torch.nn.Module):
def forward(self, input):
return torch._dim_arange(input, 1)
x = torch.ones(5, 6)
self.run_test(DimArange(), x)
def _test_compare_ops(self, model, num_inputs):
    """Run *model* against float and int32 inputs (and their cross combinations
    when the model takes two inputs) to cover mixed-dtype comparison export."""
    x_float = torch.randn(1, 2, 3, 4, requires_grad=True)
    x_int = torch.randint(10, (3, 4), dtype=torch.int32)
    if num_inputs > 1:
        y_float = torch.randn(1, 2, 3, 4, requires_grad=True)
        y_int = torch.randint(10, (3, 4), dtype=torch.int32)
        # All four dtype pairings exercise ONNX type promotion paths.
        self.run_test(model, (x_float, y_float))
        self.run_test(model, (x_float, y_int))
        self.run_test(model, (x_int, y_float))
        self.run_test(model, (x_int, y_int))
    else:
        self.run_test(model, x_float)
        self.run_test(model, x_int)
def test_gt(self):
    """Elementwise greater-than between two tensors."""
    class GreaterModel(torch.nn.Module):
        def forward(self, input, other):
            return input > other
    self._test_compare_ops(GreaterModel(), 2)
@skipIfUnsupportedMinOpsetVersion(9)
def test_ge(self):
    """Elementwise greater-or-equal between two tensors."""
    class GreaterOrEqualModel(torch.nn.Module):
        def forward(self, input, other):
            return input >= other
    self._test_compare_ops(GreaterOrEqualModel(), 2)
def test_gt_scalar(self):
    """Greater-than against a scalar constant."""
    class GreaterModel(torch.nn.Module):
        def forward(self, input):
            return input > 1
    self._test_compare_ops(GreaterModel(), 1)
@skipIfUnsupportedMinOpsetVersion(9)
def test_ge_scalar(self):
    """Greater-or-equal against a scalar constant."""
    class GreaterOrEqualModel(torch.nn.Module):
        def forward(self, input):
            return input >= 1
    self._test_compare_ops(GreaterOrEqualModel(), 1)
def test_lt(self):
    """Elementwise less-than between two tensors.

    Fixed: LessModel previously returned ``input > other`` (greater-than),
    so this test duplicated test_gt and never exercised the Less operator.
    """
    class LessModel(torch.nn.Module):
        def forward(self, input, other):
            return input < other
    self._test_compare_ops(LessModel(), 2)
@skipIfUnsupportedMinOpsetVersion(9)
def test_le(self):
    """Elementwise less-or-equal between two tensors."""
    class LessOrEqualModel(torch.nn.Module):
        def forward(self, input, other):
            return input <= other
    self._test_compare_ops(LessOrEqualModel(), 2)
def test_lt_scalar(self):
    """Less-than against a scalar constant."""
    class LessModel(torch.nn.Module):
        def forward(self, input):
            return input < 1
    self._test_compare_ops(LessModel(), 1)
@skipIfUnsupportedMinOpsetVersion(9)
def test_le_scalar(self):
    """Less-or-equal against a scalar constant."""
    class LessOrEqualModel(torch.nn.Module):
        def forward(self, input):
            return input <= 1
    self._test_compare_ops(LessOrEqualModel(), 1)
def test_matmul(self):
    """2-D matmul export for float and integer inputs."""
    class MatmulModel(torch.nn.Module):
        def forward(self, input, other):
            return torch.matmul(input, other)
    x = torch.randn(3, 4, requires_grad=True)
    y = torch.randn(4, 5, requires_grad=True)
    self.run_test(MatmulModel(), (x, y))
    x = torch.randint(10, (3, 4))
    y = torch.randint(10, (4, 5))
    self.run_test(MatmulModel(), (x, y))
def test_matmul_batch(self):
    """Batched (3-D) matmul export for float and integer inputs."""
    class MatmulModel(torch.nn.Module):
        def forward(self, input, other):
            return torch.matmul(input, other)
    x = torch.randn(2, 3, 4, requires_grad=True)
    y = torch.randn(2, 4, 5, requires_grad=True)
    self.run_test(MatmulModel(), (x, y))
    x = torch.randint(10, (2, 3, 4))
    y = torch.randint(10, (2, 4, 5))
    self.run_test(MatmulModel(), (x, y))
def _argmin_argmax_model(self, input):
    """Run argmin/argmax (with and without keepdim) over *input*."""
    class ArgminArgmaxModel(torch.nn.Module):
        def forward(self, input):
            return torch.argmin(input), \
                torch.argmax(input), \
                torch.argmin(input, keepdim=True), \
                torch.argmax(input, keepdim=True)
    self.run_test(ArgminArgmaxModel(), input)
def test_argmin_argmax(self):
    """argmin/argmax over a random tensor with distinct values."""
    input = torch.randn(7, 3, 5)
    self._argmin_argmax_model(input)
# Argmin and Argmax with "select_last_index" is not supprted before opset 12
# "select_last_index" was added in opset 12 to deal with corner case where the
# same value appears multiple times in the tensor
@skipIfUnsupportedMinOpsetVersion(12)
def test_argmin_argmax_select_last_index(self):
    """argmin/argmax where the extreme value repeats (tie-breaking path)."""
    input = torch.tensor([[1., 2., 3.],
                          [1., 1., 2.]])
    self._argmin_argmax_model(input)
    input = torch.ones(7, 3, 5)
    self._argmin_argmax_model(input)
def test_repeat(self):
    """Tensor.repeat with a data-dependent repeat count (y.shape[0])."""
    class RepeatModel(torch.nn.Module):
        def forward(self, x, y):
            x2 = x.repeat(y.shape[0], 1)
            y1 = y.view(-1, 1)
            return x2 + y1
    x = torch.tensor([1, 2, 3])
    y = torch.tensor([4, 5, 8, 9])
    self.run_test(RepeatModel(), (x, y))
def test_view(self):
    """Static reshape via Tensor.view."""
    class ViewModel(torch.nn.Module):
        def forward(self, input):
            return input.view(4, 24)
    x = torch.randint(10, (4, 2, 3, 4), dtype=torch.int32)
    self.run_test(ViewModel(), x)
def test_view_dynamic(self):
    """Reshape to the runtime shape of another tensor."""
    class ViewModel(torch.nn.Module):
        def forward(self, input, other):
            return input.view(other.shape)
    x = torch.randn(2, 3, 4)
    shape = torch.randn(6, 4)
    self.run_test(ViewModel(), (x, shape))
def test_view_dynamic_zero_dim(self):
    """view(-1, ...) on a dynamic axis, including the zero-element case."""
    class ViewModel(torch.nn.Module):
        def forward(self, input):
            input = input.view(-1, 2)
            return input.view(1, -1)
    x = torch.ones(2)
    another_x = torch.empty((0,))
    self.run_test(ViewModel(), x, test_with_inputs=[another_x],
                  input_names=['input_1'], dynamic_axes={'input_1': [0, ]})
def test_view_as(self):
    """Tensor.view_as(other)."""
    class ViewModel(torch.nn.Module):
        def forward(self, input, other):
            return input.view_as(other)
    x = torch.randn(2, 3, 4)
    y = torch.randn(6, 4)
    self.run_test(ViewModel(), (x, y))
@disableScriptTest()  # ONNX Shape inference failure in if/else block for Gemm
def test_weight_norm(self):
    """weight_norm on Linear and Conv1d for several dim/name choices."""
    model = torch.nn.utils.weight_norm(torch.nn.Linear(5, 10), dim=1)
    x = torch.randn(3, 4, 5, requires_grad=True)
    self.run_test(model, x)
    model = torch.nn.utils.weight_norm(torch.nn.Conv1d(1, 1, 3))
    x = torch.randn(1, 1, 5, requires_grad=True)
    self.run_test(model, x)
    model = torch.nn.utils.weight_norm(torch.nn.Conv1d(1, 1, 3), dim=-2)
    x = torch.randn(1, 1, 5, requires_grad=True)
    self.run_test(model, x)
    model = torch.nn.utils.weight_norm(torch.nn.Conv1d(3, 6, 3), name='weight')
    x = torch.randn(3, 3, 5, requires_grad=True)
    self.run_test(model, x)
@disableScriptTest()  # ONNX Shape inference failure in if/else block for Gemm
def test_weight_norm_nodim(self):
    """weight_norm with dim=None (norm over the whole weight tensor)."""
    model = torch.nn.utils.weight_norm(torch.nn.Linear(5, 10), dim=None)
    x = torch.randn(3, 4, 5, requires_grad=True)
    self.run_test(model, x)
def test_flatten(self):
    """torch.flatten with default (full) flattening."""
    class FlattenModel(torch.nn.Module):
        def forward(self, input):
            return torch.flatten(input)
    x = torch.randint(10, (1, 2, 3, 4))
    self.run_test(FlattenModel(), x)
def test_flatten2d(self):
    """torch.flatten from dim 1 (keep batch dim)."""
    class FlattenModel(torch.nn.Module):
        def forward(self, input):
            return torch.flatten(input, 1)
    x = torch.randint(10, (1, 2, 3, 4))
    self.run_test(FlattenModel(), x)
def test_flatten2d_neg(self):
    """torch.flatten with negative start/end dims."""
    class FlattenModel(torch.nn.Module):
        def forward(self, x):
            return torch.flatten(x, 1, -1), torch.flatten(x, 0, -2), torch.flatten(x, 1, -2)
    x = torch.randint(10, (1, 2, 3, 4))
    self.run_test(FlattenModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_flatten_dynamic_axes(self):
    """flatten of inner dims with a dynamic batch axis (two batch sizes)."""
    class MyModule(torch.nn.Module):
        def forward(self, x):
            return torch.flatten(x, start_dim=2, end_dim=3)
    batch_size = 3
    x = torch.randn(batch_size, 5, 4, 5)
    y = torch.randn(5, 5, 4, 5)
    model = MyModule()
    self.run_test(model, x, test_with_inputs=[y],
                  input_names=['input'],
                  output_names=['output'],
                  dynamic_axes={'input' : {0 : 'batch_size'},
                                'output' : {0 : 'batch_size'}})
@skipIfUnsupportedMinOpsetVersion(11)
def test_getitem(self):
    """Indexing into a prim::ListConstruct with a runtime (possibly negative) index."""
    class GetItemModel(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, x, y, z, ind):
            # this will create prim::ListConstruct(x, y, z) + aten::__getitem__
            arr = [x, y, z]
            return arr[ind]
    x = torch.randn(3, 4, 5)
    y = torch.randn(1, 4, 5)
    z = torch.randn(2, 4, 5)
    ind = torch.tensor(1, dtype=torch.long)
    self.run_test(GetItemModel(), (x, y, z, ind))
    ind = torch.tensor(-2, dtype=torch.long)
    self.run_test(GetItemModel(), (x, y, z, ind))
def test_unbind(self):
    """Tensor.unbind along default, positive and negative dims."""
    class UnbindModel(torch.nn.Module):
        def forward(self, input):
            _, out, _ = input.unbind()
            return out
    x = torch.randn(3, 4, 5)
    self.run_test(UnbindModel(), x)
    class UnbindModel2(torch.nn.Module):
        def forward(self, input):
            _, out, _, _ = input.unbind(1)
            return out
    x = torch.randn(3, 4, 5)
    self.run_test(UnbindModel2(), x)
    class UnbindModel3(torch.nn.Module):
        def forward(self, input):
            _, out, _, _ = input.unbind(-2)
            return out
    x = torch.randn(3, 4, 5)
    self.run_test(UnbindModel3(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_len(self):
    """len() of an unbound tensor list with a dynamic leading axis."""
    class LenModel(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, input):
            return len(input.unbind()) + input
    x = torch.randn(4, 5)
    self.run_test(LenModel(), x, input_names=['input'], dynamic_axes={'input': {0: 'seq'}},
                  test_with_inputs=(torch.randn(5, 5),))
@skipIfUnsupportedMinOpsetVersion(9)
def test_len_list(self):
    """len() of a shape list used as a tensor-factory size."""
    class LenListModel(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, input):
            return torch.ones(len(input.shape))
    x = torch.randn(4, 5)
    self.run_test(LenListModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_unbind_dynamic(self):
    """Indexing into an unbind() result inside scripted modules."""
    class UnbindModel(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, input):
            return input.unbind()[1]
    x = torch.randn(3, 4, 5)
    self.run_test(UnbindModel(), x)
    class UnbindModel2(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, input):
            return input.unbind(-1)[1]
    x = torch.randn(3, 4, 5)
    self.run_test(UnbindModel2(), x)
def test_split(self):
    """Tensor.split with explicit section sizes, on default and negative dims."""
    class SplitModel(torch.nn.Module):
        def forward(self, input):
            out1, out2, out3 = input.split([2, 1, 2])
            return out1, out2, out3
    x = torch.randn(5, 4, 3)
    self.run_test(SplitModel(), x)
    class SplitModel2(torch.nn.Module):
        def forward(self, input):
            out1, out2, out3 = input.split([2, 1, 1], -2)
            return out1, out2, out3
    x = torch.randn(5, 4, 3)
    self.run_test(SplitModel2(), x)
    class SplitModel3(torch.nn.Module):
        def forward(self, input):
            out1, out2, out3 = input.split([2, 1, 2])
            return out3, out1
    x = torch.randn(5, 4, 3)
    self.run_test(torch.jit.script(SplitModel3()), x)
@skipIfUnsupportedMinOpsetVersion(11)
@disableScriptTest()
def test_split_size_as_list(self):
    """split with a runtime List[int] of section sizes, re-concatenated."""
    class SplitModel(torch.nn.Module):
        def forward(self, input, split_sizes: List[int]):
            out = []
            split_list: List[torch.Tensor] = input.split(split_sizes)
            for ob in split_list:
                out.append(ob)
            return torch.cat(out, dim=0)
    x = torch.randn(6, 4, 3)
    split_sizes = [torch.tensor(2), torch.tensor(4)]
    self.run_test(SplitModel(), (x, split_sizes))
@skipIfUnsupportedMinOpsetVersion(11)
def test_split_size_with_slice(self):
    """split sizes derived from other inputs' runtime shapes."""
    class SplitModule(torch.nn.Module):
        def forward(self, x, y, t):
            splits = (x.size(1), y.size(1))
            out, out2 = torch.split(t, splits, dim=1)
            return out, out2
    x = torch.randn(2, 3)
    y = torch.randn(2, 4)
    t = torch.randn(2, 7)
    self.run_test(SplitModule(), (x, y, t))
@skipIfUnsupportedMinOpsetVersion(11)
def test_split_dynamic(self):
    """Indexing into a chunked split() result inside scripted modules."""
    class SplitModel(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, input):
            return input.split(2)[1]
    x = torch.randn(5, 4, 3)
    self.run_test(SplitModel(), x)
    class SplitModel2(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, input):
            return input.split(2, -3)[1]
    x = torch.randn(5, 4, 3)
    self.run_test(SplitModel2(), x)
def test_concat(self):
    """torch.cat of three tensors along dim 0."""
    class ConcatModel(torch.nn.Module):
        def forward(self, x, y, z):
            return torch.cat((x, y, z))
    x = torch.randn(3, 4, 5)
    y = torch.randn(1, 4, 5)
    z = torch.randn(2, 4, 5)
    self.run_test(ConcatModel(), (x, y, z))
@skipIfUnsupportedMinOpsetVersion(11)
def test_concat_dynamic(self):
    """torch.cat of a dynamically-sized unbind() list."""
    class ConcatDynamicModel(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, x):
            return torch.cat(x.unbind())
    x = torch.randn(4, 5, 6)
    self.run_test(ConcatDynamicModel(), x)
def test_stack(self):
    """torch.stack of three tensors along dim 1."""
    class StackModel(torch.nn.Module):
        def forward(self, x, y, z):
            return torch.stack((x, y, z), 1)
    x = torch.randn(3, 4, 5)
    y = torch.randn(3, 4, 5)
    z = torch.randn(3, 4, 5)
    self.run_test(StackModel(), (x, y, z))
@skipIfUnsupportedMinOpsetVersion(11)
def test_stack_dynamic(self):
    """torch.stack of a dynamically-sized unbind() list."""
    class StackDynamicModel(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, x):
            return torch.stack(x.unbind(), 1)
    x = torch.randn(4, 5, 6)
    self.run_test(StackDynamicModel(), x)
def test_loop_dynamic(self):
    """Scripted for-loop whose trip count depends on a runtime dim size."""
    class LoopModel(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, x):
            for i in range(x.size(2)):
                x = x + i
            return x
    model = LoopModel()
    inputs = torch.zeros(1, 2, 3, dtype=torch.long)
    self.run_test(model, inputs)
@skipIfUnsupportedMinOpsetVersion(9)
def test_loop_nested(self):
    """A while-loop nested inside a for-loop (ONNX Loop within Loop)."""
    class NestedLoopsModel(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, x):
            for i in range(5):
                a = 0
                while a < 4:
                    a += 1
                x = x + a
            return x
    model = NestedLoopsModel()
    inputs = torch.zeros(1, 2, 3, dtype=torch.long)
    self.run_test(model, inputs)
@skipIfUnsupportedMinOpsetVersion(11)
def test_loop_with_list(self):
    """Tensor lists mutated inside a loop via append, +, and += forms."""
    class ListLoopModel(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, x):
            res = []
            res1 = []
            arr = x.split([3, 4, 1, 1, 2, 3, 2], 0)
            res2 = torch.zeros(3, 4, dtype=torch.long)
            res3 = []
            res4 = []
            for i in range(len(arr)):
                res = res.append(arr[i].sum(0, False))
                res1 = res1.append(arr[-1 - i].sum(0, False))
                res2 += 1
                res3 = res3 + [arr[i].sum(0, False)]
                res4 += [arr[-1 - i].sum(0, False)]
            return torch.stack(res), torch.stack(res1), res2, torch.stack(res3), torch.stack(res4)
    model = ListLoopModel()
    inputs = torch.randn(16)
    self.run_test(model, inputs)
@skipIfONNXShapeInference(False)
@skipIfUnsupportedMinOpsetVersion(11)
def test_loop_transpose(self):
    """transpose inside a scripted loop body (loop-carried accumulation)."""
    class LoopModel(torch.nn.Module):
        def forward(self, x):
            res = torch.zeros_like(x[0])
            for i in range(x.size(0)):
                res += x[0].transpose(0, 1)
            return res
    model = torch.jit.script(LoopModel())
    x = torch.randn(5, 3, 3)
    self.run_test(model, x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_list(self):
    """Tensor-list ops (append, pop, insert, +=, +) exported via scripting."""
    class ListModel(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, x):
            tensors = x.unbind()
            res = []
            res.append(tensors[0])
            res.append(tensors[1])
            res.pop(1)
            res.insert(0, tensors[1])
            res.append(tensors[2])
            res += [tensors[3], tensors[4]]
            res = res + [tensors[5]]
            return torch.ones(len(res))
    model = ListModel()
    inputs = torch.randn(16, 1)
    self.run_test(model, inputs)
@skipIfUnsupportedMinOpsetVersion(9)
def test_tensor_factories(self):
    """zeros/ones created from a runtime size (traced path)."""
    class TensorFactory(torch.nn.Module):
        def forward(self, x):
            return torch.zeros(x.size()) + torch.ones(x.size())
    x = torch.randn(2, 3, 4)
    self.run_test(TensorFactory(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_tensor_factories_script(self):
    """zeros/ones with explicit dtype from a runtime shape (scripted path)."""
    class TensorFactory(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, x):
            return torch.zeros(x.shape, dtype=torch.float) + torch.ones(x.shape, dtype=torch.float)
    x = torch.randn(2, 3, 4)
    self.run_test(TensorFactory(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_tensor_like_factories_script(self):
    """zeros_like/ones_like with dtype/layout/device kwargs (scripted path)."""
    class TensorFactory(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, x):
            zeros = torch.zeros_like(x, dtype=torch.float, layout=torch.strided, device=torch.device('cpu'))
            ones = torch.ones_like(x, dtype=torch.float, layout=torch.strided, device=torch.device('cpu'))
            return zeros + ones
    x = torch.randn(2, 3, 4)
    self.run_test(TensorFactory(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_eye(self):
    """torch.eye with static and runtime-shape-derived sizes, dynamic axes."""
    class TensorFactory(torch.nn.Module):
        def forward(self, x):
            return torch.eye(x.size()[1], 3), torch.eye(4, 4, dtype=torch.long), torch.eye(x.size()[1], 2, dtype=torch.long)
    x = torch.randn(2, 3, 4)
    another_x = torch.randn(5, 6, 7)
    self.run_test(TensorFactory(), x, test_with_inputs=[another_x],
                  input_names=['input_1'], dynamic_axes={'input_1': [0, 1, 2]})
@skipIfUnsupportedMinOpsetVersion(9)
def test_inplace_zero(self):
    """In-place Tensor.zero_ and its aliasing effect on the input."""
    class Zero_(torch.nn.Module):
        def forward(self, x):
            return x.zero_(), x
    x = torch.randn(2, 3, 4)
    self.run_test(Zero_(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_new_zeros(self):
    """Tensor.new_zeros sized from shape slices, with and without dtype."""
    class Zero_(torch.nn.Module):
        def forward(self, x):
            return x.new_zeros(x.shape[1:2]), x.new_zeros(x.shape[2:], dtype=torch.long)
    x = torch.randn(2, 3, 4)
    self.run_test(Zero_(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_list_pass(self):
    """Size/shape lists combined via concatenation and passed to new_zeros."""
    class Slice(torch.nn.Module):
        def forward(self, x, y):
            return x.new_zeros(x.shape[2:] + y.shape[1:])
    x = torch.randn(2, 3, 4, 5)
    y = torch.randn(1, 2, 3, 4)
    self.run_test(Slice(), (x, y))
    class Size(torch.nn.Module):
        def forward(self, x, y):
            return x.new_zeros(x.shape + y.shape)
    x = torch.randn(2, 3, 4)
    y = torch.randn(1, 2, 3)
    self.run_test(Size(), (x, y))
    class Array(torch.nn.Module):
        def forward(self, x, y):
            arr1 = [x.shape[0], x.shape[1], 2]
            arr2 = [y.shape[0], y.shape[1]]
            return x.new_zeros(arr1 + arr2)
    x = torch.randn(2, 3, 4)
    y = torch.randn(1, 2, 3)
    self.run_test(Array(), (x, y))
    class List(torch.nn.Module):
        def forward(self, x, y):
            l1 = list(x.shape)
            l2 = list(y.shape)
            return x.new_zeros(l1 + l2)
    x = torch.randn(2, 3, 4)
    y = torch.randn(1, 2, 3)
    self.run_test(List(), (x, y))
@skipIfUnsupportedMinOpsetVersion(9)
def test_new_empty(self):
    """Export Tensor.new_empty; fill_(0) / *0 make the uninitialized output
    deterministic so results can be compared across backends."""
    class Empty(torch.nn.Module):  # renamed from misspelled "Emtpy"
        def forward(self, x):
            return x.new_empty(x.shape[0]).fill_(0), x.new_empty(x.shape[0], dtype=torch.long) * 0
    x = torch.randn(2, 3, 4)
    self.run_test(Empty(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_new_full(self):
    """Tensor.new_full with float fill value and with a dtype override."""
    class Full(torch.nn.Module):
        def forward(self, x):
            return x.new_full(x.shape[1:2], 5), x.new_full(x.shape[0:1], 1.3, dtype=torch.long)
    x = torch.randn(2, 3, 4)
    self.run_test(Full(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_inplace_list(self):
    """In-place ops (add_, fill_) used directly inside a cat list."""
    class Arithmetic(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, x, y):
            return torch.cat([x.add_(3), y.fill_(0)])
    x = torch.randn(2, 3)
    y = torch.randn(2, 3)
    self.run_test(Arithmetic(), (x, y))
@skipIfUnsupportedMinOpsetVersion(9)
def test_inplace_fill(self):
    """Tensor.fill_ and its aliasing effect on the input."""
    class Fill_(torch.nn.Module):
        def forward(self, x):
            return x.fill_(3), x
    x = torch.randn(2, 3, 4)
    self.run_test(Fill_(), x)
def test_inplace_arithmetic(self):
    """Chained in-place arithmetic (add_, mul_) across two inputs."""
    class Arithmetic(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, x, y):
            x.add_(3)
            y.mul_(x)
            return x, y
    x = torch.randn(2, 3, 4)
    y = torch.randn(2, 3, 4)
    self.run_test(Arithmetic(), (x, y))
@disableScriptTest()
def test_sort(self):
    """torch.sort (descending) over every dim, positive and negative."""
    class SortModel(torch.nn.Module):
        def forward(self, x):
            out = []
            for i in range(-2, 2):
                out.append(torch.sort(x, dim=i, descending=True))
            return out
    x = torch.randn(3, 4)
    self.run_test(SortModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
@disableScriptTest()
def test_sort_ascending(self):
    """torch.sort (ascending) over every dim; ascending needs opset >= 11."""
    class SortModel(torch.nn.Module):
        def forward(self, x):
            out = []
            for i in range(-2, 2):
                out.append(torch.sort(x, dim=i, descending=False))
            return out
    x = torch.randn(3, 4)
    self.run_test(SortModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_masked_fill(self):
    """masked_fill with a constant uint8 mask and with a computed bool mask."""
    class MaskedFillModel(torch.nn.Module):
        def forward(self, x):
            mask = torch.tensor([[0, 0, 1], [1, 1, 0]], dtype=torch.uint8)
            return x.masked_fill(mask, 2)
    x = torch.zeros(4, 2, 3, requires_grad=True)
    self.run_test(MaskedFillModel(), x)
    class MaskedFillModel2(torch.nn.Module):
        def forward(self, x):
            return x.masked_fill(x > 3, -1)
    x = torch.arange(16).view(2, 2, 4).to(torch.float32)
    self.run_test(MaskedFillModel2(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_masked_fill_inplace(self):
    """In-place masked_fill_ variants of the cases above."""
    class MaskedFillModel(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, x):
            mask = torch.tensor([[0, 0, 1], [1, 1, 0]], dtype=torch.uint8)
            x.masked_fill_(mask, 2)
            return x
    x = torch.zeros(4, 2, 3, requires_grad=True)
    self.run_test(MaskedFillModel(), x)
    class MaskedFillModel2(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, x):
            x.masked_fill_(x > 3, -1)
            return x
    x = torch.arange(16).view(2, 2, 4).to(torch.float32)
    self.run_test(MaskedFillModel2(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_masked_scatter(self):
    """torch.masked_scatter with a computed mask and constant source."""
    class MaskedScatterModel(torch.nn.Module):
        def forward(self, x):
            return torch.masked_scatter(x, x.ge(0.5), torch.ones(100, 100) * 5)
    x = torch.randn(3, 4, 5, requires_grad=True)
    self.run_test(MaskedScatterModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_masked_select(self):
    """torch.masked_select (data-dependent output size)."""
    class MaskedSelectModel(torch.nn.Module):
        def forward(self, x):
            return torch.masked_select(x, x.ge(0.5))
    x = torch.randn(3, 4, 5, requires_grad=True)
    self.run_test(MaskedSelectModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_pixel_shuffle(self):
    """torch.pixel_shuffle with upscale_factor=2."""
    class PixelShuffle(torch.nn.Module):
        def forward(self, x):
            return torch.pixel_shuffle(x, upscale_factor=2)
    x = torch.randn(2, 16, 4, 3, requires_grad=True)
    self.run_test(PixelShuffle(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_scalar_type(self):
    """Scalar-type propagation through arithmetic, comparison, mm and full."""
    class ArithmeticModel(torch.nn.Module):
        def forward(self, x):
            return x.size(0) * 2 * x
    x = torch.ones(2, 3, dtype=torch.float32)
    self.run_test(ArithmeticModel(), x)
    class ReciprocalModel(torch.nn.Module):
        def forward(self, x):
            return torch.reciprocal(x)
    x = torch.tensor([2.0, 4.0], dtype=torch.double)
    self.run_test(ReciprocalModel(), x)
    class ComparisonModel(torch.nn.Module):
        def forward(self, x, y):
            a = torch.tensor([12.0])
            return x.lt(1.5) & y.le(2) & x.le(1), x.gt(y), x.lt(y), a.ge(x.size(0))
    x = torch.ones(2, 3, dtype=torch.int32)
    y = torch.ones(2, 3, dtype=torch.float32)
    self.run_test(ComparisonModel(), (x, y))
    class MatMulModel(torch.nn.Module):
        def forward(self, x):
            return (torch.mm(x, x) + x + torch.mm(x, x) + x)
    x = torch.ones(3, 3)
    self.run_test(MatMulModel(), x)
    class AddMMModel(torch.nn.Module):
        def forward(self, x):
            return torch.mm(x, x) + x
    x = torch.ones(3, 3)
    self.run_test(AddMMModel(), x)
    class FullModel(torch.nn.Module):
        # add is used for exporting full
        def forward(self, x):
            return torch.full((3, 4), x)
    x = torch.tensor(12.)
    self.run_test(FullModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
@disableScriptTest()  # dtype mismatch
def test_full_like(self):
    """torch.full_like with a constant fill value."""
    class FullLikeModel(torch.nn.Module):
        def forward(self, x):
            return torch.full_like(x, 4)
    x = torch.tensor(12)
    self.run_test(FullLikeModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
@disableScriptTest()  # dtype mismatch
def test_full_like_value(self):
    """torch.full_like where the fill value is computed at runtime."""
    class FullLikeModel(torch.nn.Module):
        def forward(self, x, y):
            out = y + 2
            return torch.full_like(x, out)
    x = torch.tensor(12)
    y = torch.tensor(2)
    self.run_test(FullLikeModel(), (x, y))
def test_l1_norm(self):
    """torch.norm with p=1 along the last dim."""
    class NormModel(torch.nn.Module):
        def forward(self, x):
            return torch.norm(x, p=1, dim=-1, keepdim=False)
    x = torch.randn(4, 2, 3, requires_grad=True)
    self.run_test(NormModel(), x)
def test_l2_norm(self):
    """torch.norm with p=2 along a negative dim."""
    class NormModel(torch.nn.Module):
        def forward(self, x):
            return torch.norm(x, p=2, dim=-2, keepdim=False)
    x = torch.randn(4, 2, 3, requires_grad=True)
    self.run_test(NormModel(), x)
def test_frobenius_norm(self):
    """Frobenius norm over a single dim."""
    class NormModel(torch.nn.Module):
        def forward(self, x):
            return torch.norm(x, p="fro", dim=0, keepdim=False)
    x = torch.randn(4, 2, 3, requires_grad=True)
    self.run_test(NormModel(), x)
def test_frobenius_norm_keepdim(self):
    """Frobenius norm over two dims with keepdim=True."""
    class NormModel(torch.nn.Module):
        def forward(self, x):
            return torch.norm(x, p="fro", dim=(0, 1), keepdim=True)
    x = torch.randn(4, 2, 3, requires_grad=True)
    self.run_test(NormModel(), x)
def test_unfold(self):
    """Tensor.unfold with static size/step."""
    class UnfoldModel(torch.nn.Module):
        def forward(self, x):
            return x.unfold(dimension=2, size=2, step=2)
    x = torch.randn(4, 2, 3, requires_grad=True)
    self.run_test(UnfoldModel(), x)
@skipIfONNXShapeInference(False)
def test_unfold_infer_shape(self):
    """unfold after a conv — exercises ONNX shape inference on the conv output."""
    class UnfoldModule(torch.jit.ScriptModule):
        def __init__(self):
            super(UnfoldModule, self).__init__()
            self.conv = torch.nn.Conv1d(3, 1, 3, stride=2)
        @torch.jit.script_method
        def forward(self, x):
            x = self.conv(x)
            return x.unfold(dimension=2, size=2, step=2)
    x = torch.randn(32, 3, 64)
    self.run_test(UnfoldModule(), x)
def test_remainder(self):
    """torch.remainder of two broadcastable tensors."""
    class RemainderModel(torch.nn.Module):
        def forward(self, input, other):
            return torch.remainder(input, other)
    x = torch.randn(4, 2, 3)
    y = torch.randn(1, 2, 1)
    self.run_test(RemainderModel(), (x, y))
def test_remainder_scalar(self):
    """torch.remainder with a float scalar divisor on int input."""
    class RemainderModel(torch.nn.Module):
        def forward(self, input):
            return torch.remainder(input, 2.55)
    x = torch.randint(10, (2, 3))
    self.run_test(RemainderModel(), x)
@skipIfUnsupportedMinOpsetVersion(10)
def test_fmod(self):
    """torch.fmod of two broadcastable tensors (opset >= 10)."""
    class FModModel(torch.nn.Module):
        def forward(self, input, other):
            return torch.fmod(input, other)
    x = torch.randn(4, 2, 3)
    y = torch.randn(1, 2, 1)
    self.run_test(FModModel(), (x, y))
@skipIfUnsupportedMinOpsetVersion(10)
def test_fmod_scalar(self):
    """torch.fmod with a float scalar divisor on int input."""
    class FModModel(torch.nn.Module):
        def forward(self, input):
            return torch.fmod(input, 2.55)
    x = torch.randint(10, (2, 3))
    self.run_test(FModModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_gelu(self):
    """torch.nn.functional.gelu export."""
    class GeluModel(torch.nn.Module):
        def forward(self, x):
            return torch.nn.functional.gelu(x)
    x = torch.randn(2, 4, 5, 6, requires_grad=True)
    self.run_test(GeluModel(), x)
def test_add_inplace(self):
    """Augmented-assignment add (x += c) inside forward."""
    class InplaceAddModel(torch.nn.Module):
        def forward(self, x):
            x += 12
            return x
    x = torch.randn(4, 2, 3, requires_grad=True)
    self.run_test(InplaceAddModel(), x)
def test_rsqrt(self):
    """Tensor.rsqrt on double input."""
    class RsqrtModel(torch.nn.Module):
        def forward(self, x):
            return x.rsqrt()
    x = torch.randn(4, 2, 3, requires_grad=True, dtype=torch.float64)
    self.run_test(RsqrtModel(), x)
def test_rsqrt_zeros(self):
    """rsqrt of all-zero input (inf results must match across backends)."""
    class RsqrtModel(torch.nn.Module):
        def forward(self, x):
            return x.rsqrt()
    x = torch.zeros(4, 2, 3, requires_grad=True, dtype=torch.float64)
    self.run_test(RsqrtModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_unique(self):
    """torch.unique returning counts (no inverse indices)."""
    class UniqueModel(torch.nn.Module):
        def forward(self, x):
            return torch.unique(x, sorted=True, return_inverse=False, return_counts=True)
    x = torch.tensor([1, 3, 2, 3], dtype=torch.long)
    self.run_test(UniqueModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_unique_along_dim(self):
    """torch.unique along dim 0 returning inverse indices (no counts)."""
    class UniqueModel(torch.nn.Module):
        def forward(self, x):
            return torch.unique(x, dim=0, sorted=True, return_inverse=True, return_counts=False)
    x = torch.tensor([1, 3, 2, 3], dtype=torch.long)
    self.run_test(UniqueModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_cumsum(self):
    """torch.cumsum along dim 0."""
    class CumSum(torch.nn.Module):
        def forward(self, input):
            return torch.cumsum(input, dim=0)
    x = torch.randn(2, 3, 4)
    model = CumSum()
    self.run_test(model, x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_cumsum_with_cast(self):
    """cumsum with a dtype cast, on int32 and bool inputs."""
    class CumSum(torch.nn.Module):
        def forward(self, input):
            return torch.cumsum(input, dim=0, dtype=torch.float32)
    model = CumSum()
    x = torch.tensor([2, 3, 4], dtype=torch.int32)
    self.run_test(model, x)
    x = torch.tensor([False, True, True])
    self.run_test(model, x)
@disableScriptTest()  # error in propagate as assign input shape
@skipIfUnsupportedMinOpsetVersion(10)
@skipIfUnsupportedOpsetVersion([12])  # Due to ONNX Loop shape inference issue
def test_embedding_bag(self):
    """nn.EmbeddingBag module: sum (with/without include_last_offset) and max modes."""
    model = torch.nn.EmbeddingBag(10, 5, mode='sum', scale_grad_by_freq=True)
    input = torch.randint(10, (7,))
    offset = torch.tensor([0, 2, 5, 6])
    self.run_test(model, (input, offset))
    model = torch.nn.EmbeddingBag(10, 5, mode='sum', include_last_offset=True)
    input = torch.randint(10, (7,))
    offset = torch.tensor([0, 2, 5, 6])
    self.run_test(model, (input, offset))
    model = torch.nn.EmbeddingBag(10, 5, mode='max')
    input = torch.randint(10, (7, 5))
    self.run_test(model, (input))
@disableScriptTest()  # scripting prim::Uninitialized, prim::dtype, prim::unchecked_cast
@skipIfUnsupportedMinOpsetVersion(10)
@skipIfUnsupportedOpsetVersion([12])  # Due to ONNX Loop shape inference issue
def test_embedding_bag_1d_per_sample_weights(self):
    """Functional embedding_bag, 1-D input with offsets and per-sample weights."""
    class EmbeddingModel(torch.nn.Module):
        def forward(self, embedding_matrix, input, offset, weights):
            return torch.nn.functional.embedding_bag(input, embedding_matrix, offsets=offset,
                                                     mode='sum', per_sample_weights=weights)
    model = EmbeddingModel()
    x = torch.randint(7, (6,))
    w = torch.randn(6, )
    offset = torch.tensor([0, 2, 5])
    embedding_matrix = torch.rand(10, 15)
    self.run_test(model, (embedding_matrix, x, offset, w))
@disableScriptTest()  # scripting prim::Uninitialized, prim::dtype, prim::unchecked_cast
@skipIfUnsupportedMinOpsetVersion(10)
@skipIfUnsupportedOpsetVersion([12])  # Due to ONNX Loop shape inference issue
def test_embedding_bag_2d_per_sample_weights(self):
    """Functional embedding_bag, 2-D input (implicit bags) with per-sample weights."""
    class EmbeddingModel(torch.nn.Module):
        def forward(self, embedding_matrix, input, weights):
            return torch.nn.functional.embedding_bag(input, embedding_matrix,
                                                     mode='sum', per_sample_weights=weights)
    embedding_matrix = torch.rand(10, 15)
    model = EmbeddingModel()
    x = torch.randint(7, (2, 3))
    w = torch.randn(2, 3)
    self.run_test(model, (embedding_matrix, x, w))
@disableScriptTest()  # scripting prim::Uninitialized, prim::dtype, prim::unchecked_cast
@skipIfUnsupportedMinOpsetVersion(11)
@unittest.skip("Due to ONNX Loop shape inference issue.")
def test_embedding_bag_dynamic_input(self):
    """embedding_bag with fully dynamic matrix/input/offset shapes (currently skipped)."""
    class EmbeddingModel1D(torch.nn.Module):
        def forward(self, embedding_matrix, input, weights, offsets):
            return torch.nn.functional.embedding_bag(input, embedding_matrix, offsets=offsets,
                                                     mode='sum', per_sample_weights=weights)
    model = EmbeddingModel1D()
    x = torch.randint(7, (6,))
    w = torch.randn(6, )
    offsets = torch.tensor([0, 2, 5], dtype=torch.long)
    embedding_matrix = torch.rand(10, 15)
    x2 = torch.randint(7, (2,))
    w2 = torch.randn(2, )
    embedding_matrix2 = torch.rand(12, 25)
    offsets2 = torch.tensor([0, ], dtype=torch.long)
    self.run_test(model, (embedding_matrix, x, w, offsets),
                  test_with_inputs=[(embedding_matrix2, x2, w2, offsets2)],
                  input_names=['embedding_matrix', 'x', 'offsets', 'w'],
                  dynamic_axes={'embedding_matrix': [0, 1], 'x': [0], 'offsets': [0], 'w': [0]})
    class EmbeddingModel2D(torch.nn.Module):
        def forward(self, embedding_matrix, input, weights):
            return torch.nn.functional.embedding_bag(input, embedding_matrix,
                                                     mode='sum', per_sample_weights=weights)
    model = EmbeddingModel2D()
    x = torch.randint(7, (2, 3))
    w = torch.randn(2, 3)
    embedding_matrix = torch.rand(10, 15)
    x2 = torch.randint(7, (3, 5))
    w2 = torch.randn(3, 5)
    embedding_matrix2 = torch.rand(12, 25)
    self.run_test(model, (embedding_matrix, x, w),
                  test_with_inputs=[(embedding_matrix2, x2, w2)],
                  input_names=['embedding_matrix', 'x', 'w'],
                  dynamic_axes={'embedding_matrix': [0, 1], 'x': [0, 1], 'w': [0, 1]})
@skipIfUnsupportedMinOpsetVersion(8)
def test_meshgrid(self):
    """Export torch.meshgrid over three 1-D tensors of different lengths."""
    class Meshgrid(torch.nn.Module):
        def forward(self, x, y, z):
            grids = torch.meshgrid(x, y, z)
            return grids[0], grids[1], grids[2]

    a = torch.randn(3, requires_grad=True)
    b = torch.zeros(4, requires_grad=True)
    c = torch.randn(5, requires_grad=True)
    self.run_test(Meshgrid(), (a, b, c))
@skipIfUnsupportedMinOpsetVersion(8)
def test_meshgrid_scalar(self):
    """meshgrid where the final input is a 0-dim (scalar) tensor."""
    class Meshgrid(torch.nn.Module):
        def forward(self, x, y, z):
            grids = torch.meshgrid(x, y, z)
            return grids[0], grids[1], grids[2]

    a = torch.ones(3, requires_grad=True)
    b = torch.zeros(4, requires_grad=True)
    scalar = torch.tensor(2.0)
    self.run_test(Meshgrid(), (a, b, scalar))
def test_baddbmm(self):
    """baddbmm with a constant tensor alpha and a Python-float beta."""
    class MyModule(torch.nn.Module):
        def forward(self, input, batch1, batch2):
            return torch.baddbmm(input, batch1, batch2, alpha=torch.tensor(5), beta=3.5)

    bias = torch.randn(10, 3, 5)
    lhs = torch.randn(10, 3, 4)
    rhs = torch.randn(10, 4, 5)
    self.run_test(MyModule(), (bias, lhs, rhs))
def test_baddbmm_dynamic(self):
    """baddbmm where alpha and beta arrive as runtime tensor inputs."""
    class MyModule(torch.nn.Module):
        def forward(self, input, batch1, batch2, alpha, beta):
            return torch.baddbmm(input, batch1, batch2, alpha=alpha, beta=beta)

    bias = torch.randn(10, 3, 5)
    lhs = torch.randn(10, 3, 4)
    rhs = torch.randn(10, 4, 5)
    scale_a = torch.tensor(5)
    scale_b = torch.tensor(3.5)
    self.run_test(MyModule(), (bias, lhs, rhs, scale_a, scale_b))
def test_numel(self):
    # Scripted module: Tensor.numel() must export as a size computation
    # feeding an elementwise multiply.
    class MyModule(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, input):
            return input.numel() * input

    x = torch.randn(2, 3, 5)
    model = MyModule()
    self.run_test(model, (x,))
def test_numel_empty(self):
    # Edge case: a 0-element tensor, so numel() == 0 and the product is empty.
    class MyModule(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, input):
            return input.numel() * input

    x = torch.randn(0)
    model = MyModule()
    self.run_test(model, (x,))
def test_cast_to(self):
    # Scripted Tensor.to(other): cast input to other's dtype (int64 here),
    # then add — exercises dtype propagation through the exported cast.
    class MyModule(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, input, other):
            return input.to(other) + other

    x = torch.randn(2, 3, 4)
    y = torch.tensor([1], dtype=torch.int64)
    model = MyModule()
    self.run_test(model, (x, y))
def test_cast_to_bool(self):
    """Tensor.to(bool_tensor) casts to bool; result is concatenated with the mask."""
    class MyModule(torch.nn.Module):
        def forward(self, input, other):
            return torch.cat((input.to(other), other), 0)

    data = torch.randn(2, 3, 4)
    mask = torch.zeros([2, 3, 4], dtype=torch.bool)
    self.run_test(MyModule(), (data, mask))
@skipIfUnsupportedMinOpsetVersion(9)
def test_ones_bool(self):
    """torch.ones with dtype=bool, followed by a cast and bitwise-and."""
    class MyModule(torch.nn.Module):
        def forward(self, input):
            mask = torch.ones(input.shape, dtype=torch.bool)
            return input.to(mask) & mask

    self.run_test(MyModule(), torch.randn(2, 3, 4))
def test_log(self):
    """Elementwise natural logarithm export."""
    class Log(torch.nn.Module):
        def forward(self, input):
            return torch.log(input)

    # rand() keeps inputs in (0, 1), so log is finite.
    data = torch.rand(2, 3, 4)
    self.run_test(Log(), data)
def test_log1p(self):
    """Elementwise log(1 + x) export."""
    class Log1p(torch.nn.Module):
        def forward(self, input):
            return torch.log1p(input)

    # rand() keeps inputs in (0, 1), so 1 + x is always positive.
    data = torch.rand(2, 3, 4)
    self.run_test(Log1p(), data)
@skipIfUnsupportedMinOpsetVersion(11)
def test_round(self):
    """torch.round export; -1.5 and 3.5 pin down the .5 tie-breaking behaviour."""
    class Round(torch.nn.Module):
        def forward(self, x):
            return torch.round(x)

    vals = torch.tensor([0.9920, -1.0362, -1.5000, 3.5000], requires_grad=True)
    self.run_test(Round(), vals)
def test_constant_pad(self):
    """ConstantPad1d and asymmetric ConstantPad2d with a non-zero fill value."""
    pad1d = torch.nn.ConstantPad1d(2, 3.5)
    self.run_test(pad1d, torch.randn(2, 4, 4))

    pad2d = torch.nn.ConstantPad2d((3, 0, 2, 1), 3.5)
    self.run_test(pad2d, torch.randn(2, 2, 4, 4))
# Dynamic padding is added in opset 11
@skipIfUnsupportedMinOpsetVersion(11)
@disableScriptTest()  # Functional module not scriptable
def test_pad_types(self):
    """Pad amounts given as int32 vs int64 scalar tensors must both export.

    Cleanup: the original used a redundant ``y = pad = (...)`` double
    assignment and never read ``pad``; keep a single name.
    """
    class Pad(torch.nn.Module):
        def forward(self, x, pad):
            return torch.nn.functional.pad(x, pad)

    x = torch.randn(2, 2, 4, 4)
    y = (torch.tensor(2, dtype=torch.int32), torch.tensor(4, dtype=torch.int32))
    self.run_test(Pad(), (x, y))

    y = (torch.tensor(2, dtype=torch.int64), torch.tensor(4, dtype=torch.int64))
    self.run_test(Pad(), (x, y))
@skipIfUnsupportedMaxOpsetVersion(10)
def test_unsupported_pad(self):
    # Dynamic pad sizes are only expressible from opset 11 on; for older
    # opsets the exporter must fail with exactly this user-facing message.
    class Pad(torch.nn.Module):
        def forward(self, x, pad):
            return torch.nn.functional.pad(x, pad)

    def run():
        x = torch.randn(2, 2, 4, 4)
        y = pad = (torch.tensor(2, dtype=torch.int32), torch.tensor(4, dtype=torch.int32))
        p = Pad()
        f = io.BytesIO()
        torch.onnx._export(p, (x, y), f)

    with self.assertRaises(RuntimeError) as cm:
        run()

    the_exception = cm.exception
    # The message text is asserted verbatim — do not reword it here or in the exporter.
    self.assertEqual('Unsupported: ONNX export of Pad in opset 9. The sizes of the padding must be constant. ' +
                     'Please try opset version 11.', the_exception.args[0])
@disableScriptTest()  # export prim::Uninitialized
def test_reflection_pad(self):
    """ReflectionPad1d and asymmetric ReflectionPad2d export."""
    pad1d = torch.nn.ReflectionPad1d(2)
    self.run_test(pad1d, torch.randn(2, 4, 4))

    pad2d = torch.nn.ReflectionPad2d((3, 0, 2, 1))
    self.run_test(pad2d, torch.randn(2, 2, 4, 4))
@disableScriptTest()  # export prim::Uninitialized
def test_replication_pad(self):
    """ReplicationPad1d and asymmetric ReplicationPad2d export."""
    pad1d = torch.nn.ReplicationPad1d(2)
    self.run_test(pad1d, torch.randn(2, 4, 4))

    pad2d = torch.nn.ReplicationPad2d((3, 0, 2, 1))
    self.run_test(pad2d, torch.randn(2, 2, 4, 4))
@skipIfUnsupportedMinOpsetVersion(11)
@disableScriptTest()  # export prim::Uninitialized
def test_im2col(self):
    # F.unfold (im2col) with three kernel/dilation/padding/stride combos:
    # a large dilated kernel, a plain 2x2 kernel, and a 1x1 kernel with
    # dilation — each stresses a different branch of the symbolic.
    class Unfold(torch.nn.Module):
        def forward(self, input):
            return torch.nn.functional.unfold(input, kernel_size=(10, 15), dilation=2, padding=5, stride=3), \
                torch.nn.functional.unfold(input, kernel_size=(2, 2), dilation=1, padding=0, stride=3), \
                torch.nn.functional.unfold(input, kernel_size=(1, 1), dilation=5, padding=2, stride=3)

    x = torch.rand(1, 1, 200, 100)
    self.run_test(Unfold(), x)
@skipIfNoLapack
@skipIfUnsupportedMinOpsetVersion(11)
def test_det(self):
    """Batched determinant (torch.det) export."""
    class Det(torch.nn.Module):
        def forward(self, x):
            return torch.det(x)

    mats = torch.randn(2, 3, 5, 5)
    self.run_test(Det(), mats)
# This test checks output scalar type in the ONNX graph should not be null
# https://github.com/pytorch/pytorch/issues/28607
@skipIfUnsupportedMinOpsetVersion(10)
def test_trace_script(self):
    # A scripted helper invoked from a traced module: the slice start comes
    # from a tensor, so scalar-type inference must fill in the output dtype.
    @torch.jit.script
    def center_slice_helper(input, h_offset):
        return input[:, h_offset:]

    class CenterCrop(torch.nn.Module):
        def forward(self, input):
            return center_slice_helper(input, torch.tensor(input.shape[1] - 1))

    x = torch.randn(3, 4)
    self.run_test(CenterCrop(), x)
@skipIfNoLapack
@skipIfUnsupportedMinOpsetVersion(11)
def test_logdet(self):
    """Batched log-determinant (torch.logdet) export."""
    class LogDet(torch.nn.Module):
        def forward(self, x):
            return torch.logdet(x)

    mats = torch.randn(2, 3, 5, 5)
    self.run_test(LogDet(), mats)
def test_dim(self):
    # Scripted .dim(): must fold to a constant, including for a 0-element input.
    class DimModel(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, input):
            out = input * 2
            out *= out.dim()
            return out

    empty_input = torch.randn(0, requires_grad=True)
    multi_dim_input = torch.randn(1, 2, 3, requires_grad=True)
    self.run_test(DimModel(), empty_input)
    self.run_test(DimModel(), multi_dim_input)
@skipIfUnsupportedMinOpsetVersion(12)
@disableScriptTest()  # variable number of inputs not scriptable
def test_einsum(self):
    # Four representative einsum equations: batched diagonal extraction,
    # batched matmul, inner product, and transpose.
    class EinsumModelBatchDiagonal(torch.nn.Module):
        def forward(self, *tensor_list):
            eqn = '...ii ->...i'
            return torch.einsum(eqn, *tensor_list)

    x = torch.randn(3, 5, 5)
    self.run_test(EinsumModelBatchDiagonal(), input=(x,))

    class EinsumModelBatchMatmul(torch.nn.Module):
        def forward(self, *tensor_list):
            eqn = 'bij, bjk -> bik'
            return torch.einsum(eqn, *tensor_list)

    x = torch.randn(5, 2, 3)
    y = torch.randn(5, 3, 4)
    self.run_test(EinsumModelBatchMatmul(), input=(x, y))

    class EinsumModelInnerProd(torch.nn.Module):
        def forward(self, *tensor_list):
            eqn = 'i,i'
            return torch.einsum(eqn, *tensor_list)

    x = torch.randn(5)
    y = torch.randn(5)
    self.run_test(EinsumModelInnerProd(), input=(x, y))

    class EinsumModelTranspose(torch.nn.Module):
        def forward(self, *tensor_list):
            eqn = 'ij->ji'
            return torch.einsum(eqn, *tensor_list)

    x = torch.randn(3, 4)
    self.run_test(EinsumModelTranspose(), input=(x,))
@skipIfUnsupportedMinOpsetVersion(12)
@disableScriptTest()  # shape/type inference
def test_crossentropyloss(self):
    """Drive _crossentropyloss over 1-D, 2-D and 3-D targets, with the
    default (-100) and a custom ignore_index.

    Cleanup: the three per-shape blocks were copy-pasted; loop over
    (input shape, target shape) pairs instead. The target shape is the
    input shape with the class dimension (dim 1, size 5) removed.
    """
    for ignore_index in [-100, 1]:
        for x_shape, y_shape in [((3, 5), (3,)),
                                 ((3, 5, 2), (3, 2)),
                                 ((3, 5, 2, 7), (3, 2, 7))]:
            x = torch.randn(*x_shape)
            y = torch.empty(*y_shape, dtype=torch.long).random_(5)
            # Relabel class 1 so the ignore_index path is actually exercised.
            y[y == 1] = ignore_index
            self._crossentropyloss(x, y, ignore_index)
def _crossentropyloss(self, x, y, ignore_index):
    """Export-check CrossEntropyLoss for every (reduction, weight) combination.

    Cleanup: the original defined six near-identical wrapper classes
    (None/Sum/Mean x weighted/unweighted); one generic module plus a loop
    covers the same combinations in the same order. As before,
    ignore_index == -100 (the PyTorch default) is exercised by omitting the
    argument, and 'mean' (the default reduction) by omitting ``reduction``.
    """
    class CrossEntropyLossModule(torch.nn.Module):
        # Generic Module wrapper so run_test sees a model, not a bare loss.
        def __init__(self, **loss_kwargs):
            super(CrossEntropyLossModule, self).__init__()
            self.loss = torch.nn.CrossEntropyLoss(**loss_kwargs)

        def forward(self, input, target):
            return self.loss(input, target)

    # Same construction order as the original six classes, so the RNG draws
    # for the random weight vectors happen in the same sequence.
    for reduction in ('none', 'sum', 'mean'):
        for weighted in (False, True):
            kwargs = {}
            if reduction != 'mean':  # 'mean' is the default reduction
                kwargs['reduction'] = reduction
            if weighted:
                kwargs['weight'] = torch.randn(5)
            if ignore_index != -100:  # -100 is the default ignore_index
                kwargs['ignore_index'] = ignore_index
            self.run_test(CrossEntropyLossModule(**kwargs), input=(x, y))
@skipIfUnsupportedMinOpsetVersion(9)
@disableScriptTest()  # Output dtype mismatch
def test_kldiv_loss(self):
    """Drive _kldiv_loss over 1-D, 3-D and 4-D inputs.

    Cleanup: the three copy-pasted shape blocks collapsed into a loop;
    the RNG draw order (x then y per shape) is unchanged.
    """
    for shape in [(5,), (2, 3, 5), (2, 3, 5, 7)]:
        x = torch.randn(shape)
        y = torch.randn(shape)
        self._kldiv_loss(x, y)
def _kldiv_loss(self, x, y):
    # KLDivLoss export across reductions and log_target settings.
    class KLDivLossNone(torch.nn.Module):
        def __init__(self):
            super(KLDivLossNone, self).__init__()
            self.loss = torch.nn.KLDivLoss(reduction='none', log_target=True)

        def forward(self, input, target):
            return self.loss(input, target)

    self.run_test(KLDivLossNone(), input=(x, y))

    class KLDivLossMean(torch.nn.Module):
        def __init__(self):
            super(KLDivLossMean, self).__init__()
            self.loss = torch.nn.KLDivLoss(reduction='mean', log_target=False)

        def forward(self, input, target):
            return self.loss(input, target)

    self.run_test(KLDivLossMean(), input=(x, y))

    class KLDivLossSum(torch.nn.Module):
        def __init__(self):
            super(KLDivLossSum, self).__init__()
            self.loss = torch.nn.KLDivLoss(reduction='sum', log_target=True)

        def forward(self, input, target):
            return self.loss(input, target)

    self.run_test(KLDivLossSum(), input=(x, y))

    class KLDivLossBatchMean(torch.nn.Module):
        def __init__(self):
            super(KLDivLossBatchMean, self).__init__()
            self.loss = torch.nn.KLDivLoss(reduction='batchmean', log_target=False)

        def forward(self, input, target):
            return self.loss(input, target)

    self.run_test(KLDivLossBatchMean(), input=(x, y))

    class KLDivLossMiniBatchMean(torch.nn.Module):
        def __init__(self):
            super(KLDivLossMiniBatchMean, self).__init__()
            # NOTE(review): passing the legacy size_average=False alongside
            # reduction='batchmean' — in nn losses the legacy args take
            # precedence over `reduction`, so this combination may not test
            # batchmean at all. Confirm the intended semantics.
            self.loss = torch.nn.KLDivLoss(reduction='batchmean', size_average=False, log_target=True)

        def forward(self, input, target):
            return self.loss(input, target)

    self.run_test(KLDivLossMiniBatchMean(), input=(x, y))
@skipIfUnsupportedMinOpsetVersion(12)
@disableScriptTest()  # shape/type inference
def test_nllloss(self):
    # NLLLoss (reduction='none') over a 1-D target, with the default
    # ignore_index (-100) present in the target data.
    class NLLModel(torch.nn.Module):
        def __init__(self):
            super(NLLModel, self).__init__()
            self.loss = torch.nn.NLLLoss(reduction='none')
            self.m = torch.nn.LogSoftmax(dim=1)

        def forward(self, input, target):
            output = self.loss(self.m(2 * input), target)
            return output

    N, C = 5, 4
    input = torch.randn(N, 16)
    target = torch.empty(N, dtype=torch.long).random_(0, C)

    # using test data containing default ignore_index=-100
    target[target == 1] = -100
    self.run_test(NLLModel(), (input, target))
@skipIfUnsupportedMinOpsetVersion(12)
@disableScriptTest()  # shape/type inference
def test_nllloss_2d_none(self):
    # 2-D NLLLoss, reduction='none'. NLLModel closes over C, which is only
    # bound below the class statement — valid because the closure is read
    # at instantiation time.
    class NLLModel(torch.nn.Module):
        def __init__(self):
            super(NLLModel, self).__init__()
            self.loss = torch.nn.NLLLoss(reduction='none')
            self.conv = torch.nn.Conv2d(16, C, (3, 3))
            self.m = torch.nn.LogSoftmax(dim=1)

        def forward(self, input, target):
            output = self.loss(self.m(self.conv(input)), target)
            return output

    N, C = 5, 4
    input = torch.randn(N, 16, 10, 10)
    # The unpadded 3x3 conv shrinks 10x10 to 8x8; the target matches that.
    target = torch.empty(N, 8, 8, dtype=torch.long).random_(0, C)

    # using test data containing default ignore_index=-100
    target[target == 1] = -100
    self.run_test(NLLModel(), (input, target))
@skipIfUnsupportedMinOpsetVersion(12)
@disableScriptTest()  # shape/type inference
def test_nllloss_2d_mean(self):
    # 2-D NLLLoss, reduction='mean'; C is captured by closure (bound below).
    class NLLModel(torch.nn.Module):
        def __init__(self):
            super(NLLModel, self).__init__()
            self.loss = torch.nn.NLLLoss(reduction='mean')
            self.conv = torch.nn.Conv2d(16, C, (3, 3))
            self.m = torch.nn.LogSoftmax(dim=1)

        def forward(self, input, target):
            output = self.loss(self.m(self.conv(input)), target)
            return output

    N, C = 5, 4
    input = torch.randn(N, 16, 10, 10)
    target = torch.empty(N, 8, 8, dtype=torch.long).random_(0, C)

    # using test data containing default ignore_index=-100
    target[target == 1] = -100
    self.run_test(NLLModel(), (input, target))
@skipIfUnsupportedMinOpsetVersion(12)
@disableScriptTest()  # shape/type inference
def test_nllloss_2d_sum(self):
    # 2-D NLLLoss, reduction='sum'; C is captured by closure (bound below).
    class NLLModel(torch.nn.Module):
        def __init__(self):
            super(NLLModel, self).__init__()
            self.loss = torch.nn.NLLLoss(reduction='sum')
            self.conv = torch.nn.Conv2d(16, C, (3, 3))
            self.m = torch.nn.LogSoftmax(dim=1)

        def forward(self, input, target):
            output = self.loss(self.m(self.conv(input)), target)
            return output

    N, C = 5, 4
    input = torch.randn(N, 16, 10, 10)
    target = torch.empty(N, 8, 8, dtype=torch.long).random_(0, C)

    # using test data containing default ignore_index=-100
    target[target == 1] = -100
    self.run_test(NLLModel(), (input, target))
@skipIfUnsupportedMinOpsetVersion(12)
@disableScriptTest()  # shape/type inference
def test_nllloss_2d_mean_weights(self):
    # 2-D NLLLoss with per-class weights; C is captured by closure (bound below).
    class NLLModel(torch.nn.Module):
        def __init__(self):
            super(NLLModel, self).__init__()
            self.loss = torch.nn.NLLLoss(reduction='mean', weight=torch.randn(C))
            self.conv = torch.nn.Conv2d(16, C, (3, 3))
            self.m = torch.nn.LogSoftmax(dim=1)

        def forward(self, input, target):
            output = self.loss(self.m(self.conv(input)), target)
            return output

    N, C = 5, 4
    input = torch.randn(N, 16, 10, 10)
    target = torch.empty(N, 8, 8, dtype=torch.long).random_(0, C)

    # using test data containing default ignore_index=-100
    target[target == 1] = -100
    self.run_test(NLLModel(), (input, target))
@skipIfUnsupportedMinOpsetVersion(12)
@disableScriptTest()  # shape/type inference
def test_nllloss_2d_mean_ignore_index(self):
    # 2-D NLLLoss with a non-default ignore_index; class 1 occurs naturally
    # in the random target, so no relabeling step is needed here.
    class NLLModel(torch.nn.Module):
        def __init__(self):
            super(NLLModel, self).__init__()
            self.loss = torch.nn.NLLLoss(reduction='mean', ignore_index=1)
            self.conv = torch.nn.Conv2d(16, C, (3, 3))
            self.m = torch.nn.LogSoftmax(dim=1)

        def forward(self, input, target):
            output = self.loss(self.m(self.conv(input)), target)
            return output

    N, C = 5, 4
    input = torch.randn(N, 16, 10, 10)
    target = torch.empty(N, 8, 8, dtype=torch.long).random_(0, C)
    self.run_test(NLLModel(), (input, target))
@skipIfUnsupportedMinOpsetVersion(12)
@disableScriptTest()  # shape/type inference
def test_nllloss_2d_mean_ignore_index_weights(self):
    # Combines per-class weights with a non-default ignore_index.
    class NLLModel(torch.nn.Module):
        def __init__(self):
            super(NLLModel, self).__init__()
            self.loss = torch.nn.NLLLoss(reduction='mean', weight=torch.randn(C), ignore_index=1)
            self.conv = torch.nn.Conv2d(16, C, (3, 3))
            self.m = torch.nn.LogSoftmax(dim=1)

        def forward(self, input, target):
            output = self.loss(self.m(self.conv(input)), target)
            return output

    N, C = 5, 4
    input = torch.randn(N, 16, 10, 10)
    target = torch.empty(N, 8, 8, dtype=torch.long).random_(0, C)
    self.run_test(NLLModel(), (input, target))
def test_torch_mm(self):
    """Plain 2-D matrix multiply (torch.mm) export."""
    class M(torch.nn.Module):
        def forward(self, mat1, mat2):
            return torch.mm(mat1, mat2)

    a = torch.randn(2, 3)
    b = torch.randn(3, 3)
    self.run_test(M(), input=(a, b))
@skipIfUnsupportedMinOpsetVersion(9)  # Because where op is not supported for opset < 9.
def test_where_with_bool_tensor(self):
    """torch.where whose condition is produced by a comparison op."""
    class M(torch.nn.Module):
        def forward(self, mat1, mat2):
            return torch.where(mat1 > 0, mat1, mat2)

    a = torch.randn(2, 3)
    b = torch.ones(2, 3)
    self.run_test(M(), input=(a, b))
@skipIfUnsupportedMinOpsetVersion(9)  # Because where op is not supported for opset < 9.
def test_where_with_byte_tensor(self):
    """torch.where with an explicit uint8 (byte) condition tensor."""
    class M(torch.nn.Module):
        def forward(self, cond, mat1, mat2):
            return torch.where(cond, mat1, mat2)

    cond = torch.ones(2, 3, dtype=torch.uint8)
    cond[1, 2] = 0  # make the condition non-trivial
    a = torch.randn(2, 3)
    b = torch.ones(2, 3)
    self.run_test(M(), input=(cond, a, b))
def test_dropout(self):
    """Dropout exports as an identity in inference mode."""
    class M(torch.nn.Module):
        def __init__(self):
            super(M, self).__init__()
            self.dropout = torch.nn.Dropout(0.3)

        def forward(self, x):
            return self.dropout(x)

    data = torch.randn(10, 3, 53)
    self.run_test(M(), (data))
def test_shape_constant_fold(self):
    """A registered buffer's static shape should constant-fold during export."""
    class ShapeModule(torch.nn.Module):
        def __init__(self):
            super(ShapeModule, self).__init__()
            self.register_buffer("weight", torch.ones(5))

        def forward(self, x):
            dim0 = self.weight.shape[0]
            return x + dim0

    data = torch.randn(2, 5)
    self.run_test(ShapeModule(), (data,), rtol=1e-3, atol=1e-5)
@skipIfUnsupportedMinOpsetVersion(12)
def test_celu(self):
    """CELU with an explicit alpha=1.0."""
    class Celu(torch.nn.Module):
        def __init__(self):
            super(Celu, self).__init__()
            self.celu = torch.nn.CELU(alpha=1.0)

        def forward(self, input):
            return self.celu(input)

    self.run_test(Celu(), (torch.randn(2),))
@skipIfUnsupportedMinOpsetVersion(12)
def test_celu_default(self):
    """CELU with its default alpha."""
    class Celu(torch.nn.Module):
        def __init__(self):
            super(Celu, self).__init__()
            self.celu = torch.nn.CELU()

        def forward(self, input):
            return self.celu(input)

    self.run_test(Celu(), (torch.randn(2),))
@skipIfUnsupportedMinOpsetVersion(12)
def test_celu_alpha(self):
    """CELU with a non-default alpha=2.0."""
    class Celu(torch.nn.Module):
        def __init__(self):
            super(Celu, self).__init__()
            self.celu = torch.nn.CELU(alpha=2.)

        def forward(self, input):
            return self.celu(input)

    self.run_test(Celu(), (torch.randn(2),))
@skipIfUnsupportedMinOpsetVersion(12)
def test_celu_cast(self):
    """CELU on a float64 input, exercising the cast in the symbolic."""
    class Celu(torch.nn.Module):
        def __init__(self):
            super(Celu, self).__init__()
            self.celu = torch.nn.CELU()

        def forward(self, input):
            return self.celu(input)

    data = torch.randn(2, 5, 7, dtype=torch.float64)
    self.run_test(Celu(), (data,))
@skipIfUnsupportedMinOpsetVersion(9)
def test_where(self):
    """torch.where with broadcasting across all three operands."""
    class Model(torch.nn.Module):
        def forward(self, cond, input, other):
            return torch.where(cond, input, other)

    cond = torch.randint(0, 1, (2, 3, 4), dtype=torch.bool)
    a = torch.randn(2, 1, 4)
    b = torch.ones(2, 3, 1)
    self.run_test(Model(), (cond, a, b))
@skipIfUnsupportedMinOpsetVersion(9)
@disableScriptTest()  # symbolic update needed for unbind: ONNX export of unbind with dynamic number of outputs
def test_where_condition(self):
    """Single-argument torch.where (nonzero coordinates), stacked along dim 1."""
    class Model1(torch.nn.Module):
        def forward(self, input):
            return torch.stack(torch.where(input > 0.5), dim=1)

    mask = torch.randint(0, 2, (2, 3, 4), dtype=bool)
    self.run_test(Model1(), (mask))

    class Model2(torch.nn.Module):
        def forward(self, input, other):
            return torch.stack(torch.where(input > other), dim=1)

    lo = torch.randint(0, 1, (2, 3, 4), dtype=bool)
    hi = torch.randint(1, 2, (2, 3, 4), dtype=bool)
    self.run_test(Model2(), (lo, hi))
def test_empty_branch(self):
    # Scripted if/else with deliberately empty (pass) branches: the exporter
    # must handle subgraphs that contribute nothing.
    class EmptyBranchModel(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, input):
            out = input + 1
            if out.dim() > 2:
                if out.dim() > 3:
                    out += 3
                else:
                    pass
            else:
                pass
            return out

    x = torch.randn(1, 2, 3, requires_grad=True)
    self.run_test(EmptyBranchModel(), x)
@skipIfONNXShapeInference(False)
@skipIfUnsupportedMinOpsetVersion(11)
def test_if_transpose(self):
    # Scripted control flow whose branches return differently-transposed
    # tensors; the output must stay dynamic in both dimensions.
    class IfModel(torch.nn.Module):
        def forward(self, x):
            x = x.transpose(0, 1)
            if x.size(0) == 2:
                return x.transpose(0, 1)
            else:
                return x

    x = torch.randn(2, 3)
    self.run_test(torch.jit.script(IfModel()), x,
                  output_names=['output_1'],
                  dynamic_axes={'output_1': [0, 1]})
def test_onnx_proto_checker(self):
    """A corrupted ir_version must be rejected by the ONNX proto checker."""
    class Model(torch.nn.Module):
        def __init__(self):
            super(Model, self).__init__()

        def forward(self, x):
            return 2 * x

    buf = io.BytesIO()
    torch.onnx._export(Model(), torch.randn(1, 2, 3, requires_grad=True), buf)
    model = onnx.load(buf)
    model.ir_version = 0  # invalid: real IR versions start at 1

    def check_proto():
        torch._C._check_onnx_proto(model.SerializeToString())

    self.assertRaises(RuntimeError, check_proto)
@disableScriptTest()  # dtype mismatch
def test_split_tensor_scalar(self):
    """torch.split with a traced, data-dependent split size (x.size(1))."""
    class SplitModel(torch.nn.Module):
        def forward(self, x):
            return torch.split(x, x.size(1))

    data = torch.randn(1, 2, 3, requires_grad=True)
    self.run_test(SplitModel(), data)
def test_split_tensor_multi(self):
    """torch.split with a float tensor of split sizes must raise TypeError.

    BUG FIX: the original ``run_model`` called ``SplitModel(x)`` — the
    TypeError it caught came from ``nn.Module.__init__`` receiving an
    unexpected positional argument, so forward() (and torch.split) was
    never exercised and the test passed vacuously. Instantiate first,
    then call the model.
    """
    class SplitModel(torch.nn.Module):
        def forward(self, x):
            return torch.split(x, torch.ones(3))

    x = torch.randn(1, 2, 3, requires_grad=True)

    def run_model():
        SplitModel()(x)

    self.assertRaises(TypeError, run_model)
def _dispatch_rnn_test(self, name, *args, **kwargs):
    """Route an RNN export test to the matching helper; unknown names are a no-op."""
    dispatch = {
        'elman': self._elman_rnn_test,
        'lstm': self._lstm_test,
        'gru': self._gru_test,
    }
    if name in dispatch:
        dispatch[name](*args, **kwargs)
def _elman_rnn_test(self, layers, nonlinearity, bidirectional,
                    initial_state, packed_sequence, dropout):
    """Build an Elman RNN per the option tuple and export-check it twice.

    packed_sequence: 0 = padded input, 1 = packed, 2 = packed with
    batch_first. The model is run at RNN_BATCH_SIZE and again at
    RNN_BATCH_SIZE + 1 to verify the export handles a new batch size.
    """
    batch_first = packed_sequence == 2  # idiom fix: was `True if ... else False`
    model = torch.nn.RNN(RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, layers, nonlinearity=nonlinearity,
                         bidirectional=bidirectional, dropout=dropout, batch_first=batch_first)

    if packed_sequence == 1:
        model = RnnModelWithPackedSequence(model, False)
    if packed_sequence == 2:
        model = RnnModelWithPackedSequence(model, True)

    def make_input(batch_size):
        # pack_padded_sequence requires descending sequence lengths.
        seq_lengths = np.random.randint(1, RNN_SEQUENCE_LENGTH + 1, size=batch_size)
        seq_lengths = sorted(map(int, seq_lengths), reverse=True)  # was list(reversed(sorted(...)))
        inputs = [torch.randn(l, RNN_INPUT_SIZE) for l in seq_lengths]
        inputs = rnn_utils.pad_sequence(inputs, batch_first=batch_first)
        inputs = [inputs]

        directions = 2 if bidirectional else 1
        if initial_state:
            h0 = torch.randn(directions * layers, batch_size, RNN_HIDDEN_SIZE)
            inputs.append(h0)
        if packed_sequence != 0:
            inputs.append(torch.IntTensor(seq_lengths))
        if len(inputs) == 1:
            input = inputs[0]
        else:
            input = tuple(inputs)
        return input

    input = make_input(RNN_BATCH_SIZE)
    self.run_test(model, input, batch_size=RNN_BATCH_SIZE)

    # test that the model still runs with a different batch size
    other_input = make_input(RNN_BATCH_SIZE + 1)
    self.run_test(model, other_input, batch_size=RNN_BATCH_SIZE + 1)
def _lstm_test(self, layers, bidirectional, initial_state,
               packed_sequence, dropout):
    """LSTM variant of the RNN export check; see _elman_rnn_test for the
    packed_sequence encoding and the two-batch-size strategy."""
    batch_first = packed_sequence == 2  # idiom fix: was `True if ... else False`
    model = LstmFlatteningResult(
        RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, layers,
        bidirectional=bidirectional, dropout=dropout, batch_first=batch_first)
    if packed_sequence == 1:
        model = RnnModelWithPackedSequence(model, False)
    if packed_sequence == 2:
        model = RnnModelWithPackedSequence(model, True)

    def make_input(batch_size):
        # pack_padded_sequence requires descending sequence lengths.
        seq_lengths = np.random.randint(1, RNN_SEQUENCE_LENGTH + 1, size=batch_size)
        seq_lengths = sorted(map(int, seq_lengths), reverse=True)  # was list(reversed(sorted(...)))
        inputs = [torch.randn(l, RNN_INPUT_SIZE) for l in seq_lengths]
        inputs = rnn_utils.pad_sequence(inputs, batch_first=batch_first)
        inputs = [inputs]

        directions = 2 if bidirectional else 1
        if initial_state:
            # LSTM takes its initial state as an (h0, c0) pair.
            h0 = torch.randn(directions * layers, batch_size, RNN_HIDDEN_SIZE)
            c0 = torch.randn(directions * layers, batch_size, RNN_HIDDEN_SIZE)
            inputs.append((h0, c0))
        if packed_sequence != 0:
            inputs.append(torch.IntTensor(seq_lengths))
        if len(inputs) == 1:
            input = inputs[0]
        else:
            input = tuple(inputs)
        return input

    input = make_input(RNN_BATCH_SIZE)
    self.run_test(model, input, batch_size=RNN_BATCH_SIZE)

    # test that the model still runs with a different batch size
    other_input = make_input(RNN_BATCH_SIZE + 1)
    self.run_test(model, other_input, batch_size=RNN_BATCH_SIZE + 1)
def _gru_test(self, layers, bidirectional, initial_state,
              packed_sequence, dropout):
    """GRU variant of the RNN export check; see _elman_rnn_test for the
    packed_sequence encoding and the two-batch-size strategy."""
    batch_first = packed_sequence == 2  # idiom fix: was `True if ... else False`
    model = torch.nn.GRU(RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, layers, bidirectional=bidirectional, dropout=dropout,
                         batch_first=batch_first)
    if packed_sequence == 1:
        model = RnnModelWithPackedSequence(model, False)
    if packed_sequence == 2:
        model = RnnModelWithPackedSequence(model, True)

    def make_input(batch_size):
        # pack_padded_sequence requires descending sequence lengths.
        seq_lengths = np.random.randint(1, RNN_SEQUENCE_LENGTH + 1, size=batch_size)
        seq_lengths = sorted(map(int, seq_lengths), reverse=True)  # was list(reversed(sorted(...)))
        inputs = [torch.randn(l, RNN_INPUT_SIZE) for l in seq_lengths]
        inputs = rnn_utils.pad_sequence(inputs, batch_first=batch_first)
        inputs = [inputs]

        directions = 2 if bidirectional else 1
        if initial_state:
            h0 = torch.randn(directions * layers, batch_size, RNN_HIDDEN_SIZE)
            inputs.append(h0)
        if packed_sequence != 0:
            inputs.append(torch.IntTensor(seq_lengths))
        if len(inputs) == 1:
            input = inputs[0]
        else:
            input = tuple(inputs)
        return input

    input = make_input(RNN_BATCH_SIZE)
    self.run_test(model, input, batch_size=RNN_BATCH_SIZE)

    # test that the model still runs with a different batch size
    other_input = make_input(RNN_BATCH_SIZE + 1)
    self.run_test(model, other_input, batch_size=RNN_BATCH_SIZE + 1)
@skipIfUnsupportedMinOpsetVersion(10)
def test_fake_quantize_per_tensor(self):
    """fake_quantize_per_tensor_affine with signed 8-bit quantization params."""
    class FakeQuantizePerTensorModel(torch.nn.Module):
        def forward(self, input):
            scale = 1. / 127
            zero_point = 0
            quant_min = -128
            quant_max = 127
            return torch.fake_quantize_per_tensor_affine(input, scale, zero_point, quant_min, quant_max)

    data = torch.randn(6, 4, 3, 3)
    self.run_test(FakeQuantizePerTensorModel(), (data))
@skipIfUnsupportedMinOpsetVersion(12)
def test_dropout_training(self):
    # Exported in TRAINING mode, dropout must actually perturb values:
    # the ORT output cannot be elementwise-equal to the input.
    class MyModule(torch.nn.Module):
        def __init__(self):
            super(MyModule, self).__init__()
            self.dropout = torch.nn.Dropout(0.4)

        def forward(self, x):
            dropout = self.dropout(x)
            return dropout

    model = MyModule()
    x = torch.randn(10)

    model.train()

    ort_sess = convert_to_onnx(model, input=(x,), opset_version=self.opset_version,
                               training=torch.onnx.TrainingMode.TRAINING)
    ort_outs = run_ort(ort_sess, input=(x,))
    assert not torch.all(torch.eq(x, torch.from_numpy(ort_outs[0])))
@skipIfUnsupportedMinOpsetVersion(12)
def test_dropout_training_zero(self):
    # Statistical check: the fraction of zeroed elements produced by ORT
    # and by PyTorch dropout in training mode must agree within 1%.
    class MyModule(torch.nn.Module):
        def __init__(self):
            super(MyModule, self).__init__()
            self.dropout = torch.nn.Dropout(0.5)

        def forward(self, x):
            dropout = self.dropout(x)
            return dropout

    model = MyModule()

    # ensure there are no zeros in the input
    x = torch.randn(10, 3, 128, 128)
    y = x.numpy()
    y_mask = np.where(y == 0, 1, y)
    input = torch.from_numpy(y_mask)
    nb_elements = torch.numel(input)

    model.train()

    ort_sess = convert_to_onnx(model, input=(x,), opset_version=self.opset_version,
                               training=torch.onnx.TrainingMode.TRAINING)
    # NOTE(review): ORT is fed the raw `x` while the PyTorch side below runs
    # on the zero-masked `input` — presumably harmless since randn almost
    # never yields exact zeros, but confirm the two paths were meant to
    # receive the same data.
    ort_outs = run_ort(ort_sess, input=(x,))
    y = model(input)
    output = y.cpu().numpy()

    ort_mask = np.where(ort_outs[0] != 0, 1, 0)
    pyt_mask = np.where(output != 0, 1, 0)

    ratio_pytorch = np.sum(pyt_mask) / nb_elements
    ratio_ort = np.sum(ort_mask) / nb_elements

    np.testing.assert_allclose(ratio_pytorch, ratio_ort, rtol=0.01, atol=0.01)
def test_conv_bn(self):
    # Conv+BatchNorm exported once in TRAINING mode and once in EVAL mode;
    # the two ORT sessions' outputs are compared elementwise.
    # NOTE(review): agreement within atol=1e-7 between batch-stat (training)
    # and running-stat (eval) BN relies on the model being freshly
    # initialized — confirm intent if tolerances ever drift.
    class MyModule(torch.nn.Module):
        def __init__(self):
            super(MyModule, self).__init__()
            self.conv = torch.nn.Conv2d(3, 16, kernel_size=1, stride=2, padding=3, bias=True)
            self.bn = torch.nn.BatchNorm2d(16, affine=True)

        def forward(self, x):
            x = self.conv(x)
            bn = self.bn(x)
            return bn

    model = MyModule()
    x = torch.randn(10, 3, 128, 128)

    ort_sess1 = convert_to_onnx(model, input=(x,), opset_version=self.opset_version,
                                training=torch.onnx.TrainingMode.TRAINING)
    ort_outs1 = run_ort(ort_sess1, input=(x,))
    ort_sess2 = convert_to_onnx(model, input=(x,), opset_version=self.opset_version,
                                training=torch.onnx.TrainingMode.EVAL)
    ort_outs2 = run_ort(ort_sess2, input=(x,))
    [np.testing.assert_allclose(ort_out1, ort_out2, atol=1e-7, rtol=0.001) for ort_out1, ort_out2 in
     zip(ort_outs1, ort_outs2)]
def test_multiple_conv_bn(self):
    # A ResNet-stem-like stack (conv/bn/relu/maxpool + two more conv/bn
    # pairs, with bn2 reused twice) exported in TRAINING vs EVAL mode;
    # the two ORT sessions must agree elementwise.
    class MyModule(torch.nn.Module):
        def __init__(self):
            super(MyModule, self).__init__()
            self.conv1 = torch.nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
            self.conv2 = torch.nn.Conv2d(64, 2, kernel_size=1, stride=1, padding=0, bias=False)
            self.conv3 = torch.nn.Conv2d(2, 2, kernel_size=3, stride=1, padding=1, bias=False)
            self.bn = torch.nn.BatchNorm2d(64)
            self.bn2 = torch.nn.BatchNorm2d(2)
            self.relu = torch.nn.ReLU(inplace=True)
            self.maxpool = torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        def forward(self, x):
            x = self.conv1(x)
            x = self.bn(x)
            x = self.relu(x)
            x = self.maxpool(x)
            x = self.conv2(x)
            x = self.bn2(x)
            x = self.relu(x)
            x = self.conv3(x)
            x = self.bn2(x)
            x = self.relu(x)
            return x

    model = MyModule()
    x = torch.randn(2, 3, 224, 224)

    ort_sess1 = convert_to_onnx(model, input=(x,), opset_version=self.opset_version,
                                training=torch.onnx.TrainingMode.TRAINING)
    ort_outs1 = run_ort(ort_sess1, input=(x,))
    ort_sess2 = convert_to_onnx(model, input=(x,), opset_version=self.opset_version,
                                training=torch.onnx.TrainingMode.EVAL)
    ort_outs2 = run_ort(ort_sess2, input=(x,))
    [np.testing.assert_allclose(ort_out1, ort_out2, atol=1e-7, rtol=0.001) for ort_out1, ort_out2 in
     zip(ort_outs1, ort_outs2)]
def make_test(name, base, layer, bidirectional, initial_state,
              variable_length, dropout,
              **extra_kwargs):
    # Synthesize one RNN export test from an option tuple and attach it to
    # TestONNXRuntime under a generated, human-readable name. Each option
    # is a (value, label) pair: value drives the test, label names it.
    test_name = str('_'.join([
        'test', name, layer[1],
        bidirectional[1], initial_state[1],
        variable_length[1], dropout[1]
    ]))

    # Cannot export with older opsets because of 'ConstantFill' op
    # ConstantFill was a temp op removed at opset 8. This is no longer supported by onnxruntime
    @disableScriptTest()  # Test code not scriptable
    @skipIfUnsupportedMinOpsetVersion(9)
    def f(self):
        self._dispatch_rnn_test(
            base,
            layers=layer[0],
            bidirectional=bidirectional[0],
            initial_state=initial_state[0],
            packed_sequence=variable_length[0],
            dropout=dropout[0],
            **extra_kwargs)

    f.__name__ = test_name
    setattr(TestONNXRuntime, f.__name__, f)
def setup_rnn_tests():
    """Generate the full RNN export test matrix (layer count x direction x
    initial state x sequence-length handling x dropout x cell type) and
    register every combination on TestONNXRuntime via make_test."""
    layers_opts = [
        (1, 'unilayer'),
        (3, 'trilayer')
    ]
    bidirectional_opts = [
        (False, 'forward'),
        (True, 'bidirectional')
    ]
    initial_state_opts = [
        (True, 'with_initial_state'),
        (False, 'no_initial_state')
    ]
    variable_length_opts = [
        (0, 'without_sequence_lengths'),
        (1, 'with_variable_length_sequences'),
        (2, 'with_batch_first_sequence_lengths')
    ]
    dropout_opts = [
        (0.2, 'with_dropout'),
        (0.0, 'without_dropout')
    ]
    test_count = 0
    for (layer, bidirectional, initial_state, variable_length, dropout) in \
            itertools.product(
                layers_opts,
                bidirectional_opts,
                initial_state_opts,
                variable_length_opts,
                dropout_opts,):
        # Four cell variants per option combination: 2*2*2*3*2*4 = 192 tests.
        for base, name, extra_kwargs in (
                ('elman', 'elman_relu', {'nonlinearity': u'relu'}),
                ('elman', 'elman_tanh', {'nonlinearity': u'tanh'}),
                ('lstm', 'lstm', {}),
                ('gru', 'gru', {})
        ):
            make_test(name, base, layer, bidirectional, initial_state,
                      variable_length, dropout,
                      **extra_kwargs)
            test_count += 1

    # sanity check that a representative example does exist
    TestONNXRuntime.test_gru_trilayer_forward_with_initial_state_without_sequence_lengths_with_dropout

    # make sure no one accidentally disables all the tests without
    # noticing
    if test_count != 192:
        raise ValueError('Expected 192 tests but found {}'.format(test_count))


setup_rnn_tests()
# Each variant below re-runs the whole TestONNXRuntime suite with different
# export settings by cloning its __dict__ into a new unittest.TestCase
# subclass and overriding the relevant class attributes.

# opset 7 tests
TestONNXRuntime_opset7 = type(str("TestONNXRuntime_opset7"),
                              (unittest.TestCase,),
                              dict(TestONNXRuntime.__dict__, opset_version=7))

# opset 8 tests
TestONNXRuntime_opset8 = type(str("TestONNXRuntime_opset8"),
                              (unittest.TestCase,),
                              dict(TestONNXRuntime.__dict__, opset_version=8))

# opset 10 tests
TestONNXRuntime_opset10 = type(str("TestONNXRuntime_opset10"),
                               (unittest.TestCase,),
                               dict(TestONNXRuntime.__dict__, opset_version=10))

# opset 11 tests
TestONNXRuntime_opset11 = type(str("TestONNXRuntime_opset11"),
                               (unittest.TestCase,),
                               dict(TestONNXRuntime.__dict__, opset_version=11))

# opset 12 tests
TestONNXRuntime_opset12 = type(str("TestONNXRuntime_opset12"),
                               (unittest.TestCase,),
                               dict(TestONNXRuntime.__dict__, opset_version=12))

# opset 9 tests, with keep_initializers_as_inputs=False for
# IR version 4 style export.
TestONNXRuntime_opset9_IRv4 = type(str("TestONNXRuntime_opset9_IRv4"),
                                   (unittest.TestCase,),
                                   dict(TestONNXRuntime.__dict__,
                                        keep_initializers_as_inputs=False))

# opset 10 tests, with keep_initializers_as_inputs=False for
# IR version 4 style export.
TestONNXRuntime_opset10_IRv4 = type(str("TestONNXRuntime_opset10_IRv4"),
                                    (unittest.TestCase,),
                                    dict(TestONNXRuntime.__dict__, opset_version=10,
                                         keep_initializers_as_inputs=False))

# opset 11 tests, with keep_initializers_as_inputs=False for
# IR version 4 style export.
TestONNXRuntime_opset11_IRv4 = type(str("TestONNXRuntime_opset11_IRv4"),
                                    (unittest.TestCase,),
                                    dict(TestONNXRuntime.__dict__, opset_version=11,
                                         keep_initializers_as_inputs=False))

# opset 12 tests, with keep_initializers_as_inputs=False for
# IR version 4 style export.
TestONNXRuntime_opset12_IRv4 = type(str("TestONNXRuntime_opset12_IRv4"),
                                    (unittest.TestCase,),
                                    dict(TestONNXRuntime.__dict__, opset_version=12,
                                         keep_initializers_as_inputs=False))

# opset 9 tests, with use_new_jit_passes=True for using new jit API,
# and with keep_initializers_as_inputs=False for IR version 4 style export.
TestONNXRuntime_opset9_IRv4_new_jit_API = type(str("TestONNXRuntime_opset9_IRv4_new_jit_API"),
                                               (unittest.TestCase,),
                                               dict(TestONNXRuntime.__dict__,
                                                    keep_initializers_as_inputs=False,
                                                    use_new_jit_passes=True,
                                                    onnx_shape_inference=True))

# opset 12 tests, with use_new_jit_passes=True for using new jit API,
# and keep_initializers_as_inputs=False for IR version 4 style export.
TestONNXRuntime_opset12_IRv4_new_jit_API = type(str("TestONNXRuntime_opset12_IRv4_new_jit_API"),
                                                (unittest.TestCase,),
                                                dict(TestONNXRuntime.__dict__, opset_version=12,
                                                     keep_initializers_as_inputs=False,
                                                     use_new_jit_passes=True,
                                                     onnx_shape_inference=True))

# opset 12 tests, with _onnx_shape_inference=True.
TestONNXRuntime_opset12_onnx_shape_inference = type(str("TestONNXRuntime_opset12_onnx_shape_inference"),
                                                    (unittest.TestCase,),
                                                    dict(TestONNXRuntime.__dict__, opset_version=12,
                                                         onnx_shape_inference=True))

if __name__ == '__main__':
    unittest.main()
| 2.109375 | 2 |
src/cli.py | guionardo/py-housekeeping | 0 | 12758877 | <gh_stars>0
""" CLI usage
# With configuration file: Just
"""
import json
import logging
import sys
import tempfile
from exceptions import JustExitException
def get_arguments(args=sys.argv[1:]):
    """Resolve command-line arguments into a configuration-file path.

    Returns a ``(path, handle)`` tuple: either an explicit ``--config-file``
    path with ``handle`` set to None, or the name of a temporary JSON config
    built from the individual options together with its open
    NamedTemporaryFile (the handle must stay referenced so the file lives
    until the caller is done with it).

    Raises JustExitException carrying the usage text when help is requested
    (or when no options are given at all, since ``help`` is the default).
    """
    option_specs = [
        ArgumentOption('help', 'Help', default=False, default_option=True),
        ArgumentOption('config-file', 'Configuration File', default=None),
        ArgumentOption('folder', 'Source folder', default=None),
        ArgumentOption('max-count', 'Maximum file count in folder', default=0),
        ArgumentOption('max-size', 'Maximum folder size', default=0),
        ArgumentOption('max-age', 'Maximum file age', default=0),
        ArgumentOption('action', 'Action [delete, move, compress]', default=None),
        ArgumentOption('destiny', 'Action destiny folder', default=None),
    ]
    parsed = ParsedArguments(option_specs, args)

    # An explicit configuration file wins over everything else.
    if parsed.config_file:
        return parsed.config_file, None
    if parsed.help:
        raise JustExitException('\n'.join(parsed.show_help()))

    # No config file given: synthesize an equivalent one-folder configuration.
    folder_config = [{
        'folder': parsed.folder,
        'rules': {
            'max_file_count': int(parsed.max_count),
            'max_folder_size': int(parsed.max_size),
            'max_file_age': parsed.max_age
        },
        'action': parsed.action,
        'action_destiny': parsed.destiny
    }]
    tmp = tempfile.NamedTemporaryFile(mode='w')
    json.dump(folder_config, tmp)
    tmp.flush()
    return tmp.name, tmp
class ArgumentOption:
    """A single named command-line option (``--name value``).

    Attributes:
        name: option name as typed on the command line (without ``--``).
        description: human-readable help text.
        optional: False marks the option as mandatory.
        default: value used when the option is absent.
        value: current value (equals ``default`` until parsing assigns one).
        set: True once a parsed argument assigned ``value``.
        default_option: option activated when no other option is given.
    """

    # BUG FIX: this was spelled ``_slots_`` before, which just created an
    # unused class attribute and silently disabled the slots mechanism.
    __slots__ = ['name', 'description', 'optional',
                 'value', 'set', 'default', 'default_option']

    def __init__(self, name, description=None, optional=True, default=True, default_option=False):
        # Validate first so a bad call never leaves a half-initialized object.
        if not name:
            raise ValueError("ArgumentOption 'name' must be informed")
        self.name = name
        self.description = str(description or '')
        self.optional = bool(optional)
        self.default = default
        self.value = default
        self.set = False
        self.default_option = default_option

    def __str__(self):
        """Usage line for this option; brackets mark it as optional."""
        fmt = '[--{0} VALUE] {1} (default={2})' if self.optional else '--{0} VALUE {1} (default={2})'
        return fmt.format(self.name, self.description, self.default)

    def __repr__(self):
        return str({'name': self.name,
                    'description': self.description,
                    'optional': self.optional,
                    'value': self.value,
                    'default': self.default})
class ParsedArguments:
    """Parse ``--key value`` style arguments against a list of ArgumentOption
    objects.

    Parsed values are exposed as attributes: ``pa.max_size`` resolves the
    option named ``max-size`` (underscores map to hyphens on lookup).
    """

    __slots__ = ['_options']

    LOG = logging.getLogger(__name__)

    def __init__(self, options, args=None):
        """
        :param options: list of ArgumentOption (or compatible) objects.
        :param args: list of string arguments; defaults to ``sys.argv[1:]``
            evaluated at call time (FIX: previously captured at import time
            via a mutable-default-style ``args=sys.argv[1:]``).
        :raises ValueError: when a mandatory option is missing or a value
            token appears without a preceding ``--key``.
        """
        args = sys.argv[1:] if args is None else args
        self._options = {option.name: option for option in options}

        opts = self._get_opts(args)

        # Assign parsed values to known options; collect the rest for a warning.
        unknown_options = []
        for key in opts.keys():
            if key in self._options.keys():
                self._options[key].value = opts[key]
                self._options[key].set = True
            else:
                unknown_options.append("--{0}={1}".format(key, opts[key]))

        missing = []
        options_count = 0
        default_option = None
        for key in self._options.keys():
            if self._options[key].default_option:
                default_option = self._options[key]
            if self._options[key].set:
                options_count += 1
            elif not self._options[key].optional:
                missing.append("--{0}".format(key))

        if unknown_options:
            # FIX: warn() is a deprecated alias of warning().
            self.LOG.warning('Unknown options (ignored): %s', unknown_options)
        if missing:
            # BUG FIX: ValueError performs no %-interpolation; format explicitly.
            raise ValueError("Missing mandatory options: {0}".format(missing))
        if options_count == 0 and default_option is not None:
            # No explicit options: activate the designated default (e.g. --help).
            # The None guard avoids an AttributeError when no default exists.
            default_option.value = True

    def _get_opts(self, args):
        """Fold the raw argument list into a ``{key: value}`` dict.

        A ``--key`` with no following value token is recorded as True (flag);
        every value token must be preceded by a key token.
        """
        opts = {}
        key = None
        for arg in args:
            if arg.startswith('--'):  # key
                key = arg[2:]
                opts[key] = opts.get(key, True)
            elif key:  # value for the preceding key
                opts[key] = arg
                key = None
            else:  # value without key
                raise ValueError("Argument without previous key", arg)
        return opts

    def __getattr__(self, name):
        # Guard against infinite recursion if _options itself is looked up
        # before __init__ has assigned it (slots + introspection/pickling).
        if name == '_options':
            raise AttributeError(name)
        if name in self._options.keys():
            return self._options[name].value
        name = name.replace('_', '-')
        if name in self._options.keys():
            return self._options[name].value
        # BUG FIX: previously fell through and returned None for unknown
        # names, hiding attribute typos; raise the standard error instead.
        raise AttributeError(name)

    def show_help(self):
        """Return the usage text as a list of lines (header + one per option)."""
        help_lines = []
        if not self._options:
            help_lines.append('No options defined')
        else:
            help_lines.append('Options:')
            for option in self._options:
                help_lines.append(str(self._options[option]))
        return help_lines
| 2.5 | 2 |
WebApp/app/Geolocalisation_System/midleware.py | YacineHam/GeolocalisationSys | 0 | 12758878 | from django.db import close_old_connections
from rest_framework_simplejwt.tokens import UntypedToken
from rest_framework_simplejwt.exceptions import InvalidToken, TokenError
from jwt import decode as jwt_decode
from django.conf import settings
from django.contrib.auth import get_user_model
from urllib.parse import parse_qs
from app.models import CustomUser
from asgiref.sync import sync_to_async
from channels.db import database_sync_to_async
from django.contrib.auth.models import AnonymousUser
@database_sync_to_async
def get_user(user_id):
    """Fetch the CustomUser with the given id (async-safe via
    database_sync_to_async); fall back to AnonymousUser when absent."""
    try:
        return CustomUser.objects.get(id=user_id)
    except CustomUser.DoesNotExist:
        return AnonymousUser()
class TokenAuthMiddleware:
    """Channels ASGI middleware that authenticates a connection from a JWT
    passed as the ``token`` query-string parameter and stores the resolved
    user in ``scope['user']``."""

    def __init__(self, inner):
        self.inner = inner

    async def __call__(self, scope, receive, send):
        # BUG FIX: the token used to be printed to stdout (leaking raw JWTs
        # into logs) and a missing ``token`` parameter raised a KeyError.
        params = parse_qs(scope["query_string"].decode("utf8"))
        token_values = params.get("token")
        token = token_values[0] if token_values else None
        if token is None:
            # No token supplied: drop the connection, same as an invalid token.
            return None
        try:
            # Validates signature and expiry; raises on failure.
            UntypedToken(token)
        except (InvalidToken, TokenError):
            # NOTE(review): returning None abandons the ASGI call without an
            # explicit close; consider setting scope['user'] = AnonymousUser()
            # and delegating instead -- confirm desired behavior.
            return None
        decoded_data = jwt_decode(token, settings.SECRET_KEY, algorithms=["HS256"])
        scope['user'] = await get_user(int(decoded_data["user_id"]))
        return await self.inner(scope, receive, send)
atcoder/abc179/B.py | Siddhant-K-code/Competitive-Programing-Submissions | 2 | 12758879 | n = int(input())
xy = [map(int, input().split()) for _ in range(n)]
x, y = [list(i) for i in zip(*xy)]
count = 0
buf = 0
for xi, yi in zip(x, y):
if xi == yi:
buf += 1
else:
if buf > count:
count = buf
buf = 0
if buf > count:
count = buf
if count >= 3:
print('Yes')
else:
print('No') | 3.046875 | 3 |
invenio_communities/alembic/5b478fe7ef7f_create_featured_communities_table.py | rekt-hard/invenio-communities | 0 | 12758880 | #
# This file is part of Invenio.
# Copyright (C) 2022 Graz University of Technology.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Create featured communities table"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import mysql
from sqlalchemy_utils import UUIDType
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = 'fbe746957cfc'
branch_labels = ()
depends_on = None
def upgrade():
    """Upgrade database.

    Creates the ``communities_featured`` table: each row marks a community
    (FK to ``communities_metadata``) as featured starting at ``start_date``.
    """
    op.create_table(
        'communities_featured',
        # created/updated use microsecond-precision DATETIME on MySQL.
        sa.Column(
            'created',
            sa.DateTime().with_variant(mysql.DATETIME(fsp=6), 'mysql'),
            nullable=False,
        ),
        sa.Column(
            'updated',
            sa.DateTime().with_variant(mysql.DATETIME(fsp=6), 'mysql'),
            nullable=False,
        ),
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('community_id', UUIDType(), nullable=False),
        sa.Column(
            'start_date',
            sa.DateTime().with_variant(mysql.DATETIME(fsp=6), 'mysql'),
            nullable=False,
        ),
        sa.ForeignKeyConstraint(
            ['community_id'],
            ['communities_metadata.id'],
            name=op.f(
                'fk_communities_featured_community_id_communities_metadata'
            ),
        ),
        sa.PrimaryKeyConstraint('id', name=op.f('pk_communities_featured')),
    )
def downgrade():
    """Downgrade database.

    Reverts upgrade() by dropping the ``communities_featured`` table.
    """
    op.drop_table('communities_featured')
| 1.976563 | 2 |
RI/flask_server/tapi_server/models/tapi_notification_notification.py | arthurMll/TAPI | 57 | 12758881 | <gh_stars>10-100
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from tapi_server.models.base_model_ import Model
from tapi_server.models.tapi_common_global_class import TapiCommonGlobalClass # noqa: F401,E501
from tapi_server.models.tapi_common_layer_protocol_name import TapiCommonLayerProtocolName # noqa: F401,E501
from tapi_server.models.tapi_common_name_and_value import TapiCommonNameAndValue # noqa: F401,E501
from tapi_server.models.tapi_notification_alarm_info import TapiNotificationAlarmInfo # noqa: F401,E501
from tapi_server.models.tapi_notification_name_and_value_change import TapiNotificationNameAndValueChange # noqa: F401,E501
from tapi_server.models.tapi_notification_notification_type import TapiNotificationNotificationType # noqa: F401,E501
from tapi_server.models.tapi_notification_object_type import TapiNotificationObjectType # noqa: F401,E501
from tapi_server.models.tapi_notification_source_indicator import TapiNotificationSourceIndicator # noqa: F401,E501
from tapi_server.models.tapi_notification_tca_info import TapiNotificationTcaInfo # noqa: F401,E501
from tapi_server import util
class TapiNotificationNotification(Model):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

    Do not edit the class manually.
    """
    # NOTE(review): generated boilerplate -- regenerate from the OpenAPI spec
    # instead of editing by hand. Each attribute below follows the standard
    # generated pattern: typed slot in openapi_types, JSON key in
    # attribute_map, and a trivial property getter/setter pair.

    def __init__(self, name=None, uuid=None, target_object_type=None, additional_text=None, event_time_stamp=None, additional_info=None, sequence_number=None, tca_info=None, target_object_identifier=None, notification_type=None, target_object_name=None, layer_protocol_name=None, source_indicator=None, alarm_info=None, changed_attributes=None):  # noqa: E501
        """TapiNotificationNotification - a model defined in OpenAPI

        :param name: The name of this TapiNotificationNotification.  # noqa: E501
        :type name: List[TapiCommonNameAndValue]
        :param uuid: The uuid of this TapiNotificationNotification.  # noqa: E501
        :type uuid: str
        :param target_object_type: The target_object_type of this TapiNotificationNotification.  # noqa: E501
        :type target_object_type: TapiNotificationObjectType
        :param additional_text: The additional_text of this TapiNotificationNotification.  # noqa: E501
        :type additional_text: str
        :param event_time_stamp: The event_time_stamp of this TapiNotificationNotification.  # noqa: E501
        :type event_time_stamp: str
        :param additional_info: The additional_info of this TapiNotificationNotification.  # noqa: E501
        :type additional_info: List[TapiCommonNameAndValue]
        :param sequence_number: The sequence_number of this TapiNotificationNotification.  # noqa: E501
        :type sequence_number: int
        :param tca_info: The tca_info of this TapiNotificationNotification.  # noqa: E501
        :type tca_info: TapiNotificationTcaInfo
        :param target_object_identifier: The target_object_identifier of this TapiNotificationNotification.  # noqa: E501
        :type target_object_identifier: str
        :param notification_type: The notification_type of this TapiNotificationNotification.  # noqa: E501
        :type notification_type: TapiNotificationNotificationType
        :param target_object_name: The target_object_name of this TapiNotificationNotification.  # noqa: E501
        :type target_object_name: List[TapiCommonNameAndValue]
        :param layer_protocol_name: The layer_protocol_name of this TapiNotificationNotification.  # noqa: E501
        :type layer_protocol_name: TapiCommonLayerProtocolName
        :param source_indicator: The source_indicator of this TapiNotificationNotification.  # noqa: E501
        :type source_indicator: TapiNotificationSourceIndicator
        :param alarm_info: The alarm_info of this TapiNotificationNotification.  # noqa: E501
        :type alarm_info: TapiNotificationAlarmInfo
        :param changed_attributes: The changed_attributes of this TapiNotificationNotification.  # noqa: E501
        :type changed_attributes: List[TapiNotificationNameAndValueChange]
        """
        # Maps python attribute -> declared OpenAPI type (used by (de)serialization).
        self.openapi_types = {
            'name': List[TapiCommonNameAndValue],
            'uuid': str,
            'target_object_type': TapiNotificationObjectType,
            'additional_text': str,
            'event_time_stamp': str,
            'additional_info': List[TapiCommonNameAndValue],
            'sequence_number': int,
            'tca_info': TapiNotificationTcaInfo,
            'target_object_identifier': str,
            'notification_type': TapiNotificationNotificationType,
            'target_object_name': List[TapiCommonNameAndValue],
            'layer_protocol_name': TapiCommonLayerProtocolName,
            'source_indicator': TapiNotificationSourceIndicator,
            'alarm_info': TapiNotificationAlarmInfo,
            'changed_attributes': List[TapiNotificationNameAndValueChange]
        }

        # Maps python attribute -> JSON/YAML key in the wire format.
        self.attribute_map = {
            'name': 'name',
            'uuid': 'uuid',
            'target_object_type': 'target-object-type',
            'additional_text': 'additional-text',
            'event_time_stamp': 'event-time-stamp',
            'additional_info': 'additional-info',
            'sequence_number': 'sequence-number',
            'tca_info': 'tca-info',
            'target_object_identifier': 'target-object-identifier',
            'notification_type': 'notification-type',
            'target_object_name': 'target-object-name',
            'layer_protocol_name': 'layer-protocol-name',
            'source_indicator': 'source-indicator',
            'alarm_info': 'alarm-info',
            'changed_attributes': 'changed-attributes'
        }

        self._name = name
        self._uuid = uuid
        self._target_object_type = target_object_type
        self._additional_text = additional_text
        self._event_time_stamp = event_time_stamp
        self._additional_info = additional_info
        self._sequence_number = sequence_number
        self._tca_info = tca_info
        self._target_object_identifier = target_object_identifier
        self._notification_type = notification_type
        self._target_object_name = target_object_name
        self._layer_protocol_name = layer_protocol_name
        self._source_indicator = source_indicator
        self._alarm_info = alarm_info
        self._changed_attributes = changed_attributes

    @classmethod
    def from_dict(cls, dikt) -> 'TapiNotificationNotification':
        """Returns the dict as a model

        :param dikt: A dict.
        :type: dict
        :return: The tapi.notification.Notification of this TapiNotificationNotification.  # noqa: E501
        :rtype: TapiNotificationNotification
        """
        return util.deserialize_model(dikt, cls)

    @property
    def name(self):
        """Gets the name of this TapiNotificationNotification.

        List of names. A property of an entity with a value that is unique in some namespace but may change during the life of the entity. A name carries no semantics with respect to the purpose of the entity.  # noqa: E501

        :return: The name of this TapiNotificationNotification.
        :rtype: List[TapiCommonNameAndValue]
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this TapiNotificationNotification.

        List of names. A property of an entity with a value that is unique in some namespace but may change during the life of the entity. A name carries no semantics with respect to the purpose of the entity.  # noqa: E501

        :param name: The name of this TapiNotificationNotification.
        :type name: List[TapiCommonNameAndValue]
        """
        self._name = name

    @property
    def uuid(self):
        """Gets the uuid of this TapiNotificationNotification.

        UUID: An identifier that is universally unique within an identifier space, where the identifier space is itself globally unique, and immutable. An UUID carries no semantics with respect to the purpose or state of the entity. UUID here uses string representation as defined in RFC 4122. The canonical representation uses lowercase characters. Pattern: [0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-' + '[0-9a-fA-F]{4}-[0-9a-fA-F]{12} Example of a UUID in string representation: f81d4fae-7dec-11d0-a765-00a0c91e6bf6  # noqa: E501

        :return: The uuid of this TapiNotificationNotification.
        :rtype: str
        """
        return self._uuid

    @uuid.setter
    def uuid(self, uuid):
        """Sets the uuid of this TapiNotificationNotification.

        UUID: An identifier that is universally unique within an identifier space, where the identifier space is itself globally unique, and immutable. An UUID carries no semantics with respect to the purpose or state of the entity. UUID here uses string representation as defined in RFC 4122. The canonical representation uses lowercase characters. Pattern: [0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-' + '[0-9a-fA-F]{4}-[0-9a-fA-F]{12} Example of a UUID in string representation: f81d4fae-7dec-11d0-a765-00a0c91e6bf6  # noqa: E501

        :param uuid: The uuid of this TapiNotificationNotification.
        :type uuid: str
        """
        self._uuid = uuid

    @property
    def target_object_type(self):
        """Gets the target_object_type of this TapiNotificationNotification.


        :return: The target_object_type of this TapiNotificationNotification.
        :rtype: TapiNotificationObjectType
        """
        return self._target_object_type

    @target_object_type.setter
    def target_object_type(self, target_object_type):
        """Sets the target_object_type of this TapiNotificationNotification.


        :param target_object_type: The target_object_type of this TapiNotificationNotification.
        :type target_object_type: TapiNotificationObjectType
        """
        self._target_object_type = target_object_type

    @property
    def additional_text(self):
        """Gets the additional_text of this TapiNotificationNotification.

        none  # noqa: E501

        :return: The additional_text of this TapiNotificationNotification.
        :rtype: str
        """
        return self._additional_text

    @additional_text.setter
    def additional_text(self, additional_text):
        """Sets the additional_text of this TapiNotificationNotification.

        none  # noqa: E501

        :param additional_text: The additional_text of this TapiNotificationNotification.
        :type additional_text: str
        """
        self._additional_text = additional_text

    @property
    def event_time_stamp(self):
        """Gets the event_time_stamp of this TapiNotificationNotification.

        none  # noqa: E501

        :return: The event_time_stamp of this TapiNotificationNotification.
        :rtype: str
        """
        return self._event_time_stamp

    @event_time_stamp.setter
    def event_time_stamp(self, event_time_stamp):
        """Sets the event_time_stamp of this TapiNotificationNotification.

        none  # noqa: E501

        :param event_time_stamp: The event_time_stamp of this TapiNotificationNotification.
        :type event_time_stamp: str
        """
        self._event_time_stamp = event_time_stamp

    @property
    def additional_info(self):
        """Gets the additional_info of this TapiNotificationNotification.

        none  # noqa: E501

        :return: The additional_info of this TapiNotificationNotification.
        :rtype: List[TapiCommonNameAndValue]
        """
        return self._additional_info

    @additional_info.setter
    def additional_info(self, additional_info):
        """Sets the additional_info of this TapiNotificationNotification.

        none  # noqa: E501

        :param additional_info: The additional_info of this TapiNotificationNotification.
        :type additional_info: List[TapiCommonNameAndValue]
        """
        self._additional_info = additional_info

    @property
    def sequence_number(self):
        """Gets the sequence_number of this TapiNotificationNotification.

        A monotonous increasing sequence number associated with the notification. The exact semantics of how this sequence number is assigned (per channel or subscription or source or system) is left undefined.  # noqa: E501

        :return: The sequence_number of this TapiNotificationNotification.
        :rtype: int
        """
        return self._sequence_number

    @sequence_number.setter
    def sequence_number(self, sequence_number):
        """Sets the sequence_number of this TapiNotificationNotification.

        A monotonous increasing sequence number associated with the notification. The exact semantics of how this sequence number is assigned (per channel or subscription or source or system) is left undefined.  # noqa: E501

        :param sequence_number: The sequence_number of this TapiNotificationNotification.
        :type sequence_number: int
        """
        self._sequence_number = sequence_number

    @property
    def tca_info(self):
        """Gets the tca_info of this TapiNotificationNotification.


        :return: The tca_info of this TapiNotificationNotification.
        :rtype: TapiNotificationTcaInfo
        """
        return self._tca_info

    @tca_info.setter
    def tca_info(self, tca_info):
        """Sets the tca_info of this TapiNotificationNotification.


        :param tca_info: The tca_info of this TapiNotificationNotification.
        :type tca_info: TapiNotificationTcaInfo
        """
        self._tca_info = tca_info

    @property
    def target_object_identifier(self):
        """Gets the target_object_identifier of this TapiNotificationNotification.

        none  # noqa: E501

        :return: The target_object_identifier of this TapiNotificationNotification.
        :rtype: str
        """
        return self._target_object_identifier

    @target_object_identifier.setter
    def target_object_identifier(self, target_object_identifier):
        """Sets the target_object_identifier of this TapiNotificationNotification.

        none  # noqa: E501

        :param target_object_identifier: The target_object_identifier of this TapiNotificationNotification.
        :type target_object_identifier: str
        """
        self._target_object_identifier = target_object_identifier

    @property
    def notification_type(self):
        """Gets the notification_type of this TapiNotificationNotification.


        :return: The notification_type of this TapiNotificationNotification.
        :rtype: TapiNotificationNotificationType
        """
        return self._notification_type

    @notification_type.setter
    def notification_type(self, notification_type):
        """Sets the notification_type of this TapiNotificationNotification.


        :param notification_type: The notification_type of this TapiNotificationNotification.
        :type notification_type: TapiNotificationNotificationType
        """
        self._notification_type = notification_type

    @property
    def target_object_name(self):
        """Gets the target_object_name of this TapiNotificationNotification.

        none  # noqa: E501

        :return: The target_object_name of this TapiNotificationNotification.
        :rtype: List[TapiCommonNameAndValue]
        """
        return self._target_object_name

    @target_object_name.setter
    def target_object_name(self, target_object_name):
        """Sets the target_object_name of this TapiNotificationNotification.

        none  # noqa: E501

        :param target_object_name: The target_object_name of this TapiNotificationNotification.
        :type target_object_name: List[TapiCommonNameAndValue]
        """
        self._target_object_name = target_object_name

    @property
    def layer_protocol_name(self):
        """Gets the layer_protocol_name of this TapiNotificationNotification.


        :return: The layer_protocol_name of this TapiNotificationNotification.
        :rtype: TapiCommonLayerProtocolName
        """
        return self._layer_protocol_name

    @layer_protocol_name.setter
    def layer_protocol_name(self, layer_protocol_name):
        """Sets the layer_protocol_name of this TapiNotificationNotification.


        :param layer_protocol_name: The layer_protocol_name of this TapiNotificationNotification.
        :type layer_protocol_name: TapiCommonLayerProtocolName
        """
        self._layer_protocol_name = layer_protocol_name

    @property
    def source_indicator(self):
        """Gets the source_indicator of this TapiNotificationNotification.


        :return: The source_indicator of this TapiNotificationNotification.
        :rtype: TapiNotificationSourceIndicator
        """
        return self._source_indicator

    @source_indicator.setter
    def source_indicator(self, source_indicator):
        """Sets the source_indicator of this TapiNotificationNotification.


        :param source_indicator: The source_indicator of this TapiNotificationNotification.
        :type source_indicator: TapiNotificationSourceIndicator
        """
        self._source_indicator = source_indicator

    @property
    def alarm_info(self):
        """Gets the alarm_info of this TapiNotificationNotification.


        :return: The alarm_info of this TapiNotificationNotification.
        :rtype: TapiNotificationAlarmInfo
        """
        return self._alarm_info

    @alarm_info.setter
    def alarm_info(self, alarm_info):
        """Sets the alarm_info of this TapiNotificationNotification.


        :param alarm_info: The alarm_info of this TapiNotificationNotification.
        :type alarm_info: TapiNotificationAlarmInfo
        """
        self._alarm_info = alarm_info

    @property
    def changed_attributes(self):
        """Gets the changed_attributes of this TapiNotificationNotification.

        none  # noqa: E501

        :return: The changed_attributes of this TapiNotificationNotification.
        :rtype: List[TapiNotificationNameAndValueChange]
        """
        return self._changed_attributes

    @changed_attributes.setter
    def changed_attributes(self, changed_attributes):
        """Sets the changed_attributes of this TapiNotificationNotification.

        none  # noqa: E501

        :param changed_attributes: The changed_attributes of this TapiNotificationNotification.
        :type changed_attributes: List[TapiNotificationNameAndValueChange]
        """
        self._changed_attributes = changed_attributes
| 1.65625 | 2 |
setup.py | kyungjunleeme/azure_blob_check | 1 | 12758882 | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name='azure_blob_check',
version='2.0',
description='azure blob filelist check',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/kyungjunleeme/azure_blob_check',
download_url='https://github.com/kyungjunleeme/azure_blob_check/archive/main.zip',
packages=find_packages(exclude=['docs', 'tests*']),
install_requires=['azure-storage-blob', 'pytz', 'pandas', 'openpyxl'],
entry_points={'console_scripts': [
'blob_check=azure_blob_check.blob_check:main']},
keywords=['azure_blob', 'blob_list'],
python_requires='>=3.6',
zip_safe=False,
classifiers=[
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
]
)
| 1.210938 | 1 |
Compiler/Codegen/PyDefines/Generate.py | EmilPi/PuzzleLib | 52 | 12758883 | <gh_stars>10-100
import os
from PuzzleLib.Compiler.Toolchain import copySource
def generatePyDefines(path):
    """Copy the PyDefines.h template that sits next to this module into
    *path* under the name PyDefines.gen.h."""
    source = os.path.join(os.path.dirname(__file__), "PyDefines.h")
    target = os.path.join(path, "PyDefines.gen.h")
    copySource(source, target)
torchpcp/modules/XTransformation.py | Obarads/torch_point_cloud | 1 | 12758884 | <reponame>Obarads/torch_point_cloud
import torch
from torch import nn
from torchpcp.modules.Layer import Conv2D
class XTransform(nn.Module):
    """Produce a learned k x k transformation matrix per point neighborhood.

    NOTE(review): appears to implement the X-transformation of PointCNN's
    X-Conv operator (torchpcp) -- confirm against the paper/reference impl.
    """

    def __init__(self, in_channel, k):
        # in_channel: number of input feature channels C
        # k: neighborhood size (also the side of the square transform matrix)
        super().__init__()
        # (1, k) kernel collapses the neighbor axis: [B, C, N, k] -> [B, k*k, N, 1]
        self.conv1 = Conv2D(in_channel, k*k, (1,k)) # [B, k*k, N, 1] # pf.conv2d is not this order
        # Grouped 1x1 convs (groups=k) refine the k*k maps group-wise.
        self.conv2 = Conv2D(k*k, k*k, (1,1), conv_args={"groups":k}) # DepthwiseConv2D(k, k, (1, k)) & convert(x)
        # Final layer has no activation (act=None): raw matrix entries.
        self.conv3 = Conv2D(k*k, k*k, (1,1), act=None, conv_args={"groups":k}) # DepthwiseConv2D(k, k, (1, k), act=None) & convert(x)
        self.k = k

    def forward(self, x):
        """
        Parameter
        ---------
        x: [B, C, N, k]
            Inputs.

        Returns
        -------
        trans: [B, N, k, k]
            X-transformation matrix.
        """
        # B, C, N, k = x.shape
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        trans = self.to_trans(x)
        return trans

    def to_trans(self, x):
        # Reshape channel-first conv output [B, k*k, N, 1] into per-point
        # k x k matrices [B, N, k, k].
        B, kk, N, _ = x.shape
        x = x.permute(0,2,3,1).contiguous()
        x = x.view(B, N, self.k, self.k).contiguous()
        return x
| 2.65625 | 3 |
malaya_boilerplate/backblaze.py | huseinzol05/malaya-boilerplate | 1 | 12758885 | <gh_stars>1-10
import requests
import os
import logging
from tqdm import tqdm
from glob import glob
from .utils import _delete_folder, _get_home
logger = logging.getLogger('backblaze')
def check_file_cloud(base_url, url):
    """HEAD the object at ``base_url + url``.

    Returns (exists, version) where ``exists`` is True on HTTP 200 and
    ``version`` is the integer ``X-Bz-Upload-Timestamp`` header (0 when the
    object is missing or the header is absent).
    """
    response = requests.head(base_url + url)
    if response.status_code == 200:
        return True, int(response.headers.get('X-Bz-Upload-Timestamp', 0))
    return False, 0
def check_files_local(file):
    """Return True when every local path recorded in *file* exists.

    Entries whose key contains 'version' hold version metadata rather than
    paths and are skipped.
    """
    return all(
        os.path.isfile(path)
        for key, path in file.items()
        if 'version' not in key
    )
def download_file_cloud(base_url, url, filename):
    """Stream-download ``url`` to ``filename`` with a tqdm progress bar.

    ``url`` is prefixed with ``base_url`` unless it is already absolute.
    Returns the integer ``X-Bz-Upload-Timestamp`` response header (0 when
    absent); callers use it as a version stamp.
    """
    if 'http' not in url:
        url = base_url + url
    r = requests.get(url, stream=True)
    total_size = int(r.headers['content-length'])
    version = int(r.headers.get('X-Bz-Upload-Timestamp', 0))
    os.makedirs(os.path.dirname(filename), exist_ok=True)
    with open(filename, 'wb') as f:
        # 1 MiB chunks; tqdm's total is expressed in MB for display only.
        for data in tqdm(
            iterable=r.iter_content(chunk_size=1_048_576),
            total=total_size / 1_048_576,
            unit='MB',
            unit_scale=True,
        ):
            f.write(data)
    return version
def download_from_dict(file, s3_file, package, base_url, validate=True, quantized=False):
    """Ensure all local model files described by *file* exist, downloading
    the missing ones from the cloud paths in *s3_file*.

    :param file: dict of local paths keyed by artifact name; must contain a
        'version' string entry and a 'model' (and optionally 'quantized') path.
    :param s3_file: dict of remote paths with the same keys as *file*.
    :param package: package name used to resolve the local cache directory.
    :param base_url: URL prefix for the cloud storage.
    :param validate: when True, compare the stored version file and re-download
        (wiping the directory) on mismatch; when False, only check presence
        and raise if anything is missing.
    :param quantized: select the 'quantized' artifact instead of 'model'.
    """
    home, _ = _get_home(package=package)
    if quantized:
        if 'quantized' not in file:
            f = file.replace(home, '').split('/')
            raise ValueError(
                f'Quantized model for {f[1]} module is not available, please load normal model.'
            )
        model = 'quantized'
        logger.warning('Load quantized model will cause accuracy drop.')
    else:
        model = 'model'
    if validate:
        base_location = os.path.dirname(file[model])
        version = os.path.join(base_location, 'version')
        download = False
        if os.path.isfile(version):
            # A version file exists: wipe and re-download if it is stale.
            with open(version) as fopen:
                if not file['version'] in fopen.read():
                    print(f'Found old version of {base_location}, deleting..')
                    _delete_folder(base_location)
                    download = True
                else:
                    # Version matches; still download any individually missing file.
                    for key, item in file.items():
                        if not os.path.exists(item):
                            download = True
                            break
        else:
            download = True

        if download:
            for key, item in file.items():
                # Skip metadata and the artifact variant that was not selected.
                if 'version' in key:
                    continue
                if model == 'quantized' and key == 'model':
                    continue
                if model == 'model' and key == 'quantized':
                    continue
                if not os.path.isfile(item):
                    logger.info(f'downloading frozen {key} to {item}')
                    download_file_cloud(base_url, s3_file[key], item)
            with open(version, 'w') as fopen:
                fopen.write(file['version'])
    else:
        if not check_files_local(file):
            path = file[model]
            path = os.path.sep.join(
                os.path.normpath(path).split(os.path.sep)[1:-1]
            )
            raise Exception(
                f'{path} is not available, please `validate = True`'
            )
def download_from_string(
    path, module, keys, package, base_url, validate=True, quantized=False
):
    """
    Build the local/cloud path mapping for model `path` under `module` and
    make sure every component exists locally, downloading as needed.

    Parameters
    ----------
    path: str
        model name, e.g. 'base'.
    module: str
        module directory, e.g. 'sentiment'.
    keys: dict
        component name -> file name or cloud path (values containing '/'
        are treated as full cloud paths).
    package: str
        package name used to resolve the local cache directory.
    base_url: str
        base URL of the cloud storage.
    validate: bool, optional (default=True)
        when False, only verify that the files already exist locally.
    quantized: bool, optional (default=False)
        select the quantized model variant.

    Returns
    -------
    files_local: dict
        component name -> local file path (including a 'version' path).
    """
    home, _ = _get_home(package=package)
    model = path
    # copy so the caller's dict is not mutated by the 'version' insertion
    keys = keys.copy()
    keys['version'] = 'version'
    if quantized:
        path = os.path.join(module, f'{path}-quantized')
        quantized_path = os.path.join(path, 'model.pb').replace('\\', '/')
        if not check_file_cloud(base_url, quantized_path)[0]:
            raise Exception(
                f'Quantized model for `{os.path.join(module, model)}` is not available, please load normal model.'
            )
        logger.warning('Load quantized model will cause accuracy drop.')
    else:
        path = os.path.join(module, path)
    path_local = os.path.join(home, path)
    files_local = {'version': os.path.join(path_local, 'version')}
    files_cloud = {}
    for key, value in keys.items():
        if '/' in value:
            # value is already a full cloud path
            f_local = os.path.join(path_local, value.split('/')[-1])
            f_cloud = value
        else:
            f_local = os.path.join(path_local, value)
            f_cloud = os.path.join(path, value)
        f_cloud = f_cloud.replace('\\', '/')
        files_local[key] = f_local
        files_cloud[key] = f_cloud
    if validate:
        download = False
        version = files_local['version']
        # newest upload timestamp across all cloud components
        latest = str(
            max(
                [check_file_cloud(base_url, item)[1] for key, item in files_cloud.items()]
            )
        )
        if os.path.isfile(version):
            with open(version) as fopen:
                v = fopen.read()
                if latest not in v:
                    p = os.path.dirname(version)
                    logger.info(f'Found old version in {p}, deleting..')
                    _delete_folder(p)
                    download = True
                else:
                    for key, item in files_local.items():
                        if not os.path.exists(item):
                            download = True
                            break
        else:
            download = True
        if download:
            versions = []
            for key, item in files_local.items():
                if 'version' in key:
                    continue
                if not os.path.isfile(item):
                    logger.info(f'downloading frozen {key} to {item}')
                    versions.append(download_file_cloud(base_url, files_cloud[key], item))
            # If every component already existed locally (only the version
            # file was missing), `versions` is empty and max() would raise;
            # keep the cloud-side `latest` computed above in that case.
            if versions:
                latest = str(max(versions))
            with open(version, 'w') as fopen:
                fopen.write(latest)
    else:
        if not check_files_local(files_local):
            path = files_local['model']
            path = os.path.sep.join(
                os.path.normpath(path).split(os.path.sep)[1:-1]
            )
            raise Exception(
                f'{path} is not available, please `validate = True`'
            )
    return files_local
def check_file(
    file,
    package,
    base_url,
    s3_file=None,
    module=None,
    keys=None,
    validate=True,
    quantized=False,
    **kwargs,
):
    """Ensure model files exist locally, dispatching on the input form.

    When both `file` and `s3_file` are dicts, the explicit local->cloud
    mapping is used as-is; otherwise `file` is treated as a model name and
    the mapping is derived from `module` and `keys`. Returns the resulting
    file mapping (the original dict in the first case).
    """
    if isinstance(file, dict) and isinstance(s3_file, dict):
        download_from_dict(
            file=file,
            s3_file=s3_file,
            package=package,
            base_url=base_url,
            validate=validate,
            quantized=quantized,
        )
        return file
    return download_from_string(
        path=file,
        module=module,
        keys=keys,
        package=package,
        base_url=base_url,
        validate=validate,
        quantized=quantized,
    )
def upload(module: str, model: str, directory: str, bucket: str = 'malaya',
           application_key_id: str = os.environ.get('backblaze_application_key_id'),
           application_key: str = os.environ.get('backblaze_application_key')):
    """
    Upload directory with malaya-style pattern.

    Parameters
    ----------
    module: str
    model: str
    directory: str
    bucket: str, optional (default='malaya')
    application_key_id: str, optional (default=os.environ.get('backblaze_application_key_id'))
    application_key: str, optional (default=os.environ.get('backblaze_application_key'))
    """
    if not application_key_id or not application_key:
        raise ValueError(
            'must set `backblaze_application_key_id` and `backblaze_application_key`, they cannot be None.'
        )
    from b2sdk.v1 import B2Api, InMemoryAccountInfo
    info = InMemoryAccountInfo()
    b2_api = B2Api(info)
    b2_api.authorize_account('production', application_key_id, application_key)
    file_info = {'how': 'good-file'}
    b2_bucket = b2_api.get_bucket_by_name(bucket)
    for file in glob(os.path.join(directory, '*')):
        if file.endswith('frozen_model.pb'):
            outPutname = f'{module}/{model}/model.pb'
        elif file.endswith('frozen_model.pb.quantized'):
            outPutname = f'{module}/{model}-quantized/model.pb'
        else:
            # `file` is a full local path from glob(); use only the basename
            # so the remote key does not embed the local directory structure.
            outPutname = f'{module}/{model}/{os.path.basename(file)}'
        logger.info(f'Uploading from local {file} to {bucket}/{outPutname}')
        b2_bucket.upload_local_file(
            local_file=file,
            file_name=outPutname,
            file_infos=file_info,
        )
| 2.4375 | 2 |
ikibardin/spacenet5/src/processing/split_folds.py | SpaceNetChallenge/SpaceNet_Optimized_Routing_Solutions | 27 | 12758886 | import os
import argparse
import multiprocessing
from typing import Dict, Union
import numpy as np
import pandas as pd
import skimage.io
from tqdm import tqdm
from src import config
def parse_args() -> argparse.Namespace:
    """Parse command-line options for the folds-splitting script."""
    arg_parser = argparse.ArgumentParser()
    for flag, description in (
        ('--masks', 'Path to a directory with training masks'),
        ('--out-csv', 'Where to save .csv dataframe with folds split'),
    ):
        arg_parser.add_argument(flag, type=str, required=True, help=description)
    return arg_parser.parse_args()
class MetadataGetter:
    """Computes per-image metadata (city id, road-pixel ratio) from mask files."""

    def __init__(self, masks_dir: str):
        self._masks_dir = masks_dir

    def get_metadata(self, image_id: str) -> Dict[str, Union[str, float]]:
        """Return id, city id and fraction of road pixels for one mask."""
        binary = self._load_mask(image_id) > 127
        return {
            'id': image_id,
            'city_id': self._get_city_id(image_id),
            'roads_ratio': binary.sum() / binary.size,
        }

    def _load_mask(self, image_id: str) -> np.ndarray:
        mask_path = os.path.join(self._masks_dir, f'{image_id}.tif')
        if not os.path.exists(mask_path):
            raise ValueError(mask_path)
        loaded = skimage.io.imread(mask_path)
        assert loaded is not None, mask_path
        # Keep only the last channel of the mask image.
        return loaded[:, :, -1]

    @staticmethod
    def _get_city_id(image_id: str) -> str:
        # City id is encoded in tokens 3..5 of the underscore-separated name.
        city_id = '_'.join(image_id.split('_')[3:6])
        assert city_id in config.TRAINING_CITIES, (city_id, config.TRAINING_CITIES)
        return city_id
def get_folds_split(df: pd.DataFrame, num_folds: int = 5) -> pd.DataFrame:
    """Assign a `fold_id` column, stratifying by city and road density.

    Rows are shuffled, then sorted by (city_id, roads_ratio) so consecutive
    rows are similar; assigning fold ids round-robin spreads similar images
    evenly across the folds.
    """
    shuffled = df.sample(n=len(df), replace=False)
    ordered = shuffled.sort_values(by=['city_id', 'roads_ratio']).reset_index(drop=True)
    ordered['fold_id'] = np.arange(len(ordered)) % num_folds
    return ordered
def main():
    """Entry point: extract metadata for every mask and write the folds CSV."""
    args = parse_args()
    image_ids = [os.path.splitext(name)[0] for name in os.listdir(args.masks)]
    getter = MetadataGetter(args.masks)
    with multiprocessing.Pool(16) as pool:
        progress = tqdm(
            pool.imap_unordered(getter.get_metadata, image_ids),
            total=len(image_ids),
            desc='Extracting metadata...',
        )
        metadata = list(progress)
    folds_df = get_folds_split(pd.DataFrame(metadata))
    folds_df.to_csv(args.out_csv, index=False)
    print(f'Saved folds dataframe of shape {folds_df.shape} to `{args.out_csv}`')
# Script entry point.
if __name__ == '__main__':
    main()
| 2.203125 | 2 |
readthedocs/projects/migrations/0021_add-webhook-deprecation-feature.py | tkoyama010/readthedocs.org | 4,054 | 12758887 | <gh_stars>1000+
# -*- coding: utf-8 -*-
"""Add feature for allowing access to deprecated webhook endpoints."""
from django.db import migrations
FEATURE_ID = 'allow_deprecated_webhooks'
def forward_add_feature(apps, schema_editor):
    """Create the feature flag, enabled by default for all projects."""
    feature_model = apps.get_model('projects', 'Feature')
    feature_model.objects.create(feature_id=FEATURE_ID, default_true=True)
def reverse_add_feature(apps, schema_editor):
    """Delete the feature flag created by forward_add_feature."""
    feature_model = apps.get_model('projects', 'Feature')
    feature_model.objects.filter(feature_id=FEATURE_ID).delete()
class Migration(migrations.Migration):
    """Data migration toggling the deprecated-webhooks feature flag."""

    dependencies = [
        ('projects', '0020_add-api-project-proxy'),
    ]

    operations = [
        # Pairing forward creation with reverse deletion makes the
        # migration cleanly reversible.
        migrations.RunPython(forward_add_feature, reverse_add_feature),
    ]
libsaas/services/stripe/plans.py | MidtownFellowship/libsaas | 1 | 12758888 | from libsaas.services import base
from . import resource
class PlansBaseResource(resource.StripeResource):
    # Shared base for Stripe plan resources; all requests hit the
    # `plans` endpoint.
    path = 'plans'
class Plan(PlansBaseResource):
    # A single Stripe plan. Creation happens on the collection resource,
    # so `create` is explicitly unsupported on an individual plan.

    def create(self, *args, **kwargs):
        raise base.MethodNotSupported()
class Plans(resource.ListResourceMixin, PlansBaseResource):
    # The plan collection: listing comes from ListResourceMixin, while
    # update/delete only make sense on an individual `Plan`.

    def update(self, *args, **kwargs):
        raise base.MethodNotSupported()

    def delete(self, *args, **kwargs):
        raise base.MethodNotSupported()
| 2.296875 | 2 |
test/tet_train.py | luoyudong593/Keras-TextClassification | 1,339 | 12758889 | # !/usr/bin/python
# -*- coding: utf-8 -*-
# @time : 2019/11/12 16:45
# @author : Mo
# @function:
from keras_textclassification import train
train(graph='TextCNN', # required: algorithm name, one of "ALBERT","BERT","XLNET","FASTTEXT","TEXTCNN","CHARCNN",
                       # "TEXTRNN","RCNN","DCNN","DPCNN","VDCNN","CRNN","DEEPMOJI",
                       # "SELFATTENTION", "HAN","CAPSULE","TRANSFORMER"
      label=17, # required: number of classes, must match between train and dev sets
      path_train_data=None, # required: training data file, csv format with a 'label,ques' header, see keras_textclassification/data
      path_dev_data=None, # required: dev/test data file, csv format with a 'label,ques' header, see keras_textclassification/data
      rate=1, # optional: fraction of the training data to use
      hyper_parameters=None) # optional: hyper-parameters as json; embedding defaults to 'char','random'
| 2.734375 | 3 |
pxr/usdImaging/lib/usdviewq/adjustClipping.py | marsupial/USD | 9 | 12758890 | <reponame>marsupial/USD
#
# Copyright 2016 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
from PySide import QtGui, QtCore
from adjustClippingUI import Ui_AdjustClipping
class AdjustClipping(QtGui.QDialog):
    """Dialog that lets the user override the viewer's near/far clipping
    planes, or watch the automatically computed values update live."""

    def __init__(self, parent):
        """Build the UI, wire up signals, and seed the near/far caches from
        the viewer's current (or overridden) clipping range."""
        QtGui.QDialog.__init__(self,parent)
        self._ui = Ui_AdjustClipping()
        self._ui.setupUi(self)
        self._parent = parent
        self._viewer = parent._stageView
        clipRange = self._viewer.computeGfCamera().frustum.nearFar
        # Cache the current values; an active override wins over the
        # automatically computed range.
        self._nearCache = self._viewer.overrideNear or clipRange.min
        self._farCache = self._viewer.overrideFar or clipRange.max
        # Poll the viewer 4x per second to refresh the automatic values.
        self._refreshTimer = QtCore.QTimer(self)
        self._refreshTimer.setInterval(250)
        self._refreshTimer.start()
        # Connect timer
        QtCore.QObject.connect(self._refreshTimer, QtCore.SIGNAL('timeout()'),
                               self._updateAutomaticValues)
        # When the checkboxes change, we want to update instantly
        QtCore.QObject.connect(self._ui.overrideNear,
                               QtCore.SIGNAL('stateChanged(int)'),
                               self._overrideNearToggled)
        QtCore.QObject.connect(self._ui.overrideFar,
                               QtCore.SIGNAL('stateChanged(int)'),
                               self._overrideFarToggled)
        # we also want to update the clipping planes as the user is typing
        QtCore.QObject.connect(self._ui.nearEdit,
                               QtCore.SIGNAL('textChanged(QString)'),
                               self._nearChanged)
        QtCore.QObject.connect(self._ui.farEdit,
                               QtCore.SIGNAL('textChanged(QString)'),
                               self._farChanged)
        # uncheck the main window menu item when the window is closed
        QtCore.QObject.connect(self,
                               QtCore.SIGNAL('finished(int)'),
                               self._cleanUpAndClose)
        # Set the checkboxes to their initial state
        self._ui.overrideNear.setChecked(self._viewer.overrideNear \
                                             is not None)
        self._ui.overrideFar.setChecked(self._viewer.overrideFar \
                                            is not None)
        # load the initial values for the text boxes, but first deactivate them
        # if their corresponding checkbox is off.
        self._ui.nearEdit.setEnabled(self._ui.overrideNear.isChecked())
        self._ui.nearEdit.setText(str(self._nearCache))
        self._ui.farEdit.setEnabled(self._ui.overrideFar.isChecked())
        self._ui.farEdit.setText(str(self._farCache))
        # Make sure only doubles can be typed in the text boxes
        self._ui.nearEdit.setValidator(QtGui.QDoubleValidator(self))
        self._ui.farEdit.setValidator(QtGui.QDoubleValidator(self))

    def _updateAutomaticValues(self):
        """Read the automatically computed clipping planes and put them
        in the text boxes when they are deactivated"""
        clipRange = self._viewer.computeGfCamera().frustum.nearFar
        if (not self._ui.overrideNear.isChecked()) and \
                self._nearCache != clipRange.min :
            self._nearCache = clipRange.min
            self._ui.nearEdit.setText(str(self._nearCache))
        if (not self._ui.overrideFar.isChecked()) and \
                self._farCache != clipRange.max :
            self._farCache = clipRange.max
            self._ui.farEdit.setText(str(self._farCache))

    def _overrideNearToggled(self, state):
        """Called when the "Override Near" checkbox is toggled"""
        self._ui.nearEdit.setEnabled(state)
        if state:
            self._viewer.overrideNear = self._nearCache
        else:
            # clearing the override re-enables automatic clipping
            self._viewer.overrideNear = None

    def _overrideFarToggled(self, state):
        """Called when the "Override Far" checkbox is toggled"""
        self._ui.farEdit.setEnabled(state)
        if state:
            self._viewer.overrideFar = self._farCache
        else:
            self._viewer.overrideFar = None

    def _nearChanged(self, text):
        """Called when the Near text box changed.  This can happen when we
        are updating the value but the widget is actually inactive - don't
        do anything in that case."""
        if len(text) == 0 or not self._ui.nearEdit.isEnabled():
            return
        try:
            self._viewer.overrideNear = float(text)
        except ValueError:
            # QDoubleValidator allows transient states like "1e"; ignore them
            pass

    def _farChanged(self, text):
        """Called when the Far text box changed.  This can happen when we
        are updating the value but he widget is actually inactive - don't
        do anything in that case."""
        if len(text) == 0 or not self._ui.farEdit.isEnabled():
            return
        try:
            self._viewer.overrideFar = float(text)
        except ValueError:
            pass

    def _cleanUpAndClose(self, result):
        """Stop the refresh timer and sync the main-window menu state."""
        self._refreshTimer.stop()
        self._parent._ui.actionAdjust_Clipping.setChecked(False)
| 1.8125 | 2 |
myhvac_web/main.py | alanquillin/myhvac_web | 0 | 12758891 | from app import app
import routes
import rest
from myhvac_core import cfg
from myhvac_core.db import api as db
from myhvac_core import log
import logging
LOG = logging.getLogger(__name__)
# Configuration options for the embedded REST/web server.
opts = [
    cfg.BoolOpt('debug', default=False,
                help='Enables debug mode for the flask rest api'),
    cfg.IntOpt('port', default=8081, help='Http port of the webserver')
]

CONF = cfg.CONF
CONF.register_opts(opts, 'rest_api')
# (removed a duplicate `CONF = cfg.CONF` assignment that followed the
# register_opts call; it was a no-op rebinding of the same object)
def init():
    """Parse configuration, then initialize logging and the database.

    Exits with status 1 (after printing usage) when a required
    configuration option is missing.
    """
    try:
        CONF(project='myhvac_service')
    except cfg.RequiredOptError:
        CONF.print_help()
        raise SystemExit(1)
    log.init_log()
    db.init_db()
# Script entry point: configure the service, then start the Flask app.
if __name__ == '__main__':
    init()
    app.run()
| 2.34375 | 2 |
adv/vanessa.py.void.py | hcc123915/dl | 0 | 12758892 | import adv_test
import adv
import vanessa
def module():
    # Factory hook used by the adv_test framework to obtain the adventurer
    # class defined in this file.
    return Vanessa
class Vanessa(vanessa.Vanessa):
    # Simulation variant: Vanessa equipped with a void weapon against HMS.
    # NOTE: this codebase uses `this` instead of `self` for the instance
    # parameter; behavior is identical.
    comment = 'void weapon vs HMS'

    def pre(this):
        # Override weapon stats with the void weapon's strength and
        # ('att', 'killer', 0.2) modifier.
        this.conf['str_w'] = 1.5*380
        this.conf['mod_w'] = ('att','killer',0.2)
        if this.condition('last offense'):
            # Swap in c_init so the Last Offense self-buff is applied on top
            # of the normal init behavior.
            this.o_init = this.init
            this.init = this.c_init

    def init(this):
        # Start with skill gauge pre-charged to 50%.
        this.charge_p('prep','50%')

    def c_init(this):
        this.o_init()
        # 30% self-buff for 15s, modelling the void weapon's passive.
        adv.Selfbuff('last_offense',0.3,15).on()
if __name__ == '__main__':
    conf = {}
    # Action control list consumed by adv_test: use skill 1, then skill 2,
    # and force-strike on combo sequence 5.
    conf['acl'] = """
        `s1
        `s2
        `fs,seq=5
        """
    adv_test.test(module(), conf, verbose=0)
| 2.484375 | 2 |
lib/RV_mod/signal_data.py | Simske/exostriker | 69 | 12758893 | <reponame>Simske/exostriker<filename>lib/RV_mod/signal_data.py
#!/usr/bin/python
__author__ = '<NAME>, <NAME>'
#import sys
#sys.path.insert(0, '../lib')
import numpy as np
import gls as gls
from functions import *
from errors import Error, InputError, FittingError
from Warning_log import Warning_log
# class for signal to run gls and plotting functions on (it can be both the entire signal and O-C, depends what we pass as rvs in the __ini__ function)
# This class must completely refurbished!
class signal_data(object):
    """Radial-velocity time series on which a GLS periodogram can be run.

    Depending on what is passed as `rvs`, this can hold either the full RV
    signal or the residuals (O-C).
    """

    def __init__(self,jd,rvs,rv_error, sig_for_gls=np.array([0.1,0.01,0.001])):
        # NOTE(review): the mutable np.array default for `sig_for_gls` is
        # shared between calls; it is only read here, so this is benign.
        self.jd=np.array(list(map(float,jd))) # for some reason sometimes we get strings instead of floats, so...
        self.rvs=np.array(list(map(float,rvs)))
        self.rv_error=np.array(list(map(float,rv_error)))
        self.sig=sig_for_gls # list of significances for gls, so the algorithm will do the computation for all three of them and include relevant levels in the z array outputed by lomb_scargle
        self.sig=np.array(self.sig)
        # It is convenient to store significance levels in decreasing order, so let's sort them
        sort = convert_array_to_int(np.array(sorted(range(len(self.sig)), key=lambda k: self.sig[k], reverse=True))) # find the permutation in which they are sorted in decreasing order
        # sorting based on the permutation sort calculated above
        self.sig = self.sig[sort]

    def gls(self, ind_sig=2,sig_for_gls=np.array([0.1,0.01,0.001])): # ind_sig will be the index of the significance level which we assume to be sufficient to declare a planet candidate
        """Run a generalized Lomb-Scargle periodogram over the stored signal.

        Populates self.P_G (power), self.z (power levels for the requested
        significances), self.periods, self.best_per / self.best_peaks
        (relative maxima sorted by height) and
        self.number_of_significant_peaks.
        """
        gls_warnings=Warning_log([],'Running gls')
        # fall back to the least-significant level if ind_sig is out of range
        if not (is_int(str(ind_sig),bounded=[True,True],bounds=[0,len(self.sig)],equal=[True,False])):
            ind_sig=len(self.sig)-1
            gls_warnings.update_warning_list('ind_sig for gls must be a non-negative integer smaller than the size of sig_for_gls array! Default value of len(sig)-1 will be assumed.')
        ### Compute the Lomb-Scargle Periodogram
        try:
            #self.gls_range = (float(max(self.jd))-float(min(self.jd)))*2 # range for gls, twice the time range
            #self.periods = np.linspace(1, self.gls_range, 2000) # abscissas for the period range
            #omega = TAU / self.periods # converting the periods array into the frequency range for the Lomb-Scargle Periodogram evaluation
            # omega = 1 / self.periods # converting the periods array into the frequency range for the Lomb-Scargle Periodogram evaluation
            #omega = 1/ np.logspace(-0.05, 4, num=1000)
            # log-spaced frequency grid covering periods ~0.9 to 10^4 days
            omega = 1/ np.logspace(-0.05, 4, num=1000)
            RV_gls = gls.Gls((self.jd, self.rvs, self.rv_error), fast=True, verbose=False, norm= "ZK",ofac=5, fbeg=omega[999], fend=omega[ 0],)
            #self.P_G, self.z = lomb_scargle(self.jd, self.rvs, self.rv_error, omega, generalized=True, significance=self.sig) # Lomb-Scargle for the RV signal
            self.P_G = RV_gls.power
            self.z = RV_gls.powerLevel(sig_for_gls)
            self.periods = 1/RV_gls.freq
            self.gls_range = (float(max(self.jd))-float(min(self.jd)))*2 # range for gls, twice the time range
            per_ind = argrelextrema(self.P_G, np.greater) # generates an array with the indices of all the relative extrema in the periodogram
            self.best_per = self.periods[per_ind] # periods corresponding to the indices calculated above
            self.best_peaks = self.P_G[per_ind] # peak heights of these extrema
            if (len(self.best_peaks)>0): # don't sort if there's no peaks
                sort = convert_array_to_int(np.array(sorted(range(len(self.best_peaks)), key=lambda k: self.best_peaks[k], reverse=True))) # find the permutation of the peaks in which they are sorted by height in decreasing order
                # sorting both arrays (periods and peak heights) by peak height, based on the permutation sort calculated above
                self.best_peaks = self.best_peaks[sort]
                self.best_per = self.best_per[sort]
                # now we save the number of peaks which exceed the significance requirement
                for i in range(len(self.best_peaks)):
                    if not (self.best_peaks[i]>self.z[ind_sig]): # z[ind_sig] will be the minimum height of a peak according to the chosen significance level
                        break # since best_peaks and best_per are already sorted, if we break at this point i will be the index of the first peak which is too low, and the rest is then too low also
                self.number_of_significant_peaks=i
            else:
                self.number_of_significant_peaks=0
        except (RuntimeError, ValueError, TypeError):
            # best-effort: on numerical failure report "no peaks" rather
            # than propagating the error
            gls_warnings.update_warning_list('Failed to conduct gls, assuming no peaks')
            self.number_of_significant_peaks=0
            self.P_G=[]
            self.z=[]
            self.best_per=[]
            self.best_peaks=[]
        ############### Sorting the best peaks ##########################
        gls_warnings.print_warning_log()
        return
| 2.671875 | 3 |
gcp_variant_transforms/transforms/merge_header_definitions_test.py | tsa87/gcp-variant-transforms | 113 | 12758894 | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test cases for merge_header_definitions module."""
import unittest
from pysam import libcbcf
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from apache_beam.transforms import Create
from gcp_variant_transforms.beam_io import vcf_header_io
from gcp_variant_transforms.transforms import merge_header_definitions
from gcp_variant_transforms.libs.vcf_header_definitions_merger import Definition
from gcp_variant_transforms.libs.vcf_header_definitions_merger import VcfHeaderDefinitions
class MergeHeadersTest(unittest.TestCase):
  """Unit tests for the MergeDefinitions transform."""

  def _get_header_from_lines(self, lines, file_path):
    """Build a VcfHeader from raw header lines.

    The final line (the #CHROM column header) is not a meta-information
    line, so only lines[:-1] are fed to pysam.
    """
    header = libcbcf.VariantHeader()
    for line in lines[:-1]:
      header.add_line(line)
    return vcf_header_io.VcfHeader(infos=header.info,
                                   filters=header.filters,
                                   alts=header.alts,
                                   formats=header.formats,
                                   contigs=header.contigs,
                                   file_path=file_path)

  def test_merge_header_definitions_one_header(self):
    """A single header yields its own definition with one source file."""
    lines = [
        '##INFO=<ID=NS,Number=1,Type=Integer,Description="Number samples">\n',
        '#CHROM  POS     ID      REF     ALT     QUAL    FILTER  INFO    FORMAT  Sample1 Sample2\n'
    ]
    headers = self._get_header_from_lines(lines, 'file1')
    pipeline = TestPipeline()
    merged_definitions = (
        pipeline
        | Create([headers])
        | 'MergeDefinitions' >> merge_header_definitions.MergeDefinitions())
    expected = VcfHeaderDefinitions()
    expected._infos = {'NS': {Definition(1, 'Integer'): ['file1']}}
    assert_that(merged_definitions, equal_to([expected]))
    pipeline.run()

  def test_merge_header_definitions_two_conflicting_headers(self):
    """Same INFO id with different types is tracked per source file."""
    lines_1 = [
        '##INFO=<ID=NS,Number=1,Type=Integer,Description="Number samples">\n',
        '#CHROM  POS     ID      REF     ALT     QUAL    FILTER  INFO    FORMAT  Sample1 Sample2\n'
    ]
    lines_2 = [
        '##INFO=<ID=NS,Number=1,Type=Float,Description="Number samples">\n',
        '#CHROM  POS     ID      REF     ALT     QUAL    FILTER  INFO    FORMAT  Sample3\n'
    ]
    headers_1 = self._get_header_from_lines(lines_1, 'file1')
    headers_2 = self._get_header_from_lines(lines_2, 'file2')
    pipeline = TestPipeline()
    merged_definitions = (
        pipeline
        | Create([headers_1, headers_2])
        | 'MergeDefinitions' >> merge_header_definitions.MergeDefinitions())
    expected = VcfHeaderDefinitions()
    expected._infos = {'NS': {Definition(1, 'Integer'): ['file1'],
                              Definition(1, 'Float'): ['file2']}}
    assert_that(merged_definitions, equal_to([expected]))
    pipeline.run()

  def test_merge_header_definitions_no_conflicting_headers(self):
    """Distinct FORMAT ids merge without any conflict entries."""
    lines_1 = [
        '##FORMAT=<ID=NS,Number=1,Type=Float,Description="Number samples">\n',
        '#CHROM  POS     ID      REF     ALT     QUAL    FILTER  INFO    FORMAT  Sample1 Sample2\n'
    ]
    lines_2 = [
        '##FORMAT=<ID=DP,Number=2,Type=Float,Description="Total Depth">\n',
        '#CHROM  POS     ID      REF     ALT     QUAL    FILTER  INFO    FORMAT  Sample3\n'
    ]
    headers_1 = self._get_header_from_lines(lines_1, 'file1')
    headers_2 = self._get_header_from_lines(lines_2, 'file2')
    pipeline = TestPipeline()
    merged_definitions = (
        pipeline
        | Create([headers_1, headers_2])
        | 'MergeDefinitions' >> merge_header_definitions.MergeDefinitions())
    expected = VcfHeaderDefinitions()
    expected._formats = {'NS': {Definition(1, 'Float'): ['file1']},
                         'DP': {Definition(2, 'Float'): ['file2']}}
    assert_that(merged_definitions, equal_to([expected]))
    pipeline.run()

  def test_merge_header_definitions_same_id_in_info_and_format_headers(self):
    """The same id in INFO and FORMAT is kept in separate namespaces."""
    lines_1 = [
        '##INFO=<ID=NS,Number=1,Type=Integer,Description="Number samples">\n',
        '#CHROM  POS     ID      REF     ALT     QUAL    FILTER  INFO    FORMAT  Sample1 Sample2\n'
    ]
    lines_2 = [
        '##FORMAT=<ID=NS,Number=1,Type=Float,Description="Number samples">\n',
        '#CHROM  POS     ID      REF     ALT     QUAL    FILTER  INFO    FORMAT  Sample3\n'
    ]
    headers_1 = self._get_header_from_lines(lines_1, 'file1')
    headers_2 = self._get_header_from_lines(lines_2, 'file2')
    pipeline = TestPipeline()
    merged_definitions = (
        pipeline
        | Create([headers_1, headers_2])
        | 'MergeDefinitions' >> merge_header_definitions.MergeDefinitions())
    expected = VcfHeaderDefinitions()
    expected._infos = {'NS': {Definition(1, 'Integer'): ['file1']}}
    expected._formats = {'NS': {Definition(1, 'Float'): ['file2']}}
    assert_that(merged_definitions, equal_to([expected]))
    pipeline.run()

  def test_merge_header_definitions_save_five_copies(self):
    """At most five source file names are kept per definition.

    Six files share one definition, but the expected output lists only the
    first five (file1..file5), exercising the cap in the merger.
    """
    lines_1 = [
        '##INFO=<ID=NS,Number=1,Type=Float,Description="Number samples">\n',
        '#CHROM  POS     ID      REF     ALT     QUAL    FILTER  INFO    FORMAT  Sample1 Sample2\n'
    ]
    lines_2 = [
        '##INFO=<ID=NS,Number=1,Type=Integer,Description="Number samples">\n',
        '#CHROM  POS     ID      REF     ALT     QUAL    FILTER  INFO    FORMAT  Sample3\n'
    ]
    file_names = ['file1', 'file2', 'file3', 'file4', 'file5', 'file6']
    headers = []
    for file_name in file_names:
      headers.append(self._get_header_from_lines(lines_1, file_name))
    headers.append(self._get_header_from_lines(lines_2, 'file7'))
    pipeline = TestPipeline()
    merged_definitions = (
        pipeline
        | Create(headers, reshuffle=False)
        | 'MergeDefinitions' >> merge_header_definitions.MergeDefinitions())
    expected = VcfHeaderDefinitions()
    expected._infos = {
        'NS': {Definition(1, 'Float'):
                   ['file1', 'file2', 'file3', 'file4', 'file5'],
               Definition(1, 'Integer'): ['file7']}}
    assert_that(merged_definitions, equal_to([expected]))
    pipeline.run()
| 2.0625 | 2 |
optimization/Profit_Calculator.py | SankaW/teamfx | 0 | 12758895 | <filename>optimization/Profit_Calculator.py<gh_stars>0
from random import *
from . import Application
from pandas.io.common import file_path_to_url
from tabulate import tabulate
import pandas as pd
import numpy as np
def fitness(individual,strategy):
    """Evaluate a trading-parameter `individual` for the given strategy.

    Runs a backtest via Application.optimize, applies stop-loss/take-profit
    cutoffs to the per-trade returns, and returns the profit factor
    (|total profit| / |total loss|) as the fitness value.

    NOTE(review): the indices into `individual` encode stop-loss and
    take-profit pip values at strategy-specific positions -- confirm against
    the GA chromosome layout. Pip values are converted to price with /10000,
    which assumes a 4-decimal quote currency (not JPY pairs).
    """
    print("Calculaing Profit: ", individual)
    stoplossValue = 0.00000
    takeprofitValue = 0.00000
    stoploss_pip = 0.00000
    takeprofit_pip = 0.00000
    # Pick the chromosome slots holding stop-loss / take-profit pips for
    # this strategy (other slots hold indicator parameters).
    if (strategy == "Moving Average"):
        stoploss_pip = individual[2]
        takeprofit_pip = individual[3]
    elif (strategy == "Fuzzy Moving Average"):
        stoploss_pip = individual[2]
        takeprofit_pip = individual[3]
    elif (strategy == "Bollinger Band"):
        stoploss_pip = individual[2]
        takeprofit_pip = individual[3]
    elif (strategy == "MACD"):
        stoploss_pip = individual[3]
        takeprofit_pip = individual[4]
    elif (strategy == "Stochastic"):
        stoploss_pip = individual[4]
        takeprofit_pip = individual[5]
    elif (strategy == "RSI"):
        stoploss_pip = individual[2]
        takeprofit_pip = individual[3]
    # Run the backtest; returns a DataFrame with at least 'signal',
    # 'positions', 'close' and 'returns' columns.
    inv_return = Application.optimize(individual,strategy)
    #inv_return.to_csv('without.csv', encoding='utf-8')
    #print(inv_return)
    ### Filter Regions ###
    # Keep only rows inside an open position (signal == 1) or at a close
    # (positions == -1).
    inv_return = inv_return[(inv_return["signal"] == 1) | (inv_return["positions"] == -1)]
    #print("output inv_return_filter", inv_return)
    #inv_return.to_csv('filtered.csv', encoding='utf-8')
    #print(inv_return)
    print(" ")
    #print(inv_return)
    #inv_return.to_csv('without.csv', encoding='utf-8')
    #list = inv_return['close'].tolist()
    #se = pd.Series(list)
    #inv_return['stoploss'] = inv_return['close'] - stoploss_pip / 10000
    # Number each trade: a new group starts on the row after a close.
    inv_return["group"] = (inv_return.positions == -1).shift(1).fillna(0).cumsum()
    stoploss = []
    takeprofit = []
    check = []
    group = -1
    # Walk the rows, carrying each trade's stop-loss / take-profit levels
    # (set from the close price at the trade's first row) and classifying
    # every subsequent row against them.
    for index, row in inv_return.iterrows():
        if(row['group'] == group):
            stoploss.append(stoplossValue)
            takeprofit.append(takeprofitValue)
            if (row['close'] < stoplossValue):
                check.append('stoploss')
            elif (row['close'] > takeprofitValue):
                check.append('takeprofit')
            else:
                check.append('null')
        else:
            group = row['group']
            stoplossValue = row['close'] - stoploss_pip / 10000
            takeprofitValue = row['close'] + takeprofit_pip / 10000
            stoploss.append(stoplossValue)
            takeprofit.append(takeprofitValue)
            check.append('null')
    inv_return['stoploss'] = stoploss
    inv_return['takeprofit'] = takeprofit
    inv_return['check'] = check
    #print(inv_return)
    # Zero out returns on rows hit by a stop-loss/take-profit; accumulate
    # gross profit and gross loss from the remaining rows.
    returns = []
    total_profit = 0
    total_loss =0
    for index, row in inv_return.iterrows():
        if(row['check'] == 'stoploss'):
            returns.append(0)
        elif (row['check'] == 'takeprofit'):
            returns.append(0)
        else:
            return_value = row['returns']
            returns.append(return_value)
            if (return_value>0):
                total_profit += return_value
            else:
                total_loss -= return_value
    total = []
    # cumulative sum of the adjusted returns
    total = [sum(returns[0:x + 1]) for x in range(0, len(returns))]
    inv_return['returns'] = returns
    inv_return['total'] = total
    #print(inv_return.iloc[-1].tolist()[5])
    #inv_return.to_csv('finalresult.csv', encoding='utf-8')
    """from subprocess import Popen
    p = Popen('finalresult.csv', shell=True)"""
    #inv_return.to_csv('without2.csv', encoding='utf-8')
    #profit = inv_return.iloc[-1].tolist()[5]
    #print("profit : ",profit)
    # NOTE(review): max/min of per-row returns, not of the equity curve --
    # 'drawdown' here is a misnomer; risk_factor is computed but unused.
    max_drawdown = (inv_return['returns'].max())
    # print(max_drawdown)
    min_drawdown = (inv_return['returns'].min())
    # print(min_drawdown)
    if (max_drawdown != 0):
        risk_factor = abs((max_drawdown - min_drawdown) / max_drawdown)
    else:
        risk_factor = 0
    if (total_loss != 0):
        profit_factor = abs(total_profit / total_loss)
    else:
        profit_factor = 1
    fitness = profit_factor
    # fitness = risk_factor + profit_factor
    if (np.isnan(fitness)):
        fitness = 0
        # print("aaaaaaaaa")
    # print(max_drawdown,min_drawdown,risk_factor,total_profit,total_loss,profit_factor,individual)
    print("fitness", fitness)
    return fitness
#fitness([44, 140, 0, 200]) | 2.984375 | 3 |
algorithms/quick_sort.py | mserevko/sorting_algorithms | 0 | 12758896 | """
Quick Sort is one of the most efficient sorting algorithms.
It works by partitioning the input list around a pivot into smaller sublists.
In practice Quick Sort often outperforms Merge Sort on in-memory data thanks to
in-place partitioning and better cache behavior.
"""
import random
rand_list = [random.randint(1, 100) for i in range(0,8)]
def swap(arr: list, i: int, k: int) -> None:
    """Exchange the elements at positions *i* and *k* of *arr* in place.

    :param arr: list whose elements are exchanged
    :type arr: list
    :param i: index of the first element
    :type i: int
    :param k: index of the second element
    :type k: int
    :return: None
    """
    temp = arr[i]
    arr[i] = arr[k]
    arr[k] = temp
| 4.28125 | 4 |
doubanTop.py | Cvencent/python_crawl | 0 | 12758897 | import json
import requests
from requests.exceptions import RequestException
import re
def get_one_page(url, **headers):
    """Fetch `url` with the given request headers.

    Returns the response body on HTTP 200, None for other status codes,
    and the sentinel string 'Exception' when the request itself fails.
    NOTE(review): returning a sentinel string is fragile -- callers must not
    treat it as page content; confirm before changing the contract.
    """
    try:
        resp = requests.get(url, headers=headers)
        return resp.text if resp.status_code == 200 else None
    except RequestException:
        return 'Exception'
def parse_one_page(html):
    """Yield one movie record per entry found on a Douban top-250 page.

    Each record has keys 'url', 'name', 'actor' (whitespace and the
    trailing '...<br>' fragment stripped) and 'motor' (the average rating).
    The quote captured by the final group is intentionally discarded.
    """
    movie_re = re.compile('<div class="hd".*?href="(.*?)".*?"title">(.*?)</span>.*?"bd">.*?<p class="">(.*?)</p>.*?"star">.*?"v:average">(.*?)</span>.*?inq">(.*?)</span>', re.S)
    for url, name, actor_raw, rating, _quote in movie_re.findall(html):
        yield {
            'url': url,
            'name': name,
            'actor': re.sub(' |...<br>\\n', '', actor_raw.strip()),
            'motor': rating
        }
def write_to_file(content):
    """Append `content` as one JSON line to doubanTop250.txt (UTF-8)."""
    line = json.dumps(content, ensure_ascii=False) + '\n'
    with open('doubanTop250.txt', 'a', encoding='utf-8') as out:
        out.write(line)
def main(num):
    """Scrape one page (offset `num`) of the Douban top 250 and persist it."""
    page_url = 'https://movie.douban.com/top250?start=' + str(num) + '&filter='
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'}
    page = get_one_page(page_url, **headers)
    for record in parse_one_page(page):
        print(record)
        write_to_file(record)
if __name__ == '__main__':
    # Douban paginates the top 250 in steps of 25: start=0, 25, ..., 225.
    # range()'s stop is exclusive, so it must be 250 -- the previous value
    # of 225 skipped the final page (movies 226-250).
    for i in range(0, 250, 25):
        main(i)
pypipegraph/job.py | bopopescu/pypipegraph-2 | 0 | 12758898 | <filename>pypipegraph/job.py
from __future__ import print_function
"""
"""
License = """
The MIT License (MIT)
Copyright (c) 2012, <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from . import ppg_exceptions
from . import util
import pathlib
logger = util.start_logging("job")
import re
try:
import cStringIO
io = cStringIO
except ImportError:
import io
import os
import stat
from . import util
import sys
import dis
import shutil
import hashlib
try:
import cPickle
pickle = cPickle
except ImportError:
import pickle
import traceback
import platform
import time
import six
is_pypy = platform.python_implementation() == "PyPy"
module_type = type(sys)
checksum_file = util.checksum_file
register_tags = False
class JobList(object):
    """For when you want to return a list of jobs that mostly behaves like a single Job.
    (Ie. when it must have a depends_on() method. Otherwise, a regular list will do fine).
    """
    def __init__(self, jobs):
        # Materialize the iterable so we can both validate and store it.
        jobs = list(jobs)
        for job in jobs:
            if not isinstance(job, Job):
                # NOTE(review): ppg_exceptions.ValueError -- confirm the
                # exceptions module really re-exports ValueError; otherwise
                # this line raises AttributeError instead of the intended error.
                raise ppg_exceptions.ValueError("%s was not a job object" % job)
        # Set semantics: duplicates collapse, iteration order is undefined.
        self.jobs = set(jobs)
    def __iter__(self):
        # Yield the contained jobs (arbitrary set order).
        for job in self.jobs:
            yield job
    def __add__(self, other_job):
        # Combine with a Job, a plain list (wrapped first), or another JobList,
        # producing a new JobList; self is not mutated.
        if isinstance(other_job, list):
            other_job = JobList(other_job)
        def iter():
            for job in self.jobs:
                yield job
            if isinstance(other_job, Job):
                yield other_job
            else:
                for job in other_job:
                    yield job
        return JobList(iter())
    def __len__(self):
        return len(self.jobs)
    def depends_on(self, other_job):
        # Fan the dependency out to every contained job.
        for job in self.jobs:
            job.depends_on(other_job)
    def __str__(self):
        return "JobList of %i jobs: %s" % (
            len(self),
            ", ".join(str(x) for x in self.jobs),
        )
class Job(object):
    """Base class for all Jobs - never instanciated itself.
    This class also provides the pipegraph-lifetime singletonizing of Jobs - ie.
    jobs with the same job_id (=name) will be the same object as long as no new pipegraph
    is generated via new_pipegraph()
    """
    def __new__(cls, job_id, *args, **kwargs):
        """Handles the singletonization on the job_id"""
        # logger.info("New for %s %s" % (cls, job_id))
        if not isinstance(job_id, str):
            if isinstance(job_id, pathlib.Path):
                job_id = str(job_id)
            else:
                raise ValueError(
                    "Job_id must be a string, was %s %s" % (job_id, type(job_id))
                )
        if job_id not in util.job_uniquifier:
            util.job_uniquifier[job_id] = object.__new__(cls)
            util.job_uniquifier[
                job_id
            ].job_id = (
                job_id
            )  # doing it later will fail because hash apperantly might be called before init has run?
        else:
            # Same id seen before: only legal if the class matches too.
            if util.job_uniquifier[job_id].__class__ != cls:
                # Include where the offending callback was defined, if we can tell.
                if args and hasattr(args[0], "__code__"):
                    x = (args[0].__code__.co_filename, args[0].__code__.co_firstlineno)
                else:
                    x = ""
                raise ppg_exceptions.JobContractError(
                    "Same job id, different job classes for %s - was %s and %s.\nOld job: %s\n My args: %s %s\n%s"
                    % (
                        job_id,
                        util.job_uniquifier[job_id].__class__,
                        cls,
                        str(util.job_uniquifier[job_id]),
                        args,
                        kwargs,
                        x,
                    )
                )
        if util.global_pipegraph is None:
            raise ValueError(
                "You must first instanciate a pypipegraph before creating jobs" ""
            )
        return util.job_uniquifier[job_id]
    # def __getnewargs__(self):
    # """Provides unpickeling support"""
    # return (self.job_id, )
    def __init__(self, job_id):
        # logger.info("init for %s" % job_id)
        if isinstance(job_id, pathlib.Path):
            job_id = str(job_id)
        if not hasattr(self, "dependants"):  # test any of the following
            # else: this job was inited before, and __new__ returned an existing instance
            self.job_id = job_id
            self.job_no = -1
            self.cores_needed = 1
            self.memory_needed = -1
            self.dependants = set()  # jobs that depend on this one
            self.prerequisites = set()  # jobs this one depends on
            self.failed = None
            self.error_reason = "no error"
            self.stdout = None
            self.stderr = None
            self.exception = None
            self.was_run = False
            self.was_done_on = set()  # on which slave(s) was this job run?
            self.was_loaded = False
            self.was_invalidated = False
            self.invalidation_count = (
                0
            )  # used to save some time in graph.distribute_invariant_changes
            self.was_cleaned_up = False
            self.always_runs = False
            self.start_time = None
            self.stop_time = None
            self.is_final_job = False
            self.do_cleanup_if_was_never_run = False
            self.invariant_cache = None  # (old, invariant) memo for get_invariant()
            self.is_temp_job = False
            self._is_done = None
            self.do_cache = False
        # logger.info("adding self %s to %s" % (job_id, id(util.global_pipegraph)))
        util.global_pipegraph.add_job(util.job_uniquifier[job_id])
    def depends_on(self, job_joblist_or_list_of_jobs):
        """Declare that this job depends on the ones passed in (which must be Jobs, JobLists or iterables of tsuch).
        This means that this job can only run, if all previous ones have been done sucessfully.
        """
        # if isinstance(job_joblist_or_list_of_jobs, Job):
        # job_joblist_or_list_of_jobs = [job_joblist_or_list_of_jobs]
        if job_joblist_or_list_of_jobs is self:
            raise ppg_exceptions.CycleError(
                "job.depends_on(self) would create a cycle: %s" % (self.job_id)
            )
        # First pass: validate everything (recursing into nested iterables)
        # before any prerequisite is actually recorded.
        for job in job_joblist_or_list_of_jobs:
            if not isinstance(job, Job):
                if hasattr(job, "__iter__") and not isinstance(
                    job, str
                ):  # a nested list
                    self.depends_on(job)
                    pass
                else:
                    raise ValueError(
                        "Can only depend on Job objects, was: %s" % type(job)
                    )
            else:
                # NOTE: only direct cycles are caught here; deeper cycles are
                # detected later by the graph itself.
                if self in job.prerequisites:
                    raise ppg_exceptions.CycleError(
                        "Cycle adding %s to %s" % (self.job_id, job.job_id)
                    )
                if isinstance(job, FinalJob):
                    raise ppg_exceptions.JobContractError(
                        "No jobs can depend on FinalJobs"
                    )
        # Second pass: everything validated, now record the direct Job
        # prerequisites (nested iterables were handled recursively above).
        for job in job_joblist_or_list_of_jobs:
            if isinstance(
                job, Job
            ):  # skip the lists here, they will be delegated to further calls during the checking...
                self.prerequisites.add(job)
        return self
    def is_in_dependency_chain(self, other_job, max_depth):
        """check wether the other job is in this job's dependency chain.
        We check at most @max_depth levels, starting with this job (ie.
        max_depth = 2 means this job and it's children).
        Use a -1 for 'unlimited' (up to the maximum recursion depth of python ;))
        """
        if max_depth == 0:
            return False
        if other_job in self.prerequisites:
            return True
        else:
            # Depth-first search through the prerequisites.
            for preq in self.prerequisites:
                if preq.is_in_dependency_chain(other_job, max_depth - 1):
                    return True
        return False
    def ignore_code_changes(self):
        """Tell the job not to autogenerate a FunctionInvariant for it's callback(s)"""
        raise ValueError("This job does not support ignore_code_changes")
    def inject_auto_invariants(self):
        """Create the automagically generated FunctionInvariants if applicable"""
        pass
    def get_invariant(self, old, all_invariant_stati):
        """Retrieve the invariant 'magic cookie' we should store for this Job.
        The job (and it's descendands) will be invalidated if you return anything
        but @old. You may escape this and raise a NothingChanged(new_value) exception,
        then the new_value will be stored, but no invalidation will occur.
        (Example: FileChecksumInvariant jobs test the filetime first. Only if that differs,
        they check the checksum. If that stayed the same, raising NothingChanged((new_filentime, checksum))
        allows us to not check the file again next time
        Invariant return values are cached by this function, please overwrite
        _get_invariant(old) in subclasses.
        """
        # Single-entry memo keyed on @old; invalid if @old changed.
        if self.invariant_cache is None or self.invariant_cache[0] != old:
            self.invariant_cache = (old, self._get_invariant(old, all_invariant_stati))
        return self.invariant_cache[1]
    def _get_invariant(self, old, all_invariant_stati):
        """The actual workhorse/sub class specific function for get_invariant
        """
        return False
    def is_done(self, depth=0):
        # Cached wrapper around calc_is_done(); cache only active if do_cache is set.
        if not self.do_cache or self._is_done is None:
            # logger.info("recalc is_done %s" % self)
            self._is_done = self.calc_is_done(depth)
        # logger.info("called %s.is_done - result %s" % (self, self._is_done))
        return self._is_done
    def calc_is_done(self, depth=0):
        """Is this Job done ( ie. does it need to be in the execution order)"""
        return True
    def _reset_is_done_cache(self):
        # Invalidate the is_done memo for this job and everything downstream.
        self._is_done = None
        for child in self.dependants:
            child._reset_is_done_cache()
    def is_loadable(self):
        """Is this a job that's modifies the in memory data of our process?"""
        return False
    def load(self):
        """Actually modify the in memory data"""
        if not self.is_loadable():
            raise ValueError("Called load() on a job that was not loadable")
        raise ValueError(
            "Called load() on a j'ob that had is_loadable, but did not overwrite load() as it should"
        )
    def runs_in_slave(self):
        """Is this a job that runs in our slave, ie. in a spawned job"""
        return True
    def modifies_jobgraph(self):
        """Is this a job that can modify the jobgraph at runtime?
        """
        return False
    def invalidated(self, reason=""):
        """This job was invalidated - throw away any existing output for recalculation"""
        logger.info("%s invalidated called, reason: %s" % (self, reason))
        self.was_invalidated = True
        self.distribute_invalidation()
    def distribute_invalidation(self):
        """Depth first descend to pass invalidated into all Jobs that dependend on this one"""
        for dep in self.dependants:
            if not dep.was_invalidated:
                dep.invalidated(reason="preq invalidated %s" % self)
    def can_run_now(self):
        """Can this job run right now?
        """
        # logger.info("can_run_now %s" % self)
        for preq in self.prerequisites:
            # logger.info("checking preq %s" % preq)
            if preq.is_done():
                if preq.was_invalidated and not preq.was_run and not preq.is_loadable():
                    # was_run is necessary, a filegen job might have already created the file (and written a bit to it), but that does not mean that it's done enough to start the next one. Was_run means it has returned.
                    # On the other hand, it might have been a job that didn't need to run, then was_invalidated should be false.
                    # or it was a loadable job anyhow, then it doesn't matter.
                    # logger.info("case 1 - false %s" % preq)
                    return False  # false means no way
                else:  # pragma: no cover
                    # logger.info("case 2 - delay") #but we still need to try the other preqs if it was ok
                    pass
            else:
                # logger.info("case 3 - not done")
                return False
        # logger.info("case 4 - true")
        return True
    def list_blocks(self):  # pragma: no cover
        """A helper to list what blocked this job from running - debug function"""
        res = []
        for preq in self.prerequisites:
            if preq.is_done():
                if (
                    preq.was_invalidated and not preq.was_run and not preq.is_loadable()
                ):  # see can_run_now for why
                    res.append((str(preq), "not run"))
                else:
                    # logger.info("case 2 - delay") #but we still need to try the other preqs if it was ok
                    pass
            else:
                # logger.info("case 3 - not done")
                if preq.was_run:
                    if preq.was_cleaned_up:
                        res.append(
                            (str(preq), "not done - but was run! - after cleanup")
                        )
                    else:
                        res.append((str(preq), "not done - but was run! - no cleanup"))
                else:
                    res.append((str(preq), "not done"))
                break
        # return False
        return res
    def run(self):  # pragma: no cover
        """Do the actual work"""
        pass
    def check_prerequisites_for_cleanup(self):
        """If for one of our prerequisites, all dependands have run, we can
        call it's cleanup function (unload data, remove tempfile...)
        """
        for preq in self.prerequisites:
            logger.info("check_prerequisites_for_cleanup %s" % preq)
            all_done = True
            for dep in preq.dependants:
                logger.info(
                    "checking %s, failed %s, was_run: %s"
                    % (dep, dep.failed, dep.was_run)
                )
                if dep.failed or (not dep.was_run) or not preq.is_done():
                    all_done = False
                    break
            if all_done:
                logger.info("Calling %s cleanup" % preq)
                preq.cleanup()
                preq.was_cleaned_up = True
    def cleanup(self):
        """Cleanup after all your direct dependands have finished running"""
        pass
    def __eq__(self, other):
        """Jobs are only equal if they are the same object"""
        return other is self
    def __hash__(self):
        """We can simply hash on our job_id"""
        return hash(self.job_id)
    def __add__(self, other_job):
        """Creates JobLists from two jobs
        """
        def iter():
            yield self
            for job in other_job:
                yield job
        return JobList(iter())
    def __iter__(self):
        # A single Job iterates as a one-element sequence, so Jobs and
        # JobLists can be treated uniformly by depends_on().
        yield self
    def __str__(self):
        if hasattr(self, "callback"):
            return "%s (job_id=%s,id=%s\n Callback: %s:%s)" % (
                self.__class__.__name__,
                self.job_id,
                id(self),
                self.callback.__code__.co_filename,
                self.callback.__code__.co_firstlineno,
            )
        else:
            return "%s (job_id=%s,id=%s)" % (
                self.__class__.__name__,
                self.job_id,
                id(self),
            )
class _InvariantJob(Job):
    """Shared behaviour for every invariant job type.

    Invariants are leaves of the dependency graph: other jobs may depend
    on them, but they can never depend on anything themselves, and they
    are evaluated in the master process rather than on a slave.
    """
    def runs_in_slave(self):
        # Invariant checks are cheap; they run locally in the master.
        return False
    def depends_on(self, job_joblist_or_list_of_jobs):
        # Terminal nodes: adding prerequisites is a contract violation.
        raise ppg_exceptions.JobContractError("Invariants can't have dependencies")
def get_cython_filename_and_line_no(cython_func):
    """Extract (filename, line_no) for a cython function from its docstring.

    Requires the cython module to be compiled with -p (or
    #embed_pos_in_docstring=True), which embeds a
    'File: ... (starting at line N)' header in every docstring.

    Raises ValueError if no such header is present or the defining module
    cannot be located via sys.modules.
    """
    first_doc_line = cython_func.__doc__.split("\n")[0]
    if not first_doc_line.startswith("File:"):
        raise ValueError(
            "No file/line information in doc string. Make sure your cython is compiled with -p (or #embed_pos_in_docstring=True atop your pyx"
        )
    line_no = int(
        first_doc_line[
            first_doc_line.find("starting at line ")
            + len("starting at line ") : first_doc_line.find(")")
        ]
    )
    # find the right module - either by (suffix-)name, or as an attribute
    # of another module (one nesting level deep).
    module_name = cython_func.im_class.__module__
    found = False
    for name in sorted(sys.modules):
        if name == module_name or name.endswith("." + module_name):
            try:
                if (
                    getattr(sys.modules[name], cython_func.im_class.__name__)
                    == cython_func.im_class
                ):
                    found = sys.modules[name]
                    break
            except AttributeError:
                continue
        elif hasattr(sys.modules[name], module_name):
            sub_module = getattr(sys.modules[name], module_name)
            try:
                if (
                    getattr(sub_module, cython_func.im_class.__name__)
                    == cython_func.im_class
                ):
                    # Bugfix: this used to read ``sys.moduls[name].sub_module``
                    # (typo + wrong attribute), which raised AttributeError and
                    # was silently swallowed by the handler below, so this
                    # branch could never match. The matching module object is
                    # simply the submodule we just inspected.
                    found = sub_module
                    break
            except AttributeError:
                continue
    if not found:
        raise ValueError("Could not find module for %s" % cython_func)
    filename = found.__file__.replace(".so", ".pyx").replace(
        ".pyc", ".py"
    )  # pyc replacement is for mock testing
    return filename, line_no
def function_to_str(func):
    """Render a function as a short identifying string.

    Builtins render via str(); cython methods render as 'filename lineno'
    taken from their embedded docstring position; plain python functions
    render as 'code_filename first_lineno'; None renders as 'None 0'.
    """
    if str(func).startswith("<built-in function"):
        return "%s" % func
    looks_cython = hasattr(func, "im_func") and (
        "cyfunction" in repr(func.im_func)
        or ("<built-in function" in repr(func.im_func))
    )
    if looks_cython:
        return "%s %i" % get_cython_filename_and_line_no(func)
    if func:
        filename = func.__code__.co_filename
        lineno = func.__code__.co_firstlineno
    else:
        filename, lineno = "None", 0
    return "%s %i" % (filename, lineno)
class FunctionInvariant(_InvariantJob):
    """FunctionInvariant detects (bytecode) changes in a python function,
    currently via disassembly"""
    def __init__(self, job_id, function):
        if not hasattr(function, "__call__") and function is not None:
            raise ValueError("%s function was not a callable (or None)" % job_id)
        Job.__init__(self, job_id)
        # Guard against two invariants with the same id wrapping different code.
        if hasattr(self, "function") and function != self.function:
            raise ppg_exceptions.JobContractError(
                "FunctionInvariant %s created twice with different functions: \n%s\n%s"
                % (job_id, function_to_str(function), function_to_str(self.function))
            )
        self.function = function
    def __str__(self):
        if (
            hasattr(self, "function")
            and self.function
            and hasattr(self.function, "__code__")
        ):  # during creating, __str__ migth be called by a debug function before function is set...
            return "%s (job_id=%s,id=%s\n Function: %s:%s)" % (
                self.__class__.__name__,
                self.job_id,
                id(self),
                self.function.__code__.co_filename,
                self.function.__code__.co_firstlineno,
            )
        elif hasattr(self, "function") and str(self.function).startswith(
            "<built-in function"
        ):
            return "%s (job_id=%s,id=%s, Function: %s)" % (
                self.__class__.__name__,
                self.job_id,
                id(self),
                self.function,
            )
        else:
            return "%s (job_id=%s,id=%s, Function: None)" % (
                self.__class__.__name__,
                self.job_id,
                id(self),
            )
    def _get_invariant(self, old, all_invariant_stati):
        """Return a string fingerprint of the wrapped function: its
        disassembled bytecode (or cython source) plus the repr of any
        closure cell contents."""
        if self.function is None:
            return (
                None
            )  # since the 'default invariant' is False, this will still read 'invalidated the first time it's being used'
        if not hasattr(self.function, "__code__"):
            if str(self.function).startswith("<built-in function"):
                return str(self.function)
            elif hasattr(self.function, "im_func") and (
                "cyfunction" in repr(self.function.im_func)
                or repr(self.function.im_func).startswith("<built-in function")
            ):
                return self.get_cython_source(self.function)
            else:
                print(repr(self.function))
                print(repr(self.function.im_func))
                raise ValueError("Can't handle this object %s" % self.function)
        try:
            closure = self.function.func_closure
        except AttributeError:
            closure = self.function.__closure__
        key = (id(self.function.__code__), id(closure))
        if key not in util.func_hashes:
            if hasattr(self.function, "im_func") and "cyfunction" in repr(
                self.function.im_func
            ):
                invariant = self.get_cython_source(self.function)
            else:
                invariant = self.dis_code(self.function.__code__, self.function)
            if closure:
                for name, cell in zip(self.function.__code__.co_freevars, closure):
                    # we ignore references to self - in that use case you're expected to make your own ParameterInvariants, and we could not detect self.parameter anyhow (only self would be bound)
                    # we also ignore bound functions - their address changes all the time. IDEA: Make this recursive (might get to be too expensive)
                    try:
                        if (
                            name != "self"
                            and not hasattr(cell.cell_contents, "__code__")
                            and not isinstance(cell.cell_contents, module_type)
                        ):
                            # Sort dicts/sets so the repr is deterministic.
                            if isinstance(cell.cell_contents, dict):
                                x = repr(sorted(list(cell.cell_contents.items())))
                            elif isinstance(cell.cell_contents, set) or isinstance(
                                cell.cell_contents, frozenset
                            ):
                                x = repr(sorted(list(cell.cell_contents)))
                            else:
                                x = repr(cell.cell_contents)
                            if (
                                "at 0x" in x
                            ):  # if you don't have a sensible str(), we'll default to the class path. This takes things like <chipseq.quality_control.AlignedLaneQualityControl at 0x73246234>.
                                x = x[: x.find("at 0x")]
                            if "id=" in x:
                                print(x)
                                raise ValueError("Still an issue")
                            invariant += "\n" + x
                    except ValueError as e:
                        if str(e) == "Cell is empty":
                            pass
                        else:
                            raise
            # Bugfix: the store/lookup previously used a bare
            # id(self.function.__code__) key while the membership test above
            # used the (code, closure) tuple - the cache could therefore
            # never hit and closure identity was ignored. Use the same key
            # everywhere.
            util.func_hashes[key] = invariant
        return util.func_hashes[key]
    inner_code_object_re = re.compile(
        r"(<code\sobject\s<?[^>]+>?\sat\s0x[a-f0-9]+[^>]+)"
        + "|"
        + "(<code\tobject\t<[^>]+>,\tfile\t'[^']+',\tline\t[0-9]+)"  # that's the cpython way # that's how they look like in pypy. More sensibly, actually
    )
    def dis_code(self, code, function):
        """'dissassemble' python code.
        Strips lambdas (they change address every execution otherwise)"""
        # TODO: replace with bytecode based smarter variant
        out = io.StringIO()
        old_stdout = sys.stdout
        try:
            sys.stdout = out
            dis.dis(code)
        finally:
            sys.stdout = old_stdout
        discode = out.getvalue().split("\n")
        # now, eat of the line nos, if there are any
        res = []
        for row in discode:
            row = row.split()
            res.append("\t".join(row[1:]))
        res = "\n".join(res)
        res = self.inner_code_object_re.sub("lambda", res)
        if function and hasattr(function, "__qualname__"):
            res = res.replace(function.__qualname__, "<func name ommited>")
        # Recurse into nested code objects (lambdas, inner defs).
        for ii, constant in enumerate(code.co_consts):
            if hasattr(constant, "co_code"):
                res += "inner no %i" % ii
                res += self.dis_code(constant, None)
        return res
    def get_cython_source(self, cython_func):
        """Attemp to get the cython source for a function.
        Requires cython code to be compiled with -p or #embed_pos_in_docstring=True in the source file
        Unfortunatly, finding the right module (to get an absolute file path) is not straight forward,
        we inspect all modules in sys.module, and their children, but we might be missing sub-sublevel modules,
        in which case we'll need to increase search depth
        """
        # check there's actually the file and line no documentation
        filename, line_no = get_cython_filename_and_line_no(cython_func)
        # load the source code
        op = open(filename, "rb")
        d = op.read().decode("utf-8").split("\n")
        op.close()
        # extract the function at hand, minus doc string
        remaining_lines = d[line_no:]
        first_line = remaining_lines[0]
        first_line_indent = len(first_line) - len(first_line.lstrip())
        starts_with_double_quote = first_line.strip().startswith('"""')
        starts_with_single_quote = first_line.strip().startswith("'''")
        if starts_with_single_quote or starts_with_double_quote:  # there is a docstring
            text = "\n".join(remaining_lines).strip()
            text = text[3:]  # cut of initial ###
            if starts_with_single_quote:
                text = text[text.find("'''") + 3 :]
            else:
                text = text[text.find('"""') + 3 :]
            remaining_lines = text.split("\n")
        # The function body ends at the first line indented less than its
        # first line.
        last_line = len(remaining_lines)
        for ii, line in enumerate(remaining_lines):
            line_strip = line.strip()
            if line_strip:
                indent = len(line) - len(line_strip)
                if indent < first_line_indent:
                    last_line = ii
                    break
        return "\n".join(remaining_lines[:last_line]).strip()
class ParameterInvariant(_InvariantJob):
    """ParameterInvariants encapsulate smalling parameters, thresholds etc. that your work-jobs
    depend on. They prefix their job_id with 'PI' so given
    a = FileGeneratingJob("A")
    you can simply say
    a.depends_on(pypipegraph.ParameterInvariant('A', (my_threshold_value)))
    In the special case that you need to extend a parameter, but the (new) default is the old behaviour,
    so no recalc is necessary, you can pass @accept_as_unchanged_func
    accept_as_unchanged_func will be called with the invariant from the last run,
    and you need to return True if you want to accept it.
    """
    def __new__(cls, job_id, *parameters, **kwargs):
        # Prefix so parameter invariants never collide with file jobs of
        # the same name.
        return Job.__new__(cls, "PI" + str(job_id))
    def __init__(self, job_id, parameters, accept_as_unchanged_func=None):
        prefixed_id = "PI" + str(job_id)
        self.parameters = parameters
        self.accept_as_unchanged_func = accept_as_unchanged_func
        Job.__init__(self, prefixed_id)
    def _get_invariant(self, old, all_invariant_stati):
        # Give the user supplied escape hatch a chance to declare the old
        # invariant compatible - stores the new parameters without
        # invalidating downstream jobs.
        check = self.accept_as_unchanged_func
        if check is not None and check(old):
            logger.info("Nothing Changed for %s" % self)
            raise util.NothingChanged(self.parameters)
        return self.parameters
class _FileChecksumInvariant(_InvariantJob):
    """Invalidates when the (md5) checksum of a file changed.
    Checksum only get's recalculated if the file modification time changed.
    """
    def __init__(self, filename):
        Job.__init__(self, filename)
        # A job_id shorter than 3 chars is almost certainly a mistake.
        if len(self.job_id) < 3:
            raise ValueError(
                "This is probably not the filename you intend to use: {}".format(
                    filename
                )
            )
        self.input_file = self.job_id
    def _get_invariant(self, old, all_invariant_stati):
        # Invariant tuple layout: (mtime, size, md5-checksum).
        st = util.stat(self.input_file)
        filetime = st[stat.ST_MTIME]
        filesize = st[stat.ST_SIZE]
        try:
            # ``old == filetime`` handles stati written by the obsolete
            # FileTimeInvariant, which stored a bare mtime instead of a tuple.
            if not old or old == filetime or old[1] != filesize or old[0] != filetime:
                # print 'triggered checksum', self.input_file
                # print 'old', old
                # print 'new', filetime, filesize
                chksum = self.checksum()
                if old == filetime:  # we converted from a filetimeinvariant
                    # print ('nothingchanged', self.job_id)
                    raise util.NothingChanged((filetime, filesize, chksum))
                elif old and old[2] == chksum:
                    # mtime/size changed but content did not: record the new
                    # stat data without invalidating downstream jobs.
                    raise util.NothingChanged((filetime, filesize, chksum))
                else:
                    # print ('returning new', self.job_id)
                    return filetime, filesize, chksum
            else:
                return old
        except TypeError:  # could not parse old tuple... possibly was an FileTimeInvariant before...
            chksum = self.checksum()
            # print ('type error', self.job_id)
            return filetime, filesize, chksum
    def checksum(self):
        # Returns the file's md5, using a sidecar <file>.md5sum cache whose
        # mtime is stamped to match the input file's mtime.
        # NOTE(review): the cache-hit path returns bytes (op.read()) while
        # _calc_checksum presumably returns str -- confirm callers tolerate both.
        md5_file = self.input_file + ".md5sum"
        if os.path.exists(md5_file):
            st = util.stat(self.input_file)
            st_md5 = util.stat(md5_file)
            if st[stat.ST_MTIME] == st_md5[stat.ST_MTIME]:
                with open(md5_file, "rb") as op:
                    return op.read()
            else:
                # Cache is stale: recalculate, rewrite, and restamp its mtime
                # so the comparison above hits next time.
                checksum = self._calc_checksum()
                with open(md5_file, "wb") as op:
                    op.write(checksum.encode("utf-8"))
                os.utime(md5_file, (st[stat.ST_MTIME], st[stat.ST_MTIME]))
                return checksum
        else:
            return self._calc_checksum()
    def _calc_checksum(self):
        return checksum_file(self.job_id)
class RobustFileChecksumInvariant(_FileChecksumInvariant):
    """A file checksum invariant that is robust against file moves (but not against renames!"""
    def _get_invariant(self, old, all_invariant_stati):
        if (
            old
        ):  # if we have something stored, this acts like a normal FileChecksumInvariant
            return _FileChecksumInvariant._get_invariant(self, old, all_invariant_stati)
        else:
            # No stored state for this path - the file may have been moved.
            # Scan all known invariant stati for one with the same basename,
            # size and checksum; if found, adopt its state without
            # invalidating downstream jobs.
            basename = os.path.basename(self.input_file)
            st = util.stat(self.input_file)
            filetime = st[stat.ST_MTIME]
            filesize = st[stat.ST_SIZE]
            checksum = self.checksum()
            for job_id in all_invariant_stati:
                if os.path.basename(job_id) == basename:  # could be a moved file...
                    old = all_invariant_stati[job_id]
                    if isinstance(old, tuple):
                        # Old stati come in two shapes: (size, checksum) or
                        # (mtime, size, checksum).
                        if len(old) == 2:
                            old_filesize, old_chksum = old
                        else:
                            dummy_old_filetime, old_filesize, old_chksum = old
                        if old_filesize == filesize:
                            if (
                                old_chksum == checksum
                            ):  # don't check filetime, if the file has moved it will have changed
                                # print("checksum hit %s" % self.input_file)
                                raise util.NothingChanged(
                                    (filetime, filesize, checksum)
                                )
            # no suitable old job found.
            return (filetime, filesize, checksum)
# Backwards-compatible aliases: older pipelines used these class names;
# both now resolve to the move-robust checksum invariant.
FileChecksumInvariant = RobustFileChecksumInvariant
FileTimeInvariant = RobustFileChecksumInvariant
class MultiFileInvariant(Job):
    """A (robust) FileChecksumInvariant that depends
    on a list of files.
    Triggers when files are added or removed,
    or one of the files changes.
    """
    def __new__(cls, filenames, *args, **kwargs):
        if isinstance(filenames, str):
            raise ValueError(
                "Filenames must be a list (or at least an iterable), not a single string"
            )
        if not hasattr(filenames, "__iter__"):
            raise TypeError("filenames was not iterable")
        # job_id encodes the sorted file set, so the same set of files always
        # maps to the same singleton job.
        job_id = "_MFC_" + ":".join(sorted(str(x) for x in filenames))
        return Job.__new__(cls, job_id)
    def __getnewargs__(self):  # so that unpickling works
        return (self.filenames,)
    def __init__(self, filenames):
        sorted_filenames = list(sorted(str(x) for x in filenames))
        # NOTE(review): every element was just produced by str(), so this
        # isinstance check can never fail (dead code), and the message names
        # MultiFileGeneratingJob rather than MultiFileInvariant.
        for x in sorted_filenames:
            if not isinstance(x, six.string_types):
                raise ValueError(
                    "Not all filenames passed to MultiFileGeneratingJob were string objects"
                )
        job_id = "_MFC_" + ":".join(sorted_filenames)
        Job.__init__(self, job_id)
        self.filenames = sorted_filenames
        if not self.filenames:
            raise ValueError("filenames was empty!")
        for fn in self.filenames:
            if not os.path.exists(fn):
                raise ValueError("File did not exist: %s" % fn)
    def _get_invariant(self, old, all_invariant_stati):
        # Invariant value: list of (filename, mtime, size, checksum) tuples.
        if not old:
            old = self.find_matching_renamed(all_invariant_stati)
        checksums = self.calc_checksums(old)
        if old is False:
            # find_matching_renamed says: same file set under other names -
            # adopt the new checksums without invalidating.
            raise util.NothingChanged(checksums)
        elif old is None:
            # Subset/superset match - treat as changed.
            return checksums
        else:
            old_d = {x[0]: x[1:] for x in old}
            checksums_d = {x[0]: x[1:] for x in checksums}
            for fn in self.filenames:
                if old_d[fn][2] != checksums_d[fn][2]:  # checksum mismatch!
                    return checksums
            raise util.NothingChanged(checksums)
    def find_matching_renamed(self, all_invariant_stati):
        # Look for a previously stored MultiFileInvariant whose basenames
        # match ours exactly (files moved to a new directory). Returns the
        # adapted old status, None for a subset/superset match, or False for
        # no usable match.
        def to_basenames(job_id):
            fp = job_id[len("_MFC_") :].split(":")
            return [os.path.basename(f) for f in fp]
        def to_by_filename(job_id):
            fp = job_id[len("_MFC_") :].split(":")
            return {os.path.basename(f): f for f in fp}
        my_basenames = to_basenames(self.job_id)
        if len(my_basenames) != len(
            set(my_basenames)
        ):  # can't mach if the file names are not distinct.
            return False
        for job_id in all_invariant_stati:
            if job_id.startswith("_MFC_"):
                their_basenames = to_basenames(job_id)
                if my_basenames == their_basenames:
                    # Same basenames: rewrite the stored tuples to point at
                    # our (possibly moved) full paths.
                    mine_by_filename = to_by_filename(self.job_id)
                    old = all_invariant_stati[job_id]
                    new = []
                    if old is not False:
                        for tup in old:
                            fn = tup[0]
                            new_fn = mine_by_filename[os.path.basename(fn)]
                            new_tup = (new_fn,) + tup[1:]
                            new.append(new_tup)
                    return new
        # ok, no perfect match - how about a subset?
        for job_id in all_invariant_stati:
            if job_id.startswith("_MFC_"):
                their_basenames = to_basenames(job_id)
                if (
                    len(set(their_basenames).difference(my_basenames)) == 0
                ) and their_basenames:
                    # less filenames, but otherwise same set...
                    return None
                elif len(set(my_basenames).difference(their_basenames)) == 0:
                    return None
        return False
    def calc_checksums(self, old):
        """return a list of tuples
        (filename, filetime, filesize, checksum)"""
        result = []
        if old:
            old_d = {x[0]: x[1:] for x in old}
        else:
            old_d = {}
        for fn in self.filenames:
            st = os.stat(fn)
            filetime = st[stat.ST_MTIME]
            filesize = st[stat.ST_SIZE]
            if (
                fn in old_d
                and (old_d[fn][0] == filetime)
                and (old_d[fn][1] == filesize)
            ):  # we can reuse the checksum
                result.append((fn, filetime, filesize, old_d[fn][2]))
            else:
                result.append((fn, filetime, filesize, checksum_file(fn)))
        return result
class FileGeneratingJob(Job):
    """Create a single output file of more than 0 bytes."""
    def __init__(
        self, output_filename, function, rename_broken=False, empty_file_allowed=False
    ):
        """If @rename_broken is set, any eventual outputfile that exists
        when the job crashes will be renamed to output_filename + '.broken'
        (overwriting whatever was there before)
        """
        if isinstance(output_filename, pathlib.Path):
            output_filename = str(output_filename)
        if not hasattr(function, "__call__"):
            raise ValueError("function was not a callable")
        if (
            output_filename in util.filename_collider_check
            and util.filename_collider_check[output_filename] is not self
        ):
            # Bugfix: the format string used to be "%s %s%" - the trailing
            # '%' made raising this error crash with
            # "ValueError: incomplete format" instead of the intended message.
            raise ValueError(
                "Two jobs generating the same file: %s %s"
                % (self, util.filename_collider_check[output_filename])
            )
        else:
            util.filename_collider_check[output_filename] = self
        self.empty_file_allowed = empty_file_allowed
        self.filenames = [
            self.job_id
        ]  # so the downstream can treat this one and MultiFileGeneratingJob identically
        Job.__init__(self, output_filename)
        self.callback = function
        self.rename_broken = rename_broken
        self.do_ignore_code_changes = False
        self._is_done_cache = None
        self._was_run = None
        # the motivation for this chaching is that we do a lot of stat calls. Tens of thousands - and the answer can basically only change
        # when you either run or invalidate the job. This apperantly cuts down about 9/10 of all stat calls
    def get_was_run(self):
        return self._was_run
    def set_was_run(self, value):
        # Changing was_run may change is_done - drop the stat cache.
        self._was_run = value
        self._is_done_cache = None
    was_run = property(get_was_run, set_was_run)
    def ignore_code_changes(self):
        self.do_ignore_code_changes = True
    def inject_auto_invariants(self):
        # Auto-track changes of the callback's bytecode unless the user
        # explicitly opted out via ignore_code_changes().
        if not self.do_ignore_code_changes:
            # logger.info("Injecting outa invariants %s" % self)
            self.depends_on(FunctionInvariant(self.job_id + "_func", self.callback))
        else:
            pass
            # logger.info("not Injecting outa invariants %s" % self)
    def calc_is_done(self, depth=0):
        # Done == output file exists (and is non-empty unless
        # empty_file_allowed); cached, see __init__.
        if self._is_done_cache is None:
            if self.empty_file_allowed:
                self._is_done_cache = util.file_exists(self.job_id)
            else:
                self._is_done_cache = util.output_file_exists(self.job_id)
        return self._is_done_cache
    def invalidated(self, reason=""):
        # Throw away the (possibly stale) output before rerunning.
        try:
            logger.info("unlinking %s" % self.job_id)
            os.unlink(self.job_id)
            self._is_done_cache = False
        except OSError:
            pass
        Job.invalidated(self, reason)
    def run(self):
        try:
            try:
                self.callback()
            except TypeError as e:
                # Support callbacks that take the output filename as their
                # single argument (detected via the arity error message).
                if "takes exactly 1 argument (0 given)" in str(
                    e
                ) or " missing 1 required positional argument:" in str(  # python2
                    e
                ):  # python3
                    self.callback(self.job_id)
                else:
                    raise
        except Exception:
            # On failure, keep (renamed) or remove the partial output so a
            # half-written file never counts as done, then re-raise.
            exc_info = sys.exc_info()
            tb = traceback.format_exc()
            sys.stderr.write(tb)
            try:
                if self.rename_broken:
                    shutil.move(self.job_id, self.job_id + ".broken")
                else:
                    logger.info("unlinking %s" % self.job_id)
                    os.unlink(self.job_id)
            except (OSError, IOError):
                pass
            util.reraise(exc_info[1], None, exc_info[2])
        # Enforce the job contract: the callback must have produced the file.
        if self.empty_file_allowed:
            filecheck = util.file_exists
        else:
            filecheck = util.output_file_exists
        if not filecheck(self.job_id):
            raise ppg_exceptions.JobContractError(
                "%s did not create its file %s %s"
                % (
                    self,
                    self.callback.__code__.co_filename,
                    self.callback.__code__.co_firstlineno,
                )
            )
class MultiFileGeneratingJob(FileGeneratingJob):
    """Create multiple files - recreate all of them if at least one is missing.
    """
    def __new__(cls, filenames, *args, **kwargs):
        if isinstance(filenames, str):
            raise ValueError(
                "Filenames must be a list (or at least an iterable), not a single string"
            )
        if not hasattr(filenames, "__iter__"):
            raise TypeError("filenames was not iterable")
        for x in filenames:
            if not (isinstance(x, six.string_types) or isinstance(x, pathlib.Path)):
                raise ValueError("filenames must be a list of strings or pathlib.Path")
        # job_id is the sorted file set so identical sets singletonize.
        job_id = ":".join(sorted(str(x) for x in filenames))
        return Job.__new__(cls, job_id)
    def __getnewargs__(self):  # so that unpickling works
        return (self.filenames,)
    def __init__(self, filenames, function, rename_broken=False, empty_files_ok=False):
        """If @rename_broken is set, any eventual outputfile that exists
        when the job crashes will be renamed to output_filename + '.broken'
        (overwriting whatever was there before)
        """
        if not hasattr(function, "__call__"):
            raise ValueError("function was not a callable")
        sorted_filenames = list(
            sorted(str(x) for x in filenames)
        )  # type checking in __new__
        for x in sorted_filenames:
            if (
                x in util.filename_collider_check
                and util.filename_collider_check[x] is not self
            ):
                raise ValueError(
                    "Two jobs generating the same file: %s %s - %s"
                    % (self, util.filename_collider_check[x], x)
                )
            else:
                util.filename_collider_check[x] = self
        job_id = ":".join(sorted_filenames)
        Job.__init__(self, job_id)
        # NOTE(review): unlike the job_id (sorted) and MultiFileInvariant
        # (which stores the sorted list), this keeps the caller's original
        # order/objects - confirm downstream code does not rely on sorting.
        self.filenames = filenames
        self.callback = function
        self.rename_broken = rename_broken
        self.do_ignore_code_changes = False
        self.empty_files_ok = empty_files_ok
    def calc_is_done(self, depth=0):
        # Done only if *every* output file exists (non-empty unless
        # empty_files_ok).
        for fn in self.filenames:
            if self.empty_files_ok:
                if not util.file_exists(fn):
                    return False
            else:
                if not util.output_file_exists(fn):
                    return False
        return True
    def invalidated(self, reason=""):
        for fn in self.filenames:
            try:
                # Bugfix: this used to log self.job_id (the concatenated id)
                # for every iteration; log the file actually being unlinked,
                # matching run() below.
                logger.info("unlinking %s" % fn)
                os.unlink(fn)
            except OSError:
                pass
        Job.invalidated(self, reason)
    def run(self):
        try:
            self.callback()
        except Exception:
            # On failure, keep (renamed) or remove partial outputs so they
            # never count as done, then re-raise the original exception.
            exc_info = sys.exc_info()
            if self.rename_broken:
                for fn in self.filenames:
                    try:
                        shutil.move(fn, fn + ".broken")
                    except IOError:
                        pass
            else:
                for fn in self.filenames:
                    try:
                        logger.info("unlinking %s" % fn)
                        os.unlink(fn)
                    except OSError:
                        pass
            util.reraise(exc_info[1], None, exc_info[2])
        self._is_done = None
        # Enforce the job contract: every declared file must now exist.
        missing_files = []
        if self.empty_files_ok:
            filecheck = util.file_exists
        else:
            filecheck = util.output_file_exists
        for f in self.filenames:
            if not filecheck(f):
                missing_files.append(f)
        if missing_files:
            raise ppg_exceptions.JobContractError(
                "%s did not create all of its files.\nMissing were:\n %s"
                % (self.job_id, "\n".join(missing_files))
            )
    def runs_in_slave(self):
        return True
class TempFileGeneratingJob(FileGeneratingJob):
    """Create a temporary file that is removed once all direct dependants
    have been executed successfully."""

    def __init__(self, output_filename, function, rename_broken=False):
        FileGeneratingJob.__init__(self, output_filename, function, rename_broken)
        self.is_temp_job = True

    def cleanup(self):
        """Remove the temp file - called once every direct dependant has run."""
        logger.info("%s cleanup" % self)
        try:
            # any '.broken' renaming already happened in FileGeneratingJob.run()
            logger.info("unlinking %s" % self.job_id)
            os.unlink(self.job_id)
        except (OSError, IOError):
            pass

    def runs_in_slave(self):
        return True

    def calc_is_done(self, depth=0):
        """Done if the file is on disk, or if no dependant still needs it."""
        logger.info("calc is done %s" % self)
        # deliberately not util.output_file_exists - its stat has a race
        # condition and reports size 0 on recently closed files
        if os.path.exists(self.job_id):
            logger.info("calc is done %s - file existed" % self)
            return True
        return all(
            dependant.is_done() or dependant.is_loadable()
            for dependant in self.dependants
        )
class MultiTempFileGeneratingJob(FileGeneratingJob):
    """Create multiple temporary files that are removed once all direct
    dependants have been executed successfully - recreate all of them if
    at least one is missing."""

    def __new__(cls, filenames, *args, **kwargs):
        # A plain string would iterate character by character - reject it early.
        if isinstance(filenames, str):
            raise ValueError(
                "Filenames must be a list (or at least an iterable), not a single string"
            )
        if not hasattr(filenames, "__iter__"):
            raise TypeError("filenames was not iterable")
        for x in filenames:
            if not (isinstance(x, six.string_types) or isinstance(x, pathlib.Path)):
                raise ValueError("filenames must be a list of strings or pathlib.Path")
        job_id = ":".join(sorted(str(x) for x in filenames))
        return Job.__new__(cls, job_id)

    def __getnewargs__(self):  # so that unpickling works
        return (self.filenames,)

    def __init__(self, filenames, function, rename_broken=False):
        """If @rename_broken is set, any eventual outputfile that exists
        when the job crashes will be renamed to output_filename + '.broken'
        (overwriting whatever was there before)
        """
        self.is_temp_job = True
        if not hasattr(function, "__call__"):
            raise ValueError("function was not a callable")
        sorted_filenames = list(sorted(str(x) for x in filenames))
        # Make sure no other job claims to generate any of these files
        # (type checking happens in __new__).
        for x in sorted_filenames:
            if (
                x in util.filename_collider_check
                and util.filename_collider_check[x] is not self
            ):
                raise ValueError(
                    "Two jobs generating the same file: %s %s - %s"
                    % (self, util.filename_collider_check[x], x)
                )
            else:
                util.filename_collider_check[x] = self
        job_id = ":".join(sorted_filenames)
        Job.__init__(self, job_id)
        self.filenames = filenames
        self.callback = function
        self.rename_broken = rename_broken
        self.do_ignore_code_changes = False

    def cleanup(self):
        """Remove the temp files - called once every direct dependant has run."""
        logger.info("%s cleanup" % self)
        try:
            # any '.broken' renaming already happened in FileGeneratingJob.run()
            for fn in self.filenames:
                logger.info("unlinking (cleanup) %s" % fn)
                os.unlink(fn)
        except (OSError, IOError):
            pass

    def invalidated(self, reason=""):
        """Remove all output files, then propagate the invalidation."""
        for fn in self.filenames:
            try:
                # Fixed: log the file actually being removed (previously this
                # logged the whole job_id once per file).
                logger.info("unlinking (invalidated) %s" % fn)
                os.unlink(fn)
            except OSError:
                pass
        Job.invalidated(self, reason)

    def run(self):
        """Run the callback; on failure rename (or remove) partial output and
        re-raise; on success verify that every promised file was created."""
        try:
            self.callback()
        except Exception:
            exc_info = sys.exc_info()
            if self.rename_broken:
                # Keep the partial output around for debugging.
                for fn in self.filenames:
                    try:
                        shutil.move(fn, fn + ".broken")
                    except IOError:
                        pass
            else:
                for fn in self.filenames:
                    try:
                        logger.info("unlinking %s" % fn)
                        os.unlink(fn)
                    except OSError:
                        pass
            util.reraise(exc_info[1], None, exc_info[2])
        self._is_done = None  # force recalculation of the done-state
        missing_files = []
        for f in self.filenames:
            if not util.output_file_exists(f):
                missing_files.append(f)
        if missing_files:
            raise ppg_exceptions.JobContractError(
                "%s did not create all of its files.\nMissing were:\n %s"
                % (self.job_id, "\n".join(missing_files))
            )

    def runs_in_slave(self):
        # File generation happens on the worker process.
        return True

    def calc_is_done(self, depth=0):
        """Done if every file is on disk, or if no dependant still needs them."""
        logger.info("calc is done %s" % self)
        # explicitly not using util.output_file_exists, since there the stat
        # has a race condition - reports 0 on recently closed files
        all_files_exist = all(os.path.exists(fn) for fn in self.filenames)
        if all_files_exist:
            logger.info("calc is done %s - file existed" % self)
            return True
        else:
            for dep in self.dependants:
                if (not dep.is_done()) and (not dep.is_loadable()):
                    return False
            return True
class TempFilePlusGeneratingJob(FileGeneratingJob):
    """Create a temporary file that is removed once all direct dependands have
    been executed sucessfully,
    but keep a log file (and rerun if the log file is not there)
    """

    def __init__(self, output_filename, log_filename, function, rename_broken=False):
        if output_filename == log_filename:
            raise ValueError("output_filename and log_filename must be different")
        # Fixed: rename_broken used to be accepted but silently dropped -
        # forward it to FileGeneratingJob like TempFileGeneratingJob does.
        FileGeneratingJob.__init__(self, output_filename, function, rename_broken)
        self.output_filename = output_filename
        self.log_file = log_filename
        self.is_temp_job = True

    def cleanup(self):
        """Remove the temp output file (the log file is deliberately kept)."""
        logger.info("%s cleanup" % self)
        try:
            # any '.broken' renaming already happened in FileGeneratingJob.run()
            logger.info("unlinking %s" % self.job_id)
            os.unlink(self.job_id)
        except (OSError, IOError):
            pass

    def runs_in_slave(self):
        return True

    def calc_is_done(self, depth=0):
        """Not done without the log file; otherwise done if the output file
        exists or no dependant still needs it."""
        if not os.path.exists(self.log_file):
            # NOTE(review): returns None (falsy) rather than False here -
            # possibly intentional as an 'unknown' sentinel for caching;
            # kept as-is.
            return None
        if os.path.exists(
            self.job_id
        ):  # explicitly not using util.output_file_exists, since there the stat has a race condition - reports 0 on recently closed files
            return True
        else:
            for dep in self.dependants:
                if (not dep.is_done()) and (not dep.is_loadable()):
                    return False
            return True

    def run(self):
        """Run the callback, then verify both output and log file were written."""
        try:
            self.callback()
        except Exception:
            exc_info = sys.exc_info()
            try:
                logger.info("unlinking %s" % self.output_filename)
                os.unlink(self.output_filename)
            except OSError:
                pass
            util.reraise(exc_info[1], None, exc_info[2])
        self._is_done = None  # force recalculation of the done-state
        if not os.path.exists(self.output_filename):
            raise ppg_exceptions.JobContractError(
                "%s did not create it's output file" % self.job_id
            )
        if not os.path.exists(self.log_file):
            raise ppg_exceptions.JobContractError(
                "%s did not create it's log file" % self.job_id
            )
class DataLoadingJob(Job):
    """Modify the current (system local) master process with a callback function.
    No cleanup is performed - use AttributeLoadingJob if you want your data to be unloaded"""

    def __init__(self, job_id, callback):
        if not hasattr(callback, "__call__"):
            raise ValueError("callback was not a callable")
        Job.__init__(self, job_id)
        self.callback = callback
        self.do_ignore_code_changes = False

    def ignore_code_changes(self):
        # opt out of the automatic FunctionInvariant on the callback
        self.do_ignore_code_changes = True

    def inject_auto_invariants(self):
        if not self.do_ignore_code_changes:
            self.depends_on(FunctionInvariant(self.job_id + "_func", self.callback))

    def is_loadable(self):
        return True

    def load(self):
        """Run the callback once per session, loading loadable prerequisites first."""
        if self.was_loaded:
            logger.info("%s.load (repeat)" % self)
            return
        logger.info("%s.load" % self)
        for prerequisite in self.prerequisites:
            if prerequisite.is_loadable():
                prerequisite.load()
        start = time.time()
        if hasattr(self, "profile"):
            # setting a .profile attribute on the job turns on cProfile for the load
            import cProfile

            cProfile.runctx(
                "self.callback()", globals(), locals(), "%s.prof" % id(self)
            )
        else:
            self.callback()
        end = time.time()
        logger.info("Loading time for %s - %.3f" % (self.job_id, end - start))
        self.was_loaded = True

    def calc_is_done(self, depth=0):
        # delegate to prerequisites - passthrough of 'not yet done'
        return all(
            prerequisite.is_done(depth=depth + 1)
            for prerequisite in self.prerequisites
        )
class AttributeLoadingJob(DataLoadingJob):
    """Modify the current master process by loading the return value of a callback
    into an object attribute.
    On cleanup, the attribute is deleted via del
    """

    def __init__(self, job_id, object, attribute_name, callback):
        # NOTE(review): __init__ may run again on an already-configured
        # instance (jobs appear to be singletons per job_id, presumably via
        # Job.__new__ - confirm elsewhere in this file); hence the
        # hasattr(self, "object") guard below.
        if not hasattr(callback, "__call__"):
            raise ValueError("callback was not a callable")
        if not isinstance(attribute_name, str):
            raise ValueError("attribute_name was not a string")
        if not hasattr(self, "object"):
            # first initialisation of this job object: remember the target
            self.object = object
            self.attribute_name = attribute_name
        else:
            # re-initialisation: the target must not change
            if self.object is not object:
                raise ppg_exceptions.JobContractError(
                    "Creating AttributeLoadingJob twice with different target objects"
                )
            if not self.attribute_name == attribute_name:
                raise ppg_exceptions.JobContractError(
                    "Creating AttributeLoadingJob twice with different target attributes"
                )
        if not hasattr(callback, "__call__"):
            raise ValueError(
                "Callback for %s was not callable (missed __call__ attribute)" % job_id
            )
        DataLoadingJob.__init__(self, job_id, callback)

    def ignore_code_changes(self):
        # opt out of the automatic FunctionInvariant on the callback
        self.do_ignore_code_changes = True

    def inject_auto_invariants(self):
        if not self.do_ignore_code_changes:
            self.depends_on(FunctionInvariant(self.job_id + "_func", self.callback))

    def load(self):
        """Load loadable prerequisites, then set the target attribute to the
        callback's return value (runs once per session)."""
        if self.was_loaded:
            return
        for preq in self.prerequisites:  # load whatever is necessary...
            if preq.is_loadable():
                preq.load()
        setattr(self.object, self.attribute_name, self.callback())
        self.was_loaded = True

    def is_loadable(self):
        return True

    def calc_is_done(
        self, depth=0
    ):  # delegate to preqs... passthrough of 'not yet done'
        for preq in self.prerequisites:
            if not preq.is_done():
                return False
        return True

    def cleanup(self):
        """Delete the loaded attribute from the target object."""
        logger.info("Cleanup on %s" % self.attribute_name)
        try:
            delattr(self.object, self.attribute_name)
        except AttributeError:  # this can happen if you have a messed up DependencyInjectionJob, but it would block the messed up reporting...
            pass

    def __str__(self):
        return "AttributeLoadingJob (job_id=%s,id=%i,target=%i)" % (
            self.job_id,
            id(self),
            id(self.object),
        )
class _GraphModifyingJob(Job):
    """Baseclass for jobs that modify the pipegraph during runtime"""

    def modifies_jobgraph(self):
        # Marker queried by the pipegraph to give these jobs special handling.
        return True

    def calc_is_done(self, depth=0):
        # A graph modifying job is 'done' once it has run in this session.
        return self.was_run
class DependencyInjectionJob(_GraphModifyingJob):
    """Inject additional dependencies into a Job (B) that depends on the DependencyInjectionJob (A).
    B can not run before A, and once A has run, B has additional dependencies.
    For example if you have an aggregation job, but the generating jobs are not known until you have
    queried a webservice. Then your aggregation job would be B, and A would create all the generating
    jobs.
    The callback should report back the jobs it has created - B will automagically depend on those
    after A has run (you don't need to do this yourself).
    The DependencyInjectionJob does it's very best to check wheter you're doing something stupid and
    will raise JobContractErrors if you do.
    """

    def __init__(self, job_id, callback, check_for_dependency_injections=True):
        """@check_for_dependency_injections - by default, we check whether you injected correctly,
        but some of these checks are costly so you might wish to optimize by setting check_for_dependency_injections=False, but injecting into already run jobs and so on might create invisible (non exception raising) bugs.
        """
        if not hasattr(callback, "__call__"):
            raise ValueError("callback was not a callable")
        Job.__init__(self, job_id)
        self.callback = callback
        self.do_ignore_code_changes = False
        self.always_runs = True
        self.check_for_dependency_injections = check_for_dependency_injections

    def ignore_code_changes(self):
        # no automatic FunctionInvariant for graph modifying jobs
        pass

    def inject_auto_invariants(self):
        pass

    def run(self):
        """Run the callback (which creates new jobs), wire the created jobs
        into the dependants of this job, then verify the injection contract.
        Returns the dict of newly created jobs."""
        # this is different form JobGeneratingJob.run in it's checking of the contract
        util.global_pipegraph.new_jobs = {}
        logger.info(
            "DependencyInjectionJob.dependants = %s %s"
            % (", ".join(str(x) for x in self.dependants), id(self.dependants))
        )
        reported_jobs = self.callback()
        logger.info(
            "DependencyInjectionJob.dependants after callback = %s %s"
            % (", ".join(str(x) for x in self.dependants), id(self.dependants))
        )
        logger.info(
            "new_jobs count: %i, id %s"
            % (len(util.global_pipegraph.new_jobs), id(util.global_pipegraph.new_jobs))
        )
        for new_job in list(util.global_pipegraph.new_jobs.values()):
            new_job.inject_auto_invariants()
        if reported_jobs:
            # jobs the callback reported back: our dependants gain them as deps
            for new_job in reported_jobs:
                for my_dependand in self.dependants:
                    my_dependand.depends_on(new_job)
        # we now need to fill new_jobs.dependants
        # these implementations are much better than the old for loop based ones
        # but still could use some improvements
        # but at least for the first one, I don't see how to remove the remaining loops.
        logger.info("Now checking first step for dependency injection violations")
        new_job_set = set(util.global_pipegraph.new_jobs.keys())
        if True:  # kept from an earlier refactoring; always taken
            for job in util.global_pipegraph.jobs.values():
                for nw_jobid in new_job_set.intersection(
                    [x.job_id for x in job.prerequisites]
                ):
                    # only our own dependants may have gained new prerequisites
                    nw = util.global_pipegraph.new_jobs[nw_jobid]
                    if job not in self.dependants:
                        raise ppg_exceptions.JobContractError(
                            "DependencyInjectionJob %s tried to inject %s into %s, but %s was not dependand on the DependencyInjectionJob. It was dependand on %s though"
                            % (self, nw, job, job, nw.prerequisites)
                        )
                    nw.dependants.add(job)
        # I need to check: All new jobs are now prereqs of my dependands
        # I also need to check that none of the jobs that ain't dependand on me have been injected
        if not self.check_for_dependency_injections:
            logger.info("Skipping check for dependency injection violations")
        else:
            logger.info("Checking for dependency injection violations")
            for job in util.global_pipegraph.jobs.values():
                if job in self.dependants:
                    for new_job in util.global_pipegraph.new_jobs.values():
                        if not job.is_in_dependency_chain(
                            new_job, 5
                        ):  # 1 for the job, 2 for auto dependencies, 3 for load jobs, 4 for the dependencies of load jobs... 5 seems to work in pratice.
                            raise ppg_exceptions.JobContractError(
                                "DependencyInjectionJob %s created a job %s that was not added to the prerequisites of %s"
                                % (self.job_id, new_job.job_id, job.job_id)
                            )
                else:
                    preq_intersection = set(job.prerequisites).intersection(new_job_set)
                    if preq_intersection:
                        raise ppg_exceptions.JobContractError(
                            "DependencyInjectionJob %s created a job %s that was added to the prerequisites of %s, but was not dependant on the DependencyInjectionJob"
                            % (self.job_id, preq_intersection, job.job_id)
                        )
                    # NOTE(review): this recomputes the *prerequisites*
                    # intersection although the error message talks about
                    # dependants - looks like a copy-paste slip (presumably
                    # should be set(job.dependants)); confirm before changing.
                    dep_intersection = set(job.prerequisites).intersection(new_job_set)
                    if dep_intersection:
                        raise ppg_exceptions.JobContractError(
                            "DependencyInjectionJob %s created a job %s that was added to the dependants of %s, but was not dependant on the DependencyInjectionJob"
                            % (self.job_id, dep_intersection, job.job_id)
                        )
        res = util.global_pipegraph.new_jobs
        logger.info("returning %i new jobs" % len(res))
        logger.info("%s" % ",".join(res.keys()))
        util.global_pipegraph.tranfer_new_jobs()  # (sic) - project API name
        util.global_pipegraph.new_jobs = False
        return res
class JobGeneratingJob(_GraphModifyingJob):
    """A Job generating new jobs. The new jobs must be leaves in the sense that no job that existed
    before may depend on them. If that's what you want, see L{DependencyInjectionJob}.
    """

    def __init__(self, job_id, callback):
        if not hasattr(callback, "__call__"):
            raise ValueError("callback was not a callable")
        Job.__init__(self, job_id)
        self.callback = callback
        self.do_ignore_code_changes = False
        self.always_runs = True

    def ignore_code_changes(self):
        pass

    def inject_auto_invariants(self):
        pass

    def run(self):
        """Collect the jobs the callback creates, verify they are leaves,
        then hand them over to the pipegraph. Returns the new jobs dict."""
        graph = util.global_pipegraph
        logger.info("Storing new jobs in %s" % id(graph))
        graph.new_jobs = {}
        self.callback()
        for created in list(graph.new_jobs.values()):
            created.inject_auto_invariants()
        # No pre-existing job may depend on a freshly created one - that
        # would require a DependencyInjectionJob instead.
        created_set = set(graph.new_jobs.values())
        for existing in graph.jobs.values():
            if created_set.intersection(existing.prerequisites):
                raise ppg_exceptions.JobContractError(
                    "JobGeneratingJob %s created a job that was added to the prerequisites of %s, which is invalid. Use a DependencyInjectionJob instead, this one might only create 'leave' nodes"
                    % (self.job_id, existing.job_id)
                )
        res = graph.new_jobs
        graph.tranfer_new_jobs()
        graph.new_jobs = False
        logger.info("Returning from %s" % self)
        return res
class FinalJob(Job):
    """A final job runs after all other (non final) jobs have run.
    Use these sparringly - they really only make sense for things where you really want to hook
    'after the pipeline has run', everything else realy is better of if you depend on the appropriate job
    FinalJobs are also run on each run - but only if no other job died.
    """

    def __init__(self, jobid, callback):
        Job.__init__(self, jobid)
        self.callback = callback
        self.do_ignore_code_changes = False
        self.is_final_job = True
        self.always_runs = True

    def calc_is_done(self, depth=0):
        # done once it has run in this session
        return self.was_run

    def depends_on(self, *args):
        # explicit dependencies make no sense for a job that runs last anyway
        raise ppg_exceptions.JobContractError(
            "Final jobs can not have explicit dependencies - they run in random order after all other jobs"
        )

    def ignore_code_changes(self):
        pass

    def inject_auto_invariants(self):
        pass

    def run(self):
        self.callback()
class PlotJob(FileGeneratingJob):
    """Calculate some data for plotting, cache it in cache/output_filename, and plot from there.
    creates two jobs, a plot_job (this one) and a cache_job (FileGeneratingJob, in self.cache_job),
    To use these jobs, you need to have pyggplot available.
    """

    def __init__(  # noqa:C901
        self,
        output_filename,
        calc_function,
        plot_function,
        render_args=None,
        skip_table=False,
        skip_caching=False,
    ):
        """@calc_function returns a pandas DataFrame (or dict of DataFrames),
        @plot_function turns that data into a pyggplot plot.
        @render_args are passed through to plot.render().
        @skip_table - do not dump the data to a .tsv/.xls next to the plot.
        @skip_caching - recalculate on every load instead of pickling into cache/.
        """
        if not isinstance(output_filename, six.string_types):
            raise ValueError("output_filename was not a string type")
        if not (
            output_filename.endswith(".png")
            or output_filename.endswith(".pdf")
            or output_filename.endswith(".svg")
        ):
            raise ValueError(
                "Don't know how to create this file %s, must end on .png or .pdf or .svg"
                % output_filename
            )
        self.output_filename = output_filename
        self.table_filename = self.output_filename + ".tsv"
        self.calc_function = calc_function
        self.plot_function = plot_function
        self.skip_caching = skip_caching
        if render_args is None:
            render_args = {}
        self.render_args = render_args
        self._fiddle = None

        import pandas as pd
        import pyggplot

        if not self.skip_caching:
            self.cache_filename = os.path.join("cache", output_filename)

        def run_calc():
            # calculate, type-check and pickle the data for later plotting
            df = calc_function()
            if not isinstance(df, pd.DataFrame):
                do_raise = True
                if isinstance(df, dict):  # might be a dict of DataFrames...
                    do_raise = False
                    for x in df.values():
                        if not isinstance(x, pd.DataFrame):
                            do_raise = True
                            break
                if do_raise:
                    raise ppg_exceptions.JobContractError(
                        "%s.calc_function did not return a DataFrame (or dict of such), was %s "
                        % (output_filename, str(df.__class__))
                    )
            try:
                os.makedirs(os.path.dirname(self.cache_filename))
            except OSError:
                pass
            with open(self.cache_filename, "wb") as of:
                pickle.dump(df, of, pickle.HIGHEST_PROTOCOL)

        def run_plot():
            # load (or recalculate) the data and render the plot
            df = self.get_data()
            plot = plot_function(df)
            if not isinstance(plot, pyggplot._PlotBase):
                raise ppg_exceptions.JobContractError(
                    "%s.plot_function did not return a pyggplot.Plot "
                    % (output_filename)
                )
            # let the plot supply default dimensions unless the caller set them
            if "width" not in render_args and hasattr(plot, "width"):
                render_args["width"] = plot.width
            if "height" not in render_args and hasattr(plot, "height"):
                render_args["height"] = plot.height
            if self._fiddle:
                self._fiddle(plot)
            plot.render(output_filename, **render_args)

        FileGeneratingJob.__init__(self, output_filename, run_plot)
        Job.depends_on(
            self, ParameterInvariant(self.output_filename + "_params", render_args)
        )
        if not self.skip_caching:
            cache_job = FileGeneratingJob(self.cache_filename, run_calc)
            Job.depends_on(self, cache_job)
            self.cache_job = cache_job
        if not skip_table:

            def dump_table():
                import pandas as pd

                df = self.get_data()
                if isinstance(df, pd.DataFrame):
                    df.to_csv(self.table_filename, sep="\t")
                else:
                    # dict of DataFrames: one excel sheet per key.
                    # Fixed: the ExcelWriter used to be created twice and the
                    # first round of to_excel() writes was silently discarded.
                    writer = pd.ExcelWriter(self.table_filename)
                    for key, dframe in df.items():
                        dframe.to_excel(writer, key)
                    writer.save()

            table_gen_job = FileGeneratingJob(self.table_filename, dump_table)
            if not self.skip_caching:
                table_gen_job.depends_on(cache_job)
            self.table_job = table_gen_job
        else:
            self.table_job = None

    def add_another_plot(self, output_filename, plot_function, render_args=None):
        """Add another plot job that runs on the same data as the original one (calc only done once)"""
        import pyggplot

        if render_args is None:
            render_args = {}

        def run_plot():
            df = self.get_data()
            plot = plot_function(df)
            if not isinstance(plot, pyggplot.Plot):
                raise ppg_exceptions.JobContractError(
                    "%s.plot_function did not return a pyggplot.Plot "
                    % (output_filename)
                )
            if "width" not in render_args and hasattr(plot, "width"):
                render_args["width"] = plot.width
            if "height" not in render_args and hasattr(plot, "height"):
                render_args["height"] = plot.height
            plot.render(output_filename, **render_args)

        job = FileGeneratingJob(output_filename, run_plot)
        job.depends_on(
            ParameterInvariant(self.output_filename + "_params", render_args)
        )
        job.depends_on(FunctionInvariant(self.output_filename + "_func", plot_function))
        job.depends_on(self.cache_job)
        return job

    def add_fiddle(self, fiddle_function):
        """Add another function that is called right before the plot is
        rendered with a pyggplot.Plot as the only argument in order to be able
        to 'fiddle' with the plot.
        Please note: if you want to remove an add_fiddle, the plot is only redone if you
        call add_fiddle(None) instead of removing the call altogether
        """
        self._fiddle = fiddle_function
        Job.depends_on(
            self, FunctionInvariant(self.output_filename + "_fiddle", fiddle_function)
        )

    def depends_on(self, other_job):
        """Redirect dependencies to the cache_job - just like other cached
        jobs, the plotting itself does not depend on loading prerequisites."""
        if self.skip_caching:
            Job.depends_on(self, other_job)
            if self.table_job:
                self.table_job.depends_on(other_job)
        elif (
            hasattr(self, "cache_job") and other_job is not self.cache_job
        ):  # activate this after we have added the invariants...
            self.cache_job.depends_on(other_job)
        return self

    def inject_auto_invariants(self):
        if not self.do_ignore_code_changes:
            if not self.skip_caching:
                self.cache_job.depends_on(
                    FunctionInvariant(self.job_id + ".calcfunc", self.calc_function)
                )
            else:
                FileGeneratingJob.depends_on(
                    self,
                    FunctionInvariant(self.job_id + ".calcfunc", self.calc_function),
                )
            FileGeneratingJob.depends_on(
                self, FunctionInvariant(self.job_id + ".plotfunc", self.plot_function)
            )
            FileGeneratingJob.depends_on(
                self, FunctionInvariant(self.job_id + ".run_plot", self.callback)
            )  # depend on the run_plot func

    def get_data(self):
        """Return the plot data - unpickled from the cache file, or freshly
        calculated when skip_caching is set."""
        if self.skip_caching:
            return self.calc_function()
        else:
            try:
                # Fixed: context manager so the handle is closed even when
                # unpickling raises (the except re-raises).
                with open(self.cache_filename, "rb") as of:
                    df = pickle.load(of)
            except Exception:
                print("could not load", self.cache_filename)
                raise
            return df

    def __str__(self):
        return "%s (job_id=%s,id=%s\n Calc function: %s:%s\nPlot function: %s:%s)" % (
            self.__class__.__name__,
            self.job_id,
            id(self),
            self.calc_function.__code__.co_filename,
            self.calc_function.__code__.co_firstlineno,
            self.plot_function.__code__.co_filename,
            self.plot_function.__code__.co_firstlineno,
        )
def CombinedPlotJob(
    output_filename, plot_jobs, facet_arguments, render_args=None, fiddle=None
):
    """Combine multiple PlotJobs into a common (faceted) output plot.
    An empty list means 'no facetting'
    To use these jobs, you need to have pyggplot available.
    """
    if not isinstance(output_filename, six.string_types):
        raise ValueError("output_filename was not a string type")
    if not output_filename.endswith((".png", ".pdf")):
        raise ValueError(
            "Don't know how to create this file %s, must end on .png or .pdf"
            % output_filename
        )
    if render_args is None:
        render_args = {"width": 10, "height": 10}

    def plot():
        import pandas as pd
        import pyggplot

        # pool the cached data of every participating plot job
        combined = pd.concat([pj.get_data() for pj in plot_jobs], axis=0)
        plot = plot_jobs[0].plot_function(combined)
        if isinstance(facet_arguments, list):
            if facet_arguments:  # empty lists mean no faceting
                plot.facet(*facet_arguments)
        elif isinstance(facet_arguments, dict):
            plot.facet(**facet_arguments)
        else:
            raise ValueError(
                "Don't know how to pass object of type %s to a function, needs to be a list or a dict. Was: %s"
                % (type(facet_arguments), facet_arguments)
            )
        if not isinstance(plot, pyggplot.Plot):
            raise ppg_exceptions.JobContractError(
                "%s.plot_function did not return a pyggplot.Plot " % (output_filename)
            )
        target_dir = os.path.dirname(output_filename)
        if not os.path.exists(target_dir):
            os.makedirs(target_dir)
        if fiddle:
            fiddle(plot)
        plot.render(output_filename, **render_args)

    job = FileGeneratingJob(output_filename, plot)
    job.depends_on(
        ParameterInvariant(
            output_filename + "_params",
            (
                # sorted filenames, so new plot_jobs are detected
                list(sorted([pj.output_filename for pj in plot_jobs])),
                render_args,
                facet_arguments,
            ),
        )
    )
    job.depends_on(FunctionInvariant(output_filename + "_fiddle", fiddle))
    job.depends_on([pj.cache_job for pj in plot_jobs])
    job.depends_on(
        FunctionInvariant(
            output_filename + "_plot_combined", plot_jobs[0].plot_function
        )
    )
    return job
class _CacheFileGeneratingJob(FileGeneratingJob):
    """A job that takes the results from it's callback and pickles it.
    data_loading_job is dependend on somewhere"""

    def __init__(self, job_id, calc_function, dl_job, emtpy_file_allowed=False):
        self.empty_file_allowed = emtpy_file_allowed
        if not hasattr(calc_function, "__call__"):
            raise ValueError("calc_function was not a callable")
        Job.__init__(self, job_id)  # FileGeneratingJob has no benefits for us
        # jobs are singletons per job_id - only configure on first init
        if not hasattr(self, "data_loading_job"):
            self.cache_filename = job_id
            self.callback = calc_function
            self.data_loading_job = dl_job
            self.do_ignore_code_changes = False

    def invalidated(self, reason=""):
        """Remove the pickle and forward the invalidation to the loading job."""
        logger.info("%s invalidated called, reason: %s" % (self, reason))
        try:
            logger.info("unlinking %s" % self.job_id)
            os.unlink(self.job_id)
        except OSError:
            pass
        self.was_invalidated = True
        if not self.data_loading_job.was_invalidated:
            self.data_loading_job.invalidated(reason)
        self._is_done_cache = False
        # no Job.invalidated() here - the dataloading job walks the dependants

    def run(self):
        """Calculate and pickle the data into the cache file."""
        data = self.callback()
        with open(self.cache_filename, "wb") as op:
            pickle.dump(data, op, pickle.HIGHEST_PROTOCOL)
class CachedAttributeLoadingJob(AttributeLoadingJob):
    """Like an AttributeLoadingJob, except that the callback value is pickled into
    a file called job_id and reread on the next run"""

    def __new__(cls, job_id, *args, **kwargs):
        if not isinstance(job_id, six.string_types):
            # Fixed garbled error message (was 'was not a string i jobect')
            raise ValueError("cache_filename/job_id was not a string object")
        return Job.__new__(cls, job_id + "_load")

    def __init__(
        self, cache_filename, target_object, target_attribute, calculating_function
    ):
        """@calculating_function produces the value (pickled into
        @cache_filename by self.lfg); on load it is unpickled and stored as
        @target_attribute on @target_object."""
        if not isinstance(cache_filename, six.string_types):
            raise ValueError("cache_filename/job_id was not a string object")
        if not hasattr(calculating_function, "__call__"):
            raise ValueError("calculating_function was not a callable")
        if not isinstance(target_attribute, str):
            raise ValueError("attribute_name was not a string")
        abs_cache_filename = os.path.abspath(cache_filename)

        def do_load(cache_filename=abs_cache_filename):
            # Fixed: context manager so the handle is closed even when
            # unpickling raises.
            with open(cache_filename, "rb") as op:
                return pickle.load(op)

        AttributeLoadingJob.__init__(
            self, cache_filename + "_load", target_object, target_attribute, do_load
        )
        lfg = _CacheFileGeneratingJob(cache_filename, calculating_function, self)
        self.lfg = lfg
        Job.depends_on(self, lfg)

    def depends_on(self, jobs):
        # The loading job itself must not depend on the preqs - otherwise
        # they would have to be loaded even when the pickle already exists,
        # and dataloading preqs could not be unloaded right away. The loading
        # job only needs the lfg output file, so redirect to the lfg.
        self.lfg.depends_on(jobs)
        return self

    def ignore_code_changes(self):
        self.lfg.ignore_code_changes()
        self.do_ignore_code_changes = True

    def __del__(self):
        self.lfg = None

    def invalidated(self, reason=""):
        if not self.lfg.was_invalidated:
            self.lfg.invalidated(reason)
        Job.invalidated(self, reason)
class CachedDataLoadingJob(DataLoadingJob):
"""Like a DataLoadingJob, except that the callback value is pickled into
a file called job_id and reread on the next run"""
def __new__(cls, job_id, *args, **kwargs):
if not isinstance(job_id, six.string_types):
raise ValueError("cache_filename/job_id was not a string object")
return Job.__new__(
cls, job_id + "_load"
) # plus load, so that the cached data goes into the cache_filename passed to the constructor...
def __init__(self, cache_filename, calculating_function, loading_function):
if not isinstance(cache_filename, six.string_types):
raise ValueError("cache_filename/job_id was not a string object")
if not hasattr(calculating_function, "__call__"):
raise ValueError("calculating_function was not a callable")
if not hasattr(loading_function, "__call__"):
raise ValueError("loading_function was not a callable")
abs_cache_filename = os.path.abspath(cache_filename)
def do_load(cache_filename=abs_cache_filename):
op = open(cache_filename, "rb")
try:
data = pickle.load(op)
except Exception as e:
raise ValueError(
"Unpickling error in file %s - original error was %s"
% (cache_filename, str(e))
)
op.close()
loading_function(data)
DataLoadingJob.__init__(
self, cache_filename + "_load", do_load
) # todo: adjust functioninvariant injection
lfg = _CacheFileGeneratingJob(cache_filename, calculating_function, self)
self.lfg = lfg
Job.depends_on(self, lfg)
self.calculating_function = calculating_function
self.loading_function = loading_function
def inject_auto_invariants(self):
if not self.do_ignore_code_changes:
# this job should depend on that, not the lazy filegenerating one...
Job.depends_on(
self, FunctionInvariant(self.job_id + "_func", self.loading_function)
) # we don't want to depend on 'callback', that's our tiny wrapper, but on the loading_function instead.
def __str__(self):
try:
return (
"%s (job_id=%s,id=%s\n Calc calcback: %s:%s\nLoad callback: %s:%s)"
% (
self.__class__.__name__,
self.job_id,
id(self),
self.calculating_function.__code__.co_filename,
self.calculating_function.__code__.co_firstlineno,
self.loading_function.__code__.co_filename,
self.loading_function.__code__.co_firstlineno,
)
)
except AttributeError:
return "%s(job_id=%s, callbacks unset" % (
self.__class__.__name__,
self.job_id,
)
def depends_on(self, jobs):
self.lfg.depends_on(jobs)
return self
# The loading job itself should not depend on the preqs
# because then the preqs would even have to be loaded if
# the lfg had run already in another job
# and dataloadingpreqs could not be unloaded right away
# Now, if you need to have a more complex loading function,
# that also requires further jobs being loaded (integrating, etc)
# either add in another DataLoadingJob dependand on this CachedDataLoadingJob
# or call Job.depends_on(this_job, jobs) yourself.
# return Job.depends_on(self, jobs)
def ignore_code_changes(self):
self.lfg.ignore_code_changes()
self.do_ignore_code_changes = True
    def __del__(self):
        # Drop the reference to the generating job so the
        # loading-job <-> generating-job cycle can be garbage collected.
        self.lfg = None
def invalidated(self, reason=""):
if not self.lfg.was_invalidated:
self.lfg.invalidated(reason)
Job.invalidated(self, reason)
class MemMappedDataLoadingJob(DataLoadingJob):
    """Like a DataLoadingJob that returns a numpy array. That array get's stored to a file, and memmapped back in later on.
    Note that it's your job to del your memmapped reference to get it garbage collectable...
    """

    def __new__(cls, job_id, *args, **kwargs):
        # The public job is registered under "<cache_filename>_load"; the raw
        # cache file itself is produced by an internal FileGeneratingJob.
        if is_pypy:
            raise NotImplementedError(
                "Numpypy currently does not support memmap(), there is no support for MemMappedDataLoadingJob using pypy."
            )
        if not isinstance(job_id, six.string_types):
            raise ValueError("cache_filename/job_id was not a string object")
        return Job.__new__(
            cls, job_id + "_load"
        )  # plus load, so that the cached data goes into the cache_filename passed to the constructor...

    def __init__(self, cache_filename, calculating_function, loading_function, dtype):
        """Create the load job and its hidden calculating job.

        cache_filename: file backing the memmap (also the base job id).
        calculating_function: returns a numpy array of exactly ``dtype``.
        loading_function: receives the read-only memmap when the job runs.
        dtype: expected numpy dtype of the cached array.
        """
        if not isinstance(cache_filename, six.string_types):
            raise ValueError("cache_filename/job_id was not a string object")
        if not hasattr(calculating_function, "__call__"):
            raise ValueError("calculating_function was not a callable")
        if not hasattr(loading_function, "__call__"):
            raise ValueError("loading_function was not a callable")
        abs_cache_filename = os.path.abspath(cache_filename)
        self.dtype = dtype  # must be set before the closures below are invoked

        def do_load(cache_filename=abs_cache_filename):
            import numpy

            # Re-open the cache read-only and hand the memmap to the callback.
            data = numpy.memmap(cache_filename, self.dtype, mode="r")
            loading_function(data)

        DataLoadingJob.__init__(self, cache_filename + "_load", do_load)  #

        def do_calc(cache_filename=abs_cache_filename):
            import numpy

            data = calculating_function()
            if not isinstance(data, numpy.ndarray):
                raise ppg_exceptions.JobContractError("Data must be a numpy array")
            if data.dtype != self.dtype:
                raise ppg_exceptions.JobContractError(
                    "Data had wrong dtype. Expected %s, was %s"
                    % (self.dtype, data.dtype)
                )
            # Write through a fresh memmap, then drop all references so the
            # file is flushed and closed before dependants run.
            mmap = numpy.memmap(cache_filename, self.dtype, "w+", shape=data.shape)
            mmap[:] = data
            mmap.flush()
            del data
            del mmap

        lfg = FileGeneratingJob(cache_filename, do_calc)
        self.lfg = lfg
        Job.depends_on(self, lfg)
        self.calculating_function = calculating_function
        self.loading_function = loading_function

    def inject_auto_invariants(self):
        # Track code changes of both callbacks unless explicitly disabled.
        if not self.do_ignore_code_changes:
            self.depends_on(
                FunctionInvariant(self.job_id + "_func", self.loading_function)
            )  # we don't want to depend on 'callback', that's our tiny wrapper, but on the loading_function instead.
            self.lfg.depends_on(
                FunctionInvariant(self.job_id + "_calc_func", self.calculating_function)
            )

    def __str__(self):
        # Typo fix: the label used to read "Calc calcback".
        return "%s (job_id=%s,id=%s\n Calc callback: %s:%s\nLoad callback: %s:%s)" % (
            self.__class__.__name__,
            self.job_id,
            id(self),
            self.calculating_function.__code__.co_filename,
            self.calculating_function.__code__.co_firstlineno,
            self.loading_function.__code__.co_filename,
            self.loading_function.__code__.co_firstlineno,
        )

    def depends_on(self, jobs):
        # Prerequisites belong to the calculating job, not the loading job
        # (same rationale as CachedDataLoadingJob.depends_on).
        self.lfg.depends_on(jobs)
        return self

    def ignore_code_changes(self):
        # Disable function-invariant tracking on this job and its generator.
        self.lfg.ignore_code_changes()
        self.do_ignore_code_changes = True

    def __del__(self):
        # Break the reference cycle with the generating job for GC.
        self.lfg = None

    def invalidated(self, reason=""):
        # Propagate invalidation to the calculating job first, then self.
        if not self.lfg.was_invalidated:
            self.lfg.invalidated(reason)
        Job.invalidated(self, reason)
def NotebookJob(notebook_filename, auto_detect_dependencies=True):
    """Run an ipython notebook if it changed, or any of the jobs for filenames it references
    changed"""
    notebook_name = notebook_filename
    if "/" in notebook_name:
        notebook_name = notebook_name[notebook_name.rfind("/") + 1 :]
    if not os.path.exists("cache/notebooks"):
        os.mkdir("cache/notebooks")
    # Bug fix: hashlib.md5() requires bytes under Python 3 - passing the str
    # path raised TypeError. Encoding works under Python 2 as well.
    # Also compute the digest once instead of twice.
    name_hash = hashlib.md5(notebook_filename.encode("utf-8")).hexdigest()
    sentinel_file = os.path.join(
        "cache",
        "notebooks",
        name_hash + " " + notebook_name + ".html",
    )
    ipy_cache_file = os.path.join(
        "cache", "notebooks", name_hash + ".ipynb"
    )
    return _NotebookJob(
        [sentinel_file, ipy_cache_file], notebook_filename, auto_detect_dependencies
    )
class _NotebookJob(MultiFileGeneratingJob):
    def __init__(self, files, notebook_filename, auto_detect_dependencies):
        # files: [sentinel_html, ipynb_cache] as produced by NotebookJob().
        sentinel_file, ipy_cache_file = files

        def run_notebook():
            import subprocess

            # Work on a copy of the notebook so runipy's in-place output
            # rewriting never touches the user's original file.
            shutil.copy(notebook_filename, ipy_cache_file)
            p = subprocess.Popen(
                ["runipy", "-o", os.path.abspath(ipy_cache_file), "--no-chdir"],
                cwd=os.path.abspath("."),
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )
            stdout, stderr = p.communicate()
            if p.returncode != 0:
                raise ValueError(
                    "Ipython notebook %s error return.\nstdout:\n%s\n\nstderr:\n%s"
                    % (notebook_filename, stdout, stderr)
                )
            # Render the executed notebook to HTML; the HTML doubles as the
            # sentinel file that marks this job as done.
            output_file = open(sentinel_file, "wb")
            p = subprocess.Popen(
                [
                    "ipython",
                    "nbconvert",
                    os.path.abspath(ipy_cache_file),
                    "--to",
                    "html",
                    "--stdout",
                ],
                stdout=output_file,
                stderr=subprocess.PIPE,
            )
            stdout, stderr = p.communicate()
            if p.returncode != 0:
                raise ValueError("Ipython nbconvert error. stderr: %s" % (stderr,))
            output_file.close()

        self.auto_detect_dependencies = auto_detect_dependencies
        self.notebook_filename = notebook_filename
        MultiFileGeneratingJob.__init__(self, files, run_notebook)
def inject_auto_invariants(self):
deps = [FileChecksumInvariant(self.notebook_filename)]
if self.auto_detect_dependencies:
with open(self.notebook_filename, "rb") as op:
raw_text = op.read()
for job_name, job in util.global_pipegraph.jobs.items():
if isinstance(job, MultiFileGeneratingJob):
for fn in job.filenames:
if fn in raw_text:
deps.append(job)
elif isinstance(job, FileGeneratingJob):
if job.job_id in raw_text:
deps.append(job)
| 2.109375 | 2 |
app/app.py | KingMikeXS/dl | 0 | 12758899 | <reponame>KingMikeXS/dl
import io
import inspect
import imp
from contextlib import redirect_stdout
from flask import Flask
from flask import request
from flask import jsonify
import adv.adv_test
import slot.a
import slot.d
import slot.w
app = Flask(__name__)

# Helpers
ROOT_DIR = '/home/wildshinobu/dl/'
# Adventurers simulated via a dedicated "means" module file instead of the
# plain adv/<name>.py module.
MEANS_ADV = {
    'addis': 'addis.py.means.py',
    'sazanka': 'sazanka.py.means.py',
    'victor': 'victor.py.m.py',
    'ezelith': 'ezelith.py.means.py',
}
# Adventurers that are quick to simulate (single run)...
NORMAL_ADV = ['h_lowen']
# ...and those that are slow and therefore mass-simulated.
MASS_SIM_ADV = []
with open(ROOT_DIR+'chara_quick.txt') as f:
    for l in f:
        NORMAL_ADV.append(l.strip().replace('.py', ''))
with open(ROOT_DIR+'chara_slow.txt') as f:
    for l in f:
        MASS_SIM_ADV.append(l.strip().replace('.py', ''))
# ???
# audric.py.dragon.py
# g_mym.py.dragon.py
# euden.py.dragon.py
# euden.py.dragon.sakuya.py
# lathna.py.dragon.py
# Special adventurer variants: 'fn' is the module file, 'nc' lists which
# request options are Not Customizable for that variant ('wp', 'acl').
SPECIAL_ADV = {
    'chelsea_rollfs': {
        'fn': 'chelsea.py.rollfs.py',
        'nc': ['wp']
    },
    'g_cleo_ehjp': {
        'fn': 'g_cleo.py.ehjp.py',
        'nc': ['wp', 'acl']
    },
    'g_luca_maxstacks': {
        'fn': 'g_luca.py.maxstacks.py',
        'nc': []
    },
    'veronica_1hp': {
        'fn': 'veronica.py.1hp.py',
        'nc': []
    },
    'natalie_1hp': {
        'fn': 'natalie.py.1hp.py',
        'nc': []
    },
    'v_addis_1hp': {
        'fn': 'v_addis.py.1hp.py',
        'nc': []
    }
}
def get_adv_module(adv_name):
    """Load and return the adventurer module for ``adv_name``.

    Special/means variants live in irregular file names under adv/ and are
    loaded from their path; everything else is a normal adv.<name> import.
    """
    is_means = adv_name in MEANS_ADV
    if is_means or adv_name in SPECIAL_ADV:
        fn = MEANS_ADV[adv_name] if is_means else SPECIAL_ADV[adv_name]['fn']
        path = '{}{}'.format(ROOT_DIR + 'adv/', fn)
        with open(path, 'rb') as fp:
            loaded = imp.load_module(
                adv_name, fp, fn,
                ('.py', 'rb', imp.PY_SOURCE)
            )
        return loaded.module()
    package = __import__('adv.{}'.format(adv_name.lower()))
    return getattr(package, adv_name.lower()).module()
def is_amulet(obj):
    """True for Amulet subclasses defined outside the base slot modules."""
    if not inspect.isclass(obj):
        return False
    if not issubclass(obj, slot.a.Amulet):
        return False
    return obj.__module__ not in ('slot.a', 'slot')
def is_dragon(obj):
    """True for DragonBase subclasses defined outside the base slot modules."""
    if not inspect.isclass(obj):
        return False
    if not issubclass(obj, slot.d.DragonBase):
        return False
    return obj.__module__ not in ('slot.d', 'slot')
def is_weapon(obj):
    # NOTE(review): the base class is taken from slot.d (slot.d.WeaponBase)
    # while the excluded module name is 'slot.w'. list_members() below also
    # references slot.d.WeaponBase, so WeaponBase may genuinely live in
    # slot.d - confirm whether slot.w.WeaponBase was intended here.
    return (inspect.isclass(obj) and issubclass(obj, slot.d.WeaponBase)
            and obj.__module__ != 'slot.w'
            and obj.__module__ != 'slot')
def list_members(module, predicate, element=None):
    """Return qualified names of classes in ``module`` matching ``predicate``.

    When ``element`` is given, weapon classes whose ``ele`` attribute does
    not include that element are skipped. Names are de-duplicated while
    preserving first-seen order.
    """
    names = []
    for _, cls in inspect.getmembers(module, predicate):
        if (element is not None
                and issubclass(cls, slot.d.WeaponBase)
                and element not in getattr(cls, 'ele')):
            continue
        qualname = cls.__qualname__
        if qualname not in names:
            names.append(qualname)
    return names
def set_teamdps_res(result, r, suffix=''):
    """Copy team buff/energy summaries from sim result ``r`` into ``result``.

    Writes into result['extra' + suffix]; zero/negative sums are omitted.
    Returns the (mutated) result dict.
    """
    extra_key = 'extra' + suffix
    if r['buff_sum'] > 0:
        result[extra_key]['team_buff'] = f"+{round(r['buff_sum'] * 100)}%"
    if r['energy_sum'] > 0:
        result[extra_key]['team_energy'] = f"{r['energy_sum']} stacks"
    return result
def set_log_res(result, r, suffix=''):
    """Copy the simulation log text from ``r`` into result['logs' + suffix]."""
    log_key = 'logs' + suffix
    result[log_key] = r['logs']
    return result
# API
@app.route('/simc_adv_test', methods=['POST'])
def run_adv_test():
    """Run one adventurer DPS simulation from the posted JSON parameters.

    Returns JSON with the captured sim output, team buff/energy extras and
    logs; on failure returns {'error': ...} instead.
    """
    if not request.method == 'POST':
        return 'Wrong request method.'
    params = request.get_json(silent=True)
    # Optional request parameters with their defaults.
    adv_name = params['adv'].lower() if 'adv' in params else 'euden'
    wp1 = params['wp1'] if 'wp1' in params else None
    wp2 = params['wp2'] if 'wp2' in params else None
    dra = params['dra'] if 'dra' in params else None
    wep = params['wep'] if 'wep' in params else None
    ex = params['ex'] if 'ex' in params else ''
    acl = params['acl'] if 'acl' in params else None
    teamdps = abs(float(params['teamdps'])) if 'teamdps' in params else None
    t = abs(int(params['t']) if 't' in params else 180)
    log = -2  # verbosity level passed to adv_test.test
    # Slow adventurers are mass-simulated (25 runs) unless they are a
    # "means" variant.
    mass = 25 if adv_name in MASS_SIM_ADV and adv_name not in MEANS_ADV else 0
    print(params)
    # Special variants may forbid customizing weapon prints and/or ACL.
    if adv_name in SPECIAL_ADV:
        not_customizable = SPECIAL_ADV[adv_name]['nc']
        if 'wp' in not_customizable:
            wp1 = None
            wp2 = None
        if 'acl' in not_customizable:
            acl = None
    adv.adv_test.set_ex(ex)
    adv_module = get_adv_module(adv_name)
    # Injected into the adv module as backdoors, called during sim setup.
    def slot_injection(this):
        if wp1 is not None and wp2 is not None:
            this.conf['slots.a'] = getattr(slot.a, wp1)() + getattr(slot.a, wp2)()
        if dra is not None:
            this.conf['slots.d'] = getattr(slot.d, dra)()
        if wep is not None:
            this.conf['slots.w'] = getattr(slot.w, wep)()
        if teamdps is not None:
            adv.adv_test.team_dps = teamdps
            # assume team dps * 1.25 = raw skill dmg
            adv.adv_test.energy_efficiency = (teamdps * 1.25) * 0.5 * 2 / 5 / adv.adv_test.sim_duration
        else:
            adv.adv_test.team_dps = 6000
            adv.adv_test.energy_efficiency = 7500 * 0.5 * 2 / 5 / adv.adv_test.sim_duration
    def acl_injection(this):
        if acl is not None:
            this.conf['acl'] = acl
    adv_module.slot_backdoor = slot_injection
    adv_module.acl_backdoor = acl_injection
    conf = {}
    # Best-effort parsing of the optional numeric parameters: missing or
    # malformed values are silently skipped.
    # NOTE(review): the bare excepts swallow *all* errors, not just
    # KeyError/ValueError - consider narrowing.
    for afflic in ['poison', 'paralysis', 'burn', 'blind', 'bog', 'stun', 'freeze', 'sleep']:
        try:
            conf['afflict_res.'+afflic] = min(abs(int(params['afflict_res_'+afflic])), 100)
        except:
            pass
    try:
        if params['sim_afflict_type'] in ['burn', 'paralysis', 'poison']:
            conf['sim_afflict.time'] = t * min(abs(int(params['sim_afflict_time'])), 100)/100
            conf['sim_afflict.type'] = params['sim_afflict_type']
    except:
        pass
    try:
        conf['sim_buffbot.buff'] = min(max(int(params['sim_buff_str']), -100), 100)/100
    except:
        pass
    try:
        conf['sim_buffbot.debuff'] = min(max(int(params['sim_buff_def']), -100), 50)/100
    except:
        pass
    result = {'test_output': '', 'extra': {}, 'extra_no_cond': {}, 'logs': ''}
    # The simulator prints its report to stdout; capture it into the response.
    f = io.StringIO()
    r = None
    try:
        with redirect_stdout(f):
            r = adv.adv_test.test(adv_module, conf, verbose=log, duration=t, mass=mass)
    except Exception as e:
        result['error'] = str(e)
        return jsonify(result)
    result['test_output'] = f.getvalue()
    f.close()
    if r is not None:
        result = set_teamdps_res(result, r)
        result = set_log_res(result, r)
        if 'no_cond' in r:
            result = set_teamdps_res(result, r['no_cond'], '_no_cond')
            # result = set_log_res(result, r['no_cond'], '_no_cond')
    return jsonify(result)
@app.route('/simc_adv_slotlist', methods=['GET', 'POST'])
def get_adv_slotlist():
    """Return the dragons/weapons selectable for an adventurer (JSON).

    With no adventurer given, the unfiltered slot modules are listed.
    """
    result = {}
    result['adv'] = {}
    if request.method == 'GET':
        result['adv']['name'] = request.args.get('adv', default=None)
    elif request.method == 'POST':
        params = request.get_json(silent=True)
        result['adv']['name'] = params['adv'].lower() if 'adv' in params else None
    else:
        return 'Wrong request method.'
    adv_ele = None
    dragon_module = slot.d
    weap_module = slot.w
    if result['adv']['name'] is not None:
        # Instantiate the adventurer to read its element, weapon type and
        # preferred default slots.
        adv_instance = get_adv_module(result['adv']['name'])(cond=1)
        adv_ele = adv_instance.slots.c.ele.lower()
        result['adv']['ele'] = adv_ele
        dragon_module = getattr(slot.d, adv_ele)
        result['adv']['wt'] = adv_instance.slots.c.wt.lower()
        weap_module = getattr(slot.w, result['adv']['wt'])
        result['adv']['pref_dra'] = type(adv_instance.slots.d).__qualname__
        result['adv']['pref_wep'] = type(adv_instance.slots.w).__qualname__
        result['adv']['pref_wp'] = {
            'wp1': type(adv_instance.slots.a).__qualname__,
            'wp2': type(adv_instance.slots.a.a2).__qualname__
        }
        result['adv']['acl'] = adv_instance.conf.acl
        # Only expose affliction resists actually configured for the adv.
        if 'afflict_res' in adv_instance.conf:
            res_conf = adv_instance.conf.afflict_res
            res_dict = {}
            for afflic in ['poison', 'paralysis', 'burn', 'blind', 'bog', 'stun', 'freeze', 'sleep']:
                if afflic in res_conf:
                    res_dict[afflic] = res_conf[afflic]
            if len(res_dict.keys()) > 0:
                result['adv']['afflict_res'] = res_dict
        if result['adv']['name'] in SPECIAL_ADV:
            result['adv']['no_config'] = SPECIAL_ADV[result['adv']['name']]['nc']
    # result['amulets'] = list_members(slot.a, is_amulet)
    result['dragons'] = list_members(dragon_module, is_dragon, element=adv_ele)
    result['weapons'] = list_members(weap_module, is_weapon, element=adv_ele)
    return jsonify(result)
@app.route('/simc_adv_wp_list', methods=['GET', 'POST'])
def get_adv_wp_list():
    """Return all amulets and all known adventurer names as JSON."""
    if request.method not in ('GET', 'POST'):
        return 'Wrong request method.'
    result = {
        'amulets': list_members(slot.a, is_amulet),
        'adv': NORMAL_ADV + MASS_SIM_ADV + list(SPECIAL_ADV.keys()),
    }
    return jsonify(result)
data_scope.py | sharma-bharat/Codes_NBP_Extremes | 0 | 12758900 | <gh_stars>0
# <NAME>
# python 3.7
# Read file names and sort data and file paths
import pandas as pd
import glob
import numpy as np
import netCDF4 as nc4
import os
web_path = '/global/homes/b/bharat/results/web/'
in_path = '/global/homes/b/bharat/results/data_processing/'
files_list = glob.glob(in_path+'ls_*.csv')
# Extracting Experiment Vars and Filenames
# --------------------------------------
# Creating the data frame for every experiment and variable
dict_exp_vars = {} # columns = ['activity_id','institution_id','source_id','experiment_id','member_id','table_id','variable_id','grid_label','version','filenames']
# dict_exp_vars ['experiment']['variable'] < structure
experiments = []
variables = []
# NOTE(review): building a str array from np.zeros yields very short fixed-width
# strings, so the filename assigned below is silently truncated - the array is
# only used for these assignments, so confirm it is needed at all.
experiment_vars_fname = np.asarray(np.zeros((len(files_list),3)), dtype = 'str')
for idx in range(len(files_list)):
    # File names look like ls_<experiment>_..._<variable>.csv; pull the
    # experiment and variable tokens out of the base name.
    exp_tmp = files_list[idx].split('/')[-1].split('_')[1]
    var_tmp = files_list[idx].split('/')[-1].split('_')[-1].split('.')[0]
    file_tmp = files_list[idx].split('/')[-1]
    experiment_vars_fname[idx,0] = exp_tmp # experiment
    experiment_vars_fname[idx,1] = var_tmp # variables
    experiment_vars_fname[idx,2] =file_tmp # filename
    print (exp_tmp,var_tmp)
    # Each listing file holds one '/'-separated path per row; split into
    # the CMIP directory-hierarchy columns.
    df_var = pd.read_csv(in_path + file_tmp, header =None)
    if exp_tmp not in experiments :
        experiments .append(exp_tmp) # list of experiments
    if var_tmp not in variables:
        variables .append(var_tmp) # list of variables
    if exp_tmp not in dict_exp_vars : # building dictionary
        dict_exp_vars[exp_tmp] = {}
        dict_exp_vars[exp_tmp] [var_tmp] = pd.DataFrame(df_var.iloc[:,0].str.split('/').tolist()
            , columns = ['activity_id','institution_id','source_id','experiment_id','member_id','table_id','variable_id','grid_label','version','filenames'])
    else:
        dict_exp_vars[exp_tmp] [var_tmp] = pd.DataFrame(df_var.iloc[:,0].str.split('/').tolist()
            , columns = ['activity_id','institution_id','source_id','experiment_id','member_id','table_id','variable_id','grid_label','version','filenames'])
# making only one dataframe with all the information
# --------------------------------------------------
df_column_names = ['activity_id','institution_id','source_id','experiment_id','member_id','table_id','variable_id','grid_label','version','filenames']
df_data = pd.DataFrame(columns=df_column_names)
for exp in experiments:
    for var in variables:
        df_data = df_data.append(dict_exp_vars[exp][var], ignore_index=True)
# df_data is the complete dataframe of all the data that i need for this project
# ------------------------------------------------------------------------------
# Saving the data frame:
# ----------------------
# Remove stale copies before re-saving. Bug fix: the original wrapped all
# four removals in ONE try block, so the first missing file skipped the
# remaining removals; handle each file independently instead.
for stale_path in (in_path + 'df_data_selected.csv',
                   web_path + 'df_data_selected.csv',
                   in_path + 'df_data_selected.xlsx',
                   web_path + 'df_data_selected.xlsx'):
    try:
        os.remove(stale_path)
    except OSError:
        print("The file does not exist: %s" % stale_path)
df_data.to_csv (in_path + 'df_data_selected.csv', index=False)
df_data.to_csv (web_path + 'df_data_selected.csv', index=False)
df_data.to_excel(in_path + 'df_data_selected.xlsx', index=False)
df_data.to_excel(web_path + 'df_data_selected.xlsx', index=False)
print ("The shape of the dataframe %s"%str(df_data.shape))
# Storing the unique entries in arrays
# ------------------------------------
# Source_id or Model Names:
# -------------------------
source_ids = np.unique(df_data['source_id'])
# Variable names:
# --------------
variable_ids = np.unique(df_data['variable_id'])
# Ensemble of models/source ids for every experiment and variable:
# ---------------------------------------------------------------
dict_ensemble = {}
for model in source_ids:
    dict_ensemble [model] = {}
    for exp in experiments:
        dict_ensemble [model][exp] = {}
        for var in variable_ids:
            filters = (df_data['source_id'] == model) & (df_data['experiment_id'] == exp) & (df_data ['variable_id'] ==var)
            dict_ensemble [model][exp][var] = np.array(df_data[filters]['member_id'])
# Dictionary hierarchy : 1. source_id, 2. experiment_id , 3. variable_id
# df_data is the complete dataframe of all the data that i need for this project
# ------------------------------------------------------------------------------
# plotting GPP from all the model and their ensembles
# --------------------------------------------------
#filters = (
# Extracting the variables from models of different experiments:
# --------------------------------------------------------------
# NOTE(review): the keys below are the literal strings 'source', 'experiment'
# etc. - this looks like a structural placeholder; dict_data is not populated
# further in this script.
dict_data = {}
dict_data ['source'] = {}
dict_data ['source']['experiment'] = {}
dict_data ['source']['experiment']['member'] = {}
dict_data ['source']['experiment']['member']['variable'] = {}
dict_data ['source']['experiment']['member']['variable']['version'] = {}
dict_data ['source']['experiment']['member']['variable']['version']['filename'] = {}
source_list = ['ScenarioMIP', 'CMIP']
#food for thought
#filters = (temp['experiment_id']=='ssp585') & (temp['institution_id'] == 'MIROC')
""" Summary of what data we have:
"""
columns = ['institution_id','source_id','experiment_id','variable_id','grid_label']
All = ['activity_id','institution_id','source_id','experiment_id','member_id','table_id','variable_id','grid_label','version','filenames']
columns_drop = ['activity_id','member_id','table_id','version','filenames']
# Creating an empty dataframe for summarizing the data sources
# ------------------------------------------------------------
df_brief = df_data.head()
df_brief = df_brief.drop(columns=columns_drop)
df_brief = df_brief.drop([0,1,2,3,4])
df_brief = pd.DataFrame(df_brief)
df_brief2 = df_brief.drop(columns=['grid_label'])
"""
for ins in np.unique(df_data['institution_id']):
for source in np.unique(df_data['source_id']):
for exp in np.unique(df_data['experiment_id']):
for var in np.unique(df_data['variable_id']):
df_brief2 = df_brief2.append(pd.DataFrame([[ins,source,exp,var]],columns = ['institution_id', 'source_id', 'experiment_id', 'variable_id']), ignore_index=True)
for gr in np.unique(df_data['grid_label']):
df_brief = df_brief.append(pd.DataFrame([[ins,source,exp,var,gr]],columns = ['institution_id', 'source_id', 'experiment_id', 'variable_id', 'grid_label']), ignore_index=True)
"""
# Walk consecutive rows of df_data and emit a summary row every time any of
# the grouping columns changes between row i and row i+1 (df_data is grouped
# by construction, so this approximates "unique combinations").
i = 0
while i < len(df_data)-1:
    ins_1 = df_data.iloc[i+1].institution_id
    source_1 = df_data.iloc[i+1].source_id
    exp_1 = df_data.iloc[i+1].experiment_id
    var_1 = df_data.iloc[i+1].variable_id
    gn_1 = df_data.iloc[i+1].grid_label
    ins = df_data.iloc[i].institution_id
    source = df_data.iloc[i].source_id
    exp = df_data.iloc[i].experiment_id
    var = df_data.iloc[i].variable_id
    gn = df_data.iloc[i].grid_label
    #print (i,source)
    i = i+1
    print (i,"|", ins, ins_1,"|",source,source_1,"|",exp,exp_1,"|",var,var_1 ,"|" ,gn,gn_1)
    if (ins != ins_1) or (source !=source_1) or (exp!=exp_1) or (var!=var_1) or (gn!=gn_1):
        df_brief = df_brief.append(pd.DataFrame([[ins,source,exp,var,gn]],columns = ['institution_id', 'source_id', 'experiment_id', 'variable_id', 'grid_label']), ignore_index=True)
        print (i,source)
# Bug fix: the loop only appends the *earlier* row of each differing pair,
# so the final combination in the file was never emitted; append it here.
if len(df_data) > 0:
    last = df_data.iloc[len(df_data)-1]
    df_brief = df_brief.append(pd.DataFrame([[last.institution_id, last.source_id, last.experiment_id, last.variable_id, last.grid_label]],columns = ['institution_id', 'source_id', 'experiment_id', 'variable_id', 'grid_label']), ignore_index=True)
# Removing the files that are about to be saved, sometimes the previous file
# is not being updated.
# Bug fix: the original removed 'df_df_data_summary_variables_grids.csv'
# (note the doubled 'df_'), which never matches the file written below, so
# the stale copies were never cleaned up. Also handle each file separately
# so one missing file does not skip the other removal.
for stale_path in (in_path + 'df_data_summary_variables_grids.csv',
                   web_path + 'df_data_summary_variables_grids.csv'):
    try:
        os.remove(stale_path)
    except OSError:
        print("The file does not exist: %s" % stale_path)
df_brief.to_csv(in_path + 'df_data_summary_variables_grids.csv',index=False)
df_brief.to_csv(web_path + 'df_data_summary_variables_grids.csv',index=False)
language/python/snippets/ipc/ipc_pipe_named.py | bigfoolliu/liu_aistuff | 1 | 12758901 | <filename>language/python/snippets/ipc/ipc_pipe_named.py<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# author: bigfoolliu
"""
使用FIFO(命名管道)来实现任意两个进程间的通信
特点:
1. 和无名管道一样,半双工
2. 每个FIFO管道都与一个路径名相关联,类似一个文件
3. 进程间通过读写FIFO来通信
4. 这种方式对子进程的生命周期管理并不方便,不建议在python中使用
"""
import os
import time
PIPE_NAME = 'pipe'
def child_process():
    """Child process: write ten numbered messages into the FIFO."""
    pipe_out = os.open(PIPE_NAME, os.O_WRONLY)  # write-only end (blocks until a reader opens)
    try:
        count = 0
        while count < 10:
            print(f'write {count}')
            os.write(pipe_out, f'number:{count}'.encode('utf-8'))
            count += 1
            time.sleep(1)
    finally:
        # Bug fix: the file descriptor was never closed; closing it lets the
        # reader observe EOF instead of relying on process exit.
        os.close(pipe_out)
def parent_process():
    """Parent process: repeatedly read messages from the FIFO."""
    # pipe_in = os.open(PIPE_NAME, 'r')
    fd = os.open(PIPE_NAME, os.O_RDONLY)  # read-only end; returns a raw file descriptor
    while True:
        # Read up to 20 bytes; read data is consumed from the FIFO.
        # Returns b'' once the writer has closed its end.
        line = os.read(fd, 20)
        parent_id = os.getpid()
        cur_time = time.time()
        if line:
            print(f'parent {parent_id} get line {line} at {cur_time}')
        else:
            print(f'parent {parent_id} get None')
        time.sleep(1)
def main():
    """Create the FIFO if needed, then fork: parent reads, child writes."""
    if not os.path.exists(PIPE_NAME):
        os.mkfifo(PIPE_NAME)
    p_id = os.fork()
    if p_id:
        # Non-zero pid: we are in the parent process.
        parent_process()
    else:
        child_process()


if __name__ == "__main__":
    main()
| 2.9375 | 3 |
Object.py | innovationgarage/epimp-brain | 0 | 12758902 | <reponame>innovationgarage/epimp-brain
from datetime import datetime
import time
from math import *
import numpy as np
import math_tools as mtools
class D2(object):
"D2 laptop:86 -2424 -1024 86 -1024 86 186 86 -1024"
"D2 id x1 y1 x2 y2 x3 y3 x4 y4 extra"
def __init__(self, line):
self.timestamp = time.time() #sec
self._type = line[1].split(':')[0]
self.prob = float(line[1].split(':')[1])
self.x_1 = float(line[3])
self.y_1 = float(line[4])
self.x_2 = float(line[5])
self.y_2 = float(line[6])
self.x_3 = float(line[7])
self.y_3 = float(line[8])
self.x_4 = float(line[8])
self.y_4 = float(line[9])
self.properties = {
'_type': self._type,
'x_1': self.x_1,
'y_1': self.y_1,
'x_2': self.x_2,
'y_2': self.y_2,
'x_3': self.x_3,
'y_3': self.y_3,
'x_4': self.x_4,
'y_4': self.y_4,
'prob': self.prob,
'timestamp': self.timestamp
}
def getProperties(self):
props = {
'x_1': self.x_1,
'y_1': self.y_1,
'x_2': self.x_2,
'y_2': self.y_2,
'x_3': self.x_3,
'y_3': self.y_3,
'x_4': self.x_4,
'y_4': self.y_4,
'prob': self.prob,
'timestamp': self.timestamp
}
return props
class T2(object):
"T2 600 350"
"T2 x y"
def __init__(self, line):
self.timestamp = time.time() #sec
self.x = float(line[1])
self.y = float(line[2])
self.properties = {
'x': self.x,
'y': self.y,
'timestamp': self.timestamp
}
def getProperties(self):
props = {
'x': self.x,
'y': self.y,
'timestamp': self.timestamp
}
return props
| 2.46875 | 2 |
attic/testsuite/chunker.py | sherbang/attic | 1 | 12758903 | <gh_stars>1-10
from attic.chunker import chunkify, buzhash, buzhash_update
from attic.testsuite import AtticTestCase
from io import BytesIO
class ChunkerTestCase(AtticTestCase):
    # Exercises attic's buzhash-based content-defined chunker.

    def test_chunkify(self):
        # Verify chunk boundaries for several chunker parameter combinations
        # and that the chunks always reassemble to the original input.
        # NOTE(review): the positional chunkify/buzhash parameters (window
        # size, mask, min size, seed?) are taken from attic.chunker - confirm
        # their meaning against that module.
        data = b'0' * 1024 * 1024 * 15 + b'Y'
        parts = [bytes(c) for c in chunkify(BytesIO(data), 2, 0x3, 2, 0)]
        self.assert_equal(len(parts), 2)
        self.assert_equal(b''.join(parts), data)
        # Empty input yields no chunks.
        self.assert_equal([bytes(c) for c in chunkify(BytesIO(b''), 2, 0x3, 2, 0)], [])
        # Different seeds must produce different chunk boundaries.
        self.assert_equal([bytes(c) for c in chunkify(BytesIO(b'foobarboobaz' * 3), 2, 0x3, 2, 0)], [b'fooba', b'rboobaz', b'fooba', b'rboobaz', b'fooba', b'rboobaz'])
        self.assert_equal([bytes(c) for c in chunkify(BytesIO(b'foobarboobaz' * 3), 2, 0x3, 2, 1)], [b'fo', b'obarb', b'oob', b'azf', b'oobarb', b'oob', b'azf', b'oobarb', b'oobaz'])
        self.assert_equal([bytes(c) for c in chunkify(BytesIO(b'foobarboobaz' * 3), 2, 0x3, 2, 2)], [b'foob', b'ar', b'boobazfoob', b'ar', b'boobazfoob', b'ar', b'boobaz'])
        self.assert_equal([bytes(c) for c in chunkify(BytesIO(b'foobarboobaz' * 3), 3, 0x3, 3, 0)], [b'foobarboobaz' * 3])
        self.assert_equal([bytes(c) for c in chunkify(BytesIO(b'foobarboobaz' * 3), 3, 0x3, 3, 1)], [b'foobar', b'boo', b'bazfo', b'obar', b'boo', b'bazfo', b'obar', b'boobaz'])
        self.assert_equal([bytes(c) for c in chunkify(BytesIO(b'foobarboobaz' * 3), 3, 0x3, 3, 2)], [b'foo', b'barboobaz', b'foo', b'barboobaz', b'foo', b'barboobaz'])
        self.assert_equal([bytes(c) for c in chunkify(BytesIO(b'foobarboobaz' * 3), 3, 0x3, 4, 0)], [b'foobarboobaz' * 3])
        self.assert_equal([bytes(c) for c in chunkify(BytesIO(b'foobarboobaz' * 3), 3, 0x3, 4, 1)], [b'foobar', b'boobazfo', b'obar', b'boobazfo', b'obar', b'boobaz'])
        self.assert_equal([bytes(c) for c in chunkify(BytesIO(b'foobarboobaz' * 3), 3, 0x3, 4, 2)], [b'foob', b'arboobaz', b'foob', b'arboobaz', b'foob', b'arboobaz'])

    def test_buzhash(self):
        # Pin known-good buzhash values, and check that the rolling
        # buzhash_update matches a from-scratch hash of the shifted window.
        self.assert_equal(buzhash(b'abcdefghijklmnop', 0), 3795437769)
        self.assert_equal(buzhash(b'abcdefghijklmnop', 1), 3795400502)
        self.assert_equal(buzhash(b'abcdefghijklmnop', 1), buzhash_update(buzhash(b'Xabcdefghijklmno', 1), ord('X'), ord('p'), 16, 1))
| 2.265625 | 2 |
scripts/pfam_parser.py | sandragodinhosilva/FAW-snakemake | 1 | 12758904 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
###############################################################################
import argparse
import sys
parser=argparse.ArgumentParser(
description='''Parse pfam file''')
__file__ = "pfam_parser.py"
__author__ = '<NAME> (<EMAIL>)'
__version__ = '0.8'
__date__ = 'December 3rd, 2020'
parser.add_argument('inputFile',
help='Full path to the input directory where all files are')
# Execute parse_args()
args = parser.parse_args()
# import standard Python modules
import os
import re
###############################################################################
file = sys.argv[1]
filename = os.path.basename(file)
filename = filename.replace("_tblout.txt","")
output_dir = os.path.dirname(file)
out = os.path.join(output_dir, filename + "_tblout_pfam.txt")
protein2hit_dict = {}
protein2bit_dict = {}
dic = {}
with open(file, 'r') as f:
i=0
lines = f.readlines()
for line in lines:
line = line.rstrip() # This removes the whitespace at the end of the line
if line.startswith("#"): # We only want to analyze lines with HMMER matches, so we can pass on all the lines that start with a #
pass
else:
newline = re.sub("\s+", "\t", line) # Now we can replace the whitespace in the lines with tabs, which are easier to work with.
tabs = newline.split("\t") # And now we can create a list by splitting each line into pieces based on where the tabs are.
hit = tabs[3]
i +=1
query = tabs[0] # The first item in the line is the query protein. We can assign the variable "query" to it.
bit_score = tabs[5] # The fifth item is the bit score. We can assign the variable "bit_score" to it.
dic[i]= query
protein2bit_dict[i] = float(bit_score)
protein2hit_dict[i] = hit
with open(out, "w") as outputfile:
outputfile.write("Query\tHit\tScore\n")
for proteins in protein2hit_dict:
outputfile.write(dic[proteins] + "\t" + protein2hit_dict[proteins] + "\t" + str(protein2bit_dict[proteins]) +"\n")
outputfile.close()
print("File " + str(out) + " was created.")
f.close() | 3.0625 | 3 |
app/server/calculator/Calculator/StatsCalculator.py | Nidhikokande/601_final_project | 0 | 12758905 | <reponame>Nidhikokande/601_final_project
from .Calculator import Calculator
import statistics
from .helper import Helper
class StatsCalculator(Calculator):
    # Thin wrappers around the statistics module; inputs are validated by
    # the Helper.validateListInput decorator.
    #
    # NOTE(review): stdev() uses statistics.pstdev (POPULATION std dev)
    # while variance() uses statistics.variance (SAMPLE variance), so the
    # two are mutually inconsistent; z_score() inherits the population
    # convention. Confirm which is intended before changing either.

    @Helper.validateListInput
    def mean(self,lst):
        '''
        mean
        :param lst: list of numbers
        :return: mean of list
        '''
        return statistics.mean(lst)

    @Helper.validateListInput
    def median(self,lst):
        '''
        median
        :param lst: list of numbers
        :return: median of list
        '''
        return statistics.median(lst)

    @Helper.validateListInput
    def mode(self,lst):
        '''
        mode
        :param lst: list of numbers
        :return: mode of list (raises StatisticsError if no unique mode on Python < 3.8)
        '''
        return statistics.mode(lst)

    @Helper.validateListInput
    def stdev(self,lst):
        '''
        Standard Deviation (population - see class NOTE)
        :param lst: list of numbers
        :return: Standard Deviation of list
        '''
        return statistics.pstdev(lst)

    @Helper.validateListInput
    def variance(self,lst):
        '''
        Variance (sample - see class NOTE)
        :param lst: list of numbers
        :return: Variance of List
        '''
        return statistics.variance(lst)

    @Helper.validateListInput
    def z_score(self, x, lst):
        '''
        Z score of x relative to lst (uses population std dev)
        :param x: value to score
        :param lst: list of numbers
        :return: Z score of x within list
        '''
        return (x - self.mean(lst))/self.stdev(lst)
| 2.875 | 3 |
build_corpus.py | AotY/Pytorch-Word2vec | 0 | 12758906 | <reponame>AotY/Pytorch-Word2vec<gh_stars>0
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright © 2019 LeonTao
#
# Distributed under terms of the MIT license.
"""
Covert questions data to learning embedding format.
"""
import argparse
from tqdm import tqdm
parser = argparse.ArgumentParser()
parser.add_argument('--question_', type=str, default='./data/', help="data directory path")
parser.add_argument('--corpus_path', type=str, default='./data/corpus.txt', help="corpus path for building vocab")
args = parser.parse_args()
corpus_file = open(args.corpus_path, 'w', encoding='utf-8')
with open(args.questions_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.rstrip()
sub = line.split('SPLIT')
query = sub[2]
response = sub[3]
corpus_file.write('%s\n' % query)
corpus_file.write('%s\n' % response)
corpus_file.close()
| 2.75 | 3 |
plot.py | wtingda/blockchain | 2 | 12758907 | import numpy as np
import matplotlib.pyplot as plt
def plot_price_history(hist):
    """Line plot of the price time series."""
    plt.plot(hist, '-')
    plt.xlabel("time steps")
    plt.ylabel("price")
    plt.title("price history")
    plt.show()
def plot_price_std(arr):
    """Line plot of the per-step standard deviation of prices across simulations."""
    plt.plot(arr, '-')
    plt.xlabel("time steps")
    plt.ylabel("std of price")
    plt.title("standard deviation of daily prices")
    plt.show()
def plot_wealth_dist(total):
    """Pie chart of post-simulation wealth per agent class, sorted ascending.

    total: sequence of (user, miner, speculator) wealth values.
    """
    shares = {"user": total[0], "miner": total[1], "speculator": total[2]}
    ordered = sorted(shares.items(), key=lambda kv: kv[1])
    labels = [name for name, _ in ordered]
    values = [amount for _, amount in ordered]
    plt.pie(values, labels=labels)
    plt.title("wealth distribution post simulation")
    plt.show()
def plot_hash_power_prop(prop):
    """Pie chart of the miners' hash-power shares."""
    plt.pie(prop)
    plt.title("proportions of miner hash power")
    plt.show()
price_hist = np.load("price_hist.npy")
hash_power = np.load("hash_power.npy")
wealth_dist = np.load("wealth_dist.npy")
plot_price_history(np.mean(price_hist, axis=0))
plot_price_std(np.std(price_hist, axis=0))
plot_wealth_dist(np.mean(wealth_dist, axis=0))
# # plot_hash_power_prop(np.mean(hash_power, axis=0))
keep = 20
arr = np.zeros(keep)
for hp in hash_power:
arr = np.add(hp[:keep], arr)
arr /= 100
plot_hash_power_prop(arr)
| 3.234375 | 3 |
SandBox/MaxWang/test.py | JohnHZheng/Athena2019 | 0 | 12758908 | <filename>SandBox/MaxWang/test.py
#!/usr/bin/env micropython
from ev3dev2.motor import LargeMotor, OUTPUT_C, OUTPUT_B, follow_for_ms
from ev3dev2.motor import SpeedDPS, SpeedRPM, SpeedRPS, SpeedDPM, MoveTank, MoveSteering, SpeedPercent
from time import sleep
from ev3dev2.sensor.lego import ColorSensor
<<<<<<< HEAD
#LeftWheel = LargeMotor(OUTPUT_B)
#RightWheel = LargeMotor(OUTPUT_C)
TankPair = MoveTank(OUTPUT_C, OUTPUT_B, motor_class=LargeMotor)
LeftSensor = ColorSensor(INPUT_1)
RightSensor = ColorSensor(INPUT_4)
N = 0
#while N<4:
#TankPair.on_for_seconds(SpeedDPS(-400),SpeedDPS(-400), 1)
#TankPair.on_for_degrees(SpeedDPS(-250),SpeedDPS(250),115,True,True)
# N = N + 1
=======
>>>>>>> 8c5914dbd59ea2113de00c06e2cc0afc2281b666
| 2.40625 | 2 |
Backend/oeda/rtxlib/changeproviders/MQTTPublisherChangeProvider.py | iliasger/OEDA | 2 | 12758909 | <reponame>iliasger/OEDA
import logging
from colorama import Fore
from paho import mqtt
from paho.mqtt import publish
from oeda.log import *
from oeda.rtxlib.changeproviders.ChangeProvider import ChangeProvider
from flask import json
class MQTTPublisherChangeProvider(ChangeProvider):
    """Change provider that serializes change messages and publishes them via MQTT."""

    def __init__(self, wf, cp):
        """Read host/port/topic/serializer from provider config `cp`.

        `wf` (workflow) is accepted for interface compatibility but not stored.
        Exits the process when the config is incomplete or the serializer is
        unsupported (only "JSON" is implemented).
        """
        # load config
        try:
            self.queue = []
            self.host = cp["host"]
            # NOTE(review): port is string-concatenated in the info() call
            # below, so the config value is presumably a string; paho's
            # publish.single() expects an int port -- confirm the config type.
            self.port = cp["port"]
            self.topic = cp["topic"]
            self.serializer = cp["serializer"]
            info("> MQTTPublisher | " + self.serializer + " | URI: " + self.host + ":" + self.port + " | Topic: " +
                 self.topic, Fore.CYAN)
        except KeyError:
            error("mqttPublisher definition was incomplete")
            exit(1)
        # look at the serializer
        if self.serializer == "JSON":
            self.serialize_function = lambda v: json.dumps(v).encode('utf-8')
        else:
            error("serializer not implemented")
            exit(1)

    def applyChange(self, message):
        """ publish a single mqtt message to the server """
        # One connect-publish-disconnect cycle per message (paho helper).
        publish.single(self.topic, payload=self.serialize_function(message), qos=0,
                       retain=False, hostname=self.host, port=self.port)
| 2.4375 | 2 |
com.toroam.appengine/views_community.py | yafraorg/yafra-toroam | 0 | 12758910 | <reponame>yafraorg/yafra-toroam
import os
import webapp2
import logging
import utils
from google.appengine.ext import db
from google.appengine.api import users
from gpx import SaveToDB
from models import GPXheader, ToroamUsers
from usermgmt import UserMgmt
from views import BaseHandler
class Search(BaseHandler):
    """Track search: POST redirects to /search/<term>, GET renders matches."""

    def post(self):
        searchtxt = self.request.get('search')
        logging.info('toroam.com: post - search %s', searchtxt)
        return webapp2.redirect('/search/%s' % searchtxt)

    def get(self, searchtxt):
        #TODO create init search page if GET - search tracks with terms in comments, desc, names
        query = db.Query(GPXheader)
        query.order('-gpxdate')
        query.filter('keywords =', searchtxt)
        query.filter('status =', utils.status_ok)
        query.filter('privacy =', utils.privacy_public)
        gpxheadings = query.run(limit=5, offset=0)
        template_values = {'results': searchtxt, 'gpxheadings': gpxheadings}
        logging.info('toroam.com: get - search %s', searchtxt)
        self.render_template('search.html', template_values)
class Community(BaseHandler):
    """Community landing page: the five most recent public, OK-status tracks."""

    def get(self):
        query = db.Query(GPXheader)
        query.order('-gpxdate')
        query.filter('status =', utils.status_ok)
        query.filter('privacy =', utils.privacy_public)
        latest = query.run(limit=5, offset=0)
        self.render_template('community.html', {'gpxheadings': latest})
class MyTracks(BaseHandler):
    """Lists the GPX tracks belonging to the currently signed-in user."""

    def get(self):
        userkey = UserMgmt().getid()
        if not userkey:
            logging.info('toroam.com: not logged in')
            raise Exception("Not logged in")
        current_user = ToroamUsers.get(userkey)
        self.render_template('mytracks.html', {'gpxheadings': current_user.gpxuser})
| 2.28125 | 2 |
ok_images/contrib/versatileimagefield/versatileimagefield.py | Katerinka28/ok-images | 7 | 12758911 | """
You need to import this file in any app in versatileimagefield.py file
to make it visible for versatileimagefield
"""
from PIL import Image
from PIL.WebPImagePlugin import WebPImageFile
from io import BytesIO
from versatileimagefield.datastructures.sizedimage import (
MalformedSizedImageKey,
settings,
cache,
VERSATILEIMAGEFIELD_CACHE_LENGTH,
SizedImageInstance
)
from versatileimagefield.registry import versatileimagefield_registry
from versatileimagefield.utils import JPEG_QUAL as QUAL
from versatileimagefield.versatileimagefield import (
FilteredImage,
CroppedImage as DefaultCroppedImage,
ThumbnailImage as DefaultThumbnailImage
)
from .utils import (
get_resized_path,
get_filtered_path
)
__all__ = (
'WebPMixin',
'ToWebPImage',
'WebPThumbnailImage',
'WebPCroppedImage',
'CroppedImage',
'ThumbnailImage'
)
class WebPMixin:
    """Mixin that makes a versatileimagefield sizer/filter emit WebP output.

    Overrides path construction, retrieval metadata, and saving so that
    generated renditions use the ``.webp`` extension and WEBP format.
    """

    # Extension used for all generated renditions of this sizer/filter.
    ext = "webp"

    def __getitem__(self, key):
        """
        Return a URL to an image sized according to key.

        Arguments:
            * `key`: A string in the following format
                     '[width-in-pixels]x[height-in-pixels]'
                     Example: '400x400'
        """
        try:
            width, height = [int(i) for i in key.split('x')]
        except (KeyError, ValueError):
            raise MalformedSizedImageKey(
                "%s keys must be in the following format: "
                "'`width`x`height`' where both `width` and `height` are "
                "integers." % self.__class__.__name__
            )

        if not self.path_to_image and getattr(
            settings, 'VERSATILEIMAGEFIELD_USE_PLACEHOLDIT', False
        ):
            # No source image: fall back to a placehold.it URL.
            resized_url = "http://placehold.it/%dx%d" % (width, height)
            resized_storage_path = resized_url
        else:
            resized_storage_path = get_resized_path(
                path_to_image=self.path_to_image,
                ext=self.ext,
                width=width,
                height=height,
                filename_key=self.get_filename_key(),
                storage=self.storage
            )
            try:
                resized_url = self.storage.url(resized_storage_path)
            except Exception:
                resized_url = None

        if self.create_on_demand is True:
            if cache.get(resized_url) and resized_url is not None:
                # The sized path exists in the cache so the image already
                # exists. So we `pass` to skip directly to the return
                # statement
                pass
            else:
                if resized_storage_path and not self.storage.exists(
                    resized_storage_path
                ):
                    self.create_resized_image(
                        path_to_image=self.path_to_image,
                        save_path_on_storage=resized_storage_path,
                        width=width,
                        height=height
                    )
                    resized_url = self.storage.url(resized_storage_path)

                # Setting a super-long cache for a resized image (30 Days)
                cache.set(resized_url, 1, VERSATILEIMAGEFIELD_CACHE_LENGTH)
        return SizedImageInstance(
            name=resized_storage_path,
            url=resized_url,
            storage=self.storage
        )

    def retrieve_image(self, path_to_image):
        """Open the source image from storage, reporting WEBP as the target format."""
        image = self.storage.open(path_to_image, "rb")
        file_ext = self.ext
        image_format, mime_type = "WEBP", "image/webp"
        return Image.open(image), file_ext, image_format, mime_type

    def save_image(self, imagefile, save_path, file_ext, mime_type):
        """Save the rendition under the same path but with a .webp extension.

        BUG FIX: ``save_path.rsplit('.')`` with no maxsplit returns one piece
        per dot, so names containing extra dots (e.g. 'img.min.png') raised
        ValueError on unpacking; split on the last dot only.
        """
        path, _old_ext = save_path.rsplit('.', 1)
        save_path = f'{path}.{self.ext}'
        return super().save_image(imagefile, save_path, file_ext, mime_type)

    def preprocess_WEBP(self, image, **kwargs):
        """Return save kwargs for WEBP output (lossy, project-wide quality)."""
        return image, {"quality": QUAL, "lossless": False, "icc_profile": ""}
class ToWebPImage(WebPMixin, FilteredImage):
    """
    Filter that converts the source image to WebP.

    Usage: object.image.filters.to_webp.url
    """

    def __init__(self, path_to_image, storage, create_on_demand, filename_key):
        super().__init__(
            path_to_image, storage, create_on_demand, filename_key
        )
        # Recompute name/url with the .webp extension; the parent __init__
        # builds them with the source image's extension.
        self.name = get_filtered_path(
            path_to_image=self.path_to_image,
            ext=self.ext,
            filename_key=filename_key,
            storage=storage
        )
        self.url = storage.url(self.name)

    def process_image(self, image, image_format, save_kwargs):
        # image_format/save_kwargs from the caller are replaced: output is
        # always WEBP with the mixin's preprocess settings.
        imagefile = BytesIO()
        image, save_kwargs = self.preprocess(image, "WEBP")
        image.save(imagefile, **save_kwargs)
        return imagefile
class WebPThumbnailImage(WebPMixin, DefaultThumbnailImage):
    """
    Thumbnail sizer that writes WebP renditions.

    Usage: object.image.thumbnail_webp['512x511'].url
    """
    filename_key = "thumbnail_webp"

    def process_image(self, image, image_format, save_kwargs, width, height):
        # Shrink in place to fit inside (width, height), keeping aspect ratio.
        imagefile = BytesIO()
        image.thumbnail(
            (width, height),
            Image.ANTIALIAS
        )
        # Output is always WEBP regardless of the source image_format.
        image, save_kwargs = self.preprocess(image, "WEBP")
        image.save(
            imagefile,
            **save_kwargs
        )
        return imagefile
class WebPCroppedImage(WebPMixin, DefaultCroppedImage):
    """
    PPOI-centered crop sizer that writes WebP renditions.

    Usage: object.image.crop_webp['512x511'].url
    """
    filename_key = "crop_webp"
    # Regex used by the library to recognise this sizer's generated filenames
    # (includes the encoded center-point suffix).
    filename_key_regex = r'crop_webp-c[0-9-]+__[0-9-]+'

    def process_image(self, image, image_format, save_kwargs,
                      width, height):
        imagefile = BytesIO()
        palette = image.getpalette()
        # Crop to (width, height) centered on the Primary Point of Interest.
        cropped_image = self.crop_on_centerpoint(
            image,
            width,
            height,
            self.ppoi
        )

        # Using ImageOps.fit on GIFs can introduce issues with their palette
        # Solution derived from: http://stackoverflow.com/a/4905209/1149774
        if image_format == 'GIF':
            cropped_image.putpalette(palette)

        # Output is always WEBP regardless of the source image_format.
        cropped_image, save_kwargs = self.preprocess(cropped_image, "WEBP")
        cropped_image.save(
            imagefile,
            **save_kwargs
        )

        return imagefile
class CroppedImage(DefaultCroppedImage):
    """Stock 'crop' sizer override that re-encodes WebP sources as JPEG."""

    def process_image(self, image, image_format, save_kwargs,
                      width, height):
        """
        Return a BytesIO instance of `image` cropped to `width` and `height`.

        Cropping will first reduce an image down to its longest side
        and then crop inwards centered on the Primary Point of Interest
        (as specified by `self.ppoi`)
        """
        imagefile = BytesIO()
        palette = image.getpalette()
        cropped_image = self.crop_on_centerpoint(
            image,
            width,
            height,
            self.ppoi
        )

        # Using ImageOps.fit on GIFs can introduce issues with their palette
        # Solution derived from: http://stackoverflow.com/a/4905209/1149774
        if image_format == 'GIF':
            cropped_image.putpalette(palette)

        # WebP source images are always saved as JPEG by this sizer.
        if isinstance(image, WebPImageFile):
            save_kwargs['format'] = 'JPEG'

        cropped_image.save(
            imagefile,
            **save_kwargs
        )

        return imagefile
class ThumbnailImage(DefaultThumbnailImage):
    """Stock 'thumbnail' sizer override that re-encodes WebP sources as JPEG."""

    def process_image(self, image, image_format, save_kwargs,
                      width, height):
        """
        Return a BytesIO instance of `image` that fits in a bounding box.

        Bounding box dimensions are `width`x`height`.
        """
        imagefile = BytesIO()
        image.thumbnail(
            (width, height),
            Image.ANTIALIAS
        )

        # WebP source images are always saved as JPEG by this sizer.
        if isinstance(image, WebPImageFile):
            save_kwargs['format'] = 'JPEG'

        image.save(
            imagefile,
            **save_kwargs
        )
        return imagefile
# Register the WebP-producing filter and sizers under their own accessor names.
versatileimagefield_registry.register_filter('to_webp', ToWebPImage)
versatileimagefield_registry.register_sizer("thumbnail_webp", WebPThumbnailImage)
versatileimagefield_registry.register_sizer("crop_webp", WebPCroppedImage)
# Replace the library's stock 'crop'/'thumbnail' sizers with the overrides
# above that handle WebP source images.
versatileimagefield_registry.unregister_sizer('crop')
versatileimagefield_registry.unregister_sizer('thumbnail')
versatileimagefield_registry.register_sizer('crop', CroppedImage)
versatileimagefield_registry.register_sizer('thumbnail', ThumbnailImage)
| 2.78125 | 3 |
dnd/models/library_entities/skills.py | Saevon/webdnd | 4 | 12758912 | <filename>dnd/models/library_entities/skills.py
from django.db import models
from dnd.constants.database import STND_CHAR_LIMIT, STND_ID_CHAR_LIMIT
from dnd.constants.skills import SKILL_SAMPLE_TYPES
from dnd.models.abstract import AbstractDnDModel
from dnd.models.library_entities.abstract import AbstractDnDEntity
class Skill(AbstractDnDEntity):
    """
    A skill.
    """
    # One-line summary of the skill (the entity base presumably carries the
    # full description -- confirm against AbstractDnDEntity).
    short_description = models.TextField(blank=False)
    # Whether this skill is broken into SkillSubType rows
    # (e.g. Nature under the Knowledge skill).
    has_subtypes = models.BooleanField(blank=False)
class SkillSubType(AbstractDnDEntity):
    """
    A subtype for a skill,
    e.g. Nature for the Knowledge skill.
    """
    class Meta(AbstractDnDEntity.Meta):
        # A subtype title may only appear once per skill.
        unique_together = (
            ('title', 'skill'),
        )

    skill = models.ForeignKey(
        Skill,
        # BUG FIX: related_name must be a valid Python identifier;
        # 'Skill.subtypes' (with a dot) fails Django's system checks
        # (fields.E306) and made the reverse accessor unusable.
        related_name='subtypes',
        blank=False,
        null=False)

    # (if True) always put this in the skill tree
    main = models.BooleanField(blank=False)
class SkillSample(AbstractDnDModel):
    """
    A sample skill DC or modifier that shows DC
    """
    skill = models.ForeignKey(
        Skill,
        related_name='skill_samples',
        blank=False)
    # The difficulty class (or modifier, per `type`) for this sample task.
    difficulty_class = models.IntegerField(blank=False, null=False)
    # Human-readable description of the sample task.
    description = models.TextField(blank=True)
    # Whether difficulty_class is a DC or a modifier; NOTE: the field name
    # shadows the `type` builtin but cannot be renamed without a migration.
    type = models.CharField(
        max_length=STND_ID_CHAR_LIMIT,
        choices=SKILL_SAMPLE_TYPES,
        default='dc',
        blank=True)
class Language(AbstractDnDEntity):
    """A language, together with the alphabet it is written in."""
    # Name of the script/alphabet used to write this language.
    alphabet = models.CharField(
        max_length=STND_CHAR_LIMIT,
        blank=False)
| 2.546875 | 3 |
demo/helper.py | scottx611x/napkin | 190 | 12758913 | <reponame>scottx611x/napkin
import inspect
import os
import napkin
def generate_markdown_file(title, src_path):
    """Render all collected napkin sequence diagrams into a markdown file.

    Generates PNG diagrams into ../images, then writes ../<NAME>.md with one
    section per diagram: the rendered image followed by its Python source.
    `title` is the page heading; `src_path` is the demo script's path, used
    to derive both the markdown filename and the linked source filename.
    """
    name, _ = os.path.splitext(os.path.basename(src_path))
    src_file = name + '.py'
    napkin.generate(output_format='plantuml_png', output_dir='../images')
    text = """# {title}
Following examples are auto-generated by
[demo/{src_file}](demo/{src_file})
""".format(title=title, src_file=src_file)
    # NOTE(review): relies on napkin's private diagram registry.
    for diagram in napkin._collected_seq_diagrams:
        text += """## {name}
![UML result image](images/{image_file})
```python
{src}
```
""".format(src=inspect.getsource(diagram.sd_func),
           name=diagram.name,
           image_file=diagram.name.replace(' ', '%20') + '.png')
    md_file = '../{}.md'.format(name.upper())
    with open(md_file, 'wt') as f:
        f.write(text)
    print('MD file generated : {}'.format(md_file))
| 2.78125 | 3 |
bin/tlinks.py | ddierschow/cab984 | 0 | 12758914 | #!/usr/local/bin/python
import os, sys, urllib2
import basics
import config
import mbdata
import useful
# -- links
# main entry point for toylinks
@basics.web_page
def links(pif):
pif.render.print_html()
ostr = ''
pif.render.hierarchy_append('/', 'Home')
pif.render.hierarchy_append('/cgi-bin/links.cgi', 'Toy Links')
if pif.form.get_int('id'):
link = pif.dbh.fetch_link_line(pif.form.get_int('id'))
if link:
return single_link(pif, link[0])
pif.render.set_page_extra(pif.render.reset_button_js)
if pif.page_id != 'links.toylinks':
pif.render.hierarchy_append('/cgi-bin/links.cgi?page=%s' % pif.page_id[6:], pif.render.title)
return link_page(pif)
def single_link(pif, link):
    """Render the detail page for one link row, with an edit button for admins."""
    if link['page_id'] != 'links.toylinks':
        pif.render.hierarchy_append('/cgi-bin/links.cgi?page=%s' % pif.page_id[6:], pif.render.title)
    pif.render.hierarchy_append('', 'Specific Link')
    extra = ''
    if pif.is_allowed('m'): # pragma: no cover
        extra = '- ' + pif.render.format_button("edit", "edlinks.cgi?id=%s" % link['id'])
    return pif.render.format_template('tlink.html', link=link, extra=extra)
def link_page(pif):
    """Render the link listing for the current page, grouped by section.

    An optional ?section= form value restricts the output to one section.
    """
    section_id = useful.clean_id(pif.form.get_str('section'))
    if section_id:
        sections = pif.dbh.fetch_sections({'page_id': pif.page_id, 'id': section_id})
    else:
        sections = pif.dbh.fetch_sections({'page_id': pif.page_id})
    linklines = pif.dbh.fetch_link_lines(pif.page_id, not_flags=pif.dbh.FLAG_ITEM_HIDDEN)
    linklines = pif.dbh.depref('link_line', linklines)
    linklines.sort(key=lambda x: int(x['display_order']))
    # Bucket the link rows by their section for per-section rendering.
    sect_links = dict()
    for link in linklines:
        sect_links.setdefault(link['section_id'], list())
        sect_links[link['section_id']].append(link)
    # Build the "lineup" structure consumed by the tlinks.html template.
    llineup = {'id': pif.page_id, 'name': '', 'section': []}
    for lsec in sections:
        lsec['anchor'] = lsec['id']
        lsec['columns'] = 1
        lran = {'id': 'range', 'name': '', 'entry': list(generate_links(pif, sect_links.get(lsec['id'], [])))}
        lsec['range'] = [lran]
        llineup['section'].append(lsec)
    return pif.render.format_template('tlinks.html', llineup=llineup, sections=sections,
                                      flags=pif.render.format_shown_flags())
def generate_links(pif, links):
    """Yield formatted link entries, skipping trashed ('x') and still-new links."""
    new_flag = pif.dbh.FLAG_LINK_LINE_NEW
    for entry in links:
        if entry['link_type'] == 'x' or (entry['flags'] & new_flag):
            continue
        yield make_link(pif, entry)
def make_link(pif, ent):
    """Build the template dict for one link row (text, flags, display hints)."""
    lnk = dict()
    lnk['text'], lnk['desc'] = format_entry(pif, ent)
    lnk['indent'] = (ent['flags'] & pif.dbh.FLAG_LINK_LINE_INDENTED) != 0
    lnk['id'] = ent['id']
    cmd = ent['link_type']
    lnk['comment'] = True
    # Moderators see links whose last check raised an exception as 'bad'.
    if pif.is_allowed('m'): # pragma: no cover
        if ent.get('last_status') == 'exc':
            cmd = 'b'
    lnk['linktype'] = cmd # linktypes.get(cmd)
    lnk['large'] = ent['flags'] & pif.dbh.FLAG_LINK_LINE_FORMAT_LARGE
    return lnk
def format_entry(pif, ent):
    """Format one link row as (html_anchor_plus_badges, description_parts).

    The description field is '|'-delimited; it is returned split into a list.
    Badges (country flag, reciprocal, PayPal) are appended after the anchor.
    """
    # (label, html) pairs for the known badge types; '' is the default bullet.
    dictFlag = {
        '': ('o', pif.render.format_image_art('wheel.gif', also={'class': 'dlm'})),
        'Reciprocal': ('Reciprocal', '<i class="fas fa-refresh dlm"></i>'),
        'PayPal': ('Accepts PayPal', '<i class="fab fa-paypal dlm"></i>'),
    }
    is_large = ent['flags'] & pif.dbh.FLAG_LINK_LINE_FORMAT_LARGE
    url = ent['url']
    tag = ent['name']
    dlms = []
    if ent['country']:
        dlms.append(ent['country'])
    cmt = ent['description']
    if ent['flags'] & pif.dbh.FLAG_LINK_LINE_RECIPROCAL:
        dlms.append('Reciprocal')
    if ent['flags'] & pif.dbh.FLAG_LINK_LINE_PAYPAL:
        dlms.append('PayPal')
    ostr = pif.render.format_link(url, tag) + ' '
    if not dlms and not cmt:
        pass
    elif not dlms:
        # add name
        if not is_large:
            ostr += format_delimiter(pif, dictFlag[''])
    else:
        also = {'class': 'dlm'}
        for dlm in dlms:
            # Country codes render as flag images; other badges use dictFlag.
            flag = pif.render.show_flag(dlm)
            if flag:
                ostr += useful.img_src(flag[1], also={'class': 'dlm'})
            else:
                ostr += format_delimiter(pif, dictFlag[dlm])
    # if cmt and is_large:
    #     ostr += '<br>' + '<br>'.join(cmt.split('|'))
    # else:
    #     ostr += cmt
    return ostr, cmt.split('|')
def format_delimiter(pif, dlm):
    """Return the HTML snippet for a link badge/delimiter plus a trailing space.

    `dlm` is a (label, html) pair from format_entry's dictFlag table; only
    the html part is used.  `pif` is kept for interface compatibility.

    FIX: removed unreachable code after the return statement (an old
    useful.img_src()-based rendering that could never execute).
    """
    return dlm[1] + ' '
# -- addlink
def read_config(pif, showall=False):
    """Collect link-section metadata from the database.

    Returns (listCats, listIndices, dictCats, listRejectCats):
      listCats       - (section id, section name) pairs the user may see
      listIndices    - page names (the part after 'links.') in order seen
      dictCats       - section id -> page name
      listRejectCats - sections belonging to the rejects/trash pages
    NOTE(review): `showall` is currently unused.
    """
    listCats = []
    listIndices = []
    listRejectCats = []
    dictCats = {}
    allpages = pif.dbh.fetch_pages("id like 'links.%'")
    # Admins see all pages; others only pages not flagged hidden.
    if pif.is_allowed('a'): # and pif.render.is_beta: # pragma: no cover
        showpage = {x['page_info.id']: 1 for x in allpages}
    else:
        showpage = {x['page_info.id']: not (x['page_info.flags'] & pif.dbh.FLAG_PAGE_INFO_HIDDEN) for x in allpages}
    sections = pif.dbh.fetch_sections(where="page_id like 'links.%'")
    for section in sections:
        page_name = section['section.page_id'].split('.', 1)[1]
        if page_name not in listIndices:
            listIndices.append(page_name)
        if showpage[section['section.page_id']]:
            listCats.append((section['section.id'], section['section.name']))
        if section['section.page_id'] in ['links.rejects', 'links.trash']:
            listRejectCats.append((section['section.id'], section['section.name']))
        dictCats[section['section.id']] = page_name
    return listCats, listIndices, dictCats, listRejectCats
def read_blacklist(pif):
    """Split the blacklist table into URL-substring rejects and banned IPs."""
    rows = pif.dbh.fetch_blacklist()
    reject = [row['blacklist.target'] for row in rows if row['blacklist.reason'] == 'site']
    banned = [row['blacklist.target'] for row in rows if row['blacklist.reason'] == 'ip']
    return reject, banned
def is_blacklisted(url, rejects):
    """Return the first reject substring found in `url`, or '' if none match."""
    for needle in rejects:
        if needle in url:
            return needle
    return ''
def fix_url(url):
    """Normalize a URL for duplicate comparison: lowercase, no trailing slash.

    FIX: guard against an empty string, which previously raised IndexError
    on url[-1].
    """
    url = url.lower()
    if url.endswith('/'):
        url = url[:-1]
    return url
def read_all_links(pif):
    """Scan all link rows; return (all_links, highest_disp_order).

    all_links          - normalized URLs of every real link (types 'l','s','x'),
                         used for duplicate detection
    highest_disp_order - (page_id, section_id) -> largest display_order seen,
                         seeded with 0 for every known section
    """
    highest_disp_order = {}
    all_links = []
    for section in pif.dbh.fetch_sections(where="page_id like 'links%'"):
        highest_disp_order.setdefault((section['section.page_id'], section['section.id']), 0)
    for link in pif.dbh.fetch_link_lines():
        link = pif.dbh.depref('link_line', link)
        highest_disp_order.setdefault((link['page_id'], link['section_id']), 0)
        if link['display_order'] > highest_disp_order[(link['page_id'], link['section_id'])]:
            highest_disp_order[(link['page_id'], link['section_id'])] = link['display_order']
        if link['url'] and link['link_type'] in 'lsx':
            all_links.append(fix_url(link['url']))
    return all_links, highest_disp_order
def add_new_link(pif, dictCats, listRejects):
    """Validate a user-submitted link and insert it, or explain the rejection.

    Returns an HTML fragment: either the list of rejection reasons or a
    confirmation showing the newly added entry.

    FIXES: the HTML-content check used reasons.extend(<string>), which added
    one list element per character; now appended as a single message.  The
    bare `except:` was narrowed to `except Exception:`, and the trash-log
    write now closes its file handle via `with`.
    """
    reasons = []
    ostr = "<hr>"
    #'columns': ['id', 'page_id', 'section_id', 'display_order', 'flags', 'link_type', 'country', 'url', 'name', 'description', 'note'],
    all_links, highest_disp_order = read_all_links(pif)
    link = {}
    try:
        link['url'] = url = pif.form.get_str('url', '')
        link['section_id'] = pif.form.get_str('cat', '')
        link['page_id'] = 'links.' + dictCats[link['section_id']]
        link['display_order'] = highest_disp_order[(link.get('page_id', 'unknown'), link.get('section_id', 'unknown'))] + 1
    except Exception:
        # Missing/unknown category or malformed form data.
        reasons.extend([
            "Some information was missing.",
            "The request was badly formed.",
            "The request was not made by the supplied web form."])
    # Admin submissions skip the "new, awaiting moderation" flag.
    link['flags'] = pif.dbh.FLAG_LINK_LINE_NEW
    if pif.is_allowed('a'): # pragma: no cover
        link['flags'] = 0
    link['link_type'] = 'l'
    link['name'] = pif.form.get_str('name', '')
    link['country'] = pif.form.get_str('country', '')
    link['description'] = pif.form.get_str('desc', '')
    link['note'] = pif.remote_addr + '/' + pif.remote_host + '. ' + pif.form.get_str('note', '')
    url = fix_url(url)
    for reject in listRejects:
        if url.find(reject) >= 0:
            reasons.append("The URL is on a banned list.")
    if url in all_links and not pif.form.get('dup'):
        reasons.append("The site has already been submitted.")
    if url.find('://') < 0:
        reasons.append("The URL is not properly formed.")
    if (link['description'].find('<') >= 0) or (link['name'].find('<') >= 0):
        reasons.append("The description text or the notes text contains HTML.")
    if (link['description'].find('\n') >= 0) or (link['name'].find('\n') >= 0):
        reasons.extend([
            "The request was badly formed.",
            "The request was not made by the supplied web form."])
    if link['country'] == 'US':
        link['country'] = ''
    #str = 'l|' + url + '|' + tag + '|' + dlm + '|' + cmt
    if reasons:
        ostr += "<b>The site submitted is being rejected. Sorry.</b><br>\n"
        ostr += "Possible reason%s:<ul>\n" % useful.plural(reasons)
        for reason in reasons:
            ostr += "<li>" + reason + '\n'
        ostr += "</ul>If your submission has to do with sex, drugs, hotel reservations or ringtones, please go away and never come back. Seriously.<p>\n"
        ostr += "Feel free to use your browser's BACK button to fix your entry, then resubmit; or,\n"
        ostr += "if you think this rejection was in error, you can send email. Just don't hope for too much.\n"
        with open(os.path.join(config.LOG_ROOT, 'trash.log'), 'a+') as logf:
            logf.write(str(link) + '\n')
    else:
        link['id'] = pif.dbh.insert_link_line(link)
        ostr += "The following has been added to the list:<br><ul>\n"
        ent = format_entry(pif, link)
        ostr += ent[0] + ' '
        ostr += '<br>'.join(ent[1])
        ostr += '\n</ul>\n'
        # Verify the new URL immediately so its status is recorded.
        check_link(pif, link)
    return ostr
# main routine for addlink
@basics.web_page
def add_page(pif):
    """Web entry point for the 'add a link' form; handles banned IPs and submits."""
    pif.render.print_html()
    pif.render.set_page_extra(pif.render.reset_button_js)
    rejected, blacklist = read_blacklist(pif)
    # Refuse service entirely to banned client addresses.
    for l in blacklist:
        if os.environ.get('REMOTE_ADDR') == l:
            raise useful.SimpleError("You have been banned from using this service because of previous abuses. If you have a problem with this, contact us via email, but don't hope for much.")
    listCats, listIndices, dictCats, listRejectCats = read_config(pif)
    # A 'url' form value means this request is a submission, not just the form.
    lnk = add_new_link(pif, dictCats, rejected) if pif.form.get_str('url') else ''
    context = {
        'categories': listCats,
        'countries': mbdata.countries,
        'link': lnk,
    }
    return pif.render.format_template('tlinkadd.html', **context)
# -- edlinks
# (code, label) pairs for the link_line.link_type select in the editor UI.
link_type_names = [
    ('b', 'bad'),
    ('f', 'folder'),
    ('g', 'graphic'),
    ('l', 'normal'),
    ('n', 'none'),
    ('p', 'button'),
    ('s', 'star'),
    ('t', 'text'),
    ('x', 'trash'),
]
# (hex bit value, label) pairs for the link_line.flags checkbox widget.
flag_check_names = [
    ('01', 'New'),
    ('02', 'Recip'),
    ('04', 'Paypal'),
    ('08', 'Indent'),
    ('10', 'Large'),
    ('20', 'NoVer'),
    ('40', 'Assoc'),
]
def edit_single(pif):
    """Editor page for one link row.

    Handles the form buttons first (save / test / delete / reject / add),
    then renders the edit form for the row identified by ?id= (or the row
    just created by 'add').
    """
    listCats, listIndices, dictCats, listRejectCats = read_config(pif, True)
    #listCats.append(('single', 'single'))
    table_info = pif.dbh.table_info['link_line']
    link_id = pif.form.get_str('id')
    if pif.form.get_bool('save'):
        all_links, highest_disp_order = read_all_links(pif)
        nlink = {x: pif.form.get_str(x) for x in table_info['columns']}
        nlink['flags'] = 0
        if pif.form.get_str('section_id') == 'single':
            pass
        else:
            nlink['page_id'] = 'links.' + dictCats.get(pif.form.get_str('section_id', ''), pif.form.get_str('section_id', ''))
            nlink['display_order'] = highest_disp_order.get((nlink['page_id'], nlink['section_id']), 0) + 1
        # Flags arrive as a list of hex bit strings from the checkboxes.
        formflags = pif.form.get_list('flags')
        for flag in formflags:
            nlink['flags'] += int(flag, 16)
        if nlink['flags'] & pif.dbh.FLAG_LINK_LINE_NOT_VERIFIABLE:
            nlink['last_status'] = 'NoVer'
        pif.dbh.update_link_line(nlink)
        pif.render.message('<br>record saved<br>')
    elif pif.form.get_bool('test'):
        link = pif.dbh.fetch_link_line(link_id)[0]
        check_link(pif, link) # don't care about blacklist here, just actual check
    elif pif.form.get_bool('delete'):
        pif.dbh.delete_link_line(link_id)
        return "<br>deleted<br>"
    elif pif.form.get_bool('reject'):
        # Move the row to the chosen section of the rejects page.
        nlink = {x: pif.form.get_str(x, '') for x in table_info['columns']}
        nlink['page_id'] = 'links.rejects'
        nlink['display_order'] = 1
        nlink['section_id'] = pif.form.get_str('rejects_sec')
        nlink['flags'] = 0
        pif.dbh.update_link_line(nlink)
        pif.render.message('<br>record rejected<br>')
    elif pif.form.get_bool('add'):
        link_id = (#pif.dbh.insert_link_line({'page_id': pif.form.get_str('page_id', ''), 'section_id': pif.form.get_str('sec')})
            # pif.form.set_val('id',
            pif.dbh.insert_link_line({'page_id': pif.form.get_str('page_id'), 'country': '', 'flags': 1, 'link_type': 'l'}))
    links = pif.dbh.fetch_link_lines(where="id='%s'" % link_id)
    if not links:
        raise useful.SimpleError("That ID wasn't found.")
    link = links[0]
    asslinks = [(0, '')] + [(x['link_line.id'], x['link_line.name']) for x in pif.dbh.fetch_link_lines(flags=pif.dbh.FLAG_LINK_LINE_ASSOCIABLE)]
    descs = pif.dbh.describe_dict('link_line')
    header = '<form>' + pif.create_token()
    header += '<input type="hidden" name="o_id" value="%s">\n' % link['link_line.id']
    entries = []
    # One (name, current value, input widget) triple of cells per column.
    for col in table_info['columns']:
        col_long = 'link_line.' + col
        coltype = descs.get(col).get('type', 'unknown')
        value = useful.printablize(link.get(col_long, ''))
        entries.append({'text': col})
        # entries.append({'text': '<a href="%s">%s</a>' % (link.get(col_long, ''), link.get(col_long, ''))
        # if col == 'url' else link[col_long]})
        entries.append({'text': '<a href="%s">%s</a>' % (value, value) if col == 'url' else value})
        if col in table_info.get('readonly', []):
            cell = ' <input type="hidden" name="%s" value="%s">' % (col, value)
        # elif col == 'page_id':
        #     cell = ' <input type="hidden" name="%s" value="%s">' % (col, value)
        elif col == 'section_id':
            cell = pif.render.format_select('section_id', listCats, selected=value, blank='Please choose one from the list')
        elif col == 'flags':
            cell = pif.render.format_checkbox("flags", flag_check_names, useful.bit_list(link[col_long]))
        elif col == 'country':
            cell = pif.render.format_select_country('country', value)
        elif col == 'link_type':
            cell = pif.render.format_select(col, link_type_names, selected=value)
        elif col == 'associated_link':
            cell = pif.render.format_select(col, asslinks, selected=value)
        elif coltype.startswith('varchar('):
            colwidth = int(coltype[8:-1])
            cell = pif.render.format_text_input(col, colwidth, 64, value=value)
        elif coltype.startswith('int('):
            if link[col_long] is None:
                value = 0
            colwidth = int(coltype[4:-1])
            cell = pif.render.format_text_input(col, colwidth, value=value)
        else:
            cell = coltype
        entries.append({'text': cell})
    footer = ''.join([
        pif.render.format_button_input("save"),
        pif.render.format_button_input("delete"),
        pif.render.format_button_input("test"),
        pif.render.format_button_input("reject"),
        pif.render.format_select('rejects_sec', listRejectCats, blank='Please choose one from the list'),
        '</form>',
        pif.render.format_button("edit", link=pif.dbh.get_editor_link('link_line', {'id': link_id})),
    ])
    llineup = {'id': 'tl', 'name': 'Edit Link', 'columns': 3, 'widthauto': True,
               'section': [{'id': 's', 'name': '',
                            'range': [{'entry': entries}]}],
               'header': header, 'footer': footer,
               }
    pif.render.format_matrix_for_template(llineup)
    return pif.render.format_template('simplematrix.html', llineup=llineup)
def edit_multiple(pif):
    """Editor listing of many link rows, selected by form parameters.

    Selection precedence: ?as= (associable links), ?sec=new (new flag),
    ?sec=nonf (failed verification), ?stat= (by last_status), ?sec= (one
    section), otherwise ?page= (whole page).
    """
    table_info = pif.dbh.table_info['link_line']
    page_id = ''
    sec_id = pif.form.get_str('sec', '')
    if pif.form.get_bool('as'):
        linklines = pif.dbh.fetch_link_lines(flags=pif.dbh.FLAG_LINK_LINE_ASSOCIABLE, order="display_order")
    elif sec_id == 'new':
        linklines = pif.dbh.fetch_link_lines(flags=pif.dbh.FLAG_LINK_LINE_NEW)
    elif sec_id == 'nonf':
        # Live links whose last check failed and that aren't flagged NoVer (0x20).
        linklines = pif.dbh.fetch_link_lines(where="last_status is not Null and last_status != 'H200' and link_type in ('l','s') and page_id != 'links.rejects' and page_id != 'links.trash' and (flags & 32)=0")
    elif pif.form.get_str('stat'):
        if pif.form.get_str('stat') == 'None':
            linklines = pif.dbh.fetch_link_lines(where="last_status is NULL", order='id')
        else:
            linklines = pif.dbh.fetch_link_lines(where="last_status='%s'" % pif.form.get_str('stat'), order='id')
    elif sec_id:
        linklines = pif.dbh.fetch_link_lines(where="section_id='%s'" % sec_id, order="display_order")
        section = pif.dbh.fetch_section(sec_id)
        page_id = section['page_id']
    else:
        linklines = pif.dbh.fetch_link_lines(where="page_id='%s'" % pif.form.get_str('page'), order="display_order")
    pif.render.message(len(linklines), 'lines')
    # Header row of column names, then one cell per column per link.
    entries = [{'text': col} for col in table_info['columns']]
    for link in linklines:
        pif.dbh.depref('link_line', link)
        for col in table_info['columns']:
            val = link.get(col, '')
            if col == 'id':
                entries.append({'text': '<a href="?id=' + str(val) + '">' + str(val) + '</a>'})
            elif col == 'url':
                entries.append({'text': '<a href="%s">%s</a>' % (val, val)})
            else:
                entries.append({'text': useful.printablize(val)})
    footer = pif.render.format_button("add", "edlinks.cgi?page_id=%s&sec=%s&add=1" % (page_id, sec_id))
    llineup = {'id': 'tl', 'name': 'Edit Link', 'columns': len(table_info['columns']),
               'section': [{'id': 's', 'name': '',
                            'range': [{'entry': entries}]}],
               'footer': footer,
               }
    pif.render.format_matrix_for_template(llineup)
    return pif.render.format_template('simplematrix.html', llineup=llineup)
def edit_choose(pif):
    """Landing page of the link editor: section list plus status summary."""
    # Human-readable labels for last_status codes ('H'+HTTP code,
    # 'U'+URLError errno, plus a few sentinels).
    reasons = {
        'None': '(Untested)',
        'H200': '(Good)',
        'H302': '(Moved)',
        'H400': '(Bad Request)',
        'H403': '(Forbidden)',
        'H404': '(Not Found)',
        'H410': '(Gone)',
        'H418': '(Teapot)',
        'H429': '(Too Many Reqs)',
        'H500': '(Internal Error)',
        'H502': '(Bad Gateway)',
        'H503': '(Unavailable)',
        'NoVer': '(Ignored)',
        'U1': '(Bad Cert)',
        'U60': '(Timeout)',
        'U61': '(Conn Refused)',
        'U65': '(No Route)',
        'U8': '(No DNS)',
        'exc': '(Exception)',
    }
    link_statuses = pif.dbh.fetch_link_statuses()
    # status code -> number of links with that status
    link_statuses = {str(x['last_status']): x['count(*)'] for x in link_statuses}
    #'link_statuses': ["%s (%s)" % (x, reasons.get(x, 'Unknown')) for x in sorted(pif.dbh.fetch_link_statuses())],
    context = {
        'sections': sorted(pif.dbh.fetch_sections(where="page_id like 'links%'"),
                           key=lambda x: x['section.page_id']),
        'blacklist': pif.dbh.get_editor_link('blacklist', {}),
        'link_statuses': link_statuses,
        'reasons': reasons,
    }
    return pif.render.format_template('tlinkcats.html', **context)
# main entry point for links editor
@basics.web_page
def edit_links(pif):
    """Web entry point for the link editor: dispatch on the form parameters."""
    pif.render.print_html()
    if pif.form.get_str('id') or pif.form.get_bool('add'):
        return edit_single(pif)
    elif pif.form.has_any(['as', 'sec', 'stat', 'page']):
        return edit_multiple(pif)
    else:
        return edit_choose(pif)
# -- link checker
def check_links(pif, sections=None, reject=[], retest=False, visible=False):
    """Re-verify link URLs, section by section, updating last_status.

    With retest=True only previously-failing links are rechecked.
    NOTE(review): mutable default `reject=[]` is never mutated here (it is
    only passed through to check_link), so it is harmless as written.
    """
    pif.dbh.set_verbose(True)
    for sec in sections if sections else [None]:
        pif.dbh.clear_link_line_statuses(section=sec, where='last_status != "H200"' if retest else '')
        links = pif.dbh.fetch_link_lines(section=sec, where='last_status is NULL' if retest else '', order='id')
        for link in links:
            if not retest or link['link_line.page_id'] != 'links.rejects':
                check_link(pif, link, reject, visible=visible)
def check_link(pif, link, rejects=[], visible=False):
    """Fetch one link's URL and record the result code in last_status.

    Status codes: 'H'+HTTP status, 'U'+URLError errno, 'NoVer' for
    unverifiable entries, 'exc' for any other failure.
    NOTE: Python 2 only (print statements, urllib2, `except ... as (c)`).
    """
    if link:
        print link, visible
    link = pif.dbh.depref('link_line', link)
    lstatus = 'unset'
    # visible mode skips hidden/rejected rows entirely.
    if visible and (link['flags'] & pif.dbh.FLAG_LINK_LINE_HIDDEN or link['page_id'] == 'links.rejects'):
        return
    print link['id'], link['url'],
    if link['flags'] & pif.dbh.FLAG_LINK_LINE_NOT_VERIFIABLE or link['link_type'] in 'tfpn':
        lstatus = 'NoVer'
    elif link['link_type'] in 'bglsx':
        # ret = is_blacklisted(link['url'], rejects)
        # if ret:
        #     print link['id'], link['section_id'], link['url'], "BLACKLISTED", ret
            #pif.dbh.dbi.remove('link_line', 'id=%s' % link['id'])
        lurl = link['url']
        # Site-relative URLs point at this site itself.
        if lurl.startswith('/'):
            lurl = 'http://www.bamca.org' + lurl
        try:
            url = urllib2.urlopen(urllib2.Request(lurl, headers={'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:42.0) Gecko/20100101 Firefox/42.0'}))
            lstatus = 'H' + str(url.code)
        except urllib2.HTTPError as (c):
            print 'http error:', c.code
            lstatus = 'H' + str(c.code)
        except urllib2.URLError as (c):
            print 'url error:', c.reason
            lstatus = 'U' + str(c.reason[0])
        except:
            lstatus = 'exc'
    print lstatus
    # Only write back when the status actually changed.
    if link.get('last_status') != lstatus:
        pif.dbh.update_link_line({'id': str(link['id']), 'last_status': lstatus})
# ---- ----------------------------------------------------
def check_blacklisted_links(pif, sections=None):
reject, banned = links.read_blacklist(pif)
pif.dbh.set_verbose(True)
for sec in sections if sections else [None]:
for link in pif.dbh.fetch_link_lines(section=sec):
link = pif.dbh.depref('link_line', link)
if link['link_type'] in 'blsxg':
ret = is_blacklisted(link['url'], reject)
if ret:
print link['id'], link['section_id'], link['url'], "BLACKLISTED", ret
#pif.dbh.dbi.remove('link_line', 'id=%s' % link['id'])
def update_links(pif):
    """Print SQL statements that renumber low-id link rows into the 100+ range.

    Rows with id < 100 (unless flagged 0x40/Assoc) are paired with unused ids
    in [100, 3000).  Nothing is executed; the statements are printed for
    manual review.  NOTE: Python 2 print statements.
    """
    links = pif.dbh.fetch_link_lines()
    good_ids = [x for x in range(100, 3000)]
    bad_ids = []
    for lnk in links:
        id = lnk['link_line.id']
        if id in good_ids:
            good_ids.remove(id)
        elif id < 100 and not lnk['link_line.flags'] & 64:
            bad_ids.append(id)
    bad_ids.sort()
    for ids in zip(good_ids, bad_ids):
        print "update link_line set id=%d where id=%d;" % ids
def cl_check_links(pif, *filelist):
    """CLI wrapper for check_links: pulls 'retest'/'visible' flags out of args.

    BUG FIX: `*filelist` arrives as a tuple, which has no .remove(); the
    old code raised AttributeError whenever either flag was supplied.
    Convert to a list before mutating.
    """
    filelist = list(filelist)
    retest = 'retest' in filelist
    if retest:
        filelist.remove('retest')
    visible = 'visible' in filelist
    if visible:
        filelist.remove('visible')
    check_links(pif, filelist, retest=retest, visible=visible)
# (key, handler, description) table consumed by useful.cmd_proc in commands().
cmds = [
    ('u', update_links, "update"),
    ('c', cl_check_links, "check"),
    ('b', check_blacklisted_links, 'check blacklist'),
]
@basics.command_line
def commands(pif):
    """Command-line entry point: dispatch to the handlers in `cmds`."""
    useful.cmd_proc(pif, './tlinks.py', cmds)
# ---- ----------------------------------------------------
if __name__ == '__main__': # pragma: no cover
commands(dbedit='')
| 2.21875 | 2 |
rise_of_machines/compare_impurity.py | fpdevil/rise_of_machines | 0 | 12758915 | <gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Description
-----------
Comparison of the impurity criteria based on splitting
Authors: <NAME>
:module:decision_tree.py
:created: Sat Feb 16 23:58:45 CST 2019
:copyright: Copyright © 2019 <NAME>
:license: MIT
:moduleauthor:<NAME> <<EMAIL>>
"""
import numpy as np
import matplotlib.pyplot as plt
def gini(p):
    """Gini impurity of a two-class distribution.

    :param p: probability (or proportion) of samples in the first class;
        a scalar or a NumPy array (evaluated element-wise)
    :returns: Gini index
    :rtype: float
    """
    q = 1 - p
    return p * q + q * (1 - q)
def entropy(p):
    """Shannon entropy (base 2) of a two-class distribution.

    :param p: probability of the first class, with 0 < p < 1
        (p == 0 or p == 1 produces log2(0)); scalar or NumPy array
    :returns: entropy in bits
    :rtype: float
    """
    q = 1 - p
    return -(p * np.log2(p) + q * np.log2(q))
def error(p):
    """Misclassification error of a two-class distribution.

    :param p: probability of the first class (scalar)
    :returns: 1 minus the majority-class probability
    :rtype: float
    """
    worst = np.max([p, 1 - p])
    return 1 - worst
# Class-1 probabilities sampled on [0, 1) with step 0.01.
x = np.arange(0.0, 1.0, 0.01)

# calculate the impurity measures based on the defined
# functions for a set of values.
# entropy() is undefined at p == 0 (log2(0)), so that point is mapped to
# None and matplotlib leaves a gap there; sc_ent propagates the gap.
ent = [entropy(p) if p != 0 else None for p in x]
sc_ent = [e * 0.5 if e else None for e in ent]
err = [error(i) for i in x]
gini_idx = gini(x)

idx = [ent, sc_ent, gini_idx, err]
labels = [
    'Entropy', 'Entropy (scaled)', 'Gini Impurity', 'Misclassification Error'
]
symbols = ['-', ':', '--', '-.']
colorl = ['blue', 'green', 'red', 'cyan', 'pink']

fig = plt.figure()
ax = plt.subplot(111)
# Draw each impurity curve with its own line style and colour.
for i, lab, s, c in zip(idx, labels, symbols, colorl):
    line = ax.plot(x, i, label=lab, linestyle=s, lw=2, color=c)

ax.legend(
    loc='upper center',
    bbox_to_anchor=(0.5, 1.15),
    ncol=5,
    fancybox=True,
    shadow=False)
# Reference lines at the scaled-entropy maximum (0.5) and entropy maximum (1.0).
ax.axhline(y=0.5, linewidth=1, color='y', linestyle='--')
ax.axhline(y=1.0, linewidth=1, color='y', linestyle='--')
# Label each curve with an arrow annotation.
ax.annotate(
    'Entropy',
    xy=(0.19, 0.7),
    xycoords='data',
    xytext=(0.3, 0.7),
    arrowprops=dict(arrowstyle="simple", fc="0.6", ec="none"),
    textcoords='data')
ax.annotate(
    'Entropy (scaled)',
    xy=(0.2, 0.36),
    xycoords='data',
    xytext=(0.2, 0.55),
    arrowprops=dict(arrowstyle="simple", fc="0.6", ec="none"),
    textcoords='data')
ax.annotate(
    'Gini Impurity',
    xy=(0.82, 0.29),
    xycoords='data',
    xytext=(0.7, 0.55),
    arrowprops=dict(arrowstyle="simple", fc="0.6", ec="none"),
    textcoords='data')
ax.annotate(
    'Misclassification Error',
    xy=(0.7, 0.3),
    xycoords='data',
    xytext=(0.4, 0.2),
    arrowprops=dict(arrowstyle="simple", fc="0.6", ec="none"),
    textcoords='data')
plt.ylim([0, 1.1])
plt.xlabel('p[i=1]')
plt.ylabel('Impurity Index')
plt.show()
| 3.203125 | 3 |
flight_plotter.py | jfox13-nd/ned-drone-collison-avoidance | 0 | 12758916 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Graph Utilities for plotting longitude and latitude of flight results
"""
from pymavlink import mavutil
import math
import re
import numpy as np
import matplotlib.pyplot as plt
class Location:
    """A geographic coordinate as a latitude/longitude pair."""

    def __init__(self, lat=0.0, lon=0.0):
        """Create a location; defaults to (0.0, 0.0)."""
        self.lat = lat
        self.lon = lon
class CoordinateLogger:
    """Accumulates latitude/longitude samples in two parallel lists.

    Each recorded sample is stored as its own single-element sublist,
    matching the layout consumers of lat_array/lon_array expect.
    """

    def __init__(self):
        self.lat_array = []
        self.lon_array = []

    def add_data(self, latitude, longitude):
        """Record one latitude/longitude sample."""
        self.lat_array.append([latitude])
        self.lon_array.append([longitude])
##############################################################################################
# Provides Graph Plotting functionality
##############################################################################################
class GraphPlotter:
    # Plots two latitude/longitude tracks against each other with matplotlib,
    # optionally highlighting a single marker point.

    def __init__(self, lat1_array, lon1_array, lat2_array, lon2_array, xlabel="", ylabel="", title=""):
        # First track (drawn wide, gray) and second track (narrow, blue).
        self.lat1_array = lat1_array
        self.lon1_array = lon1_array
        self.lat2_array = lat2_array
        self.lon2_array = lon2_array
        self.xlegend = xlabel
        self.ylegend = ylabel
        self.title = title
        # marker_lat == 0 doubles as the "no marker" sentinel (see scatter_plot),
        # so a marker exactly on latitude 0 would be suppressed.
        self.marker_lat = 0
        self.marker_lon = 0

    def add_marker(self, markerlat, markerlon):
        # Place a highlight marker at the given coordinate.
        self.marker_lat = markerlat
        self.marker_lon = markerlon

    def scatter_plot(self):
        # Render both tracks (gray first, so blue draws on top) plus the
        # marker if one was set, then show the interactive window.
        plt.plot(self.lat1_array, self.lon1_array, linewidth=10, color='gray')
        plt.plot(self.lat2_array, self.lon2_array, linewidth=4, color='blue')
        plt.xlabel(self.xlegend)
        plt.ylabel(self.ylegend)
        plt.title(self.title)
        if self.marker_lat != 0:
            plt.plot([self.marker_lat], [self.marker_lon], "ro", markersize=22)
        plt.show()
| 3.25 | 3 |
Scrapy/my_movie_crawlspider/my_movie_crawlspider/pipelines.py | pengchenyu111/SpiderLearning | 3 | 12758917 | <filename>Scrapy/my_movie_crawlspider/my_movie_crawlspider/pipelines.py
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
from my_movie_crawlspider.items import MaoyanMoviesItem
import pymysql
class MyMovieCrawlspiderPipeline:
    """Persists MaoyanMoviesItem objects into the my_movie MySQL table."""

    def open_spider(self, spider):
        """Open the MySQL connection using the spider's settings."""
        self.connection = pymysql.connect(
            host=spider.settings['MYSQL_HOST'],
            user=spider.settings['MYSQL_USERNAME'],
            password=spider.settings['MYSQL_PASSWORD'],
            db=spider.settings['MYSQL_DB'],
            charset=spider.settings['MYSQL_DB_CHARSET']
        )

    def process_item(self, item, spider):
        """Save movie items; best-effort, failures are logged and skipped."""
        if isinstance(item, MaoyanMoviesItem):
            try:
                self.save_movie_meta(item)
            except Exception as e:
                print(e)
        # Bug fix: Scrapy pipelines must return the item so downstream
        # pipeline components receive it; the original returned None.
        return item

    def close_spider(self, spider):
        """Release the database connection when the spider finishes."""
        self.connection.close()

    def save_movie_meta(self, item):
        """INSERT the item's fields into my_movie with a parameterized query.

        Field names come from the item definition (not user input); values
        are bound as parameters and stripped of surrounding whitespace.
        """
        keys = item.keys()
        values = tuple(item.values())
        fields = ','.join(keys)
        placeholders = ','.join(['%s'] * len(keys))
        sql = 'INSERT INTO my_movie (%s) VALUES (%s)' % (fields, placeholders)
        # Close the cursor deterministically instead of leaking it.
        with self.connection.cursor() as cursor:
            cursor.execute(sql, tuple(i.strip() for i in values))
        return self.connection.commit()
| 2.671875 | 3 |
scorers/classification/binary/false_positive_count.py | theaiacademy/driverlessai-recipes | 0 | 12758918 | """Optimizes for specific Confusion Matrix Values: `FP` - only recommended if threshold is adjusted"""
import typing
import numpy as np
from h2oaicore.metrics import CustomScorer
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import confusion_matrix
class CMFalsePositive(CustomScorer):
    """Scorer that minimizes the false-positive count of the confusion matrix."""

    _threshold = 0.5  # Example only, should be adjusted based on domain knowledge and other experiments
    _description = "Reduce false positive count"
    _binary = True
    _maximize = False  # fewer false positives is better
    _perfect_score = 0
    _display_name = "FP"

    def score(self,
              actual: np.array,
              predicted: np.array,
              sample_weight: typing.Optional[np.array] = None,
              labels: typing.Optional[np.array] = None) -> float:
        """Threshold the predictions and return the FP cell of the confusion matrix."""
        encoder = LabelEncoder()
        labels = encoder.fit_transform(labels)
        actual = encoder.transform(actual)
        hard_calls = predicted > self._threshold
        cm = confusion_matrix(actual, hard_calls,
                              sample_weight=sample_weight, labels=labels)
        # Binary confusion matrix flattens to [tn, fp, fn, tp].
        tn, fp, fn, tp = cm.ravel()
        return fp
| 2.765625 | 3 |
backend/balance/migrations/0003_auto_20190827_1917.py | Kodeworks/budsjetteringssystem | 6 | 12758919 | <filename>backend/balance/migrations/0003_auto_20190827_1917.py
# Generated by Django 2.2.4 on 2019-08-27 17:17
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: only widens the help_text on
    # BankBalance.date and BankBalance.money; field types are unchanged,
    # so this is metadata-only and safe to apply.

    dependencies = [
        ('balance', '0002_auto_20190627_1503'),
    ]

    operations = [
        migrations.AlterField(
            model_name='bankbalance',
            name='date',
            field=models.DateField(help_text='The date the balance was recorded on'),
        ),
        migrations.AlterField(
            model_name='bankbalance',
            name='money',
            field=models.PositiveIntegerField(help_text='The amount of money available'),
        ),
    ]
| 1.390625 | 1 |
dthm4kaiako/poet/views.py | taskmaker1/dthm4kaiako | 3 | 12758920 | <gh_stars>1-10
"""Views for POET application."""
from ipware import get_client_ip
from json import dumps
from django.forms import ValidationError
from django.urls import reverse, reverse_lazy
from django.shortcuts import render, redirect
from django.core.exceptions import ObjectDoesNotExist
from django.contrib import messages
from django.db.models import Q, Count
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.views.generic import (
ListView,
DetailView,
TemplateView,
)
from django.views.generic.edit import FormView
from poet.forms import (
POETSurveySelectorForm,
POETSurveyForm,
POETContactForm,
)
from poet.models import (
Submission,
ProgressOutcome,
Resource,
)
from poet.utils import select_resources_for_poet_form
from poet import settings as poet_settings
class HomeView(FormView):
    """View for POET homepage: lets the user pick a survey to start."""

    template_name = 'poet/home.html'
    form_class = POETSurveySelectorForm
    success_url = reverse_lazy('poet:form')

    def get_context_data(self, **kwargs):
        """Provide the context data for the POET home view.

        Returns:
            Dictionary of context data.
        """
        context = super().get_context_data(**kwargs)
        # A 'poet_form_resources' session key marks a survey in progress.
        context['active_survey'] = self.request.session.get('poet_form_resources', False)
        return context

    def form_valid(self, form):
        """Store the selected survey's resource pks in the session and redirect."""
        resources_pks = select_resources_for_poet_form(form.cleaned_data['po_group'])
        self.request.session['poet_form_resources'] = resources_pks
        # Flags consumed by poet_form: 'new' suppresses the "loaded
        # incomplete survey" message; 'active' gates POST handling.
        self.request.session['poet_form_new'] = True
        self.request.session['poet_form_active'] = True
        return super().form_valid(form)
def poet_form(request):
    """View for the POET survey form.

    GET: rebuild the form from the resource pks stored in the session
    (redirecting home if none are stored).
    POST: validate the submitted answers, persist them as Submission rows,
    clear the session state and render the results template.
    """
    # Create form view with resources in forms
    context = dict()
    template = 'poet/form.html'
    if request.method == 'POST' and request.session.get('poet_form_active', False):
        form = POETSurveyForm()
        # Check whether POST data is valid, if not return to home
        try:
            form.add_fields_from_request(request)
        except (ObjectDoesNotExist, ValidationError):
            messages.error(request, 'Invalid form data. Returning to POET home.')
            # Delete session data
            request.session.pop('poet_form_resources', None)
            request.session.pop('poet_form_active', None)
            return redirect(reverse('poet:home'))
        context['form'] = form
        # Valid form but missing data
        try:
            data = form.validate(request)
        except ValidationError as e:
            # Fall through and re-render the form with the error message.
            messages.error(request, '{}.'.format(e.message))
        else:
            # Save submissions to database, tagging each with the client IP.
            client_ip, is_routable = get_client_ip(request)
            for submission_data in data:
                submission_data['ip_address'] = client_ip
                Submission.objects.create(**submission_data)
            # Delete session data
            request.session.pop('poet_form_resources', None)
            request.session.pop('poet_form_active', None)
            # Render results template
            template = 'poet/result.html'
            form.update_form_with_summary()
    # if a GET (or any other method) we'll create a blank form
    else:
        # Get resources for form
        resource_pks = request.session.get('poet_form_resources', None)
        if not resource_pks:
            return redirect(reverse('poet:home'))
        # Check if new form; an existing one means the user left mid-survey.
        new_form = request.session.pop('poet_form_new', False)
        if not new_form:
            messages.info(request, 'Loaded incomplete survey resources.')
        resources = Resource.objects.filter(pk__in=resource_pks)
        form = POETSurveyForm()
        form.add_fields_from_resources(resources)
        context['form'] = form
    context['progress_outcomes'] = ProgressOutcome.objects.exclude(learning_area__exact='')
    context['progress_outcomes_json'] = dumps(list(ProgressOutcome.objects.values()))
    return render(request, template, context)
class StatisticsListView(PermissionRequiredMixin, ListView):
    """View for POET statistics list page."""

    model = ProgressOutcome
    context_object_name = 'resources'
    template_name = 'poet/statistics.html'
    permission_required = 'poet.view_submission'

    def get_queryset(self):
        """Get queryset for page.

        Returns:
            Resources ordered by target progress outcome then title,
            each annotated with its submission count.
        """
        return Resource.objects.all().order_by(
            'target_progress_outcome',
            'title',
        ).annotate(submission_count=Count('submissions')).prefetch_related(
            'submissions',
            'target_progress_outcome',
        )

    def get_context_data(self, **kwargs):
        """Provide the context data for the statistics list view.

        Attaches to each resource its three most-selected progress
        outcomes with their selection percentages.

        Returns:
            Dictionary of context data.
        """
        context = super().get_context_data(**kwargs)
        for resource in self.object_list:
            if resource.submission_count > 0:
                # Add top 3 selected progress outcomes
                # TODO: Perform as one query, possibly when requesting queryset
                resource.crowdsourced_pos = ProgressOutcome.objects.filter(
                    submissions__resource=resource
                ).annotate(
                    submission_count=Count('submissions')
                ).order_by('-submission_count')[:3]
                total_submission_count = Submission.objects.filter(resource=resource).count()
                for crowdsourced_po in resource.crowdsourced_pos:
                    crowdsourced_po.percentage = (crowdsourced_po.submission_count / total_submission_count) * 100
                    # Flag crowd picks that disagree with the resource's target.
                    if resource.target_progress_outcome != crowdsourced_po:
                        crowdsourced_po.resource_target = True
        context['total_submissions'] = Submission.objects.count()
        context['submission_threshold'] = poet_settings.MINIMUM_SUBMISSIONS_PER_RESOURCE
        return context
class StatisticsDetailsView(PermissionRequiredMixin, DetailView):
    """View for POET statistics details page (one resource)."""

    model = Resource
    context_object_name = 'resource'
    template_name = 'poet/statistics_detail.html'
    permission_required = 'poet.view_submission'

    def get_context_data(self, **kwargs):
        """Provide the context data for the statistics detail view.

        Returns:
            Dictionary of context data, including per-progress-outcome
            submission counts and fractions for this resource.
        """
        context = super().get_context_data(**kwargs)
        context['statistics'] = True
        total_submissions = Submission.objects.filter(resource=self.object).count()
        # Map each progress-outcome code to the outcome annotated with how
        # many submissions for this resource selected it.
        progress_outcomes = {x.code: x for x in ProgressOutcome.objects.annotate(
            count=Count('submissions', filter=Q(submissions__resource=self.object)))}
        for progress_outcome_code, progress_outcome in progress_outcomes.items():
            # Guard against division by zero when there are no submissions.
            if total_submissions:
                progress_outcome.percentage = progress_outcome.count / total_submissions
            else:
                progress_outcome.percentage = 0
        context['total_submissions'] = total_submissions
        context['progress_outcomes'] = progress_outcomes
        context['progress_outcome_widget'] = 'poet/widgets/progress-outcome-radio-statistics.html'
        context['feedback_submissions'] = self.object.submissions.exclude(feedback__exact='')
        return context
class AboutView(TemplateView):
    """Static about page for the POET website."""

    template_name = 'poet/about.html'
class ContactView(FormView):
    """Contact page: emails the submitted message and redirects home."""

    template_name = 'poet/contact.html'
    form_class = POETContactForm

    def form_valid(self, form):
        """Send email if form is valid."""
        form.send_email()
        messages.success(self.request, 'Your email has been sent.')
        return redirect(reverse('poet:home'))
| 2.046875 | 2 |
tests/framework/test_loaders.py | mrbermell/ffai | 2 | 12758921 | <filename>tests/framework/test_loaders.py
import pytest
from botbowl.core.load import *
def test_rule_loader():
    # Both shipped rule sets load with their names, at least two races,
    # multiple roles per race, and a positive movement allowance.
    rulesetBB2016 = load_rule_set("BB2016")
    rulesetLRB5Experimental = load_rule_set("LRB5-Experimental")
    assert rulesetBB2016.name == "BB2016"
    assert rulesetLRB5Experimental.name == "LRB5-Experimental"
    assert len(rulesetBB2016.races) > 1
    assert len(rulesetLRB5Experimental.races) > 1
    assert len(rulesetBB2016.races[0].roles) > 1
    assert len(rulesetLRB5Experimental.races[0].roles) > 1
    assert rulesetBB2016.races[0].roles[0].ma > 0
    assert rulesetLRB5Experimental.races[0].roles[0].ma > 0
def test_config_loader():
    # Named configurations load with their display names and roster size.
    config_gym11 = load_config("gym-11")
    config_bot_bowl_ii = load_config("bot-bowl-ii")
    assert config_gym11.name == "botbowl"
    assert config_bot_bowl_ii.name == "Bot Bowl II"
    assert config_gym11.roster_size == 16
    assert config_bot_bowl_ii.roster_size == 16
def test_team_loader():
    # The Human team loads under both rule sets, by display name and by
    # filename, and every load is assigned a fresh team id.
    rulesetBB2016 = load_rule_set("BB2016")
    human_team_2016 = load_team_by_name("Human Team", rulesetBB2016)
    assert len(human_team_2016.players) > 0
    assert human_team_2016.players[0].get_ma() > 0
    rulesetExperimental = load_rule_set("LRB5-Experimental")
    human_team_exp = load_team_by_name("Human Team", rulesetExperimental)
    assert len(human_team_exp.players) > 0
    assert human_team_exp.players[0].get_ma() > 0
    human_team_exp_2 = load_team_by_filename("human", rulesetExperimental)
    assert human_team_exp_2.name == human_team_exp_2.name
    # Teams are assigned new ids when loaded
    assert human_team_exp_2.team_id != human_team_exp.team_id
    assert human_team_2016.team_id != human_team_exp.team_id
def test_arena_loader():
    # Arena names resolve with or without the .txt extension, and the
    # 11-player pitch is 26x15 plus a one-square border on each side.
    arena = load_arena("ff-pitch-1")
    arena2 = load_arena("ff-pitch-1.txt")
    assert arena.height == arena2.height
    pitch = load_arena("ff-pitch-11")
    assert pitch.width == 26 + 2
    assert pitch.height == 15 + 2
def test_formation_loader():
    # Each bundled formation file loads with its display name.
    def_spread = load_formation("def_spread")
    def_zone = load_formation("def_zone")
    off_line = load_formation("off_line")
    off_wedge = load_formation("off_wedge")
    assert def_spread.name == "Spread"
    assert def_zone.name == "Zone"
    assert off_line.name == "Line"
    assert off_wedge.name == "Wedge"
| 2.265625 | 2 |
pyhaystack/client/ops/vendor/niagara.py | lixs74/pyhaystack | 0 | 12758922 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Niagara AX/Niagara 4 operation implementations.
"""
import fysom
import re
from ....util import state
from ....util.asyncexc import AsynchronousException
from ...http.auth import BasicAuthenticationCredentials
from ...http.exceptions import HTTPStatusError
class NiagaraAXAuthenticateOperation(state.HaystackOperation):
    """
    An implementation of the log-in procedure for Niagara AX. The procedure
    is as follows:

    1. Do a request of the log-in URL, without credentials. This sets session
    cookies in the client. Response should be code 200.
    2. Pick up the session cookie named 'niagara_session', submit this in
    a GET request for the login URL with a number of other parameters.
    Response should NOT include the word 'login'.

    Future requests should include the basic authentication credentials.
    """

    # Matches the word 'login' anywhere in the response body (see _on_login);
    # its presence means we were bounced back to the login page.
    _LOGIN_RE = re.compile('login', re.IGNORECASE)

    def __init__(self, session, retries=0):
        """
        Attempt to log in to the Niagara AX server.

        :param session: Haystack HTTP session object.
        :param retries: Number of retries permitted in case of failure.
        """
        super(NiagaraAXAuthenticateOperation, self).__init__()
        self._retries = retries
        self._session = session
        self._cookies = {}
        self._auth = BasicAuthenticationCredentials(session._username,
                                                    session._password)
        # State machine driving the two-step login; every failure funnels
        # through 'failed', which either retries or aborts.
        self._state_machine = fysom.Fysom(
            initial='init', final='done',
            events=[
                # Event             Current State       New State
                ('get_new_session', 'init',             'newsession'),
                ('do_login',        'newsession',       'login'),
                ('login_done',      'login',            'done'),
                ('exception',       '*',                'failed'),
                ('retry',           'failed',           'newsession'),
                ('abort',           'failed',           'done'),
            ], callbacks={
                'onenternewsession': self._do_new_session,
                'onenterlogin': self._do_login,
                'onenterfailed': self._do_fail_retry,
                'onenterdone': self._do_done,
            })

    def go(self):
        """
        Start the request.
        """
        # Broad catch is deliberate: all errors are forwarded to the caller
        # wrapped in an AsynchronousException via the state machine.
        try:
            self._state_machine.get_new_session()
        except:  # Catch all exceptions to pass to caller.
            self._state_machine.exception(result=AsynchronousException())

    def _do_new_session(self, event):
        """
        Request the log-in cookie (step 1: unauthenticated GET of the
        login page to obtain session cookies).
        """
        try:
            self._session._get('login', self._on_new_session,
                               cookies={}, headers={}, exclude_cookies=True,
                               exclude_headers=True, api=False)
        except:  # Catch all exceptions to pass to caller.
            self._state_machine.exception(result=AsynchronousException())

    def _on_new_session(self, response):
        """
        Retrieve the log-in cookie from the response.
        """
        try:
            if isinstance(response, AsynchronousException):
                try:
                    response.reraise()
                except HTTPStatusError as e:
                    # A 404 on the login page is tolerated here.
                    # NOTE(review): after a tolerated 404, `response` is still
                    # the AsynchronousException, so the .cookies access below
                    # will raise and route through the failure path -- confirm
                    # the intended behaviour for servers without /login.
                    if e.status == 404:
                        pass
                    else:
                        raise

            self._cookies = response.cookies.copy()
            self._state_machine.do_login()
        except:  # Catch all exceptions to pass to caller.
            self._state_machine.exception(result=AsynchronousException())

    def _do_login(self, event):
        # Step 2: POST the login-support parameters along with the
        # 'niagara_session' cookie and HTTP basic credentials.
        # NOTE(review): 'Referer', 'content-type' and 'accept' look like HTTP
        # headers but are sent as form parameters -- presumably what the
        # Niagara login servlet expects; confirm before changing.
        try:
            self._session._post('login', self._on_login,
                                params={
                                    'token':'',
                                    'scheme':'cookieDigest',
                                    'absPathBase':'/',
                                    'content-type':'application/x-niagara-login-support',
                                    'Referer':self._session._client.uri+'login/',
                                    'accept':'text/zinc; charset=utf-8',
                                    'cookiePostfix' : self._cookies['niagara_session'],
                                },
                                headers={}, cookies=self._cookies,
                                exclude_cookies=True, exclude_proxies=True,
                                api=False, auth=self._auth)
        except:  # Catch all exceptions to pass to caller.
            self._state_machine.exception(result=AsynchronousException())

    def _on_login(self, response):
        """
        See if the login succeeded.
        """
        try:
            if isinstance(response, AsynchronousException):
                try:
                    response.reraise()
                except HTTPStatusError as e:
                    if e.status == 404:
                        pass
                    else:
                        raise
            else:
                # Bug fix: .match() only tested the *start* of the body, so a
                # returned login page (which begins with HTML boilerplate)
                # was never detected.  .search() implements the documented
                # "response should NOT include the word 'login'" check.
                if self._LOGIN_RE.search(response.text):
                    # No good.
                    raise IOError('Login failed')

            self._state_machine.login_done(result=(self._auth, self._cookies))
        except:  # Catch all exceptions to pass to caller.
            self._state_machine.exception(result=AsynchronousException())

    def _do_fail_retry(self, event):
        """
        Determine whether we retry or fail outright.
        """
        if self._retries > 0:
            self._retries -= 1
            self._state_machine.retry()
        else:
            self._state_machine.abort(result=event.result)

    def _do_done(self, event):
        """
        Return the result from the state machine.
        """
        self._done(event.result)
| 2.40625 | 2 |
vispy/visuals/transforms/chain.py | lcampagn/vispy | 1 | 12758923 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division
from ..shaders import FunctionChain
from .base_transform import BaseTransform
from .linear import NullTransform
class ChainTransform(BaseTransform):
    """
    BaseTransform subclass that performs a sequence of transformations in
    order. Internally, this class uses shaders.FunctionChain to generate
    its glsl_map and glsl_imap functions.

    Arguments:

    transforms : list of BaseTransform instances
        See ``transforms`` property.
    """
    glsl_map = None
    glsl_imap = None

    # These class-level flags are shadowed by the identically named
    # properties defined below, which AND the flags of all chained
    # transforms; they are kept for interface parity with BaseTransform.
    Linear = False
    Orthogonal = False
    NonScaling = False
    Isometric = False

    def __init__(self, *transforms):
        super(ChainTransform, self).__init__()

        # Set input transforms; accept both individual transforms and
        # lists/tuples of transforms, flattened one level.
        trs = []
        for tr in transforms:
            if isinstance(tr, (tuple, list)):
                trs.extend(tr)
            else:
                trs.append(tr)
        self._transforms = trs

        # ChainTransform does not have shader maps (built lazily in
        # shader_map/shader_imap).
        self._shader_map = None
        self._shader_imap = None

    @property
    def transforms(self):
        """ The list of transform that make up the transform chain.

        The order of transforms is given such that the last transform in the
        list is the first to be invoked when mapping coordinates through
        the chain.

        For example, the following two mappings are equivalent::

            # Map coordinates through individual transforms:
            trans1 = STTransform(scale=(2, 3), translate=(0, 1))
            trans2 = PolarTransform()
            mapped = trans1.map(trans2.map(coords))

            # Equivalent mapping through chain:
            chain = ChainTransform([trans1, trans2])
            mapped = chain.map(coords)

        """
        return self._transforms

#     @transforms.setter
#     def transforms(self, tr):
#         #if self._enabled:
#             #raise RuntimeError("Shader is already enabled; cannot modify.")
#         if not isinstance(tr, list):
#             raise TypeError("Transform chain must be a list")
#         self._transforms = tr

    @property
    def Linear(self):
        # True only if every chained transform is linear.
        b = True
        for tr in self._transforms:
            b &= tr.Linear
        return b

    @property
    def Orthogonal(self):
        # True only if every chained transform is orthogonal.
        b = True
        for tr in self._transforms:
            b &= tr.Orthogonal
        return b

    @property
    def NonScaling(self):
        # True only if every chained transform is non-scaling.
        b = True
        for tr in self._transforms:
            b &= tr.NonScaling
        return b

    @property
    def Isometric(self):
        # True only if every chained transform is isometric.
        b = True
        for tr in self._transforms:
            b &= tr.Isometric
        return b

    def map(self, coords):
        """Map coordinates

        Parameters
        ----------
        coords : array-like
            Coordinates to map.

        Returns
        -------
        coords : ndarray
            Coordinates.
        """
        # Last transform in the list is applied first (see `transforms`).
        for tr in reversed(self.transforms):
            coords = tr.map(coords)
        return coords

    def imap(self, coords):
        """Inverse map coordinates

        Parameters
        ----------
        coords : array-like
            Coordinates to inverse map.

        Returns
        -------
        coords : ndarray
            Coordinates.
        """
        # Inverse applies the transforms in forward list order.
        for tr in self.transforms:
            coords = tr.imap(coords)
        return coords

    def shader_map(self):
        if self._shader_map is None:
            self._shader_map = self._make_shader_map(imap=False)
        else:
            for tr in self._transforms:
                tr.shader_map()  # force transform to update its shader
        return self._shader_map

    def shader_imap(self):
        if self._shader_imap is None:
            self._shader_imap = self._make_shader_map(imap=True)
        else:
            for tr in self._transforms:
                tr.shader_imap()  # force transform to update its shader
        return self._shader_imap

    def _make_shader_map(self, imap):
        # Build a FunctionChain of the per-transform GLSL functions, in the
        # same order the Python map/imap methods apply them.
        if bool(imap):
            funcs = [tr.shader_imap() for tr in self.transforms]
        else:
            funcs = [tr.shader_map() for tr in reversed(self.transforms)]

        name = "transform_%s_chain" % ('imap' if bool(imap) else 'map')
        return FunctionChain(name, funcs)

    def flat(self):
        """
        Return a simplified chain by expanding any nested chains.
        """
        transforms = self.transforms[:]
        new_chain = []
        while len(transforms) > 0:
            tr = transforms.pop(0)
            if isinstance(tr, ChainTransform):
                # Splice nested chain contents in place of the chain itself.
                transforms = tr.transforms[:] + transforms
            else:
                new_chain.append(tr)
        return ChainTransform(new_chain)

    def simplified(self):
        """
        Return a simplified chain by joining adjacent transforms.
        If the result is a single transform, return that transform.
        """
        tr = self.flat()
        if len(tr.transforms) == 0:
            return NullTransform()
        cont = True
        tr = tr.transforms
        # Repeatedly try to multiply adjacent transforms together; a
        # non-chain product means the pair collapsed into one transform.
        while cont:
            new_tr = [tr[0]]
            cont = False
            for t2 in tr[1:]:
                t1 = new_tr[-1]
                pr = t1 * t2
                if not isinstance(pr, ChainTransform):
                    cont = True
                    new_tr.pop()
                    new_tr.append(pr)
                else:
                    new_tr.append(t2)
            tr = new_tr

        if len(tr) == 1:
            return tr[0]
        else:
            return ChainTransform(tr)

    def append(self, tr):
        """
        Add a new transform to the end of this chain.

        Parameters
        ----------
        tr : instance of Transform
            The transform to use.
        """
        self.transforms.append(tr)
        self.update()
        # Keep simple for now. Let's look at efficiency later
        # I feel that this class should not decide when to compose transforms
#         while len(self.transforms) > 0:
#             pr = tr * self.transforms[-1]
#             if isinstance(pr, ChainTransform):
#                 self.transforms.append(tr)
#                 break
#             else:
#                 self.transforms.pop()
#                 tr = pr
#                 if len(self.transforms) == 0:
#                     self._transforms = [pr]
#                     break

    def prepend(self, tr):
        """
        Add a new transform to the beginning of this chain.

        Parameters
        ----------
        tr : instance of Transform
            The transform to use.
        """
        self.transforms.insert(0, tr)
        self.update()
        # Keep simple for now. Let's look at efficiency later
#         while len(self.transforms) > 0:
#             pr = self.transforms[0] * tr
#             if isinstance(pr, ChainTransform):
#                 self.transforms.insert(0, tr)
#                 break
#             else:
#                 self.transforms.pop(0)
#                 tr = pr
#                 if len(self.transforms) == 0:
#                     self._transforms = [pr]
#                     break

    def __setitem__(self, index, tr):
        self._transforms[index] = tr
        # Keep the cached shader chains in sync; the map chain is stored in
        # reversed order, hence the mirrored index.
        if self._shader_map is not None:
            self._shader_map[-(index+1)] = tr.shader_map()
        if self._shader_imap is not None:
            self._shader_imap[index] = tr.shader_imap()
        self.update()

    def __mul__(self, tr):
        if isinstance(tr, ChainTransform):
            trs = tr.transforms
        else:
            trs = [tr]
        return ChainTransform(self.transforms+trs)

    def __rmul__(self, tr):
        if isinstance(tr, ChainTransform):
            trs = tr.transforms
        else:
            trs = [tr]
        return ChainTransform(trs+self.transforms)

    def __str__(self):
        names = [tr.__class__.__name__ for tr in self.transforms]
        return "<ChainTransform [%s] at 0x%x>" % (", ".join(names), id(self))

    def __repr__(self):
        tr = ",\n                 ".join(map(repr, self.transforms))
        return "<ChainTransform [%s] at 0x%x>" % (tr, id(self))
| 2.3125 | 2 |
tests/mixed/conftest.py | aoxiangflysky/onedata | 2 | 12758924 | """
Definitions of fixtures used in acceptance mixed tests.
"""
__author__ = "<NAME>"
__copyright__ = "Copyright (C) 2017 ACK CYFRONET AGH"
__license__ = "This software is released under the MIT license cited in " \
"LICENSE.txt"
pytest_plugins = "tests.gui.gui_conf"
| 0.859375 | 1 |
tests/unit/output/schema/__init__.py | jaebradley/draftkings_client | 111 | 12758925 | <gh_stars>100-1000
"""
Represents tests defined in the draft_kings.output.schema module.
Most tests center around serializing / deserializing output objects using the marshmallow library
"""
| 0.957031 | 1 |
Factories/DefaultFactory.py | SUMER9999dev/Cubes-Playground | 1 | 12758926 | <reponame>SUMER9999dev/Cubes-Playground
# import's
from Interfaces.IEvent import IEvent
from Interfaces.ICube import ICube
from Interfaces.IFactory import IFactory
from Interfaces.ICubeHandler import ICubeHandler
from Cubes.DefaultCube import DefaultCube
# factory
class DefaultFactory(IFactory):
    """Creates DefaultCube instances wired to event handlers."""

    def __init__(self, handler_base: ICubeHandler) -> None:
        # Handler type (or callable) used to build one handler per cube.
        self.__handler_base: ICubeHandler = handler_base

    def create_cube(self, event: IEvent) -> ICube:
        """Build a 100-HP DefaultCube whose handler is bound to the given event."""
        new_cube = DefaultCube(100)
        new_cube.handler = self.__handler_base(event, new_cube)
        return new_cube
| 2.21875 | 2 |
backend/application/user/__init__.py | vitor-kato/ACMEVita-API | 0 | 12758927 | from flask import Blueprint
from . import auth, models, schemas
# Flask blueprint grouping the user module's routes.
user_bp = Blueprint("user", __name__)
| 1.898438 | 2 |
pymatgen/io/cp2k/tests/test_outputs.py | exenGT/pymatgen | 1 | 12758928 | # Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
from pathlib import Path
from pymatgen.io.cp2k.outputs import Cp2kOutput
from pymatgen.util.testing import PymatgenTest
class SetTest(PymatgenTest):
    """Smoke tests for Cp2kOutput parsing of a reference cp2k.out file."""

    def setUp(self):
        # Test fixtures live under <pymatgen test files>/cp2k.
        self.TEST_FILES_DIR = Path.joinpath(self.TEST_FILES_DIR, "cp2k")
        self.out = Cp2kOutput(Path.joinpath(self.TEST_FILES_DIR, "cp2k.out"), auto_load=True)

    def test_files(self):
        # The output should discover two projected-DOS files.
        self.out.parse_files()
        self.assertEqual(len(self.out.filenames["PDOS"]), 2)

    def test(self):
        # Basic run metadata parsed from the reference output.
        self.assertEqual(self.out.spin_polarized, False)
        self.assertEqual(self.out.completed, True)
        self.assertEqual(self.out.num_warnings, [[1]])
        self.assertEqual(self.out.run_type.upper(), "GEO_OPT")


if __name__ == "__main__":
    unittest.main()
| 2.515625 | 3 |
illustration/entropytools.py | CSMMLab/Flowmachine | 0 | 12758929 | <filename>illustration/entropytools.py
"""
Script with functions for quadratures, moment basis for 1D-3D spatial dimensions
Author: <NAME>
Date: 16.03.21
"""
from numpy.polynomial.legendre import leggauss
import numpy as np
import tensorflow as tf
import scipy.optimize as opt
class EntropyTools:
    """
    Same functions implemented in the sobolev Network.
    Also uses Tensorflow
    """

    # Declared (annotated) attributes; all are assigned in __init__.
    spatial_dimension: int
    poly_degree: int
    nq: int  # number of quadrature points
    inputDim: int  # number of moment-basis functions N
    quadPts: tf.Tensor  # dims = (spatial_dimension x nq)
    quadWeights: tf.Tensor  # dims = (1 x nq)
    momentBasis: tf.Tensor  # dims = (N x nq)
    # NOTE(review): opti_* are declared but never assigned in the visible
    # methods -- presumably used by the optimizer routines; confirm.
    opti_u: np.ndarray
    opti_m: np.ndarray
    opti_w: np.ndarray
    def __init__(self, polynomial_degree=1, spatial_dimension=1) -> object:
        """
        Class to compute the entropy closure up to degree N.

        :param polynomial_degree: degree of the polynomial moment basis
        :param spatial_dimension: 1 or 2 (other values leave quad_pts
            undefined and will raise)
        """
        # Create quadrature and momentBasis. Currently only for 1D problems
        # NOTE(review): qGaussLegendre1D/2D and computeMonomialBasis1D/2D are
        # assumed to be defined elsewhere in this module.
        self.poly_degree = polynomial_degree
        self.spatial_dimension = spatial_dimension
        quad_order = 100
        if spatial_dimension == 1:
            self.nq = quad_order
            [quad_pts, quad_weights] = qGaussLegendre1D(quad_order)  # order = nq
            m_basis = computeMonomialBasis1D(quad_pts, self.poly_degree)  # dims = (N x nq)
        if spatial_dimension == 2:
            [quad_pts, quad_weights] = qGaussLegendre2D(quad_order)  # dims = nq
            self.nq = quad_weights.size  # is not 10 * polyDegree
            m_basis = computeMonomialBasis2D(quad_pts, self.poly_degree)  # dims = (N x nq)

        self.quadPts = tf.constant(quad_pts, shape=(self.spatial_dimension, self.nq), dtype=tf.float32)
        self.quadWeights = tf.constant(quad_weights, shape=(1, self.nq),
                                       dtype=tf.float32)
        self.inputDim = m_basis.shape[0]
        self.momentBasis = tf.constant(m_basis, shape=(self.inputDim, self.nq),
                                       dtype=tf.float32)
    def reconstruct_alpha(self, alpha: tf.Tensor) -> tf.Tensor:
        """
        brief: Reconstructs alpha_0 and then concats alpha_0 to alpha_1,... , from alpha1,...
                Only works for maxwell Boltzmann entropy so far.
        nS = batchSize
        N = basisSize
        nq = number of quadPts

        input: alpha, dims = (nS x N-1)
               m    , dims = (N x nq)
               w    , dims = nq
        returns alpha_complete = [alpha_0,alpha], dim = (nS x N), where alpha_0 = - ln(<exp(alpha*m)>)
        """
        # <.> denotes the quadrature-weighted integral over the basis.
        tmp = tf.math.exp(tf.tensordot(alpha, self.momentBasis[1:, :], axes=([1], [0])))  # tmp = alpha * m
        alpha_0 = -tf.math.log(tf.tensordot(tmp, self.quadWeights, axes=([1], [1])))  # ln(<tmp>)
        return tf.concat([alpha_0, alpha], axis=1)  # concat [alpha_0,alpha]
    def reconstruct_u(self, alpha: tf.Tensor) -> tf.Tensor:
        """
        brief: reconstructs the moment vector u from the multipliers alpha
        nS = batchSize
        N = basisSize
        nq = number of quadPts
        input: alpha, dims = (nS x N)
        used members: momentBasis m, dims = (N x nq)
                      quadWeights w, dims = (1 x nq)
        returns: u = <m * eta_*'(alpha * m)>, dims = (nS x N)
        """
        # Currently only for maxwell Boltzmann entropy: eta_*'(y) = exp(y)
        f_quad = tf.math.exp(tf.tensordot(alpha, self.momentBasis, axes=([1], [0])))  # f = exp(alpha*m)
        tmp = tf.math.multiply(f_quad, self.quadWeights)  # f*w
        return tf.tensordot(tmp, self.momentBasis[:, :], axes=([1], [1]))  # <f * m>, quadrature sum
    def compute_u(self, f: tf.Tensor) -> tf.Tensor:
        """
        brief: computes the moment vector u directly from a kinetic density f
        nS = batchSize
        N = basisSize
        nq = number of quadPts
        input: f, dims = (nS x nq) -- density values at the quadrature points
        used members: momentBasis m, dims = (N x nq)
                      quadWeights w, dims = (1 x nq)
        returns: u = <m * f>, dims = (nS x N)
        """
        tmp = tf.math.multiply(f, self.quadWeights)  # f*w
        return tf.tensordot(tmp, self.momentBasis[:, :], axes=([1], [1]))  # quadrature sum <f * m>
    def compute_h(self, u: tf.Tensor, alpha: tf.Tensor) -> tf.Tensor:
        """
        brief: computes the entropy functional h at (u, alpha)
        nS = batchSize
        N = basisSize
        nq = number of quadPts
        input: alpha, dims = (nS x N)
               u, dims = (nS x N)
        used members: momentBasis m, dims = (N x nq)
                      quadWeights w, dims = (1 x nq)
        returns: h = alpha * u - <eta_*(alpha * m)>, dims = (nS x 1)
        """
        # Currently only for maxwell Boltzmann entropy: eta_*(y) = exp(y)
        f_quad = tf.math.exp(tf.tensordot(alpha, self.momentBasis, axes=([1], [0])))  # exp(alpha*m)
        tmp = tf.tensordot(f_quad, self.quadWeights, axes=([1], [1]))  # <exp(alpha*m)>
        # Row-wise dot product alpha . u, kept as a (nS x 1) column.
        tmp2 = tf.math.reduce_sum(tf.math.multiply(alpha, u), axis=1, keepdims=True)
        return tmp2 - tmp
    def convert_to_tensor_float(self, vector: np.ndarray) -> tf.Tensor:
        """
        brief: converts a numpy array to a float32 tf constant, keeping its shape.
        """
        return tf.constant(vector, shape=vector.shape, dtype=tf.float32)
def minimize_entropy(self, u: tf.Tensor, start: tf.Tensor) -> tf.Tensor:
"""
brief: computes the minimal entropy at u
input: u = dims (1,N)
start = start_valu of alpha
"""
dim = u.numpy().shape[1]
self.opti_u = np.reshape(u.numpy(), (dim,))
self.opti_m = self.momentBasis.numpy()
self.opti_w = self.quadWeights.numpy()
opti_start = np.reshape(start.numpy(), (dim,))
# test objective functions
# t = self.opti_entropy(opti_start)
# tp = self.opti_entropy_prime(opti_start)
# tpp = self.opti_entropy_prime2(opti_start)#
# print(t)
# print(tp)
# print(tpp)
opt_result = opt.minimize(fun=self.opti_entropy, x0=opti_start, jac=self.opti_entropy_prime,
hess=self.opti_entropy_prime2, tol=1e-6)
if not opt_result.success:
exit("Optimization unsuccessfull!")
return tf.constant(opt_result.x, dtype=tf.float32, shape=(1, dim))
    def opti_entropy(self, alpha: np.ndarray) -> np.ndarray:
        """
        brief: returns the negative entropy functional with fixed u
               (scipy objective; reads opti_u/opti_m/opti_w set by minimize_entropy)
        N = basisSize
        nq = number of quadPts
        input: alpha, dims = (N,)
        used members: opti_u, dims = (N,)
                      opti_m, dims = (N x nq)
                      opti_w, dims = (1 x nq)
        returns: h = - alpha*u + <eta_*(alpha*m)>, dims = (1,)
        """
        # Currently only for maxwell Boltzmann entropy: eta_*(y) = exp(y)
        # compute negative entropy functional
        f_quad = np.exp(np.tensordot(alpha, self.opti_m, axes=([0], [0])))  # exp(alpha*m), dims = (nq,)
        t1 = np.tensordot(f_quad, self.opti_w, axes=([0], [1]))  # quadrature integral <f>
        t2 = np.inner(alpha, self.opti_u)  # alpha . u
        return t1 - t2
    def opti_entropy_prime(self, alpha: np.ndarray) -> np.ndarray:
        """
        brief: returns the gradient of the negative entropy functional with fixed u
        N = basisSize
        nq = number of quadPts
        input: alpha, dims = (N,)
        used members: opti_u, dims = (N,)
                      opti_m, dims = (N x nq)
                      opti_w, dims = (1 x nq)
        returns: h' = -u + <m * eta_*'(alpha*m)>, dims = (N,)
        """
        # Currently only for maxwell Boltzmann entropy: eta_*'(y) = exp(y)
        f_quad = np.exp(np.tensordot(alpha, self.opti_m, axes=([0], [0])))  # exp(alpha*m), dims = (nq,)
        tmp = np.multiply(f_quad, self.opti_w)  # f*w, dims = (1 x nq)
        t2 = np.tensordot(tmp, self.opti_m[:, :], axes=([1], [1]))  # <f * m>, dims = (1 x N)
        dim = t2.shape[1]
        return np.reshape(t2 - self.opti_u, (dim,))  # flatten back to (N,) for scipy
def opti_entropy_prime2(self, alpha: np.ndarray) -> np.ndarray:
"""
brief: returns the 2nd derivative negative entropy functional with fixed u
nS = batchSize
N = basisSize
nq = number of quadPts
input: alpha, dims = (1 x N)
u, dims = (1 x N)
used members: m , dims = (N x nq)
w , dims = nq
returns h = <mxm*eta_*(alpha*m)>
"""
# Currently only for maxwell Boltzmann entropy
f_quad = np.exp(np.tensordot(alpha, self.opti_m, axes=([0], [0]))) # exp(alpha*m)
tmp = np.multiply(f_quad, self.opti_w) # f*w
# mm = np.zeros(shape=(self.nq, self.inputDim, self.inputDim))
t2 = 0
for i in range(self.nq):
t = np.tensordot(self.opti_m[:, i], self.opti_m[:, i], axes=0)
t2 += t * tmp[0, i]
return t2
def entropy_hessian(self, alpha: np.ndarray, m: np.ndarray, w: np.ndarray) -> np.ndarray:
"""
:brief same as opti_entropy_prime2, but with m and w as args as standalone
:param alpha: lagranage multipliers dims=(1xN)
:param m: moment basis dims=(Nxnq)
:param w: quadweights dims=(1xnq)
:return: hessian of the entropy functional
"""
f_quad = np.exp(np.tensordot(alpha, m, axes=([0], [0]))) # exp(alpha*m)
tmp = np.multiply(f_quad, w) # f*w
t2 = 0
for i in range(self.nq):
t = np.tensordot(m[:, i], m[:, i], axes=0)
t2 += t * tmp[0, i]
return t2
    def KL_divergence(self, alpha_true: tf.Tensor, alpha: tf.Tensor) -> tf.Tensor:
        """
        brief: computes the Kullback-Leibler divergence of the kinetic density w.r.t. alpha
               given the kinetic density w.r.t. alpha_true, i.e. <f_true * (alpha_true - alpha) * m>.
        input: alpha_true, dims = (1, N+1)
               alpha, dims = (nS, N+1)
        output: pointwise KL divergence, dims = (nS x 1)
        """
        diff = alpha_true - alpha  # broadcasts alpha_true over the batch
        t1 = tf.math.exp(tf.tensordot(alpha_true, self.momentBasis, axes=([1], [0])))  # f_true at quad pts
        t2 = tf.tensordot(diff, self.momentBasis, axes=([1], [0])))  # log-density difference at quad pts
        integrand = tf.math.multiply(t1, t2)
        res = tf.tensordot(integrand, self.quadWeights, axes=([1], [1]))  # quadrature integral
        return res
    def compute_kinetic_density(self, alpha: tf.Tensor) -> tf.Tensor:
        """
        brief: computes the kinetic density f = exp(alpha * m) at the quadrature points
               (Maxwell-Boltzmann ansatz)
        input: alpha, dims = (nS, N+1)
        output: kinetic density, dims = (nS x nq)
        """
        return tf.math.exp(tf.tensordot(alpha, self.momentBasis, axes=([1], [0])))
    def compute_maxwellian(self):
        """
        returns the maxwellian distribution at quadpts
        return: maxwellian, dims = (1, nq)
        """
        # TODO: unimplemented stub -- currently returns the scalar 0, not a
        # (1, nq) distribution as documented.
        return 0
### Standalone features
### Integration
def qGaussLegendre1D(order: int):
    """
    order: order of the quadrature (= number of points)
    returns: [mu, weights] : Gauss-Legendre quadrature points and weights on [-1, 1]
    """
    return leggauss(order)
def qGaussLegendre2D(Qorder):
    """
    order: order of quadrature, uses all quadpts... inefficient
    returns: [pts, weights] : quadrature points and weights, dim(pts) = nq x 2

    NOTE(review): both inner loops iterate int(order / 2) * 2 * order times,
    which only fills the preallocated order*order arrays completely when
    `order` is even -- confirm odd orders are never used.
    """

    def computequadpoints(order):
        """Quadrature points: project the upper-hemisphere Gauss-Legendre grid
        onto the xy-plane (the z component is dropped)."""
        mu, _ = leggauss(order)
        phi = [np.pi * (k + 1 / 2) / order for k in range(2 * order)]
        xy = np.zeros((order * order, 2))
        count = 0
        for i in range(int(order / 2)):
            for j in range(2 * order):
                mui = mu[i]
                phij = phi[j]
                xy[count, 0] = np.sqrt(1 - mui ** 2) * np.cos(phij)
                xy[count, 1] = np.sqrt(1 - mui ** 2) * np.sin(phij)
                # xyz[count, 2] = mui
                count += 1

        return xy

    def computequadweights(order):
        """Quadrature weights: azimuthally uniform, scaled Gauss-Legendre weights."""
        _, leggaussweights = leggauss(order)
        w = np.zeros(order * order)
        count = 0
        for i in range(int(order / 2)):
            for j in range(2 * order):
                w[count] = 0.5 * np.pi / order * leggaussweights[i]
                count += 1

        return w

    pts = computequadpoints(Qorder)
    weights = computequadweights(Qorder)
    return [pts, weights]
def integrate(integrand, weights):
    """
    Quadrature integral <integrand>: contract the last axis of the integrand
    with the weight vector.

    params: weights = quadweights vector (at quadpoints), dims = (nq,)
            integrand = integrand values at the quadpoints, dims = (... x nq)
    returns: the weighted sum over the quadrature axis
    """
    return np.asarray(integrand) @ np.asarray(weights)
### Entropy functions
def negEntropyFunctional(u, alpha, m, w):
    """
    compute entropy functional at one point using
    inputs: u = moment vector, dims = N+1
            alpha = corresponding lagrange multiplier, dims = N+1
            m = moment basis vector, evaluated at quadpts, dims = (N + 1) x nQuad
            w = quadrature weights
    returns: h = alpha*u - <entropyDual(alpha*m)>

    TODO: unimplemented stub -- always returns 0.
    """
    # tmp = integrate(entropyDualPrime(np.matmul(alpha, m)), w)
    return 0  # Todo
def entropy(x):
    # Maxwell-Boltzmann entropy eta(x) = x ln(x) - x
    return x * np.log(x) - x


def entropyDual(y):
    # Legendre dual eta_*(y) = exp(y)
    return np.exp(y)


def entropyPrime(x):
    # eta'(x) = ln(x)
    return np.log(x)


def entropyDualPrime(y):
    # eta_*'(y) = exp(y)
    return np.exp(y)
def reconstructU(alpha, m, w):
    """
    input: alpha, dims = (nS x N)
           m, dims = (N x nq)
           w, dims = (nq,)
    returns: u = <m * eta_*'(alpha*m)>, dims = (nS x N)
    """
    # tensor version
    temp = entropyDualPrime(np.matmul(alpha, m))  # density at quad pts, dims = (nS x nq)
    # Extend both factors to 3D so elementwise multiplication broadcasts to (nS x N x nq).
    mTensor = m.reshape(1, m.shape[0], m.shape[1])  # (1 x N x nq)
    tempTensor = temp.reshape(temp.shape[0], 1, temp.shape[1])  # (nS x 1 x nq)
    # Contract the quadrature axis with the weights.
    return integrate(mTensor * tempTensor, w)
def reconstructL1F(alpha, m, w):
    """
    input: alpha, dims = (nS x N)
           m, dims = (N x nq)
           w, dims = (nq,)
    returns: the L1 norm of f, the kinetic density, <|f|>
    """
    # |exp(.)| is redundant for real input but kept for generality.
    return integrate(np.abs(entropyDualPrime(np.matmul(alpha, m))), w)
def reconstructUSingleCell(alpha, m, w):
    """
    input: alpha, dims = (N,) -- multipliers of a single cell
           m, dims = (N x nq)
           w, dims = (nq,)
    returns: u = <m * eta_*'(alpha*m)>, dims = (N,)
    """
    temp = entropyDualPrime(np.matmul(alpha, m))  # density at quad pts, dims = (nq,)
    res = m * temp  # broadcasts to (N x nq)
    return integrate(res, w)
### Basis Computation
def computeMonomialBasis1D(quadPts, polyDegree):
    """
    params: quadPts = quadrature points to evaluate, dims = (nq,)
            polyDegree = maximum degree of the basis
    return: monomial basis evaluated at the quadrature points,
            dims = (polyDegree + 1) x nq, row d holding quadPts**d
    """
    # In one spatial dimension the basis size is simply polyDegree + 1.
    # The Vandermonde matrix gives all powers at once, replacing the original
    # O(nq * degree) double Python loop with a single vectorized call.
    return np.vander(quadPts, polyDegree + 1, increasing=True).T
def computeMonomialBasis2D(quadPts, polyDegree):
    """
    brief: 2-D monomial basis; implemented for polyDegree 1 only.
    params: quadPts = quadrature points to evaluate, dims = (nq x 2)
            polyDegree = maximum degree of the basis (must be 1)
    return: monomial basis [1, x, y] evaluated at the quadrature points, dims = (3 x nq)
    raises: NotImplementedError for polyDegree != 1 (the original silently
            returned all-zero rows for the higher-degree entries)
    """
    if polyDegree != 1:
        raise NotImplementedError("computeMonomialBasis2D only supports polyDegree == 1")

    basisLen = getBasisSize(polyDegree, 2)  # = 3 for degree 1
    nq = quadPts.shape[0]
    monomialBasis = np.zeros((basisLen, nq))
    # Vectorized over the quadrature points instead of the original per-point loop.
    monomialBasis[0, :] = 1.0
    monomialBasis[1, :] = quadPts[:, 0]
    monomialBasis[2, :] = quadPts[:, 1]
    return monomialBasis
def getBasisSize(polyDegree, spatialDim):
    """
    params: polyDegree = maximum degree of the basis
            spatialDim = spatial dimension of the basis
    returns: basis size, i.e. the number of monomials of degree <= polyDegree
    """
    import math  # local import: the file-level import block is not guaranteed to provide math

    # The number of monomials of exact degree d in spatialDim variables is
    # C(d + spatialDim - 1, spatialDim - 1); sum over all degrees up to polyDegree.
    # math.comb replaces the factorial-division helper, which relied on
    # np.math.factorial (removed in NumPy 2.0).
    return sum(math.comb(d + spatialDim - 1, spatialDim - 1) for d in range(polyDegree + 1))
def getCurrDegreeSize(currDegree, spatialDim):
    """
    Computes the number of monomials of exactly degree `currDegree` in
    `spatialDim` variables, i.e. C(currDegree + spatialDim - 1, spatialDim - 1).

    Returns a float for backward compatibility (the original used true division).
    """
    import math  # local import: avoids np.math, which was removed in NumPy 2.0

    return float(math.comb(currDegree + spatialDim - 1, spatialDim - 1))
| 2.359375 | 2 |
AI3603_HW1/point3D.py | prefrontal21/AI3603 | 0 | 12758930 | <filename>AI3603_HW1/point3D.py
import math
import matplotlib.pyplot as plt
steering_inputs = [-40, 0, 40]
cost_steering_inputs = [0.1, 0, 0.1]
speed_inputs = [-1, 1]
class point3D:
    """Search node for a vehicle state (x, y, heading-in-degrees) on a grid.

    Stores the accumulated path cost plus the discrete and continuous parent
    states so the final path can be reconstructed after the search.
    """

    def __init__(self, total_cost, node, parent_d, parent_c):
        self.total_cost = total_cost  # accumulated cost of reaching this node
        self.parent_d = parent_d      # discrete parent state (x, y, theta)
        self.parent_c = parent_c      # continuous parent state (x, y, theta)
        self.node = node              # discrete state of this node

    def __lt__(self, other):
        # BUG FIX: the original compared `self.f`, an attribute that is never
        # assigned anywhere in this class, so every ordered comparison (e.g.
        # inside a heap) raised AttributeError. Order nodes by total cost.
        return self.total_cost < other.total_cost

    def create_successor(self, i, j, vehicle_length):
        """Integrate a kinematic bicycle model one step.

        i: index into the module-level steering_inputs table (degrees).
        j: index into the module-level speed_inputs table.
        vehicle_length: wheelbase used in the heading update.
        returns: (successor_d, successor_c) -- the rounded discrete state and
                 the exact continuous state.
        """
        delta = steering_inputs[i]
        velocity = speed_inputs[j]

        # Bicycle model: advance position along the current heading, then turn
        # proportionally to tan(steering angle) / wheelbase.
        successor_x_cts = self.node[0] + velocity * math.cos(math.radians(self.node[2]))
        successor_y_cts = self.node[1] + velocity * math.sin(math.radians(self.node[2]))
        successor_theta_cts = math.radians(self.node[2]) + (velocity * math.tan(math.radians(delta)) / float(vehicle_length))
        successor_theta_cts = math.degrees(successor_theta_cts)

        # Snap to the discrete grid for graph bookkeeping.
        successor_x_d = round(successor_x_cts)
        successor_y_d = round(successor_y_cts)
        successor_theta_d = round(successor_theta_cts)

        successor_d = (successor_x_d, successor_y_d, successor_theta_d)
        successor_c = (successor_x_cts, successor_y_cts, successor_theta_cts)
        return successor_d, successor_c
| 3.28125 | 3 |
junction/conferences/migrations/0001_initial.py | theSage21/junction | 192 | 12758931 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django_extensions.db.fields
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial auto-generated migration: creates the Conference,
    ConferenceModerator and ConferenceProposalReviewer models and the
    (conference, reviewer) uniqueness constraint."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name="Conference",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                (
                    "created_at",
                    models.DateTimeField(auto_now_add=True, verbose_name="Created At"),
                ),
                (
                    "modified_at",
                    models.DateTimeField(
                        auto_now=True, verbose_name="Last Modified At"
                    ),
                ),
                (
                    "name",
                    models.CharField(max_length=255, verbose_name="Conference Name"),
                ),
                (
                    "slug",
                    django_extensions.db.fields.AutoSlugField(
                        editable=False,
                        populate_from=("name",),
                        max_length=255,
                        blank=True,
                        unique=True,
                    ),
                ),
                ("description", models.TextField(default="")),
                ("start_date", models.DateField(verbose_name="Start Date")),
                ("end_date", models.DateField(verbose_name="End Date")),
                (
                    "status",
                    models.PositiveSmallIntegerField(
                        verbose_name="Current Status",
                        choices=[
                            (1, b"Accepting Call for Proposals"),
                            (2, b"Closed for Proposals"),
                            (3, b"Accepting Votes"),
                            (4, b"Schedule Published"),
                        ],
                    ),
                ),
                (
                    "deleted",
                    models.BooleanField(default=False, verbose_name="Is Deleted?"),
                ),
                (
                    "created_by",
                    models.ForeignKey(
                        related_name="created_conference_set",
                        verbose_name="Created By",
                        blank=True,
                        on_delete=models.deletion.CASCADE,
                        to=settings.AUTH_USER_MODEL,
                        null=True,
                    ),
                ),
                (
                    "modified_by",
                    models.ForeignKey(
                        related_name="updated_conference_set",
                        verbose_name="Modified By",
                        blank=True,
                        on_delete=models.deletion.CASCADE,
                        to=settings.AUTH_USER_MODEL,
                        null=True,
                    ),
                ),
            ],
            options={"abstract": False},
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name="ConferenceModerator",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                (
                    "created_at",
                    models.DateTimeField(auto_now_add=True, verbose_name="Created At"),
                ),
                (
                    "modified_at",
                    models.DateTimeField(
                        auto_now=True, verbose_name="Last Modified At"
                    ),
                ),
                (
                    "active",
                    models.BooleanField(default=True, verbose_name="Is Active?"),
                ),
                (
                    "conference",
                    models.ForeignKey(
                        to="conferences.Conference", on_delete=models.deletion.CASCADE,
                    ),
                ),
                (
                    "created_by",
                    models.ForeignKey(
                        related_name="created_conferencemoderator_set",
                        verbose_name="Created By",
                        blank=True,
                        on_delete=models.deletion.CASCADE,
                        to=settings.AUTH_USER_MODEL,
                        null=True,
                    ),
                ),
                (
                    "moderator",
                    models.ForeignKey(
                        to=settings.AUTH_USER_MODEL, on_delete=models.deletion.CASCADE,
                    ),
                ),
                (
                    "modified_by",
                    models.ForeignKey(
                        related_name="updated_conferencemoderator_set",
                        verbose_name="Modified By",
                        blank=True,
                        on_delete=models.deletion.CASCADE,
                        to=settings.AUTH_USER_MODEL,
                        null=True,
                    ),
                ),
            ],
            options={"abstract": False},
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name="ConferenceProposalReviewer",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                (
                    "created_at",
                    models.DateTimeField(auto_now_add=True, verbose_name="Created At"),
                ),
                (
                    "modified_at",
                    models.DateTimeField(
                        auto_now=True, verbose_name="Last Modified At"
                    ),
                ),
                (
                    "active",
                    models.BooleanField(default=True, verbose_name="Is Active?"),
                ),
                (
                    "conference",
                    models.ForeignKey(
                        to="conferences.Conference", on_delete=models.deletion.CASCADE,
                    ),
                ),
                (
                    "created_by",
                    models.ForeignKey(
                        related_name="created_conferenceproposalreviewer_set",
                        verbose_name="Created By",
                        blank=True,
                        on_delete=models.deletion.CASCADE,
                        to=settings.AUTH_USER_MODEL,
                        null=True,
                    ),
                ),
                (
                    "modified_by",
                    models.ForeignKey(
                        related_name="updated_conferenceproposalreviewer_set",
                        verbose_name="Modified By",
                        blank=True,
                        on_delete=models.deletion.CASCADE,
                        to=settings.AUTH_USER_MODEL,
                        null=True,
                    ),
                ),
                (
                    "reviewer",
                    models.ForeignKey(
                        to=settings.AUTH_USER_MODEL, on_delete=models.deletion.CASCADE,
                    ),
                ),
            ],
            options={},
            bases=(models.Model,),
        ),
        migrations.AlterUniqueTogether(
            name="conferenceproposalreviewer",
            unique_together=set([("conference", "reviewer")]),
        ),
    ]
| 1.796875 | 2 |
poller.py | mhspradlin/go-lite-bot | 0 | 12758932 | # The poller that queries Telegram for bot updates.
# Assumes it's the only poller enqueueing elements onto a number of queues.
# Nice way to make HTTP get requests
import requests
# To read arguments
import sys
# For our queues
from collections import deque
# To lock and unlock files
import fcntl
# To read/write files
import os
# To serialize/deserialize objects
import pickle
# To yield
from time import sleep
# Package as a function for go-lite-bot to run
def run ():
# For ease of configuration, we pull our token from a text file located in the same directory
f = open('token.txt', 'r')
token = f.readline().strip()
f.close()
# Get the last update number so we don't do duplicates
f = open('offset.txt', 'r')
offset = int(f.readline.strip())
f.close()
# Process our arguments, which should be safe since they're passed by start
queueDir = sys.argv[1]
numQueues = int(sys.argv[2])
# Initialize our internal buffers to hold pending writes
writeBuffers = []
for i in range(numQueues):
writeBuffers.append(deque())
# Continually request updates and pass them to the queues
while not canceled():
updates = getUpdates()
# If there's no updates, yield
if len(updates) == 0:
sleep(0)
else: # Apply them to the queues and write
for i in range(updates):
if 'message' in updates[i] and 'text' in updates[i].message:
writeBuffers[hash(updates[i].message.chat.id) % numQueues].append(updates[i])
writeOut()
# Gives the queue file name for queue i
def queueName (i):
return queueDir + '/' + str(i) + '_queue.p'
# Write out the new offset number
def writeOffset (num):
f = open('offset.txt', 'w')
f.write(str(num))
f.close()
# Check to see if we've been canceled
def canceled ():
f = open('cancel.txt', 'r')
done = f.readline.strip()
f.close()
return done == 'Yes'
# Write out all buffers, appending elements to the appropriate queues
def writeOut ():
for i in range(len(writeBuffers)):
f = open(queueName(i), 'r+')
fcntl.flock(f, fcntl.LOCK_EX)
workingQueue = pickle.load(f)
# Recall that later messages are at higher indices
# Workers read messages off the left, so we should add to the right
# in ascending order
for j in range(len(writeBuffers[i])):
workingQueue.append(writeBuffers[i].popLeft())
pickle.dump(writeBuffers[i], f, pickle.HIGHEST_PROTOCOL)
fcntl.flock(f, fcntl.LOCK_UN)
f.close()
# Get all updates from the server for our bot
def getUpdates ():
r = requests.get('https://api.telegram.org/bot' + token + '/getUpdates' +
'?offset=' + str(offset) +
'&limit=100')
# Updates are returned sequentially, update the offset
updates = r.json()
if (len(updates) > 0):
offset = updates[len(updates) - 1].update_id + 1
# Write out offset counter
writeOffset(offset)
return updates
| 2.53125 | 3 |
smartsheet/models/webhook_stats.py | bromic007/smartsheet-python-sdk | 106 | 12758933 | # pylint: disable=C0111,R0902,R0904,R0912,R0913,R0915,E1101
# Smartsheet Python SDK.
#
# Copyright 2017 Smartsheet.com, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from ..types import *
from ..util import serialize
from ..util import deserialize
class WebhookStats(object):
    """Smartsheet WebhookStats data model.

    Statistics about a webhook's callback delivery, exposed as typed
    properties backed by the SDK's serializable wrapper types.
    """

    def __init__(self, props=None, base_obj=None):
        """Initialize the WebhookStats model.

        props: optional dict of raw API properties to deserialize into this model.
        base_obj: optional parent Smartsheet object kept for API context.
        """
        self._base = None
        if base_obj is not None:
            self._base = base_obj

        # Typed wrapper fields; the properties below expose their .value.
        self._last_callback_attempt = Timestamp()
        self._last_callback_attempt_retry_count = Number()
        self._last_successful_callback = Timestamp()

        if props:
            deserialize(self, props)

        # requests package Response object
        self.request_response = None
        self.__initialized = True

    @property
    def last_callback_attempt(self):
        return self._last_callback_attempt.value

    @last_callback_attempt.setter
    def last_callback_attempt(self, value):
        self._last_callback_attempt.value = value

    @property
    def last_callback_attempt_retry_count(self):
        return self._last_callback_attempt_retry_count.value

    @last_callback_attempt_retry_count.setter
    def last_callback_attempt_retry_count(self, value):
        self._last_callback_attempt_retry_count.value = value

    @property
    def last_successful_callback(self):
        return self._last_successful_callback.value

    @last_successful_callback.setter
    def last_successful_callback(self, value):
        self._last_successful_callback.value = value

    def to_dict(self):
        # Serialize via the SDK's shared helper so wrapper types unwrap consistently.
        return serialize(self)

    def to_json(self):
        return json.dumps(self.to_dict())

    def __str__(self):
        return self.to_json()
| 1.859375 | 2 |
pyfilter/filters/__init__.py | merz9b/pyfilter | 0 | 12758934 | <gh_stars>0
from .apf import APF
from .sisr import SISR
from .ukf import UKF | 1.0625 | 1 |
UCourse/resources/migrations/0004_auto_20200906_1020.py | Natsu1270/UCourse | 1 | 12758935 | # Generated by Django 3.0.7 on 2020-09-06 03:20
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: renames the resource model's database table to 'Resource'."""

    dependencies = [
        ('resources', '0003_remove_resource_code'),
    ]

    operations = [
        migrations.AlterModelTable(
            name='resource',
            table='Resource',
        ),
    ]
| 1.4375 | 1 |
samples/pymba_view.py | lcarde/pymanip | 0 | 12758936 | """
This file comes from pydc1394 examples.
Written by jordens.
Tested on Linux.
git clone https://github.com/jordens/pydc1394
"""
import time
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
from pymba import Vimba
class CameraPlot:
    """Live viewer for the first Vimba camera, displayed with pyqtgraph.

    Lifecycle: construct, start_camera(), process_images() (self-rescheduling),
    then stop_camera() and deinit_camera().
    """

    def __init__(self):
        # Bring up the Vimba transport layer and enumerate attached cameras.
        self.vimba = Vimba()
        self.vimba.startup()
        self.system = self.vimba.getSystem()
        self.cameraIds = self.vimba.getCameraIds()
        self.init_win()
        self.init_camera()

    def init_win(self):
        """Create the Qt main window with a single ImageView as central widget."""
        self.win = QtGui.QMainWindow()
        self.win.show()
        self.win.resize(600, 400)
        self.win.setWindowTitle("pymba + pyqtgraph")
        self.img = pg.ImageView()
        self.win.setCentralWidget(self.img)

    def init_camera(self):
        """Open the first enumerated camera and print its identification."""
        print("Vimba version:", self.vimba.getVersion())
        print("Found {:d} cameras.".format(len(self.cameraIds)))
        self.cam = self.vimba.getCamera(self.cameraIds[0])
        self.cam.openCamera()
        info = self.cam.getInfo()
        print('cameraName:', info.cameraName.decode('ascii'))
        print('interfaceIdString:', info.interfaceIdString.decode('ascii'))
        print('modelName:', info.modelName.decode('ascii'))

    def start_camera(self):
        """Configure free-running 16-bit mono acquisition and start capturing."""
        self.cam.AcquisitionMode = 'Continuous'
        self.cam.IIDCPhyspeed = 'S800'
        self.cam.PixelFormat = 'Mono16'
        self.cam.TriggerMode = 'Off'
        self.cam.AcquisitionFrameRate = 20.0
        # A single announced frame is reused for every capture.
        self.frame = self.cam.getFrame()
        self.frame.announceFrame()
        self.cam.startCapture()
        self.cam.runFeatureCommand('AcquisitionStart')

    def process_images(self):
        """Grab one frame, display it, and reschedule itself on the Qt timer."""
        # Reschedule first so display work does not delay the next capture slot.
        QtCore.QTimer.singleShot(50, self.process_images)
        self.frame.queueFrameCapture()
        self.frame.waitFrameCapture()
        # Transpose so image axes match pyqtgraph's (x, y) convention.
        im = self.frame.getImage().T
        self.img.setImage(im, autoRange=False, autoLevels=False,
                          autoHistogramRange=False)

    def stop_camera(self):
        """Stop acquisition and release the announced frame buffers."""
        self.cam.runFeatureCommand('AcquisitionStop')
        self.cam.endCapture()
        self.cam.revokeAllFrames()

    def deinit_camera(self):
        # Shut down the Vimba transport layer (must be the last Vimba call).
        self.vimba.shutdown()
if __name__ == "__main__":
app = QtGui.QApplication([])
cam = CameraPlot()
try:
cam.start_camera()
time.sleep(.5)
cam.process_images()
cam.img.autoRange()
cam.img.autoLevels()
QtGui.QApplication.instance().exec_()
finally:
cam.stop_camera()
cam.deinit_camera()
| 2.578125 | 3 |
modules.py | le0x99/autograd | 0 | 12758937 | from .core import Module, TensorNode
from .ops import *
from .functions import *
class Linear(Module):
    """
    A linear operation. Applies a matrix transformation and a vector translation
    (y = x W^T + b), with gradients tracked through the custom autograd ops.
    """

    def __init__(self, input_size, output_size):
        super().__init__()

        # weights of the matrix transformation, Glorot-initialized
        glorot_std = np.sqrt(2.0 / (input_size + output_size))  # scalar for Glorot init
        w = np.random.randn(output_size, input_size) * glorot_std
        self.w = TensorNode(w)

        # weights of the bias (the translation)
        b = np.zeros((1, output_size))
        self.b = TensorNode(b)
        # -- We initialize the biases to zero for simplicity. This is a common approach, but with ReLU units it's
        #    sometimes best to add a little noise to avoid dead neurons.

    def forward(self, input):
        """Apply the layer to a batch: input (n, input_size) -> (n, output_size)."""
        outsize, insize = self.w.size()
        n, f = input.size()

        assert f == insize, f'Number of features in input ({f}) does not match input dimension ({insize}).'
        assert len(input.size()) == 2

        # Multiply all input vectors by the weight matrix.
        x = BatchMM.do_forward(self.w, input)

        assert x.size() == (n, outsize)

        # Tile the (1, outsize) bias to (n, outsize) before adding.
        exb = Expand.do_forward(self.b, dim=0, repeats=n)
        # -- We are broadcasting the (1, outsize) vector b over the (n, outsize) matrix x. Numpy normally does this
        #    automatically, if we just do `x + self.b`, but we wouldn't get a gradient over that operation. Expand
        #    is a minimal broadcasting op that is sufficient for our purposes.
        # -- In pytorch, full-featured broadcasting is implemented so there you would actually be able to do `x + self.b`.

        assert x.size() == exb.size()

        return x + exb

    def parameters(self):
        # Both tensors are trainable.
        return [self.w, self.b]
| 3.34375 | 3 |
src/cuda_slic/debug/__main__.py | abonawas/cuda-slic | 6 | 12758938 | <filename>src/cuda_slic/debug/__main__.py
import numpy as np
import pycuda
from skimage import color, data, filters
from ..slic import slic
def test_slic_grayscale_runs(n=200, sp_size=5):
    """Smoke test: run SLIC on an n^3 random binary blob with ~sp_size^3-voxel
    superpixels. Only checks that the call completes (labels are discarded)."""
    blob = data.binary_blobs(length=n, n_dim=3, seed=2)
    blob = np.float32(blob)
    # Target one segment per sp_size^3 voxel cube.
    n_segments = n ** 3 / sp_size ** 3
    labels = slic(blob, n_segments=n_segments, compactness=3)
if __name__ == "__main__":
pycuda.driver.start_profiler()
test_slic_grayscale_runs(n=200)
pycuda.driver.stop_profiler()
| 2.25 | 2 |
hard-gists/2310005/snippet.py | jjhenkel/dockerizeme | 21 | 12758939 | import os
import struct
import pylibemu
# Python 2 demo script: emulate a WinExec/ExitProcess shellcode with pylibemu.
emu = pylibemu.Emulator()

# shellcode uses this address for the winexec call as cmdline
# modify it to see different emu_profile_output
emu.memory_write_dword(0x41414243, 0x41414141)

# this is used as exitprocess exitcode
emu.memory_write_dword(0x41414143, 0x00000021)

#b = open('/opt/pylibemu/urldownloadsc/test.s','rb').read()
b = 'eb6b566a3059648b018b400c8b701cad8b40085ec3608b6c24248b453c8b54057801ea8b4a188b5a2001ebe334498b348b01ee31ff31c0fcac84c07407c1cf0d01c7ebf43b7c242875e18b5a2401eb668b0c4b8b5a1c01eb8b048b01e88944241c61c35fe899ffffff89c3eb05e8f1ffffff6898fe8a0e53e898ffffff41516843424141ffd0687ed8e27353e884ffffff31d28b8a4341414151ffd0'.decode('hex')

MEM_OFFSET = 0x401000

# make the ret work by putting our address here
emu.memory_write_dword(MEM_OFFSET, MEM_OFFSET + 4)

# manually copy shellcode to mem, one little-endian dword at a time
# (the final chunk is zero-padded to 4 bytes)
for i in range(0, len(b), 4):
    emu.memory_write_dword(MEM_OFFSET + 4 + i, struct.unpack('I',b[i:i+4].ljust(4, '\x00'))[0])

# c3 is ret, 90 is nop -- the ret pops MEM_OFFSET and jumps into the shellcode
emu.prepare('\xc3\x90\x90\x90', 0)

# set stack to MEM_OFFSET
emu.cpu_reg32_set(pylibemu.EMU_REGS.esp, MEM_OFFSET)
emu.test()
print 'EMU PROFILE OUTPUT:'
print emu.emu_profile_output

# output should look like:
#EMU PROFILE OUTPUT:
#UINT WINAPI WinExec (
#    LPCSTR = 0x01c5cdb0 =>
#        = "AAAA";
#    UINT uCmdShow = 49;
#) = 32;
#void ExitProcess (
#    UINT uExitCode = 2088763392;
#) = 0;
src/scs_core/aws/manager/byline_manager.py | south-coast-science/scs_core | 3 | 12758940 | <reponame>south-coast-science/scs_core<filename>src/scs_core/aws/manager/byline_manager.py
"""
Created on 25 Dec 2018
@author: <NAME> (<EMAIL>)
Equivalent to cURLs:
curl "https://aws.southcoastscience.com/device-topics?topic=south-coast-science-dev/alphasense/loc/303/gases"
curl "https://aws.southcoastscience.com/device-topics?device=scs-bgx-303"
"""
from scs_core.aws.client.rest_client import RESTClient
from scs_core.aws.data.byline import Byline, DeviceBylineGroup, TopicBylineGroup
# --------------------------------------------------------------------------------------------------------------------
class BylineManager(object):
    """Finds device/topic bylines via the South Coast Science AWS API.

    Every finder issues the same GET against /device-topics; the shared
    __get_jdict helper owns the connect/close lifecycle (the original
    duplicated that block in all five finders).
    """

    __REQUEST_PATH = '/device-topics'

    __DEVICE = 'device'
    __TOPIC = 'topic'

    # ----------------------------------------------------------------------------------------------------------------

    def __init__(self, api_key):
        """
        Constructor

        api_key: credentials passed through to the underlying RESTClient.
        """
        self.__rest_client = RESTClient(api_key)

    # ----------------------------------------------------------------------------------------------------------------

    def __get_jdict(self, params=None):
        # Shared request wrapper: connect, GET /device-topics, and always close
        # the client afterwards, even on error.
        self.__rest_client.connect()

        try:
            return self.__rest_client.get(self.__REQUEST_PATH, params=params)
        finally:
            self.__rest_client.close()

    # ----------------------------------------------------------------------------------------------------------------

    def find_latest_byline_for_topic(self, topic):
        """Return the most recent Byline published on the given topic, or None."""
        jdict = self.__get_jdict({self.__TOPIC: topic})

        if jdict is None:
            return None

        latest_byline = None
        for item in jdict:
            byline = Byline.construct_from_jdict(item)

            # Keep the byline with the newest rec timestamp.
            if latest_byline is None or latest_byline.rec < byline.rec:
                latest_byline = byline

        return latest_byline

    def find_bylines(self, excluded=None):
        """Return the TopicBylineGroup for all devices and topics."""
        jdict = self.__get_jdict()

        return TopicBylineGroup.construct_from_jdict(jdict, excluded=excluded, skeleton=True)

    def find_bylines_for_topic(self, topic, excluded=None):
        """Return the TopicBylineGroup for the given topic."""
        jdict = self.__get_jdict({self.__TOPIC: topic})

        return TopicBylineGroup.construct_from_jdict(jdict, excluded=excluded, skeleton=True)

    def find_bylines_for_device(self, device, excluded=None):
        """Return the DeviceBylineGroup for the given device tag."""
        jdict = self.__get_jdict({self.__DEVICE: device})

        return DeviceBylineGroup.construct_from_jdict(jdict, excluded=excluded, skeleton=True)

    def find_byline_for_device_topic(self, device, topic):
        """Return the device's Byline matching the given topic, or None."""
        jdict = self.__get_jdict({self.__DEVICE: device})

        if jdict is None:
            return None

        for item in jdict:
            byline = Byline.construct_from_jdict(item)

            if byline.topic == topic:
                return byline

        return None

    # ----------------------------------------------------------------------------------------------------------------

    def __str__(self, *args, **kwargs):
        return "BylineManager:{rest_client:%s}" % self.__rest_client
| 1.882813 | 2 |
Introduction to Deep Learning & Neural Networks with Keras/Module1/Lab1_Artificial_Neural_Networks_Simple_computation.py | worklifesg/Deep-Learning-Specialization-In-Progress- | 1 | 12758941 | <filename>Introduction to Deep Learning & Neural Networks with Keras/Module1/Lab1_Artificial_Neural_Networks_Simple_computation.py
# Neural network has 2 inputs, 1 hidden layer with 2 nodes and 1 output layer with one node.
# x1,x2 - I/P, w1,w2,w3,w4 - I/P weights, b1,1 b1,2 - biases, w5,w6 - Hidden layer weights
# b2 - bias, a2 - O/P, z1,1 z1,2 z2 - linear combination of weights and biases

import numpy as np


def sigmoid(z):
    """Logistic activation used at every node."""
    return 1.0 / (1.0 + np.exp(-z))


def report(*values):
    """Append one line to the lab output file."""
    with open("Lab1_ANN.txt", 'a') as f:
        print(*values, file=f)


# Initializing weights and biases
weights = np.around(np.random.uniform(size=6), decimals=2)
biases = np.around(np.random.uniform(size=3), decimals=2)

# Given x1 x2
x1 = 0.5
x2 = 0.85

report("The weights are: ", weights)
report("The biases are: ", biases)
report("x1 is {} and x2 is {}".format(x1, x2))

# Hidden-layer pre-activations z1,1 and z1,2
z11 = x1 * weights[0] + x2 * weights[1] + biases[0]
z12 = x1 * weights[2] + x2 * weights[3] + biases[1]

report('The weighted sum of the inputs at the first node in the hidden layer is {}'.format(z11))
report('The weighted sum of the inputs at the second node in the hidden layer is {}'.format(z12))

# Hidden-layer activations a1,1 and a1,2 (sigmoid)
a11 = sigmoid(z11)
a12 = sigmoid(z12)

report('The activation of the first node in the hidden layer is {}'.format(a11))
report('The activation of the second node in the hidden layer is {}'.format(a12))

# Output-layer pre-activation and activation
z2 = a11 * weights[4] + a12 * weights[5] + biases[2]
a2 = sigmoid(z2)

report('The weighted sum of the inputs at the node in the output layer is {}'.format(z2))
report('The output of the network for x1 = 0.5 and x2 = 0.85 is {}'.format(a2))
| 4.28125 | 4 |
app/__main__.py | theirix/hilinkmon | 0 | 12758942 | from .monitor import main_loop
# Package entry point: delegates straight to the monitor's main loop.
main_loop()
| 1.03125 | 1 |
jp.atcoder/abc137/abc137_c/8495749.py | kagemeka/atcoder-submissions | 1 | 12758943 | <reponame>kagemeka/atcoder-submissions
# 2019-11-16 14:51:41(JST)
import sys
# import collections
# import math
# from string import ascii_lowercase, ascii_uppercase, digits
from bisect import bisect_left as bi_l
from bisect import bisect_right as bi_r
from bisect import insort_left as in_l
# import itertools
# from functools import reduce
# import operator as op
# from scipy.misc import comb # float
# import numpy as np
# import heapq
# import array
def main():
    """Count pairs of input strings that are anagrams of each other.

    Reads ``n`` from stdin followed by ``n`` strings.  Each string is
    normalised by sorting its characters; two strings are anagrams iff
    their normalised forms are equal.  A running frequency dict of
    normalised forms counts pairs in O(total input length), replacing the
    original sorted list whose per-string ``insort_left`` shift made the
    loop O(n^2).
    """
    n = int(sys.stdin.readline().rstrip())
    seen = {}  # normalised string -> number of occurrences so far
    count = 0
    for _ in range(n):
        key = ''.join(sorted(sys.stdin.readline().rstrip()))
        # Every earlier occurrence of this key forms one new anagram pair.
        count += seen.get(key, 0)
        seen[key] = seen.get(key, 0) + 1
    print(count)


if __name__ == "__main__":
    main()
| 2.859375 | 3 |
cdptools/indexers/indexer.py | textioHQ/cdptools | 0 | 12758944 | <reponame>textioHQ/cdptools<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import logging
import re
import string
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, Union
from nltk.stem import PorterStemmer
###############################################################################
log = logging.getLogger(__name__)

# Ensure stopwords are downloaded: importing nltk's stopword corpus raises
# LookupError when the corpus files are absent, in which case download the
# corpus once and retry the import.
try:
    from nltk.corpus import stopwords
    STOPWORDS = stopwords.words("english")
except LookupError:
    import nltk
    nltk.download("stopwords")
    log.info("Downloaded nltk stopwords")
    from nltk.corpus import stopwords
    STOPWORDS = stopwords.words("english")
###############################################################################
class Indexer(ABC):
    """
    Abstract base for search-index generators.

    Why is this not just a single function?
    Like audio splitters, to pass arguments to the instance of the class and retain state may be useful. In this
    case, while computing the scores for every meeting memory may be a constraint, or in the case where a developer
    wants to artificially inflate the scores for certain events or words to highlight something, that can be done on
    an object instance rather than just a function instance.
    """

    @staticmethod
    def get_raw_transcript(transcipt_path: Union[str, Path]) -> str:
        """
        Attempts to open either a raw or annotated json transcript format and return the raw transcript as a string.
        If the file format is not supported or if the data contained in the transcript does not follow the
        specification a TypeError is raised.

        Parameters
        ----------
        transcipt_path: Union[str, Path]
            Path to the transcript

        Returns
        -------
        transcript: str
            The raw text of the opened transcript.
        """
        # Enforce path (strict=True raises for a missing file)
        transcipt_path = Path(transcipt_path).expanduser().resolve(strict=True)

        # Check that the transcript follows a known format
        if transcipt_path.suffix == ".json":
            with open(transcipt_path, "r") as read_in:
                transcript = json.load(read_in)

            # Join all text items into a single string.
            # TypeError is caught as well so that a JSON document whose top level
            # is not an object (e.g. a bare list) produces the same descriptive
            # error as a missing key, instead of an opaque indexing error.
            try:
                transcript = " ".join([portion["text"] for portion in transcript["data"]])
            except (KeyError, TypeError):
                raise TypeError(
                    f"Unsure how to handle annotated JSON transcript provided: {transcipt_path} "
                    f"Please refer to the `transcript_formats.md` file in the documentation for details."
                )
        # Raise error for all other file formats
        else:
            raise TypeError(
                f"Unsure how to handle transcript file format: {transcipt_path} "
                f"Please refer to the `transcript_formats.md` file in the documentation for details."
            )

        return transcript

    @staticmethod
    def clean_text_for_indexing(raw_transcript: str) -> str:
        """
        Run basic cleaning operations against the raw text of the transcript.

        Lowercases the text, strips newlines/tabs/punctuation, removes English
        stopwords, collapses whitespace, and reduces each word to its Porter stem.

        Parameters
        ----------
        raw_transcript: str
            The raw text of a transcript as a single string.

        Returns
        -------
        cleaned_transcript: str
            The cleaned version of the transcript text.
        """
        # Send to lowercase
        cleaned_transcript = raw_transcript.lower()

        # Remove new line and tab characters
        cleaned_transcript = cleaned_transcript.replace("\n", " ").replace("\t", " ")

        # Remove punctuation
        cleaned_transcript = re.sub(f"[{re.escape(string.punctuation)}]", "", cleaned_transcript)

        # Remove stopwords
        joined_stopwords = "|".join(STOPWORDS)
        cleaned_transcript = re.sub(r"\b(" + joined_stopwords + r")\b", "", cleaned_transcript)

        # Collapse runs of spaces and trim both ends.  (The previous version
        # indexed cleaned_transcript[0] / [-1] directly and raised IndexError
        # whenever the cleaned text came out empty.)
        cleaned_transcript = re.sub(r" {2,}", " ", cleaned_transcript).strip()
        if not cleaned_transcript:
            return cleaned_transcript

        # Reduce each word to its Porter stem and rejoin the transcript
        ps = PorterStemmer()
        return " ".join(ps.stem(word) for word in cleaned_transcript.split(" "))

    @staticmethod
    def drop_terms_from_index_below_value(
        index: Dict[str, Dict[str, float]],
        minimum_value_allowed: float = 0.0
    ) -> Dict[str, Dict[str, float]]:
        """
        Drop any terms from an index that have a value less than or equal to the provided.

        Parameters
        ----------
        index: Dict[str, Dict[str, float]]
            An index dictionary, the output of an `Indexer.generate_index` run.
        minimum_value_allowed: float
            The float value that all term event values should be compared against. Any term event value less than or
            equal to the received value will be dropped from the index.

        Returns
        -------
        cleaned_index: Dict[str, Dict[str, float]]
            The cleaned index that has had values removed based off the received minimum value allowed.
        """
        cleaned: Dict[str, Dict[str, float]] = {}
        dropped_count = 0

        # Keep only term/event values strictly greater than the allowed minimum
        for term in index:
            for event_id, value in index[term].items():
                if value > minimum_value_allowed:
                    cleaned.setdefault(term, {})[event_id] = value
                else:
                    dropped_count += 1

        log.debug(f"Dropped {dropped_count} terms during index cleaning")
        return cleaned

    @abstractmethod
    def generate_index(self, event_corpus_map: Dict[str, Path], **kwargs) -> Dict[str, Dict[str, float]]:
        """
        Given an event corpus map, compute word event values that will act as a search index.

        Parameters
        ----------
        event_corpus_map: Dict[str, str]
            A dictionary that maps event id to a local path with transcript to use for indexing.

        Returns
        -------
        word_event_scores: Dict[str, Dict[str, float]]
            A dictionary of values per event per word that will be stored in the CDP instance's database and used as
            a method for searching for events.

            Example:
            ```json
            {
                "hello": {
                    "15ce0a20-3688-4ebd-bf3f-24f6e8d12ad9": 12.3781,
                    "3571c871-6f7d-41b5-85d1-ced0589f9220": 56.7922,
                },
                "world": {
                    "15ce0a20-3688-4ebd-bf3f-24f6e8d12ad9": 8.0016,
                    "3571c871-6f7d-41b5-85d1-ced0589f9220": 33.9152,
                }
            }
            ```
        """
        return {}
| 2.765625 | 3 |
migration_test_cleanup.py | blinder27/sqlalcm | 16 | 12758945 | <filename>migration_test_cleanup.py
# -*- coding: utf-8 -*-
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
from google.cloud import spanner
def main(argv):
    """Drop the tables created by the Spanner migration compliance test."""
    db_url = argv[0]

    # Pull the project and instance ids out of the database URL; the findall
    # captures include the surrounding slashes, which are stripped afterwards.
    project_parts = re.findall(r"projects(.*?)instances", db_url)
    instance_parts = re.findall(r"instances(.*?)databases", db_url)
    project_id = "".join(project_parts).replace("/", "")
    instance_name = "".join(instance_parts).replace("/", "")

    client = spanner.Client(project=project_id)
    instance = client.instance(instance_id=instance_name)
    database = instance.database("compliance-test")
    # Drop both test tables and wait up to 120 s for the DDL to complete.
    database.update_ddl(["DROP TABLE account", "DROP TABLE alembic_version"]).result(120)


if __name__ == "__main__":
    main(sys.argv[1:])
| 2.203125 | 2 |
get_instance_details.py | rafaelnize/Python_AWS_Scripts | 0 | 12758946 | import boto
import boto.ec2
import argparse

# The original imported EC2Instances twice back-to-back; one import suffices.
from aws import EC2Instances

# CLI: look up an instance either by a name pattern or by its instance id.
parser = argparse.ArgumentParser(description='Get instance details')
parser.add_argument('-n', '--instance_name', metavar='pattern')
parser.add_argument('-i', "--instance_id", metavar='pattern')
def main():
    """Parse the command-line arguments and print the resulting namespace.

    The EC2 listing calls are not implemented yet (they were commented out);
    this entry point currently only exercises the argument parser.
    """
    args = parser.parse_args()
    # Parenthesised print behaves identically under Python 2 and 3 for a
    # single argument, unlike the original Python-2-only ``print args``.
    print(args)
if __name__ == "__main__":
main() | 3.1875 | 3 |
backend/blog/admin.py | saifxd7/WeBlog | 0 | 12758947 | from django.contrib import admin
from django_summernote.admin import SummernoteModelAdmin
from .models import BlogPost
# Apply summernote to all TextField in model.
class BlogPostAdmin(SummernoteModelAdmin):  # instead of ModelAdmin
    # The slug field is excluded from the admin form (generated elsewhere —
    # presumably from the title; confirm against the BlogPost model).
    exclude = ('slug', )
    # Changelist columns; id and title link to the change form.
    list_display = ('id', 'title', 'category', 'date_created')
    list_display_links = ('id', 'title')
    # Allow searching by title only.
    search_fields = ('title', )
    list_per_page = 25
    # Fields that use the Summernote rich-text editor.
    summernote_fields = ('content',)
admin.site.register(BlogPost, BlogPostAdmin) | 1.789063 | 2 |
imagenet/getlogger.py | wizard1203/examples-master | 0 | 12758948 | <reponame>wizard1203/examples-master
import logging
def get_logger(logger_name, log_file, level=logging.INFO):
    """Return the logger *logger_name* writing timestamped records to *log_file*.

    Parameters
    ----------
    logger_name : str
        Name passed to ``logging.getLogger`` (loggers are process-wide
        singletons per name).
    log_file : str
        Path of the log file; opened with ``mode='w'`` (truncates) on the
        first call for this logger name.
    level : int
        Logging level to set on the logger (default ``logging.INFO``).
    """
    logger = logging.getLogger(logger_name)
    logger.setLevel(level)
    # Attach the file handler only once per logger name.  The original added
    # a fresh FileHandler on every call, so repeated calls duplicated every
    # record (and re-truncated the file).  Note: a second call with a
    # *different* log_file keeps the original destination.
    if not logger.handlers:
        formatter = logging.Formatter('%(asctime)s : %(message)s', "%Y-%m-%d %H:%M:%S")
        file_handler = logging.FileHandler(log_file, mode='w')
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    return logger
| 2.65625 | 3 |
DQM/EcalMonitorClient/python/TimingClient_cfi.py | ckamtsikis/cmssw | 852 | 12758949 | <gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
from DQM.EcalMonitorTasks.TimingTask_cfi import ecalTimingTask
from DQM.EcalMonitorClient.IntegrityClient_cfi import ecalIntegrityClient
# Quality-evaluation thresholds.  The *Fwd variants apply to the forward
# region, as stated in the MEs descriptions that interpolate these values.
minChannelEntries = 1       # minimum entries for a channel to be evaluated
minTowerEntries = 3         # minimum entries for a 5x5 tower to be evaluated
toleranceMean = 2.          # max allowed mean timing offset (ns) before flagging
toleranceRMS = 6.           # max allowed timing RMS (ns) before flagging
minChannelEntriesFwd = 8    # forward-region counterpart of minChannelEntries
minTowerEntriesFwd = 24     # forward-region counterpart of minTowerEntries
toleranceMeanFwd = 6.       # forward-region counterpart of toleranceMean
toleranceRMSFwd = 12.       # forward-region counterpart of toleranceRMS
tailPopulThreshold = 0.4    # max fraction of events outside the tight window (see QualitySummary)
timeWindow = 25.            # half-width (ns) of the timing histogram axes below
# Monitor-element configuration for the ECAL timing client: threshold
# parameters, source histograms taken from the timing task and integrity
# client, and the output histograms (MEs) this client fills.
ecalTimingClient = cms.untracked.PSet(
    # Thresholds consumed by the quality evaluation (values defined above).
    params = cms.untracked.PSet(
        minChannelEntries = cms.untracked.int32(minChannelEntries),
        minTowerEntries = cms.untracked.int32(minTowerEntries),
        toleranceMean = cms.untracked.double(toleranceMean),
        toleranceRMS = cms.untracked.double(toleranceRMS),
        minChannelEntriesFwd = cms.untracked.int32(minChannelEntriesFwd),
        minTowerEntriesFwd = cms.untracked.int32(minTowerEntriesFwd),
        toleranceMeanFwd = cms.untracked.double(toleranceMeanFwd),
        toleranceRMSFwd = cms.untracked.double(toleranceRMSFwd),
        tailPopulThreshold = cms.untracked.double(tailPopulThreshold)
    ),
    # Input histograms: timing maps from the timing task plus the channel
    # status map from the integrity client.
    sources = cms.untracked.PSet(
        TimeAllMap = ecalTimingTask.MEs.TimeAllMap,
        TimeMap = ecalTimingTask.MEs.TimeMap,
        TimeMapByLS = ecalTimingTask.MEs.TimeMapByLS,
        ChStatus = ecalIntegrityClient.MEs.ChStatus
    ),
    # Output histograms produced by this client.
    MEs = cms.untracked.PSet(
        RMSAll = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sSummaryClient/%(prefix)sTMT%(suffix)s timing rms 1D summary'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal3P'),
            xaxis = cms.untracked.PSet(
                high = cms.untracked.double(10.0),
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(0.0),
                title = cms.untracked.string('time (ns)')
            ),
            btype = cms.untracked.string('User'),
            description = cms.untracked.string('Distribution of per-channel timing RMS. Channels with entries less than ' + str(minChannelEntries) + ' are not considered.')
        ),
        ProjEta = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sTimingClient/%(prefix)sTMT timing projection eta%(suffix)s'),
            kind = cms.untracked.string('TProfile'),
            yaxis = cms.untracked.PSet(
                title = cms.untracked.string('time (ns)')
            ),
            otype = cms.untracked.string('Ecal3P'),
            btype = cms.untracked.string('ProjEta'),
            description = cms.untracked.string('Projection of per-channel mean timing. Channels with entries less than ' + str(minChannelEntries) + ' are not considered.')
        ),
        FwdBkwdDiff = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sTimingTask/%(prefix)sTMT timing %(prefix)s+ - %(prefix)s-'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal2P'),
            xaxis = cms.untracked.PSet(
                high = cms.untracked.double(5.0),
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(-5.0),
                title = cms.untracked.string('time (ns)')
            ),
            btype = cms.untracked.string('User'),
            description = cms.untracked.string('Forward-backward asymmetry of per-channel mean timing. Channels with entries less than ' + str(minChannelEntries) + ' are not considered.')
        ),
        FwdvBkwd = cms.untracked.PSet(
            kind = cms.untracked.string('TH2F'),
            yaxis = cms.untracked.PSet(
                high = cms.untracked.double(timeWindow),
                nbins = cms.untracked.int32(50),
                low = cms.untracked.double(-timeWindow),
                title = cms.untracked.string('time (ns)')
            ),
            otype = cms.untracked.string('Ecal2P'),
            xaxis = cms.untracked.PSet(
                high = cms.untracked.double(timeWindow),
                nbins = cms.untracked.int32(50),
                low = cms.untracked.double(-timeWindow)
            ),
            btype = cms.untracked.string('User'),
            path = cms.untracked.string('%(subdet)s/%(prefix)sTimingTask/%(prefix)sTMT timing %(prefix)s+ vs %(prefix)s-'),
            description = cms.untracked.string('Forward-backward correlation of per-channel mean timing. Channels with entries less than ' + str(minChannelEntries) + ' are not considered.')
        ),
        ProjPhi = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sTimingClient/%(prefix)sTMT timing projection phi%(suffix)s'),
            kind = cms.untracked.string('TProfile'),
            yaxis = cms.untracked.PSet(
                title = cms.untracked.string('time (ns)')
            ),
            otype = cms.untracked.string('Ecal3P'),
            btype = cms.untracked.string('ProjPhi'),
            description = cms.untracked.string('Projection of per-channel mean timing. Channels with entries less than ' + str(minChannelEntries) + ' are not considered.')
        ),
        MeanSM = cms.untracked.PSet(
            kind = cms.untracked.string('TH1F'),
            yaxis = cms.untracked.PSet(
                title = cms.untracked.string('time (ns)')
            ),
            otype = cms.untracked.string('SM'),
            xaxis = cms.untracked.PSet(
                high = cms.untracked.double(timeWindow),
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(-timeWindow)
            ),
            btype = cms.untracked.string('User'),
            path = cms.untracked.string('%(subdet)s/%(prefix)sTimingClient/%(prefix)sTMT timing mean %(sm)s'),
            description = cms.untracked.string('Distribution of per-channel timing mean. Channels with entries less than ' + str(minChannelEntries) + ' are not considered.')
        ),
        RMSMap = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sTimingClient/%(prefix)sTMT timing rms %(sm)s'),
            kind = cms.untracked.string('TH2F'),
            zaxis = cms.untracked.PSet(
                title = cms.untracked.string('rms (ns)')
            ),
            otype = cms.untracked.string('SM'),
            btype = cms.untracked.string('Crystal'),
            description = cms.untracked.string('2D distribution of per-channel timing RMS. Channels with entries less than ' + str(minChannelEntries) + ' are not considered.')
        ),
        QualitySummary = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sSummaryClient/%(prefix)sTMT%(suffix)s timing quality summary'),
            kind = cms.untracked.string('TH2F'),
            otype = cms.untracked.string('Ecal3P'),
            btype = cms.untracked.string('SuperCrystal'),
            description = cms.untracked.string('Summary of the timing data quality. A 5x5 tower is red if the mean timing of the tower is off by more than ' + str(toleranceMean) + ' or RMS is greater than ' + str(toleranceRMS) + ' (' + str(toleranceMeanFwd) + ' and ' + str(toleranceRMSFwd) + ' in forward region). Towers with total entries less than ' + str(minTowerEntries) + ' are not subject to this evaluation. Since 5x5 tower timings are calculated with a tighter time-window than per-channel timings, a tower can additionally become red if its the sum of per-channel timing histogram entries is greater than per-tower histogram entries by factor ' + str(1. / (1. - tailPopulThreshold)) + ' (significant fraction of events fall outside the tight time-window).')
        ),
        Quality = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sTimingClient/%(prefix)sTMT timing quality %(sm)s'),
            kind = cms.untracked.string('TH2F'),
            otype = cms.untracked.string('SM'),
            btype = cms.untracked.string('Crystal'),
            description = cms.untracked.string('Summary of the timing data quality. A channel is red if its mean timing is off by more than ' + str(toleranceMean) + ' or RMS is greater than ' + str(toleranceRMS) + '. Channels with entries less than ' + str(minChannelEntries) + ' are not considered.')
        ),
        MeanAll = cms.untracked.PSet(
            path = cms.untracked.string('%(subdet)s/%(prefix)sSummaryClient/%(prefix)sTMT%(suffix)s timing mean 1D summary'),
            kind = cms.untracked.string('TH1F'),
            otype = cms.untracked.string('Ecal3P'),
            xaxis = cms.untracked.PSet(
                high = cms.untracked.double(timeWindow),
                nbins = cms.untracked.int32(100),
                low = cms.untracked.double(-timeWindow),
                title = cms.untracked.string('time (ns)')
            ),
            btype = cms.untracked.string('User'),
            description = cms.untracked.string('Distribution of per-channel timing mean. Channels with entries less than ' + str(minChannelEntries) + ' are not considered.')
        ),
        TrendMean = cms.untracked.PSet(
            path = cms.untracked.string('Ecal/Trends/TimingClient %(prefix)s timing mean'),
            kind = cms.untracked.string('TProfile'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('Trend'),
            description = cms.untracked.string('Trend of timing mean. Plots simple average of all channel timing means at each lumisection.')
        ),
        TrendRMS = cms.untracked.PSet(
            path = cms.untracked.string('Ecal/Trends/TimingClient %(prefix)s timing rms'),
            kind = cms.untracked.string('TProfile'),
            otype = cms.untracked.string('Ecal2P'),
            btype = cms.untracked.string('Trend'),
            description = cms.untracked.string('Trend of timing rms. Plots simple average of all channel timing rms at each lumisection.')
        )
    )
)
| 1.492188 | 1 |
tests/test_manager.py | X0rg/afancontrol | 1 | 12758950 | <reponame>X0rg/afancontrol
from contextlib import ExitStack
from unittest.mock import MagicMock, patch, sentinel
import pytest
import afancontrol.manager
from afancontrol.config import (
Actions,
AlertCommands,
FanName,
FanSpeedModifier,
FansTempsRelation,
MappingName,
TempName,
TriggerConfig,
)
from afancontrol.manager import Manager
from afancontrol.metrics import Metrics
from afancontrol.pwmfan import PWMFanNorm, PWMValueNorm
from afancontrol.report import Report
from afancontrol.temp import FileTemp, TempCelsius, TempStatus
from afancontrol.trigger import Triggers
@pytest.fixture
def report():
    # Report stand-in (MagicMock specced to Report) injected into each test.
    return MagicMock(spec=Report)
def test_manager(report):
    # Smoke test: wire a Manager entirely from mocks, run one tick, and check
    # that the context-manager protocol and per-tick calls reach every part.
    # Calling the spec'd class mock produces an instance-shaped mock.
    mocked_case_fan = MagicMock(spec=PWMFanNorm)()
    mocked_mobo_temp = MagicMock(spec=FileTemp)()
    mocked_metrics = MagicMock(spec=Metrics)()

    with ExitStack() as stack:
        # Replace the Triggers class inside afancontrol.manager so the
        # Manager constructs a mock instead of real trigger handling.
        stack.enter_context(
            patch.object(afancontrol.manager, "Triggers", spec=Triggers)
        )

        manager = Manager(
            fans={FanName("case"): mocked_case_fan},
            temps={TempName("mobo"): mocked_mobo_temp},
            mappings={
                MappingName("1"): FansTempsRelation(
                    temps=[TempName("mobo")],
                    fans=[FanSpeedModifier(fan=FanName("case"), modifier=0.6)],
                )
            },
            report=report,
            # No-op alert/panic commands for the only temp sensor.
            triggers_config=TriggerConfig(
                global_commands=Actions(
                    panic=AlertCommands(enter_cmd=None, leave_cmd=None),
                    threshold=AlertCommands(enter_cmd=None, leave_cmd=None),
                ),
                temp_commands={
                    TempName("mobo"): Actions(
                        panic=AlertCommands(enter_cmd=None, leave_cmd=None),
                        threshold=AlertCommands(enter_cmd=None, leave_cmd=None),
                    )
                },
            ),
            metrics=mocked_metrics,
        )
        stack.enter_context(manager)
        manager.tick()

        # One tick must consult the (mocked) triggers exactly once.
        mocked_triggers = manager.triggers  # type: MagicMock
        assert mocked_triggers.check.call_count == 1

        # Entering the manager enters the fan and metrics; tick hits metrics.
        assert mocked_case_fan.__enter__.call_count == 1
        assert mocked_metrics.__enter__.call_count == 1
        assert mocked_metrics.tick.call_count == 1

    # Leaving the ExitStack must exit the fan and metrics as well.
    assert mocked_case_fan.__exit__.call_count == 1
    assert mocked_metrics.__exit__.call_count == 1
@pytest.mark.parametrize(
    "temps, mappings, expected_fan_speeds",
    [
        # Case 1: a failed sensor (hdd -> None) in the mapping drives the
        # mapped fan to full speed despite the healthy cpu reading.
        (
            {
                TempName("cpu"): TempStatus(
                    min=TempCelsius(30),
                    max=TempCelsius(50),
                    temp=TempCelsius((50 - 30) * 0.42 + 30),
                    panic=None,
                    threshold=None,
                    is_panic=False,
                    is_threshold=False,
                ),
                TempName("hdd"): None,  # a failing sensor
            },
            {
                MappingName("all"): FansTempsRelation(
                    temps=[TempName("cpu"), TempName("hdd")],
                    fans=[FanSpeedModifier(fan=FanName("rear"), modifier=1.0)],
                )
            },
            {FanName("rear"): PWMValueNorm(1.0)},
        ),
        # Case 2: a single sensor at 42% of its [min, max] span -> 42% speed.
        (
            {
                TempName("cpu"): TempStatus(
                    min=TempCelsius(30),
                    max=TempCelsius(50),
                    temp=TempCelsius((50 - 30) * 0.42 + 30),
                    panic=None,
                    threshold=None,
                    is_panic=False,
                    is_threshold=False,
                )
            },
            {
                MappingName("all"): FansTempsRelation(
                    temps=[TempName("cpu")],
                    fans=[FanSpeedModifier(fan=FanName("rear"), modifier=1.0)],
                )
            },
            {FanName("rear"): PWMValueNorm(0.42)},
        ),
        # Case 3: the mapping's fan modifier scales the computed speed.
        (
            {
                TempName("cpu"): TempStatus(
                    min=TempCelsius(30),
                    max=TempCelsius(50),
                    temp=TempCelsius((50 - 30) * 0.42 + 30),
                    panic=None,
                    threshold=None,
                    is_panic=False,
                    is_threshold=False,
                )
            },
            {
                MappingName("all"): FansTempsRelation(
                    temps=[TempName("cpu")],
                    fans=[FanSpeedModifier(fan=FanName("rear"), modifier=0.6)],
                )
            },
            {FanName("rear"): PWMValueNorm(0.42 * 0.6)},
        ),
        # Case 4: with several sensors in one mapping, the highest relative
        # reading (mobo at 52%) determines the fan speed.
        (
            {
                TempName("cpu"): TempStatus(
                    min=TempCelsius(30),
                    max=TempCelsius(50),
                    temp=TempCelsius((50 - 30) * 0.42 + 30),
                    panic=None,
                    threshold=None,
                    is_panic=False,
                    is_threshold=False,
                ),
                TempName("mobo"): TempStatus(
                    min=TempCelsius(30),
                    max=TempCelsius(50),
                    temp=TempCelsius((50 - 30) * 0.52 + 30),
                    panic=None,
                    threshold=None,
                    is_panic=False,
                    is_threshold=False,
                ),
                TempName("hdd"): TempStatus(
                    min=TempCelsius(30),
                    max=TempCelsius(50),
                    temp=TempCelsius((50 - 30) * 0.12 + 30),
                    panic=None,
                    threshold=None,
                    is_panic=False,
                    is_threshold=False,
                ),
            },
            {
                MappingName("all"): FansTempsRelation(
                    temps=[TempName("cpu"), TempName("mobo"), TempName("hdd")],
                    fans=[FanSpeedModifier(fan=FanName("rear"), modifier=1.0)],
                )
            },
            {FanName("rear"): PWMValueNorm(0.52)},
        ),
        # Case 5: the fan appears in two mappings; expected 0.42 comes from
        # mapping "1" (0.42 * 1.0) rather than "2" (0.52 * 0.6 = 0.312) —
        # presumably the maximum across mappings wins; confirm in Manager.
        (
            {
                TempName("cpu"): TempStatus(
                    min=TempCelsius(30),
                    max=TempCelsius(50),
                    temp=TempCelsius((50 - 30) * 0.42 + 30),
                    panic=None,
                    threshold=None,
                    is_panic=False,
                    is_threshold=False,
                ),
                TempName("mobo"): TempStatus(
                    min=TempCelsius(30),
                    max=TempCelsius(50),
                    temp=TempCelsius((50 - 30) * 0.52 + 30),
                    panic=None,
                    threshold=None,
                    is_panic=False,
                    is_threshold=False,
                ),
                TempName("hdd"): TempStatus(
                    min=TempCelsius(30),
                    max=TempCelsius(50),
                    temp=TempCelsius((50 - 30) * 0.12 + 30),
                    panic=None,
                    threshold=None,
                    is_panic=False,
                    is_threshold=False,
                ),
            },
            {
                MappingName("1"): FansTempsRelation(
                    temps=[TempName("cpu"), TempName("hdd")],
                    fans=[FanSpeedModifier(fan=FanName("rear"), modifier=1.0)],
                ),
                MappingName("2"): FansTempsRelation(
                    temps=[TempName("mobo"), TempName("hdd")],
                    fans=[FanSpeedModifier(fan=FanName("rear"), modifier=0.6)],
                ),
            },
            {FanName("rear"): PWMValueNorm(0.42)},
        ),
    ],
)
def test_fan_speeds(report, temps, mappings, expected_fan_speeds):
    # Feed pre-built TempStatus values straight into the speed computation
    # and compare the resulting per-fan PWM values.
    mocked_case_fan = MagicMock(spec=PWMFanNorm)()
    mocked_mobo_temp = MagicMock(spec=FileTemp)()
    mocked_metrics = MagicMock(spec=Metrics)()

    with ExitStack() as stack:
        # Triggers are irrelevant here — patched out entirely.
        stack.enter_context(
            patch.object(afancontrol.manager, "Triggers", spec=Triggers)
        )

        manager = Manager(
            fans={fan_name: mocked_case_fan for fan_name in expected_fan_speeds.keys()},
            temps={temp_name: mocked_mobo_temp for temp_name in temps.keys()},
            mappings=mappings,
            report=report,
            # Never touched in this test, so a bare sentinel suffices.
            triggers_config=sentinel.some_triggers_config,
            metrics=mocked_metrics,
        )
        stack.enter_context(manager)

        assert expected_fan_speeds == pytest.approx(
            dict(manager._map_temps_to_fan_speeds(temps))
        )
| 2.109375 | 2 |