text stringlengths 4 1.02M | meta dict |
|---|---|
import numpy as np
import yaml
from qutip import *
from pylab import *
from scipy.fftpack import fft
import matplotlib.pyplot as plt
import yaml
from scipy.interpolate import interp1d
from scipy.optimize import fsolve
from qutip.ui.progressbar import TextProgressBar
class Parameters:
    """Bundle of physical and numerical parameters for one simulation point
    (frequencies, couplings, decay rates and Hilbert-space truncations for
    the cavity `c` and the two-level subsystem `t`)."""

    def __init__(self, wc, wq, eps, g, chi, kappa, gamma, t_levels, c_levels):
        self.wc = wc              # cavity frequency
        self.wq = wq              # qubit frequency
        self.eps = eps            # drive amplitude
        self.g = g                # coupling strength
        self.chi = chi            # anharmonicity coefficient
        self.gamma = gamma        # qubit decay rate
        self.kappa = kappa        # cavity decay rate
        self.t_levels = t_levels  # qubit Hilbert-space truncation
        self.c_levels = c_levels  # cavity Hilbert-space truncation

    def copy(self):
        """Return an independent Parameters instance with identical values."""
        return Parameters(self.wc, self.wq, self.eps, self.g, self.chi,
                          self.kappa, self.gamma, self.t_levels, self.c_levels)
class Results:
    """Outcome of a frequency sweep: per-point parameters, drive frequencies,
    steady-state transmissions and the occupations of the highest retained
    cavity/qubit level (used to detect Hilbert-space truncation error).

    All arrays are index-aligned; `concatenate` keeps them sorted by drive
    frequency.
    """

    def __init__(self, params=np.array([]), wd_points=np.array([]),
                 transmissions=np.array([]), edge_occupations_c=np.array([]),
                 edge_occupations_t=np.array([])):
        self.params = params
        self.wd_points = wd_points
        self.transmissions = transmissions
        self.edge_occupations_c = edge_occupations_c
        self.edge_occupations_t = edge_occupations_t
        # Derived quantities used by the adaptive refinement.
        self.abs_transmissions = np.absolute(self.transmissions)
        self.size = self.wd_points.size

    def concatenate(self, results):
        """Merge with another Results object; the returned Results is sorted
        by drive frequency."""
        combined_params = np.concatenate([self.params, results.params])
        combined_wd_points = np.concatenate([self.wd_points, results.wd_points])
        combined_transmissions = np.concatenate([self.transmissions, results.transmissions])
        combined_edge_occupations_c = np.concatenate([self.edge_occupations_c, results.edge_occupations_c])
        combined_edge_occupations_t = np.concatenate([self.edge_occupations_t, results.edge_occupations_t])
        # Re-sort every array by drive frequency so they stay index-aligned.
        sort_indices = np.argsort(combined_wd_points)
        combined_params = combined_params[sort_indices]
        combined_wd_points = combined_wd_points[sort_indices]
        combined_transmissions = combined_transmissions[sort_indices]
        combined_edge_occupations_c = combined_edge_occupations_c[sort_indices]
        combined_edge_occupations_t = combined_edge_occupations_t[sort_indices]
        return Results(combined_params, combined_wd_points,
                       combined_transmissions, combined_edge_occupations_c,
                       combined_edge_occupations_t)

    def delete(self, indices):
        """Return a copy of these results with the given indices removed.

        Also prints a debug sanity check (True when nothing actually
        changed).
        """
        reduced_params = np.delete(self.params, indices)
        reduced_wd_points = np.delete(self.wd_points, indices)
        reduced_transmissions = np.delete(self.transmissions, indices)
        reduced_edge_occupations_c = np.delete(self.edge_occupations_c, indices)
        reduced_edge_occupations_t = np.delete(self.edge_occupations_t, indices)
        reduced_results = Results(reduced_params, reduced_wd_points,
                                  reduced_transmissions, reduced_edge_occupations_c,
                                  reduced_edge_occupations_t)
        # NOTE(review): when `indices` is non-empty the arrays differ in
        # length and this elementwise comparison is fragile; kept for parity
        # with the original debug behaviour.  print() with a single argument
        # is valid under both Python 2 and 3 (the original used the Python
        # 2-only statement form).
        params_change = (reduced_params == self.params)
        wd_points_change = (reduced_wd_points == self.wd_points)
        transmissions_change = (reduced_transmissions == self.transmissions)
        edge_occupations_c_change = (reduced_edge_occupations_c == self.edge_occupations_c)
        edge_occupations_t_change = (reduced_edge_occupations_t == self.edge_occupations_t)
        print(np.all([params_change, wd_points_change, transmissions_change,
                      edge_occupations_c_change, edge_occupations_t_change]))
        return reduced_results

    def queue(self):
        """Build a Queue that re-lists every point of this Results object."""
        return Queue(self.params, self.wd_points)
class Queue:
    """Work list of (drive frequency, parameter set) pairs still awaiting
    simulation, always kept sorted by drive frequency."""
    def __init__(self, params = np.array([]), wd_points = np.array([])):
        # params: one Parameters instance per drive point, index-aligned
        # with wd_points.
        self.params = params
        self.wd_points = wd_points
        self.size = self.wd_points.size
        # Keep the queue ordered by drive frequency.
        sort_indices = np.argsort(self.wd_points)
        self.wd_points = self.wd_points[sort_indices]
        self.params = self.params[sort_indices]
    def curvature_generate(self, results, threshold = 0.05):
        """Refill the queue with new drive points wherever the transmission
        curve of `results` has large normalized curvature (adaptive grid
        refinement)."""
        curvature_info = CurvatureInfo(results, threshold)
        self.wd_points = curvature_info.new_points()
        # Hilbert-space sizes for the new points are interpolated from the
        # sizes already used in `results`.
        self.params = hilbert_interpolation(self.wd_points, results)
        self.size = self.wd_points.size
        sort_indices = np.argsort(self.wd_points)
        self.wd_points = self.wd_points[sort_indices]
        self.params = self.params[sort_indices]
    def hilbert_generate(self, results, threshold_c, threshold_t):
        """Inspect every result for truncation error (occupation of the
        highest retained c/t level above a size-weighted threshold).

        If any point is overloaded: requeue *all* points with the largest
        suggested Hilbert-space sizes and return an empty Results (the old
        data is discarded).  Otherwise: empty the queue and return `results`
        unchanged.
        """
        suggested_c_levels = []
        suggested_t_levels = []
        overload_occurred = False
        for index, params_instance in enumerate(results.params):
            # Weight thresholds by the truncation size so bigger spaces get
            # proportionally stricter limits.
            threshold_c_weighted = threshold_c / params_instance.c_levels
            threshold_t_weighted = threshold_t / params_instance.t_levels
            overload_c = (results.edge_occupations_c[index] > threshold_c_weighted)
            overload_t = (results.edge_occupations_t[index] > threshold_t_weighted)
            if overload_c:
                overload_occurred = True
                suggestion = size_correction(
                    results.edge_occupations_c[index], params_instance.c_levels, threshold_c_weighted / 2)
            else:
                suggestion = params_instance.c_levels
            suggested_c_levels.append(suggestion)
            if overload_t:
                overload_occurred = True
                suggestion = size_correction(
                    results.edge_occupations_t[index], params_instance.t_levels, threshold_t_weighted / 2)
            else:
                suggestion = params_instance.t_levels
            suggested_t_levels.append(suggestion)
        if overload_occurred:
            # Adopt the largest suggested sizes uniformly and redo the sweep.
            c_levels_new = np.max(suggested_c_levels)
            t_levels_new = np.max(suggested_t_levels)
            self.wd_points = results.wd_points
            for index, params_instance in enumerate(results.params):
                results.params[index].t_levels = t_levels_new
                results.params[index].c_levels = c_levels_new
            self.params = results.params
            self.size = results.size
            return Results()
        else:
            # All points fit comfortably: nothing left to recompute.
            self.wd_points = np.array([])
            self.params = np.array([])
            self.size = 0
            return results
    def hilbert_generate_alternate(self, results, threshold_c, threshold_t):
        """Variant of hilbert_generate: computes a size suggestion for every
        point (via size_suggestion) and requeues everything if any suggestion
        exceeds the size currently in use."""
        old_c_levels = np.zeros(results.size)
        suggested_c_levels = np.zeros(results.size)
        old_t_levels = np.zeros(results.size)
        suggested_t_levels = np.zeros(results.size)
        for index, params_instance in enumerate(results.params):
            suggested_c_levels[index] = \
                size_suggestion(results.edge_occupations_c[index], params_instance.c_levels, threshold_c)
            old_c_levels[index] = params_instance.c_levels
            suggested_t_levels[index] = \
                size_suggestion(results.edge_occupations_t[index], params_instance.t_levels, threshold_t)
            old_t_levels[index] = params_instance.t_levels
        if np.any(suggested_c_levels > old_c_levels) or np.any(suggested_t_levels > old_t_levels):
            # At least one point needs a bigger basis: requeue all points
            # with the (uniform) maximum suggested sizes.
            c_levels_new = np.max(suggested_c_levels)
            t_levels_new = np.max(suggested_t_levels)
            self.wd_points = results.wd_points
            for index, params_instance in enumerate(results.params):
                results.params[index].t_levels = t_levels_new
                results.params[index].c_levels = c_levels_new
            self.params = results.params
            self.size = results.size
            return Results()
        else:
            self.wd_points = np.array([])
            self.params = np.array([])
            self.size = 0
            return results
class CurvatureInfo:
    """Decides where an adaptive sweep should place new drive points, based
    on the curvature of |transmission| as a function of drive frequency."""

    def __init__(self, results, threshold=0.05):
        self.threshold = threshold  # refinement tolerance
        self.wd_points = results.wd_points
        self.new_wd_points_unique = None
        self.abs_transmissions = results.abs_transmissions
        self.n_points = self.abs_transmissions.size

    def new_points(self):
        """Return new drive points: each existing interval is subdivided in
        proportion to the local curvature of the (normalized) transmission."""
        # Second derivative of |t|(wd), evaluated at interior points.
        self.curvature_positions, self.curvatures = derivative(self.wd_points, self.abs_transmissions, 2)
        self.abs_curvatures = np.absolute(self.curvatures)
        self.mean_curvatures = moving_average(self.abs_curvatures, 2)
        # Pad with the edge curvatures so every interval midpoint has a value.
        self.midpoint_curvatures = \
            np.concatenate((np.array([self.abs_curvatures[0]]), self.mean_curvatures))
        self.midpoint_curvatures = \
            np.concatenate((self.midpoint_curvatures, np.array([self.abs_curvatures[self.n_points - 3]])))
        self.midpoint_transmissions = moving_average(self.abs_transmissions, 2)
        # Normalize the curvature by the local transmission magnitude.
        self.midpoint_curvatures_normed = self.midpoint_curvatures / self.midpoint_transmissions
        self.midpoints = moving_average(self.wd_points, 2)
        self.intervals = np.diff(self.wd_points)
        # BUG FIX: the original referenced a bare `threshold` here, which is
        # undefined in this scope (NameError); the constructor stores it as
        # self.threshold.
        self.num_of_sections_required = \
            np.ceil(self.intervals * np.sqrt(self.midpoint_curvatures_normed / self.threshold))
        new_wd_points = np.array([])
        for index in np.arange(self.n_points - 1):
            # np.linspace requires an integer sample count (passing the float
            # from np.ceil raises TypeError on modern NumPy).
            n_sections = int(self.num_of_sections_required[index])
            multi_section = \
                np.linspace(self.wd_points[index], self.wd_points[index + 1], n_sections + 1)
            new_wd_points = np.concatenate((new_wd_points, multi_section))
        # Discard points that are already part of the sweep.
        unique_set = set(new_wd_points) - set(self.wd_points)
        self.new_wd_points_unique = np.array(list(unique_set))
        return self.new_wd_points_unique
def size_suggestion(edge_occupation, size, threshold):
    """Suggest a Hilbert-space size so the top-level occupation stays below
    `threshold`, assuming an exponential occupation distribution whose decay
    rate beta is fitted to the observed edge occupation."""
    beta, = fsolve(zero_func, 1, args=(edge_occupation, size - 1, size))
    suggested = -np.log(threshold) / beta
    return int(np.ceil(suggested))
def size_correction(edge_occupation, size, threshold):
    """Like size_suggestion, but seeds the root finder with an analytic
    estimate of beta and accounts for the distribution's normalization when
    converting the threshold into a size."""
    # Analytic first guess for the decay rate of the exponential fit.
    beta_estimate = np.log(1 + 1 / edge_occupation) / size
    beta, = fsolve(zero_func, beta_estimate, args=(edge_occupation, size - 1, size))
    corrected = 1 + np.log((1 - np.exp(-beta)) / threshold) / beta
    return int(np.ceil(corrected))
def exponential_occupation(n, beta, size):
    """Occupation of level `n` for a normalized exponential distribution
    exp(-beta*n) truncated to `size` levels (a finite geometric series)."""
    q = np.exp(-beta)
    return q ** n * (1 - q) / (1 - q ** size)
def zero_func(beta, p, level, size):
    """Root function for fsolve: zero when the exponential-distribution
    occupation of `level` equals the observed occupation `p`."""
    return exponential_occupation(level, beta, size) - p
def hilbert_interpolation(new_wd_points, results):
    """Build a Parameters instance for each new drive point, with Hilbert
    space sizes linearly interpolated from the sizes already used at the
    existing points in `results`.

    Returns an object array of Parameters, index-aligned with
    `new_wd_points`.
    """
    c_levels_array = np.array([params.c_levels for params in results.params])
    t_levels_array = np.array([params.t_levels for params in results.params])
    wd_points = results.wd_points
    c_interp = interp1d(wd_points, c_levels_array)
    t_interp = interp1d(wd_points, t_levels_array)
    base_params = results.params[0]
    params_list = []
    for wd in new_wd_points:
        new_params = base_params.copy()
        # float() unwraps the 0-d array returned by interp1d; the original
        # called round() directly on it, which raises TypeError on Python 3.
        new_params.c_levels = int(round(float(c_interp(wd))))
        new_params.t_levels = int(round(float(t_interp(wd))))
        params_list.append(new_params)
    return np.array(params_list)
def moving_average(interval, window_size):
window = np.ones(int(window_size)) / float(window_size)
averages = np.convolve(interval, window, 'same')
return averages[window_size - 1 : averages.size]
def derivative(x, y, n_derivative=1):
    """Estimate the n-th derivative of y(x) by repeated finite differences.

    Returns (positions, derivatives): interval midpoints and the slope over
    each interval, recursing until the requested order is reached.
    """
    positions = 0.5 * (x[:-1] + x[1:])       # interval midpoints
    derivatives = np.diff(y) / np.diff(x)    # first-order finite differences
    if n_derivative > 1:
        positions, derivatives = derivative(positions, derivatives, n_derivative - 1)
    return positions, derivatives
def hamiltonian(params, wd):
    """Driven cavity-qubit Hamiltonian in the frame rotating at the drive
    frequency `wd`, built on the truncated c_levels x t_levels space."""
    a = tensor(destroy(params.c_levels), qeye(params.t_levels))   # cavity lowering op
    sm = tensor(qeye(params.c_levels), destroy(params.t_levels))  # qubit lowering op
    detuned_cavity = (params.wc - wd) * a.dag() * a
    detuned_qubit = (params.wq - wd) * sm.dag() * sm
    anharmonicity = params.chi * sm.dag() * sm * (sm.dag() * sm - 1)
    coupling = params.g * (a.dag() * sm + a * sm.dag())
    drive = params.eps * (a + a.dag())
    return detuned_cavity + detuned_qubit + anharmonicity + coupling + drive
def transmission_calc_array(queue):
    """Compute the steady state for every (wd, params) pair in `queue`, in
    parallel, and package the outcomes as a Results object.

    Returns a Results whose arrays are index-aligned with the queue.
    """
    # parallel_map delivers results in input order, so zip keeps alignment.
    args = [[wd, params] for wd, params in zip(queue.wd_points, queue.params)]
    steady_states = parallel_map(transmission_calc, args, num_cpus=10,
                                 progress_bar=TextProgressBar())
    transmissions = np.array([steady_state[0] for steady_state in steady_states])
    # Edge occupations are real populations; absolute() strips any residual
    # imaginary part from the complex-valued result arrays.
    edge_occupations_c = np.absolute(np.array([steady_state[1] for steady_state in steady_states]))
    edge_occupations_t = np.absolute(np.array([steady_state[2] for steady_state in steady_states]))
    # (The original also computed an unused local abs_transmissions here;
    # Results derives it itself.)
    return Results(queue.params, queue.wd_points, transmissions,
                   edge_occupations_c, edge_occupations_t)
def transmission_calc(args):
    """Solve for the steady state at one drive point.

    `args` is a [wd, params] pair (packed this way for parallel_map).
    Returns np.array([transmission, edge_occupation_c, edge_occupation_t]).
    """
    wd, params = args
    a = tensor(destroy(params.c_levels), qeye(params.t_levels))
    sm = tensor(qeye(params.c_levels), destroy(params.t_levels))
    # Collapse operators: cavity decay and qubit decay.
    c_ops = [np.sqrt(params.kappa) * a, np.sqrt(params.gamma) * sm]
    rho_ss = steadystate(hamiltonian(params, wd), c_ops)
    # Populations of the highest retained level of each subsystem, used to
    # monitor truncation error.
    c_occupations = rho_ss.ptrace(0).diag()
    t_occupations = rho_ss.ptrace(1).diag()
    edge_occupation_c = c_occupations[params.c_levels - 1]
    edge_occupation_t = t_occupations[params.t_levels - 1]
    # The cavity field expectation value serves as the transmission signal.
    transmission = expect(a, rho_ss)
    return np.array([transmission, edge_occupation_c, edge_occupation_t])
def sweep(eps, wd_lower, wd_upper, params, threshold):
    """Adaptively sweep the drive frequency from wd_lower to wd_upper at
    drive amplitude `eps`, refining the grid where the transmission curve is
    strongly curved.

    Returns a Results object sorted by drive frequency.
    """
    hilbert_adjustment = False  # set True to enable Hilbert-space resizing
    threshold_c = 0.001         # edge-occupation threshold, cavity
    threshold_t = 0.001         # edge-occupation threshold, qubit
    params.eps = eps
    # Start from a coarse uniform grid; curvature_generate refines it.
    wd_points = np.linspace(wd_lower, wd_upper, 10)
    params_array = np.array([params.copy() for wd in wd_points])
    queue = Queue(params_array, wd_points)
    curvature_iterations = 0
    results = Results()
    while (queue.size > 0) and (curvature_iterations < 3):
        # print() with one argument works under both Python 2 and 3; the
        # original used the Python 2-only statement form.
        print(curvature_iterations)
        curvature_iterations = curvature_iterations + 1
        new_results = transmission_calc_array(queue)
        results = results.concatenate(new_results)
        if hilbert_adjustment:
            results = queue.hilbert_generate(results, threshold_c, threshold_t)
        hilbert_iterations = 0
        # Re-run with enlarged Hilbert spaces until the truncation is clean
        # (only when hilbert_adjustment is enabled).
        while (queue.size > 0) and (hilbert_iterations < 3) and hilbert_adjustment:
            hilbert_iterations = hilbert_iterations + 1
            results = transmission_calc_array(queue)
            results = queue.hilbert_generate(results, threshold_c, threshold_t)
        queue.curvature_generate(results)
    # (The original built unused c_levels/t_levels lists here; removed.)
    return results
def multi_sweep(eps_array, wd_lower, wd_upper, params, threshold):
    """Run sweep() for every drive amplitude in `eps_array`.

    Returns a dict mapping eps -> Results.  The parameters found for one
    amplitude (including any Hilbert-space adjustment) seed the next sweep.
    """
    multi_results_dict = dict()
    for eps in eps_array:
        multi_results_dict[eps] = sweep(eps, wd_lower, wd_upper, params, threshold)
        # Reuse the adjusted parameters as the starting point for the next
        # amplitude.
        params = multi_results_dict[eps].params[0]
        # print() form is valid on Python 2 and 3; original used Py2 syntax.
        print(params.c_levels)
        print(params.t_levels)
    return multi_results_dict
if __name__ == '__main__':
    # Parameter order: wc, wq, eps, g, chi, kappa, gamma, t_levels, c_levels
    t_levels = 2
    c_levels = 10
    params = Parameters(10.4267, 9.39128, 0.0002, 0.3096, -0.097, 0.00146,
                        0.000833, t_levels, c_levels)
    threshold = 0.01
    wd_lower = 10.495
    wd_upper = 10.520
    eps = 0.008
    eps_array = np.array([eps])
    multi_results = multi_sweep(eps_array, wd_lower, wd_upper, params, threshold)
    # Optional persistence of the sweep results:
    #with open('data.yml', 'w') as outfile:
    #    yaml.dump(multi_results, outfile, default_flow_style=False)
    #multi_results = []
    #multi_results = yaml.load(open('data.yml'))
    results = multi_results[eps]
    # print() with one argument is valid on Python 2 and 3; the original
    # used the Python 2-only statement form.
    print(results.params[0].t_levels)
    print(results.params[0].c_levels)
    plt.scatter(results.wd_points, results.abs_transmissions)
    plt.title('txc: ' + str(t_levels) + 'x' + str(c_levels))
    plt.show()
| {
"content_hash": "cd77a0ed862708e2983f3ec2fb5f8b3f",
"timestamp": "",
"source": "github",
"line_count": 354,
"max_line_length": 131,
"avg_line_length": 45.40677966101695,
"alnum_prop": 0.6423416697772801,
"repo_name": "paulsbrookes/cqed_sims_qutip",
"id": "2672cca70f20fe0646e97f39f06b68a2c451d259",
"size": "16074",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spectroscopy/spec_multi_4.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "97801"
}
],
"symlink_target": ""
} |
"""Persistence Models"""
from __future__ import (absolute_import, print_function,
division)
from future.utils import text_to_native_str as n
# Base
from .base import Model, MetaModel
# Database Models
from .activation import Activation
from .dependency import Dependency
from .environment_attr import EnvironmentAttr
from .file_access import FileAccess, UniqueFileAccess
from .function_def import FunctionDef
from .graph_cache import GraphCache
from .head import Head
from .module import Module
from .object import Object
from .object_value import ObjectValue
from .variable import Variable
from .variable_dependency import VariableDependency
from .variable_usage import VariableUsage
from .tag import Tag
from .trial import Trial
# Other models
from .history import History
from .diff import Diff
from .trial_prolog import TrialProlog
from . import relationships
# Model registration order, grouped by the noWorkflow phase each table
# belongs to.
ORDER = [
    Trial, Head, Tag, GraphCache,  # Trial
    Module, Dependency, EnvironmentAttr,  # Deployment
    FunctionDef, Object,  # Definition
    Activation, ObjectValue, FileAccess,  # Execution
    Variable, VariableUsage, VariableDependency  # Slicing
]
# Public API: every database model name (converted to a native str via `n`,
# imported above) plus the non-database helper models.
__all__ = [
    n(x.__modelname__) for x in ORDER
] + [
    "History",
    "Diff",
    "TrialProlog",
    "MetaModel",
    "Model",
    "ORDER"
]
| {
"content_hash": "cdb88f5af1596af273dc1f040118f377",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 58,
"avg_line_length": 24.62264150943396,
"alnum_prop": 0.7348659003831418,
"repo_name": "gems-uff/noworkflow",
"id": "7a3d7981f21111eff7daed2aec8e0ad919dec25f",
"size": "1523",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "capture/noworkflow/now/persistence/models/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "176047"
},
{
"name": "HTML",
"bytes": "238"
},
{
"name": "JavaScript",
"bytes": "787748"
},
{
"name": "Jupyter Notebook",
"bytes": "5241520"
},
{
"name": "Prolog",
"bytes": "18527"
},
{
"name": "Python",
"bytes": "656680"
},
{
"name": "TypeScript",
"bytes": "122003"
}
],
"symlink_target": ""
} |
from setuptools import find_packages, setup
# Packaging metadata for cmsplugin_gallery_filer — a fork of
# cmsplugin-gallery that uses django-filer for image storage.
setup(
    name='cmsplugin_gallery_filer',
    version='0.5.2.1',
    author='GISA Elkartea',
    author_email='kontaktua@gisa-elkartea.org',
    url='http://lagunak.gisa-elkartea.org/projects/cmsplugin-gallery-filer',
    description = 'DjangoCMS image gallery plugin with drag&drop '
    'reordering in admin, support for thumbnails and '
    'jQueryTOOLS overlay. Fork to use django-filer',
    packages=find_packages(),
    # Advertises the original cmsplugin_gallery package name.
    provides=['cmsplugin_gallery', ],
    include_package_data=True,
    install_requires = ['django-inline-ordering>=0.1.1', 'easy-thumbnails',
                        'django-filer']
)
| {
"content_hash": "0563da79ec10a9474d5340b44c9b3643",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 76,
"avg_line_length": 40.23529411764706,
"alnum_prop": 0.652046783625731,
"repo_name": "shagi/cmsplugin_gallery_filer",
"id": "131c3e7fa556e90e7fde78a44db384cb3cb638a3",
"size": "707",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "73479"
},
{
"name": "Shell",
"bytes": "497"
}
],
"symlink_target": ""
} |
"""
Created on Mon Nov 03 10:50:30 2014
@author: User
"""
import lxml.etree as ET
dom = ET.parse("default.xsd")
xslt = ET.parse("XTSM_xsd_to_xsl_light.xsl")
transform = ET.XSLT(xslt)
newdom = transform(dom)
print(ET.tostring(newdom, pretty_print=True))
f = open('default.xsl','w')
f.write('<?xml version="1.0" encoding="utf-8"?>\n')
f.write(ET.tostring(newdom, pretty_print=True))
f.close() | {
"content_hash": "1c556e6271634409f9f50888a496e514",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 51,
"avg_line_length": 23.11764705882353,
"alnum_prop": 0.6870229007633588,
"repo_name": "gemelkelabs/timing_system_software",
"id": "7676ac712a3a38c6ca4ea749a101517e16d3b900",
"size": "417",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xtsm_web_gui/transforms/generate_xtsm_html_xsl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "35332"
},
{
"name": "JavaScript",
"bytes": "1193880"
},
{
"name": "PHP",
"bytes": "1087"
},
{
"name": "Python",
"bytes": "568121"
},
{
"name": "XSLT",
"bytes": "63687"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
description = """
Compiler plugin to use Django Pipeline package with Compass
"""
setup(
name='django-pipeline-compass-compiler',
version='0.1.2',
description=description,
long_description=open('README.md').read(),
author='Javi Velasco',
author_email='javier.velasco86@gmail.com',
url='https://github.com/javivelasco/django-pipeline-compass-compiler',
license='MIT License',
platforms=['OS Independent'],
packages=find_packages(),
zip_safe=False,
include_package_data=True,
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
'Topic :: Utilities',
]
)
| {
"content_hash": "da067ebe458ad7bd81cae259ab3e9618",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 74,
"avg_line_length": 30.275862068965516,
"alnum_prop": 0.6526195899772209,
"repo_name": "javivelasco/django-pipeline-compass-compiler",
"id": "980dedf7d28566de918398222b17f5b1b053d9b2",
"size": "902",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1684"
}
],
"symlink_target": ""
} |
'''
8 puzzle problem, a smaller version of the fifteen puzzle:
http://en.wikipedia.org/wiki/Fifteen_puzzle
States are defined as string representations of the pieces on the puzzle.
Actions denote what piece will be moved to the empty space.
States must allways be inmutable. We will use strings, but internally most of
the time we will convert those strings to lists, which are easier to handle.
For example, the state (string):
'1-2-3
4-5-6
7-8-e'
will become (in lists):
[['1', '2', '3'],
['4', '5', '6'],
['7', '8', 'e']]
'''
from simpleai.search import astar, SearchProblem
from simpleai.search.viewers import WebViewer
# Goal configuration of the board ('e' marks the empty space).
GOAL = '''1-2-3
4-5-6
7-8-e'''

# Starting configuration to be solved.
INITIAL = '''4-1-2
7-e-3
8-5-6'''
def list_to_string(list_):
    """Serialize a board given as a list of row lists back into its string
    form ('-' between pieces, newline between rows)."""
    rows = ('-'.join(row) for row in list_)
    return '\n'.join(rows)
def string_to_list(string_):
    """Parse the string form of a board into a list of row lists."""
    lines = string_.split('\n')
    return [line.split('-') for line in lines]
def find_location(rows, element_to_find):
    '''Find the location of a piece in the puzzle.
    Returns a tuple: row, column (or None if the piece is absent).'''
    for row_index, row in enumerate(rows):
        if element_to_find in row:
            return row_index, row.index(element_to_find)
# we create a cache for the goal position of each piece, so we don't have to
# recalculate them every time.  Maps piece label -> (row, column) in GOAL.
goal_positions = {}
rows_goal = string_to_list(GOAL)
for number in '12345678e':
    goal_positions[number] = find_location(rows_goal, number)
class EigthPuzzleProblem(SearchProblem):
    """simpleai search problem for the 8-puzzle.

    States are string representations of the board; an action is the label
    of the piece that slides into the empty space.  (The class name keeps
    the original 'Eigth' spelling.)
    """
    def actions(self, state):
        '''Returns a list of the pieces we can move to the empty space.'''
        rows = string_to_list(state)
        row_e, col_e = find_location(rows, 'e')

        actions = []
        # A neighbour can slide into the hole from above, below, left or
        # right, provided it exists on the 3x3 board.
        if row_e > 0:
            actions.append(rows[row_e - 1][col_e])
        if row_e < 2:
            actions.append(rows[row_e + 1][col_e])
        if col_e > 0:
            actions.append(rows[row_e][col_e - 1])
        if col_e < 2:
            actions.append(rows[row_e][col_e + 1])

        return actions

    def result(self, state, action):
        '''Return the resulting state after moving a piece to the empty space.
        (the "action" parameter contains the piece to move)
        '''
        rows = string_to_list(state)
        row_e, col_e = find_location(rows, 'e')
        row_n, col_n = find_location(rows, action)

        # Swap the moved piece with the empty slot.
        rows[row_e][col_e], rows[row_n][col_n] = rows[row_n][col_n], rows[row_e][col_e]

        return list_to_string(rows)

    def is_goal(self, state):
        '''Returns true if a state is the goal state.'''
        return state == GOAL

    def cost(self, state1, action, state2):
        '''Returns the cost of performing an action.  Not useful in this
        problem (every move costs 1), but required by the framework.
        '''
        return 1

    def heuristic(self, state):
        '''Returns an *estimation* of the distance from a state to the goal.
        We are using the manhattan distance, summed over all pieces.
        '''
        rows = string_to_list(state)

        distance = 0

        for number in '12345678e':
            row_n, col_n = find_location(rows, number)
            row_n_goal, col_n_goal = goal_positions[number]

            distance += abs(row_n - row_n_goal) + abs(col_n - col_n_goal)

        return distance
# Solve the puzzle with A* and print the move sequence.
result = astar(EigthPuzzleProblem(INITIAL))
# if you want to use the visual debugger, use this instead:
# result = astar(EigthPuzzleProblem(INITIAL), viewer=WebViewer())

for action, state in result.path():
    print('Move number', action)
    print(state)
| {
"content_hash": "f2028f5b6317b20c136744993ac91c21",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 87,
"avg_line_length": 27.792,
"alnum_prop": 0.6134139320667819,
"repo_name": "iamaziz/simpleai",
"id": "e8313d1c85199f35759b32fd3d61b5073889a608",
"size": "3474",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "samples/search/eight_puzzle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1724"
},
{
"name": "HTML",
"bytes": "3449"
},
{
"name": "JavaScript",
"bytes": "7241"
},
{
"name": "Python",
"bytes": "125335"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division # requires Python >= 2.6
# numpy and scipy imports
import numpy as np
from scipy.sparse import kron, identity, lil_matrix
from scipy.sparse.linalg import eigsh # Lanczos routine from ARPACK
# We will use python's "namedtuple" to represent the Block and EnlargedBlock
# objects
from collections import namedtuple
# A Block (and its single-site-enlarged counterpart) bundles: chain length
# in sites, current basis dimension, the named operators expressed in that
# basis, and the S^z sector label of each basis state.
Block = namedtuple("Block", ["length", "basis_size", "operator_dict", "basis_sector_array"])
EnlargedBlock = namedtuple("EnlargedBlock", ["length", "basis_size", "operator_dict", "basis_sector_array"])
def is_valid_block(block):
    """Sanity check: the sector array length and every operator's dimensions
    must agree with the block's stated basis size."""
    expected = block.basis_size
    if len(block.basis_sector_array) != expected:
        return False
    for op in block.operator_dict.values():
        if op.shape[0] != expected or op.shape[1] != expected:
            return False
    return True

# An EnlargedBlock must satisfy exactly the same invariants, so the same
# checker is reused under a second name.
is_valid_enlarged_block = is_valid_block
# Model-specific code for the Heisenberg XXZ chain

model_d = 2  # single-site basis size
single_site_sectors = np.array([0.5, -0.5])  # S^z sectors corresponding to the
                                             # single site basis elements

# Single-site spin-1/2 operators in the {up, down} basis.
Sz1 = np.array([[0.5, 0], [0, -0.5]], dtype='d')  # single-site S^z
Sp1 = np.array([[0, 1], [0, 0]], dtype='d')  # single-site S^+

H1 = np.array([[0, 0], [0, 0]], dtype='d')  # single-site portion of H is zero
def H2(Sz1, Sp1, Sz2, Sp2):  # two-site part of H
    """Given the operators S^z and S^+ on two sites in different Hilbert spaces
    (e.g. two blocks), returns a Kronecker product representing the
    corresponding two-site term in the Hamiltonian that joins the two sites.
    """
    J = Jz = 1.
    # S^- on each site is the conjugate transpose of S^+.
    Sm1 = Sp1.conjugate().transpose()
    Sm2 = Sp2.conjugate().transpose()
    flip_flop = kron(Sp1, Sm2) + kron(Sm1, Sp2)  # (S+ S- + S- S+) exchange
    ising = kron(Sz1, Sz2)                       # S^z S^z coupling
    return (J / 2) * flip_flop + Jz * ising
# conn refers to the connection operator, that is, the operator on the edge of
# the block, on the interior of the chain. We need to be able to represent S^z
# and S^+ on that site in the current basis in order to grow the chain.
# The starting point of the DMRG growth: a single site in its bare basis.
initial_block = Block(length=1, basis_size=model_d, operator_dict={
    "H": H1,
    "conn_Sz": Sz1,
    "conn_Sp": Sp1,
}, basis_sector_array=single_site_sectors)
def enlarge_block(block):
    """Grow `block` by one site and return the corresponding EnlargedBlock.

    The new basis is the Kronecker product of the block basis with the
    single-site basis (block factor varying slowest, matching `kron`'s
    convention), and the connection operators are re-expressed on the newly
    added site.
    """
    old_dim = block.basis_size
    ops = block.operator_dict

    # Hamiltonian = old block part + new site part + bond joining them.
    enlarged_H = (kron(ops["H"], identity(model_d))
                  + kron(identity(old_dim), H1)
                  + H2(ops["conn_Sz"], ops["conn_Sp"], Sz1, Sp1))

    # The connection site is now the freshly added one.
    enlarged_operator_dict = {
        "H": enlarged_H,
        "conn_Sz": kron(identity(old_dim), Sz1),
        "conn_Sp": kron(identity(old_dim), Sp1),
    }

    # Sector of each product state = block sector + site sector; the outer
    # sum flattened in C order matches the Kronecker product ordering above.
    enlarged_sectors = np.add.outer(block.basis_sector_array, single_site_sectors).flatten()

    return EnlargedBlock(length=(block.length + 1),
                         basis_size=(old_dim * model_d),
                         operator_dict=enlarged_operator_dict,
                         basis_sector_array=enlarged_sectors)
def rotate_and_truncate(operator, transformation_matrix):
    """Express `operator` in the (possibly truncated) basis whose states are
    the columns of `transformation_matrix`: O' = T-dagger O T."""
    t_dagger = transformation_matrix.conjugate().transpose()
    return t_dagger.dot(operator.dot(transformation_matrix))
def index_map(array):
    """Map each distinct value of `array` to the (ascending) list of indices
    at which it occurs.

    Example usage:

    >>> by_index = index_map([3, 5, 5, 7, 3])
    >>> by_index[3]
    [0, 4]
    >>> by_index[5]
    [1, 2]
    >>> by_index[7]
    [3]
    """
    occurrences = {}
    for position, value in enumerate(array):
        if value not in occurrences:
            occurrences[value] = []
        occurrences[value].append(position)
    return occurrences
def single_dmrg_step(sys, env, m, target_Sz):
    """Performs a single DMRG step using `sys` as the system and `env` as the
    environment, keeping a maximum of `m` states in the new basis.

    Only superblock states whose total S^z equals `target_Sz` are included.
    Returns (newblock, energy): the enlarged-and-truncated system Block and
    the superblock ground-state energy.

    NOTE(review): the parameter name `sys` shadows the stdlib module of the
    same name inside this function.
    """
    assert is_valid_block(sys)
    assert is_valid_block(env)

    # Enlarge each block by a single site.
    sys_enl = enlarge_block(sys)
    sys_enl_basis_by_sector = index_map(sys_enl.basis_sector_array)
    if sys is env:  # no need to recalculate a second time
        env_enl = sys_enl
        env_enl_basis_by_sector = sys_enl_basis_by_sector
    else:
        env_enl = enlarge_block(env)
        env_enl_basis_by_sector = index_map(env_enl.basis_sector_array)

    assert is_valid_enlarged_block(sys_enl)
    assert is_valid_enlarged_block(env_enl)

    # Construct the full superblock Hamiltonian.
    m_sys_enl = sys_enl.basis_size
    m_env_enl = env_enl.basis_size
    sys_enl_op = sys_enl.operator_dict
    env_enl_op = env_enl.operator_dict
    superblock_hamiltonian = kron(sys_enl_op["H"], identity(m_env_enl)) + kron(identity(m_sys_enl), env_enl_op["H"]) + \
                             H2(sys_enl_op["conn_Sz"], sys_enl_op["conn_Sp"], env_enl_op["conn_Sz"], env_enl_op["conn_Sp"])

    # Build up a "restricted" basis of states in the target sector and
    # reconstruct the superblock Hamiltonian in that sector.
    sector_indices = {}  # will contain indices of the new (restricted) basis
                         # for which the enlarged system is in a given sector
    restricted_basis_indices = []  # will contain indices of the old (full) basis, which we are mapping to
    for sys_enl_Sz, sys_enl_basis_states in sys_enl_basis_by_sector.items():
        sector_indices[sys_enl_Sz] = []
        # The environment must supply whatever S^z the system does not.
        env_enl_Sz = target_Sz - sys_enl_Sz
        if env_enl_Sz in env_enl_basis_by_sector:
            for i in sys_enl_basis_states:
                i_offset = m_env_enl * i  # considers the tensor product structure of the superblock basis
                for j in env_enl_basis_by_sector[env_enl_Sz]:
                    current_index = len(restricted_basis_indices)  # about-to-be-added index of restricted_basis_indices
                    sector_indices[sys_enl_Sz].append(current_index)
                    restricted_basis_indices.append(i_offset + j)

    restricted_superblock_hamiltonian = superblock_hamiltonian[:, restricted_basis_indices][restricted_basis_indices, :]

    # Call ARPACK to find the superblock ground state.  ("SA" means smallest
    # algebraic eigenvalue, i.e. the lowest energy.)
    (energy,), restricted_psi0 = eigsh(restricted_superblock_hamiltonian, k=1, which="SA")

    # Construct each block of the reduced density matrix of the system by
    # tracing out the environment; the conserved quantum number makes rho
    # block-diagonal in the system sector.
    rho_block_dict = {}
    for sys_enl_Sz, indices in sector_indices.items():
        if indices:  # if indices is nonempty
            psi0_sector = restricted_psi0[indices, :]
            # We want to make the (sys, env) indices correspond to (row,
            # column) of a matrix, respectively. Since the environment
            # (column) index updates most quickly in our Kronecker product
            # structure, psi0_sector is thus row-major ("C style").
            psi0_sector = psi0_sector.reshape([len(sys_enl_basis_by_sector[sys_enl_Sz]), -1], order="C")
            rho_block_dict[sys_enl_Sz] = np.dot(psi0_sector, psi0_sector.conjugate().transpose())

    # Diagonalize each block of the reduced density matrix and sort the
    # eigenvectors by eigenvalue.
    possible_eigenstates = []
    for Sz_sector, rho_block in rho_block_dict.items():
        evals, evecs = np.linalg.eigh(rho_block)
        current_sector_basis = sys_enl_basis_by_sector[Sz_sector]
        # NOTE(review): the loop variable `eval` shadows the builtin.
        for eval, evec in zip(evals, evecs.transpose()):
            possible_eigenstates.append((eval, evec, Sz_sector, current_sector_basis))
    possible_eigenstates.sort(reverse=True, key=lambda x: x[0])  # largest eigenvalue first

    # Build the transformation matrix from the `m` overall most significant
    # eigenvectors. It will have sparse structure due to the conserved quantum
    # number.
    my_m = min(len(possible_eigenstates), m)
    transformation_matrix = lil_matrix((sys_enl.basis_size, my_m), dtype='d')
    new_sector_array = np.zeros((my_m,), dtype='d')  # lists the sector of each
                                                     # element of the new/truncated basis
    for i, (eval, evec, Sz_sector, current_sector_basis) in enumerate(possible_eigenstates[:my_m]):
        # Scatter the sector-local eigenvector back into the full basis.
        for j, v in zip(current_sector_basis, evec):
            transformation_matrix[j, i] = v
        new_sector_array[i] = Sz_sector
    # Convert the transformation matrix to a more efficient internal
    # representation. `lil_matrix` is good for constructing a sparse matrix
    # efficiently, but `csr_matrix` is better for performing quick
    # multiplications.
    transformation_matrix = transformation_matrix.tocsr()

    # Discarded density-matrix weight (all eigenvalues sum to 1).
    truncation_error = 1 - sum([x[0] for x in possible_eigenstates[:my_m]])
    print("truncation error:", truncation_error)

    # Rotate and truncate each operator.
    new_operator_dict = {}
    for name, op in sys_enl.operator_dict.items():
        new_operator_dict[name] = rotate_and_truncate(op, transformation_matrix)

    newblock = Block(length=sys_enl.length,
                     basis_size=my_m,
                     operator_dict=new_operator_dict,
                     basis_sector_array=new_sector_array)

    return newblock, energy
def graphic(sys_block, env_block, sys_label="l"):
    """Return an ASCII sketch of the DMRG step about to be performed.

    '=' marks system sites, '-' marks environment sites, and '**' the two
    free sites between them.  When the system lives on the right-hand side
    the whole picture is mirrored.
    """
    assert sys_label in ("l", "r")
    pieces = ["=" * sys_block.length, "**", "-" * env_block.length]
    picture = "".join(pieces)
    if sys_label == "r":
        picture = picture[::-1]
    return picture
def infinite_system_algorithm(L, m, target_Sz):
    """Grow the chain toward length `L` with the infinite-system algorithm.

    Keeps at most `m` states per DMRG step and scales `target_Sz` in
    proportion to the current superblock size.  Prints the energy per site
    after every step.
    """
    block = initial_block
    # Each iteration enlarges both the system and its mirror-image
    # environment, so the superblock grows by two sites at a time.
    while 2 * block.length < L:
        superblock_length = 2 * block.length + 2
        scaled_Sz = int(target_Sz) * superblock_length // L
        print("L =", superblock_length)
        block, energy = single_dmrg_step(block, block, m=m, target_Sz=scaled_Sz)
        print("E/L =", energy / superblock_length)
def finite_system_algorithm(L, m_warmup, m_sweep_list, target_Sz):
    """Run the finite-system DMRG algorithm on a chain of `L` sites.

    The lattice is first grown to full size with the infinite-system
    algorithm (keeping `m_warmup` states), then swept left/right once per
    entry in `m_sweep_list`, keeping that many states during each sweep.
    Prints the energy per site after every step.
    """
    assert L % 2 == 0 # require that L is an even number
    # To keep things simple, this dictionary is not actually saved to disk, but
    # we use it to represent persistent storage.
    block_disk = {} # "disk" storage for Block objects
    # Use the infinite system algorithm to build up to desired size. Each time
    # we construct a block, we save it for future reference as both a left
    # ("l") and right ("r") block, as the infinite system algorithm assumes the
    # environment is a mirror image of the system.
    block = initial_block
    block_disk["l", block.length] = block
    block_disk["r", block.length] = block
    while 2 * block.length < L:
        # Perform a single DMRG step and save the new Block to "disk"
        print(graphic(block, block))
        current_L = 2 * block.length + 2 # current superblock length
        # Scale the target spin sector with the current superblock size.
        current_target_Sz = int(target_Sz) * current_L // L
        block, energy = single_dmrg_step(block, block, m=m_warmup, target_Sz=current_target_Sz)
        print("E/L =", energy / current_L)
        block_disk["l", block.length] = block
        block_disk["r", block.length] = block
    # Now that the system is built up to its full size, we perform sweeps using
    # the finite system algorithm. At first the left block will act as the
    # system, growing at the expense of the right block (the environment), but
    # once we come to the end of the chain these roles will be reversed.
    sys_label, env_label = "l", "r"
    sys_block = block; del block # rename the variable
    for m in m_sweep_list:
        while True:
            # Load the appropriate environment block from "disk".  The two
            # free sites account for the "- 2" in the length arithmetic.
            env_block = block_disk[env_label, L - sys_block.length - 2]
            if env_block.length == 1:
                # We've come to the end of the chain, so we reverse course.
                sys_block, env_block = env_block, sys_block
                sys_label, env_label = env_label, sys_label
            # Perform a single DMRG step.
            print(graphic(sys_block, env_block, sys_label))
            sys_block, energy = single_dmrg_step(sys_block, env_block, m=m, target_Sz=target_Sz)
            print("E/L =", energy / L)
            # Save the block from this step to disk.
            block_disk[sys_label, sys_block.length] = sys_block
            # Check whether we just completed a full sweep.
            if sys_label == "l" and 2 * sys_block.length == L:
                break # escape from the "while True" loop
if __name__ == "__main__":
    # Wide print options keep the dense matrix/eigenvalue dumps readable.
    np.set_printoptions(precision=10, suppress=True, threshold=10000, linewidth=300)
    #infinite_system_algorithm(L=100, m=20, target_Sz=0)
    finite_system_algorithm(L=20, m_warmup=10, m_sweep_list=[10, 20, 30, 40, 40], target_Sz=0)
| {
"content_hash": "83625ee7c644f5fd7a147c8d36d642aa",
"timestamp": "",
"source": "github",
"line_count": 296,
"max_line_length": 123,
"avg_line_length": 46.83445945945946,
"alnum_prop": 0.6459640770396018,
"repo_name": "simple-dmrg/simple-dmrg",
"id": "c25879f3017eab5a5cdf44755df3681e7ff74306",
"size": "14338",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simple_dmrg_03_conserved_quantum_numbers.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Julia",
"bytes": "16478"
},
{
"name": "Python",
"bytes": "49834"
},
{
"name": "TeX",
"bytes": "266"
}
],
"symlink_target": ""
} |
from flask import request,render_template, redirect,session
from utils import getone,check,_update,_delete,insert_sql,list
from . import app
from sessions import sessionmsg
import json
# Column names fetched when listing/loading cabinet rows.
field=['id','name','idc_id','u_num','power']
# Column names fetched when listing IDC (data centre) rows.
idc_fields = ['id','name']
# Cabinet list
@app.route('/cabinet',methods=['GET', 'POST'])
def cabinet():
    """Render the cabinet list page, resolving each cabinet's idc_id to the
    owning IDC's display name.  Redirects to the login page when no user is
    in the session."""
    if 'username' not in session:
        return redirect('/login/')
    msg = sessionmsg()
    idcs = list('idc',idc_fields)['msg']
    cabinets = list('cabinet',field)['msg']
    # Build a one-shot id -> name lookup instead of rescanning the IDC list
    # for every cabinet (the previous nested loop was O(n*m) and had no
    # break after a match).  Unmatched ids are left as-is, like before.
    idc_names = {item['id']: item['name'] for item in idcs}
    for cab in cabinets:
        cab['idc_id'] = idc_names.get(cab['idc_id'], cab['idc_id'])
    return render_template('cabinet.html',msg=msg,cabinet=cabinets)
# Add a cabinet
@app.route('/cabinetadd',methods=['GET', 'POST'])
def cabinetadd():
    """GET: return the IDC list as JSON (for the add-form dropdown).
    POST: insert a new cabinet row and report the outcome as JSON."""
    if 'username' not in session:
        return redirect('/login/')
    msg = sessionmsg()
    if request.method=='GET':
        fields = ['id','name']
        result = list('idc',fields)
        return json.dumps(result)
    if request.method=='POST':
        # request.form values are lists; keep only the first element of each.
        # (Debug `print` of the raw form data removed: it leaked submitted
        # values to stdout and is Python-2-only syntax.)
        cabinet = {k:v[0] for k,v in dict(request.form).items()}
        field = ['name','idc_id','u_num','power']
        result = insert_sql('cabinet',field,cabinet)
        if result['code'] == 0:
            result ={'code':0, 'msg':"success"}
        return json.dumps(result)
# Update cabinet details
@app.route('/cabinetupdate',methods=['GET', 'POST'])
def cabinetupdate():
    """GET: return the selected cabinet plus the IDC list as JSON (to
    pre-fill the edit form).  POST: apply the update and report the outcome
    as JSON."""
    if 'username' not in session:
        return redirect('/login/')
    msg = sessionmsg()
    if request.method=='GET':
        id = request.args.get('id')
        data={'id':id}
        cabinet = getone('cabinet',data,field)
        idc = list('idc',idc_fields)
        result = {'code':0,'idc':idc['msg'],'cabinet':cabinet['msg']}
        return json.dumps(result)
    if request.method=='POST':
        # request.form values are lists; keep only the first element of each.
        # (Debug `print` of the raw form data removed: it leaked submitted
        # values to stdout and is Python-2-only syntax.)
        cabinet = {k:v[0] for k,v in dict(request.form).items()}
        result = _update('cabinet',field,cabinet)
        if result['code'] == 0:
            result ={'code':0, 'msg':"Update success"}
        return json.dumps(result)
# Delete a cabinet
@app.route('/cabinetdelete',methods=['GET', 'POST'])
def cabinetdelete():
    """POST: delete the cabinet identified by the submitted form data and
    report the outcome as JSON."""
    if 'username' not in session:
        return redirect('/login/')
    msg = sessionmsg()
    if request.method=='POST':
        cabinet = {k:v[0] for k,v in dict(request.form).items()}
        if _delete('cabinet',cabinet):
            result ={'code':0, 'msg':"delete success"}
        else:
            # Previously `result` was unbound on this path, raising
            # NameError at the json.dumps call below.
            result ={'code':1, 'msg':"delete failed"}
        return json.dumps(result)
    # Non-POST requests previously fell through and returned None,
    # which Flask turns into a 500 error.
    return json.dumps({'code':1, 'msg':"method not allowed"})
| {
"content_hash": "4a1162ec8087cb5a903a98c9af07778c",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 70,
"avg_line_length": 34.148648648648646,
"alnum_prop": 0.5797388207360507,
"repo_name": "1032231418/python",
"id": "4894936213471d613aa7ae64c0f4f5b2b2173c7c",
"size": "2608",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "day11/app/cabinet.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3604672"
},
{
"name": "HTML",
"bytes": "15255764"
},
{
"name": "JavaScript",
"bytes": "3123125"
},
{
"name": "Makefile",
"bytes": "9572"
},
{
"name": "PHP",
"bytes": "2830"
},
{
"name": "Python",
"bytes": "213304"
},
{
"name": "Ruby",
"bytes": "20492"
}
],
"symlink_target": ""
} |
"I want to format{} as well as" <ref>"{kwd}".format("positional args", kwd="keyword") | {
"content_hash": "c6ab2b44e5ad3e03107bf95b396c3ffe",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 85,
"avg_line_length": 85,
"alnum_prop": 0.6705882352941176,
"repo_name": "MER-GROUP/intellij-community",
"id": "9f31881539948d92df06f6c1b24e3a391c7b332c",
"size": "85",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python/testData/resolve/FormatArgsAndKWargs1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AMPL",
"bytes": "20665"
},
{
"name": "AspectJ",
"bytes": "182"
},
{
"name": "Batchfile",
"bytes": "63554"
},
{
"name": "C",
"bytes": "214994"
},
{
"name": "C#",
"bytes": "1538"
},
{
"name": "C++",
"bytes": "190765"
},
{
"name": "CSS",
"bytes": "164277"
},
{
"name": "CoffeeScript",
"bytes": "1759"
},
{
"name": "Cucumber",
"bytes": "14382"
},
{
"name": "Erlang",
"bytes": "10"
},
{
"name": "FLUX",
"bytes": "57"
},
{
"name": "Groff",
"bytes": "35232"
},
{
"name": "Groovy",
"bytes": "2368473"
},
{
"name": "HTML",
"bytes": "1756000"
},
{
"name": "J",
"bytes": "5050"
},
{
"name": "Java",
"bytes": "153635614"
},
{
"name": "JavaScript",
"bytes": "141020"
},
{
"name": "Jupyter Notebook",
"bytes": "92629"
},
{
"name": "Kotlin",
"bytes": "1298939"
},
{
"name": "Lex",
"bytes": "166321"
},
{
"name": "Makefile",
"bytes": "2352"
},
{
"name": "NSIS",
"bytes": "88100"
},
{
"name": "Objective-C",
"bytes": "28878"
},
{
"name": "Perl6",
"bytes": "26"
},
{
"name": "Protocol Buffer",
"bytes": "6570"
},
{
"name": "Python",
"bytes": "23161125"
},
{
"name": "Ruby",
"bytes": "1213"
},
{
"name": "Scala",
"bytes": "11698"
},
{
"name": "Shell",
"bytes": "64460"
},
{
"name": "Smalltalk",
"bytes": "64"
},
{
"name": "TeX",
"bytes": "62325"
},
{
"name": "TypeScript",
"bytes": "6152"
},
{
"name": "XSLT",
"bytes": "113040"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division
import collections
import logging
import math
import string
import time
import pgdata
import sector
import system_internal as system
import util
import vector3
app_name = "pgnames"
log = logging.getLogger(app_name)
# #
# Publicly-useful functions
# #
"""
Check whether the given name is a valid PG system name, either in a PG or HA sector.
Args:
name: A system name
strict: If True, will also check the sector name is a valid sector.
Returns:
True if the name is valid, False if not
"""
def is_pg_system_name(name, strict = True):
  # Reject anything that doesn't even look like a PG system name.
  match = pgdata.pg_system_regex.match(name.strip())
  if match is None:
    return False
  if not strict:
    return True
  # Strict mode: the sector component must also resolve to a known sector.
  return get_sector(match.group("sector")) is not None
"""
Get the name of a sector that a position falls within.
Args:
pos: A position
format_output: Whether or not to format the output or return it as fragments
Returns:
The name of the sector which contains the input position, either as a string or as a list of fragments
"""
def get_sector_name(pos, allow_ha=True, format_output=True):
  # Normalise the input to a position object; bail out if that fails.
  pos = util.get_as_position(pos)
  if pos is None:
    return None
  # Hand-authored sectors take priority over procedurally-generated names.
  if allow_ha:
    ha_name = _ha_get_name(pos)
    if ha_name is not None:
      return ha_name
  # The C1/C2 naming scheme is selected from the sector's linear offset.
  offset = _c1_get_offset(pos)
  if _get_c1_or_c2(offset) == 1:
    output = _c1_get_name(pos)
  else:
    output = _c2_get_name(pos)
  if format_output:
    return format_sector_name(output)
  else:
    # Caller asked for the raw fragment list instead of a string.
    return output
"""
Get a Sector object represented by a name, or which a position falls within.
Args:
input: A sector name, or a position
allow_ha: Whether to include hand-authored sectors in the search
get_name: Whether to look up the name of the sector
Returns:
A Sector object, or None if the input could not be looked up
"""
def get_sector(input, allow_ha = True, get_name = True):
  # Positions are resolved geometrically; anything else falls through to a
  # name-based lookup at the bottom.
  pos_input = util.get_as_position(input)
  if pos_input is not None:
    input = pos_input
    if allow_ha:
      ha_name = _ha_get_name(input)
      if ha_name is not None:
        return pgdata.ha_sectors[ha_name.lower()]
    # If we're not checking HA or it's not in such a sector, do PG:
    # integer sector index relative to the galaxy's base coordinates.
    x = (input.x - sector.base_coords.x) // sector.sector_size
    y = (input.y - sector.base_coords.y) // sector.sector_size
    z = (input.z - sector.base_coords.z) // sector.sector_size
    # Get the name, if we are
    frags = None
    if get_name:
      frags = get_sector_name(input, allow_ha=allow_ha, format_output=False)
    return sector.PGSector(int(x), int(y), int(z), format_sector_name(frags), _get_sector_class(frags))
  else:
    # Assume we have a string, call down to get it by name
    return _get_sector_from_name(input, allow_ha=allow_ha)
"""
Get a system object based on its name or position
Args:
input: The system's name or position
mcode: The system's mass code ('a'-'h') or cube side length; only required when input is a position
Returns:
A system or system prototype object
"""
def get_system(input, mcode = None, allow_ha = True):
  # A bare position needs a mass code to pin down the boxel; a name
  # carries its own mass code.
  pos = util.get_as_position(input)
  if pos is None:
    return _get_system_from_name(input, allow_ha)
  if mcode is None:
    raise ValueError("mcode argument must be provided to get_system if input is a position")
  return _get_system_from_pos(pos, mcode, allow_ha)
"""
Get the correctly-cased name for a given sector or system name
Args:
name: A system or sector name, in any case
Returns:
The input system/sector name with its case corrected
"""
def get_canonical_name(name, sector_only = False):
  # Removed unused local `sysid = None` (never read anywhere in the body).
  sectname = None
  # See if we have a full system name
  m = pgdata.pg_system_regex.match(name)
  if m is not None:
    sectname_raw = m.group("sector")
  else:
    sectname_raw = name
  # Check if this sector name appears in ha_sectors, pass it through the fragment process if not
  if sectname_raw.lower() in pgdata.ha_sectors:
    sectname = pgdata.ha_sectors[sectname_raw.lower()].name
  else:
    # get_sector_fragments converts to Title Case, so we don't need to
    frags = get_sector_fragments(sectname_raw)
    if frags is not None:
      sectname = format_sector_name(frags)
  if sector_only:
    return sectname
  # Work out what we should be returning, and do it
  if m is not None and sectname is not None:
    # Rebuild the full system name around the canonicalised sector name.
    return format_system_name({
      'SectorName': sectname,
      'L1': m.group('l1'), 'L2': m.group('l2'), 'L3': m.group('l3'),
      'MCode': m.group('mcode'),
      'N1': m.group('n1'), 'N2': m.group('n2')})
  else:
    # This may be none if get_sector_fragments/format_sector_name failed
    return sectname
"""
Get a list of fragments from an input sector name
e.g. "Dryau Aowsy" --> ["Dry","au","Ao","wsy"]
Args:
sector_name: The name of the sector
allow_long: Whether to allow sector names longer than the usual maximum fragment count (4)
Returns:
A list of fragments representing the sector name
"""
def get_sector_fragments(sector_name, allow_long = False):
  # Convert the string to Title Case, then remove spaces
  sector_name = sector_name.title().replace(' ', '')
  segments = []
  current_str = sector_name
  # Repeatedly peel a known fragment off the front of the remaining string,
  # trying candidates in pgdata.cx_fragments order.
  while len(current_str) > 0:
    found = False
    for frag in pgdata.cx_fragments:
      if current_str[0:len(frag)] == frag:
        segments.append(frag)
        current_str = current_str[len(frag):]
        found = True
        break
    if not found:
      # No fragment matches the remainder: the name isn't a valid PG name.
      break
  # Success only if the whole string was consumed and the fragment count is
  # within the normal limit (unless the caller allows long names).
  if len(current_str) == 0 and (allow_long or len(segments) <= _expected_fragment_limit):
    return segments
  else:
    return None
"""
Checks whether or not the provided sector name is a valid PG name
Mild weakness: due to the way get_sector_fragments works, this currently ignores all spaces
This means that names like "Synoo kio" are considered valid
Args:
input: A candidate sector name
Returns:
True if the sector name is valid, False if not
"""
def is_valid_sector_name(input):
  # Accept either a raw string (fragmented first) or a fragment list.
  frags = get_sector_fragments(input) if util.is_str(input) else input
  if frags is None or len(frags) == 0 or frags[0] not in pgdata.cx_prefixes:
    return False
  if len(frags) == 4 and frags[2] in pgdata.cx_prefixes:
    # Class 2: prefix+suffix, prefix+suffix.  Each suffix must belong to the
    # suffix list selected by its prefix (with per-prefix overrides).
    f1idx = pgdata.c2_prefix_suffix_override_map.get(frags[0], 1)
    f3idx = pgdata.c2_prefix_suffix_override_map.get(frags[2], 1)
    return (frags[1] in pgdata.c2_suffixes[f1idx] and frags[3] in pgdata.c2_suffixes[f3idx])
  elif len(frags) in [3,4]:
    # Class 1: prefix + one or two infixes + suffix.  Consecutive infixes
    # must alternate between the two infix sequences.
    fli_idx = pgdata.c1_prefix_infix_override_map.get(frags[0], 1)
    if frags[1] not in pgdata.c1_infixes[fli_idx]:
      return False
    if len(frags) == 4:
      fli_idx = 2 if fli_idx == 1 else 1
      if frags[2] not in pgdata.c1_infixes[fli_idx]:
        return False
    # The suffix sequence is the opposite of the last infix's sequence.
    flastidx = 2 if fli_idx == 1 else 1
    return (frags[-1] in pgdata.c1_suffixes[flastidx])
  else:
    # Class NOPE
    return False
"""
Format a given set of fragments into a full name
Args:
input: A list of sector name fragments
Returns:
The sector name as a string
"""
def format_sector_name(input):
  # Accept either a raw string (re-fragmented first) or a fragment list.
  frags = get_sector_fragments(input) if util.is_str(input) else input
  if frags is None:
    return None
  # Class 2 names render as two words ("PrefixSuffix PrefixSuffix");
  # everything else is a single concatenated word.
  is_class2 = len(frags) == 4 and frags[2] in pgdata.cx_prefixes
  if is_class2:
    return "{0}{1} {2}{3}".format(*frags)
  return "".join(frags)
"""
Get the origin of the boxel (cube) that the given coordinates sit within
Args:
position: A vector or tuple of X/Y/Z coordinates, or a System object
mcode: The system's mass code ('a'-'h') or cube side length
Returns:
A Vector3 representing the origin of the boxel containing this position
"""
def get_boxel_origin(position, mcode):
  # Snap each coordinate down to the nearest multiple of the cube width,
  # measured from the galaxy's internal origin offset.
  pos = util.get_as_position(position)
  cube_width = sector.get_mcode_cube_width(mcode)
  if pos is None or cube_width is None:
    return None
  snapped = [c - ((c - o) % cube_width)
             for (c, o) in ((pos.x, sector.internal_origin_offset.x),
                            (pos.y, sector.internal_origin_offset.y),
                            (pos.z, sector.internal_origin_offset.z))]
  return vector3.Vector3(snapped[0], snapped[1], snapped[2])
"""
Parse the given PG system name and return the canonical versions of its individual components
Args:
input: A string containing a system name of the form "Sector AB-C d1-23" or "Sector AB-C d1"
ensure_canonical: Whether to ensure that the name is in its canonical form before processing
Returns:
A dictionary containing keys of SectorName, L1, L2, L3, MCode, N1 and N2
"""
def get_system_fragments(input, ensure_canonical = True):
  # Optionally canonicalise first so the regex sees correctly-cased input.
  if ensure_canonical:
    input = get_canonical_name(input)
    if input is None:
      return None
  m = pgdata.pg_system_regex.match(input)
  if m is None:
    return None
  # N1 is optional in the short form ("Sector AB-C d1"); it defaults to 0.
  return {
    'SectorName': m.group('sector'), 'L1': m.group('l1'), 'L2': m.group('l2'), 'L3': m.group('l3'),
    'MCode': m.group('mcode'), 'N1': int(m.group('n1')) if m.group('n1') is not None else 0, 'N2': int(m.group('n2'))
  }
"""
Format the given system data into a full name
Args:
input: A dictionary containing keys of SectorName, L1, L2, L3, MCode, N1 and N2
Returns:
A string containing a system name of the form "Sector AB-C d1-23" or "Sector AB-C d1"
"""
def format_system_name(input):
  # Reassemble "Sector AB-C d1-23" (or "Sector AB-C d23" when N1 is 0/None)
  # from its component parts.
  if input is None:
    return None
  required = ('SectorName','L1','L2','L3','MCode','N1','N2')
  if not isinstance(input, dict) or not set(required).issubset(input):
    raise ValueError("input dict to format_system_name must include keys SectorName, L1, L2, L3, MCode, N1, N2")
  letters = "{}{}-{}".format(input['L1'].upper(), input['L2'].upper(), input['L3'].upper())
  mcode = input['MCode'].lower()
  if input['N1'] is not None and int(input['N1']) != 0:
    sysid = "{} {}{}-{}".format(letters, mcode, input['N1'], input['N2'])
  else:
    sysid = "{} {}{}".format(letters, mcode, input['N2'])
  return "{} {}".format(input['SectorName'], sysid)
"""
Get hand-authored sectors, optionally in distance order around a reference point
Args:
reference: Optional, position or System/Sector-like object. If provided, returned sectors will be ordered by distance from this point
max_distance: Optional, may only be provided with reference. A maximum distance from the reference point, in LY, to limit returned sectors to.
Returns:
An OrderedDict object where keys are the names of the sectors, and values are the sector objects themselves.
"""
def get_ha_sectors(reference = None, max_distance = None):
  if reference is not None:
    # With a reference point: filter by distance (if requested) and sort
    # nearest-first by distance from sector centre.
    pos_reference = util.get_as_position(reference)
    if pos_reference is None:
      raise ValueError("if provided, reference must be a position, or a System/Sector-like object")
    result = [(s.name, s) for s in pgdata.ha_sectors.values() if (max_distance is None or (pos_reference - s.centre).length < max_distance)]
    result.sort(key=lambda s: (pos_reference - s[1].centre).length)
    return collections.OrderedDict(result)
  else:
    # max_distance is meaningless without a point to measure from.
    if max_distance is not None:
      raise ValueError("cannot provide max_distance without a reference position")
    return collections.OrderedDict([(s.name, s) for s in pgdata.ha_sectors.values()])
# #
# Internal variables
# #
# Base-26 place values for the three letters of a boxel identifier (A-Z).
_srp_divisor1 = len(string.ascii_uppercase)
_srp_divisor2 = _srp_divisor1**2
_srp_divisor3 = _srp_divisor1**3
# Boxels are linearised 128 per row and 128*128 per layer
# (see _get_relpos_from_soffset / _get_soffset_from_relpos).
_srp_rowlength = 128
_srp_sidelength = _srp_rowlength**2
# PG sector names are expected to be at most four fragments long.
_expected_fragment_limit = 4
# #
# Internal functions: shared/HA
# #
# Get a system's relative position within a sector
# Original version by CMDR Jackie Silver
# Note that in the form "Sector AB-C d3", the "3" is number2, NOT number1 (which is 0)
def _get_relpos_from_sysid(prefix, centre, suffix, mcode, number1, number2):
  # Decode the letters/number1 into a linear boxel offset, then into coords.
  soffset = _get_soffset_from_sysid(prefix, centre, suffix, number1)
  pos, uncertainty = _get_relpos_from_soffset(soffset, mcode)
  # Sanity check: the result should fall inside the sector cube, allowing
  # the boxel half-width uncertainty at each edge.
  if any(v < 0 or v > (sector.sector_size + uncertainty*2) for v in (pos.x, pos.y, pos.z)):
    log.warning("Identifier '{}{}-{} {}{}-{}' generated out-of-range coords {}; bad input?".format(prefix, centre, suffix, mcode, number1, number2, pos))
  return (pos, uncertainty)
def _get_soffset_from_sysid(prefix, centre, suffix, number1):
  # Decode "PC-S n1" into a linear boxel offset: the three letters are
  # base-26 low digits (prefix fastest-varying) and number1 supplies the
  # high digits.
  n1 = 0 if number1 is None else int(number1)
  weighted_letters = (
    (_srp_divisor2, suffix),
    (_srp_divisor1, centre),
    (1, prefix),
  )
  offset = _srp_divisor3 * n1
  for (weight, letter) in weighted_letters:
    offset += weight * (ord(letter.upper()) - ord('A'))
  return offset
def _get_relpos_from_soffset(position, mcode):
  # Decompose the linear boxel offset into (row, stack, column) = (z, y, x).
  row = int(position // _srp_sidelength)
  position -= (row * _srp_sidelength)
  stack = int(position // _srp_rowlength)
  position -= (stack * _srp_rowlength)
  column = position
  cubeside = sector.get_mcode_cube_width(mcode)
  halfwidth = cubeside / 2
  # Return the centre of the boxel; the actual star can lie anywhere within
  # the cube, hence the half-width uncertainty returned alongside.
  approx_x = (column * cubeside) + halfwidth
  approx_y = (stack * cubeside) + halfwidth
  approx_z = (row * cubeside) + halfwidth
  return (vector3.Vector3(approx_x,approx_y,approx_z), halfwidth)
def _get_sysid_from_relpos(pos, mcode, format_output=False):
  # Sector-relative coordinates -> linear boxel offset -> identifier.
  return _get_sysid_from_soffset(_get_soffset_from_relpos(pos, mcode), mcode, format_output)
def _get_soffset_from_relpos(pos, mcode):
  # Linearise a sector-relative position into a boxel offset:
  # x -> column (fastest-varying), y -> stack, z -> row (slowest-varying).
  pos = util.get_as_position(pos)
  if pos is None:
    return None
  cubeside = sector.get_mcode_cube_width(mcode)
  column, stack, row = (int(coord // cubeside) for coord in (pos.x, pos.y, pos.z))
  return column + (_srp_rowlength * stack) + (_srp_sidelength * row)
def _get_sysid_from_soffset(position, mcode, format_output=False):
  # Split the linear offset into three base-26 letters plus a high-digit
  # count (number1); prefix is the fastest-varying letter.
  prefixn = int((position) % len(string.ascii_uppercase))
  centren = int((position // _srp_divisor1) % len(string.ascii_uppercase))
  suffixn = int((position // _srp_divisor2) % len(string.ascii_uppercase))
  number1 = int((position // _srp_divisor3))
  prefix = string.ascii_uppercase[prefixn]
  centre = string.ascii_uppercase[centren]
  suffix = string.ascii_uppercase[suffixn]
  if format_output:
    # NOTE: when number1 is non-zero the string ends with a trailing '-';
    # the caller is expected to append number2 (see _get_system_from_name).
    output = '{}{}-{} {}'.format(prefix, centre, suffix, sector.get_mcode(mcode))
    if number1 != 0:
      output += '{}-'.format(number1)
    return output
  else:
    return [prefix, centre, suffix, sector.get_mcode(mcode), number1]
# Get the class of the sector from its name
# e.g. Froawns = 1, Froadue = 1, Eos Aowsy = 2
def _get_sector_class(sect):
  # Hand-authored sectors are recognised by name and reported as "ha".
  if util.is_str(sect) and sect.lower() in pgdata.ha_sectors:
    return "ha"
  frags = get_sector_fragments(sect) if util.is_str(sect) else sect
  # Class 2: four fragments with a prefix in both the 1st and 3rd slots.
  if frags is not None and len(frags) == 4 and frags[0] in pgdata.cx_prefixes and frags[2] in pgdata.cx_prefixes:
    return 2
  # Class 1: three or four fragments led by a single prefix.
  elif frags is not None and len(frags) in [3,4] and frags[0] in pgdata.cx_prefixes:
    return 1
  else:
    return None
# Get the full list of suffixes for a given set of fragments missing a suffix
# e.g. "Dryau Ao", "Ogair", "Wreg"
def _get_suffixes(input, get_all = False):
  frags = get_sector_fragments(input) if util.is_str(input) else input
  if frags is None:
    return None
  wordstart = frags[0]
  if frags[-1] in pgdata.cx_prefixes:
    # Append suffix straight onto a prefix (probably C2)
    suffix_map_idx = pgdata.c2_prefix_suffix_override_map.get(frags[-1], 1)
    result = pgdata.c2_suffixes[suffix_map_idx]
    wordstart = frags[-1]
  else:
    # Likely C1
    if frags[-1] in pgdata.c1_infixes[2]:
      # Last infix is consonant-ish, return the vowel-ish suffix list
      result = pgdata.c1_suffixes[1]
    else:
      result = pgdata.c1_suffixes[2]
  if get_all:
    return result
  else:
    # Truncate the list to the run length of the word-starting fragment.
    return result[0 : _get_prefix_run_length(wordstart)]
# Get the specified prefix's run length (e.g. Th => 35, Tz => 1)
def _get_prefix_run_length(frag):
  # Most prefixes share the default length; a few are overridden in pgdata.
  return pgdata.cx_prefix_length_overrides.get(frag, pgdata.cx_prefix_length_default)
def _get_entry_from_offset(offset, keys, data):
return [c for c in keys if offset >= data[c][0] and offset < (data[c][0] + data[c][1])][0]
# Get the sector offset of a position
def _get_offset_from_pos(pos, galsize):
  # Accept either a PGSector directly, or anything convertible to one.
  if isinstance(pos, sector.PGSector):
    sect = pos
  else:
    sect = get_sector(pos, allow_ha=False, get_name=False)
  # Linearise the (x, y, z) sector index; z varies slowest, x fastest.
  return (sect.index[2] * galsize[1] + sect.index[1]) * galsize[0] + sect.index[0]
def _get_sector_pos_from_offset(offset, galsize):
  # Invert the linearisation: x is the fastest-varying axis, z the slowest.
  layer, x = divmod(offset, galsize[0])
  z, y = divmod(layer, galsize[1])
  if z >= galsize[2]:
    log.warning("Sector position for offset {} is outside expected galaxy size!".format(offset))
  # Put it in "our" coordinate space by shifting relative to the base sector.
  return [x - sector.base_sector_index[0],
          y - sector.base_sector_index[1],
          z - sector.base_sector_index[2]]
# Determines whether a given sector should be C1 or C2
def _get_c1_or_c2(key):
  # Jenkins-hash the offset; an odd hash means class 2, an even one class 1.
  hashed = util.jenkins32(key)
  return 2 if hashed % 2 else 1
def _get_sector_from_name(sector_name, allow_ha = True):
  # Canonicalise first so case differences don't break the lookups below.
  sector_name = get_canonical_name(sector_name, sector_only=True)
  if sector_name is None:
    return None
  if allow_ha and util.is_str(sector_name) and sector_name.lower() in pgdata.ha_sectors:
    return pgdata.ha_sectors[sector_name.lower()]
  else:
    frags = get_sector_fragments(sector_name) if util.is_str(sector_name) else sector_name
    if frags is not None:
      # Dispatch on the sector's naming class.
      sc = _get_sector_class(frags)
      if sc == 2:
        # Class 2
        return _c2_get_sector(frags)
      elif sc == 1:
        # Class 1
        return _c1_get_sector(frags)
      else:
        return None
    else:
      return None
def _get_coords_from_name(raw_system_name, allow_ha = True):
  # Returns (position, uncertainty), or (None, None) on any failure.
  system_name = get_canonical_name(raw_system_name)
  if system_name is None:
    return (None, None)
  # Reparse it now it's (hopefully) right
  m = get_system_fragments(system_name)
  if m is None:
    return (None, None)
  sector_name = m['SectorName']
  sect = _get_sector_from_name(sector_name, allow_ha)
  if sect is None:
    return (None, None)
  # Get the absolute position of the sector
  abs_pos = sect.get_origin(sector.get_mcode_cube_width(m['MCode']))
  # Get the relative position of the star within the sector
  # Also get the +/- error bounds
  rel_pos, rel_pos_error = _get_relpos_from_sysid(m['L1'], m['L2'], m['L3'], m['MCode'], m['N1'], m['N2'])
  # Check if the relpos is invalid; HA sectors get the uncertainty as leeway
  # because their cubes need not align with the PG grid.
  leeway = rel_pos_error if (sect.sector_class == 'ha') else 0
  if any([s > (sector.sector_size + leeway) for s in rel_pos]):
    log.warning("RelPos for input {} was invalid: {}, uncertainty {}".format(system_name, rel_pos, rel_pos_error))
    return (None, None)
  if abs_pos is not None and rel_pos is not None:
    return (abs_pos + rel_pos, rel_pos_error)
  else:
    return (None, None)
def _get_system_from_pos(input, mcode, allow_ha = True):
  input = util.get_as_position(input)
  if input is None:
    return None
  psect = get_sector(input, allow_ha=allow_ha)
  # Get cube width for this mcode, and the sector origin
  cwidth = sector.get_mcode_cube_width(mcode)
  psorig = psect.get_origin(cwidth)
  # Get the relative position within this sector and the system identifier
  relpos = vector3.Vector3(input.x - psorig.x, input.y - psorig.y, input.z - psorig.z)
  sysid = _get_sysid_from_relpos(relpos, mcode, format_output=True)
  # Only a prototype: the position pins down the boxel, not the N2 index.
  return system.PGSystemPrototype(input.x, input.y, input.z, "{} {}".format(psect.name, sysid), sector=psect, uncertainty=0)
def _get_system_from_name(input, allow_ha = True):
  m = get_system_fragments(input)
  if m is not None:
    sect = get_sector(m['SectorName'])
    rel_pos, uncertainty = _get_relpos_from_sysid(m['L1'], m['L2'], m['L3'], m['MCode'], m['N1'], m['N2'])
    if sect is not None and rel_pos is not None and uncertainty is not None:
      cube_width = sector.get_mcode_cube_width(m['MCode'])
      coords = sect.get_origin(cube_width) + rel_pos
      if allow_ha:
        return system.PGSystem(coords.x, coords.y, coords.z, uncertainty=uncertainty, name=get_canonical_name(input), sector=sect)
      else:
        # Resolve the underlying PG sector at these coords instead of the
        # (possibly hand-authored) named sector.
        pg_sect = get_sector(coords, allow_ha=False)
        # Now subtract the coords from ye olde origin to get the real PG relpos
        # (sysid ends with a trailing '-' when N1 != 0, so N2 is appended here)
        sysid = _get_sysid_from_relpos(coords - pg_sect.get_origin(cube_width), m['MCode'], format_output=True)
        return system.PGSystem(coords.x, coords.y, coords.z, uncertainty=uncertainty, name="{} {}{}".format(pg_sect.name, sysid, m['N2']), sector=pg_sect)
    else:
      return None
  else:
    return None
# Get which HA sector this position would be part of, if any
def _ha_get_name(pos):
  # Only the sector objects are needed, so iterate values() rather than
  # unpacking (name, sector) pairs and discarding the name.
  for s in pgdata.ha_sectors.values():
    if s.contains(pos):
      return s.name
  return None
# #
# Internal functions: c1-specific
# #
# Get the full list of infixes for a given set of fragments missing an infix
# e.g. "Ogai", "Wre", "P"
def _c1_get_infixes(input):
  frags = get_sector_fragments(input) if util.is_str(input) else input
  if frags is None:
    return None
  if frags[-1] in pgdata.cx_prefixes:
    # After a prefix: use that prefix's infix sequence (1 by default,
    # overridden for some prefixes).
    if frags[-1] in pgdata.c1_prefix_infix_override_map:
      return pgdata.c1_infixes[pgdata.c1_prefix_infix_override_map[frags[-1]]]
    else:
      return pgdata.c1_infixes[1]
  # After an infix: sequences 1 and 2 alternate.
  elif frags[-1] in pgdata.c1_infixes[1]:
    return pgdata.c1_infixes[2]
  elif frags[-1] in pgdata.c1_infixes[2]:
    return pgdata.c1_infixes[1]
  else:
    return None
# Get the specified infix's run length
def _c1_get_infix_run_length(frag):
  # Sequence 1 and sequence 2 infixes carry different default run lengths;
  # individual fragments may still override the default.
  default = (pgdata.c1_infix_s1_length_default
             if frag in pgdata.c1_infixes_s1
             else pgdata.c1_infix_s2_length_default)
  return pgdata.c1_infix_length_overrides.get(frag, default)
# Get the total run length for the series of infixes the input is part of
def _c1_get_infix_total_run_length(frag):
  # The total depends only on which infix sequence the fragment belongs to.
  in_sequence_1 = frag in pgdata.c1_infixes_s1
  return pgdata.c1_infix_s1_total_run_length if in_sequence_1 else pgdata.c1_infix_s2_total_run_length
# Get the zero-based offset (counting from bottom-left of the galaxy) of the input sector name/position
def _c1_get_offset(input):
  # Positions convert directly; anything else is treated as a sector name.
  pos = util.get_as_position(input)
  if pos is None:
    return _c1_get_offset_from_name(input)
  return _get_offset_from_pos(pos, sector.galaxy_size)
def _c1_get_offset_from_name(input):
  # Convert a class 1 sector name into its linear galaxy offset by walking
  # the name's phonemes down from "suffix space" to "prefix space".
  # (Removed unused local `suf_len`, which was assigned but never read.)
  frags = get_sector_fragments(input) if util.is_str(input) else input
  if frags is None:
    return None
  sufs = _get_suffixes(frags[0:-1], True)
  # Add the total length of all the infixes we've already passed over
  if len(frags) > 3:
    # We have a 4-phoneme name, which means we have to handle adjusting our "coordinates"
    # from individual suffix runs up to fragment3 runs and then to fragment2 runs
    # STEP 1: Acquire the offset for suffix runs, and adjust it
    suf_offset = sufs.index(frags[-1])
    # Check which fragment3 run we're on, and jump us up by that many total run lengths if not the first
    suf_offset += (sufs.index(frags[-1]) // _c1_get_infix_run_length(frags[2])) * _c1_get_infix_total_run_length(frags[2])
    # STEP 2: Take our current offset from "suffix space" to "fragment3 space"
    # Divide by the current fragment3's run length
    # Remember the offset that we're at on the current suffix-run
    f3_offset, f3_offset_mod = divmod(suf_offset, _c1_get_infix_run_length(frags[2]))
    # Multiply by the total run length for this series of fragment3s
    f3_offset *= _c1_get_infix_total_run_length(frags[2])
    # Reapply the f3 offset from earlier
    f3_offset += f3_offset_mod
    # Add the offset of the current fragment3, to give us our overall position in the f3-sequence
    f3_offset += _c1_infix_offsets[frags[2]][0]
    # STEP 3: Take our current offset from "fragment3 space" to "fragment2 space"
    # Divide by the current fragment2's run length
    # Remember the offset that we're at on the current f3-run
    f2_offset, f2_offset_mod = divmod(f3_offset, _c1_get_infix_run_length(frags[1]))
    # Multiply by the total run length for this series of fragment2s
    f2_offset *= _c1_get_infix_total_run_length(frags[1])
    # Reapply the f2 offset from earlier
    f2_offset += f2_offset_mod
    # Add the offset of the current fragment2, to give us our overall position in the f2-sequence
    f2_offset += _c1_infix_offsets[frags[1]][0]
    # Set this as the global offset to be manipulated by the prefix step
    offset = f2_offset
  else:
    # We have a 3-phoneme name, which means we just have to adjust our coordinates
    # from "suffix space" to "fragment2 space" (since there is no fragment3)
    # STEP 1: Acquire the offset for suffix runs, and adjust it
    suf_offset = sufs.index(frags[-1])
    # STEP 2: Take our current offset from "suffix space" to "fragment2 space"
    # Divide by the current fragment2's run length
    # Remember the offset we're at on the current suffix-run
    f2_offset, f2_offset_mod = divmod(suf_offset, _c1_get_infix_run_length(frags[1]))
    # Multiply by the total run length for this series of fragment2s
    f2_offset *= _c1_get_infix_total_run_length(frags[1])
    # Reapply the f2 offset from earlier
    f2_offset += f2_offset_mod
    # Add the offset of the current fragment2, to give us our overall position in the f2-sequence
    f2_offset += _c1_infix_offsets[frags[1]][0]
    # Set this as the global offset to be manipulated by the prefix step
    offset = f2_offset
  # Divide by the current prefix's run length, this is now how many iterations of the full 3037 we should have passed over
  # Also remember the current offset's position within a prefix run
  offset, offset_mod = divmod(offset, _get_prefix_run_length(frags[0]))
  # Now multiply by the total run length (3037) to get the actual offset of this run
  offset *= pgdata.cx_prefix_total_run_length
  # Add the infixes/suffix's position within this prefix's part of the overall prefix run
  offset += offset_mod
  # Add the base position of this prefix within the run
  offset += _prefix_offsets[frags[0]][0]
  # Whew!
  return offset
# Get the sector position of the given input class 1 sector name
def _c1_get_sector(input):
    """Return the sector.PGSector for a class 1 sector name.

    Accepts either a raw name string or a pre-split fragment list;
    returns None when the name cannot be parsed or located.
    """
    frags = get_sector_fragments(input) if util.is_str(input) else input
    if frags is None:
        return None
    offset = _c1_get_offset(frags)
    if offset is None:
        return None
    # Convert the linear offset into sector X/Y/Z coordinates
    pos = _get_sector_pos_from_offset(offset, sector.galaxy_size)
    sector_name = format_sector_name(frags)
    sector_class = _get_sector_class(frags)
    return sector.PGSector(pos[0], pos[1], pos[2], sector_name, sector_class)
def _c1_get_name(pos):
    """Determine the class 1 sector name fragments for the given position.

    Returns a fragment list of the form [prefix, infix1, suffix] or
    [prefix, infix1, infix2, suffix]; returns None if pos is None.
    """
    if pos is None:
        return None
    # NOTE(review): _c1_get_offset is called with a position here but with a
    # fragment list in _c1_get_sector -- confirm it accepts both input kinds.
    offset = _c1_get_offset(pos)
    # Get the current prefix run we're on, and keep the remaining offset
    prefix_cnt, cur_offset = divmod(offset, pgdata.cx_prefix_total_run_length)
    # Work out which prefix we're currently within
    prefix = _get_entry_from_offset(cur_offset, _prefix_offsets, _prefix_offsets)
    # Put us in that prefix's space
    cur_offset -= _prefix_offsets[prefix][0]
    # Work out which set of infix1s we should be using, and its total length
    infix1s = _c1_get_infixes([prefix])
    infix1_total_len = _c1_get_infix_total_run_length(infix1s[0])
    # Work out where we are in infix1 space, keep the remaining offset
    infix1_cnt, cur_offset = divmod(prefix_cnt * _get_prefix_run_length(prefix) + cur_offset, infix1_total_len)
    # Find which infix1 we're currently in
    infix1 = _get_entry_from_offset(cur_offset, infix1s, _c1_infix_offsets)
    # Put us in that infix1's space
    cur_offset -= _c1_infix_offsets[infix1][0]
    # Work out which set of suffixes we're using
    infix1_run_len = _c1_get_infix_run_length(infix1)
    sufs = _get_suffixes([prefix, infix1], True)
    # Get the index of the next entry in that list, in infix1 space
    next_idx = (infix1_run_len * infix1_cnt) + cur_offset
    # Start creating our output
    frags = [prefix, infix1]
    # If the index of the next entry is longer than the list of suffixes...
    # This means we've gone over all the 3-phoneme names and started the 4-phoneme ones
    # So, we need to calculate our extra phoneme (infix2) before adding a suffix
    if next_idx >= len(sufs):
        # Work out which set of infix2s we should be using
        infix2s = _c1_get_infixes(frags)
        infix2_total_len = _c1_get_infix_total_run_length(infix2s[0])
        # Work out where we are in infix2 space, still keep the remaining offset
        infix2_cnt, cur_offset = divmod(infix1_cnt * _c1_get_infix_run_length(infix1) + cur_offset, infix2_total_len)
        # Find which infix2 we're currently in
        infix2 = _get_entry_from_offset(cur_offset, infix2s, _c1_infix_offsets)
        # Put us in this infix2's space
        cur_offset -= _c1_infix_offsets[infix2][0]
        # Recalculate the next system index based on the infix2 data
        infix2_run_len = _c1_get_infix_run_length(infix2)
        sufs = _get_suffixes([prefix, infix1, infix2], True)
        next_idx = (infix2_run_len * infix2_cnt) + cur_offset
        # Add our infix2 to the output
        frags.append(infix2)
    # Add our suffix to the output, and return it
    frags.append(sufs[next_idx])
    return frags
# #
# Internal functions: c2-specific
# #
# Get the name of a class 2 sector based on its position
def _c2_get_name(pos):
    """Get the name of a class 2 sector based on its position."""
    return _c2_get_name_from_offset(_get_offset_from_pos(pos, sector.galaxy_size))
# Get the sector position of the given input class 2 sector name
def _c2_get_sector(input):
    """Return the sector.PGSector for a class 2 sector name.

    Accepts either a raw name string or a pre-split fragment list;
    returns None when the name cannot be parsed or located.
    """
    frags = get_sector_fragments(input) if util.is_str(input) else input
    if frags is None:
        return None
    offset = _c2_get_offset_from_name(frags)
    if offset is None:
        return None
    # Convert the linear offset into sector X/Y/Z coordinates
    pos = _get_sector_pos_from_offset(offset, sector.galaxy_size)
    sector_name = format_sector_name(frags)
    sector_class = _get_sector_class(frags)
    return sector.PGSector(pos[0], pos[1], pos[2], sector_name, sector_class)
def _c2_get_name_from_offset(offset, format_output=False):
    """Reconstruct a class 2 sector name from its linear offset.

    Returns the fragment list [prefix0, suffix0, prefix1, suffix1], or the
    formatted name string when format_output is True.
    """
    # Split the combined offset back into its two interleaved halves
    # (bit length of 32 is assumed; the original author was unsure too)
    halves = util.deinterleave(offset, 32)
    frags = []
    for idx in halves:
        # Each half indexes into the prefix run; the remainder within the
        # prefix's own run selects the suffix.
        pfx = _get_entry_from_offset(idx, _prefix_offsets, _prefix_offsets)
        sfx = _get_suffixes(pfx)[idx - _prefix_offsets[pfx][0]]
        frags.extend((pfx, sfx))
    return format_sector_name(frags) if format_output else frags
def _c2_get_offset_from_name(input):
    """Return the linear offset for a class 2 sector name.

    Accepts either a raw name string or a fragment list of the form
    [prefix0, suffix0, prefix1, suffix1]; returns None for unparseable
    or unknown names.
    """
    frags = get_sector_fragments(input) if util.is_str(input) else input
    if frags is None:
        return None
    try:
        # Get the current indexes within prefix runs (3037)
        cur_idx0 = _prefix_offsets[frags[0]][0] + _get_suffixes(frags[0]).index(frags[1])
        cur_idx1 = _prefix_offsets[frags[2]][0] + _get_suffixes(frags[2]).index(frags[3])
    except (KeyError, ValueError, IndexError):
        # KeyError: unknown prefix; ValueError: unknown suffix;
        # IndexError: fewer than four fragments.  Likely a dodgy name.
        # (Previously a bare except:, which also swallowed KeyboardInterrupt.)
        log.warning("Failed to look up prefixes/suffixes in _c2_get_offset_from_name; bad sector name?")
        return None
    # Interleave the individual offsets into one big offset
    return util.interleave(cur_idx0, cur_idx1, 32)  # Again, length is anyone's guess
# #
# Setup functions
# #
# Cache the run offsets of all prefixes and C1 infixes
_prefix_offsets = {}
_c1_infix_offsets = {}


def _construct_offsets():
    """Populate the module-level offset caches.

    _prefix_offsets maps each prefix to (cumulative start offset, run length);
    _c1_infix_offsets does the same for class 1 infixes.
    """
    global _prefix_offsets, _c1_infix_offsets
    cnt = 0
    for p in pgdata.cx_prefixes:
        plen = _get_prefix_run_length(p)
        _prefix_offsets[p] = (cnt, plen)
        cnt += plen
    # The two infix sequences each restart their cumulative offset from zero,
    # but share a single lookup table.  (De-duplicated from two copied loops.)
    for infix_list in (pgdata.c1_infixes_s1, pgdata.c1_infixes_s2):
        cnt = 0
        for i in infix_list:
            ilen = _c1_get_infix_run_length(i)
            _c1_infix_offsets[i] = (cnt, ilen)
            cnt += ilen
# #
# Initialisation
# #
# time.clock() was removed in Python 3.8; prefer time.perf_counter() where it
# exists (3.3+) and fall back to time.clock() only on legacy interpreters.
_timer = getattr(time, "perf_counter", None) or time.clock
_init_start = _timer()
_construct_offsets()
_init_time = _timer() - _init_start
| {
"content_hash": "30523b2af39a50d72d06f2068d63b7c1",
"timestamp": "",
"source": "github",
"line_count": 899,
"max_line_length": 154,
"avg_line_length": 35.383759733036705,
"alnum_prop": 0.6834014460861364,
"repo_name": "KayJohnston/jackies-map",
"id": "64f7b7783415247bd359ed9202743ed9b4447976",
"size": "31810",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pgnames.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "2613268"
}
],
"symlink_target": ""
} |
import django.contrib.auth.models
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import modeltranslation.tests.models
class Migration(migrations.Migration):
    """Initial schema for the modeltranslation test app.

    Declarative, migration-framework-generated operations: each translated
    field ``foo`` is stored alongside nullable per-language columns
    ``foo_de``/``foo_en``.  Depends on the auth app's initial migration
    because InheritedPermission subclasses ``auth.Permission``.
    """

    initial = True

    dependencies = [
        ('auth', '0001_initial'),
    ]

    operations = [
        # NOTE: on ConflictModel/AbstractConflictModelB the ``title_de`` column
        # is an IntegerField while ``title`` is a CharField -- a deliberate
        # type conflict used by the test suite.
        migrations.CreateModel(
            name='AbstractConflictModelB',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
                    ),
                ),
                ('title_de', models.IntegerField()),
                ('title', models.CharField(max_length=255, verbose_name='title')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='AbstractModelB',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
                    ),
                ),
                ('titlea', models.CharField(max_length=255, verbose_name='title a')),
                ('titlea_de', models.CharField(max_length=255, null=True, verbose_name='title a')),
                ('titlea_en', models.CharField(max_length=255, null=True, verbose_name='title a')),
                ('titleb', models.CharField(max_length=255, verbose_name='title b')),
                ('titleb_de', models.CharField(max_length=255, null=True, verbose_name='title b')),
                ('titleb_en', models.CharField(max_length=255, null=True, verbose_name='title b')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='ConflictModel',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
                    ),
                ),
                ('title', models.CharField(max_length=255, verbose_name='title')),
                ('title_de', models.IntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='CustomManager2TestModel',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
                    ),
                ),
                ('title', models.CharField(max_length=255, verbose_name='title')),
                ('title_de', models.CharField(max_length=255, null=True, verbose_name='title')),
                ('title_en', models.CharField(max_length=255, null=True, verbose_name='title')),
            ],
        ),
        migrations.CreateModel(
            name='CustomManagerChildTestModel',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
                    ),
                ),
                ('needs_translation', models.BooleanField(default=False)),
                ('title', models.CharField(max_length=255, verbose_name='title')),
                ('title_de', models.CharField(max_length=255, null=True, verbose_name='title')),
                ('title_en', models.CharField(max_length=255, null=True, verbose_name='title')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='CustomManagerTestModel',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
                    ),
                ),
                ('title', models.CharField(max_length=255, verbose_name='title')),
                ('title_de', models.CharField(max_length=255, null=True, verbose_name='title')),
                ('title_en', models.CharField(max_length=255, null=True, verbose_name='title')),
                ('description', models.CharField(db_column='xyz', max_length=255, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='DataModel',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
                    ),
                ),
                ('data', models.TextField(blank=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='DescriptorModel',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
                    ),
                ),
                ('normal', modeltranslation.tests.models.FancyField(default='')),
                ('trans', modeltranslation.tests.models.FancyField(default='')),
                ('trans_de', modeltranslation.tests.models.FancyField(default='', null=True)),
                ('trans_en', modeltranslation.tests.models.FancyField(default='', null=True)),
            ],
        ),
        migrations.CreateModel(
            name='FallbackModel',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
                    ),
                ),
                ('title', models.CharField(max_length=255, verbose_name='title')),
                ('title_de', models.CharField(max_length=255, null=True, verbose_name='title')),
                ('title_en', models.CharField(max_length=255, null=True, verbose_name='title')),
                ('text', models.TextField(blank=True, null=True)),
                ('text_de', models.TextField(blank=True, null=True)),
                ('text_en', models.TextField(blank=True, null=True)),
                ('url', models.URLField(blank=True, null=True)),
                ('url_de', models.URLField(blank=True, null=True)),
                ('url_en', models.URLField(blank=True, null=True)),
                ('email', models.EmailField(blank=True, max_length=254, null=True)),
                ('email_de', models.EmailField(blank=True, max_length=254, null=True)),
                ('email_en', models.EmailField(blank=True, max_length=254, null=True)),
                ('description', models.CharField(max_length=255, null=True)),
                ('description_de', models.CharField(max_length=255, null=True)),
                ('description_en', models.CharField(max_length=255, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='FallbackModel2',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
                    ),
                ),
                ('title', models.CharField(max_length=255, verbose_name='title')),
                ('title_de', models.CharField(max_length=255, null=True, verbose_name='title')),
                ('title_en', models.CharField(max_length=255, null=True, verbose_name='title')),
                ('text', models.TextField(blank=True, null=True)),
                ('text_de', models.TextField(blank=True, null=True)),
                ('text_en', models.TextField(blank=True, null=True)),
                ('url', models.URLField(blank=True, null=True)),
                ('url_de', models.URLField(blank=True, null=True)),
                ('url_en', models.URLField(blank=True, null=True)),
                ('email', models.EmailField(blank=True, max_length=254, null=True)),
                ('email_de', models.EmailField(blank=True, max_length=254, null=True)),
                ('email_en', models.EmailField(blank=True, max_length=254, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='FileFieldsModel',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
                    ),
                ),
                ('title', models.CharField(max_length=255, verbose_name='title')),
                ('title_de', models.CharField(max_length=255, null=True, verbose_name='title')),
                ('title_en', models.CharField(max_length=255, null=True, verbose_name='title')),
                (
                    'file',
                    models.FileField(blank=True, null=True, upload_to='modeltranslation_tests'),
                ),
                (
                    'file_de',
                    models.FileField(blank=True, null=True, upload_to='modeltranslation_tests'),
                ),
                (
                    'file_en',
                    models.FileField(blank=True, null=True, upload_to='modeltranslation_tests'),
                ),
                ('file2', models.FileField(upload_to='modeltranslation_tests')),
                ('file2_de', models.FileField(null=True, upload_to='modeltranslation_tests')),
                ('file2_en', models.FileField(null=True, upload_to='modeltranslation_tests')),
                (
                    'image',
                    models.ImageField(blank=True, null=True, upload_to='modeltranslation_tests'),
                ),
                (
                    'image_de',
                    models.ImageField(blank=True, null=True, upload_to='modeltranslation_tests'),
                ),
                (
                    'image_en',
                    models.ImageField(blank=True, null=True, upload_to='modeltranslation_tests'),
                ),
            ],
        ),
        migrations.CreateModel(
            name='FilteredTestModel',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
                    ),
                ),
                ('title', models.CharField(max_length=255, verbose_name='title')),
                ('title_de', models.CharField(max_length=255, null=True, verbose_name='title')),
                ('title_en', models.CharField(max_length=255, null=True, verbose_name='title')),
            ],
        ),
        migrations.CreateModel(
            name='GroupFieldsetsModel',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
                    ),
                ),
                ('title', models.CharField(max_length=255)),
                ('title_de', models.CharField(max_length=255, null=True)),
                ('title_en', models.CharField(max_length=255, null=True)),
                ('text', models.TextField(blank=True, null=True)),
                ('text_de', models.TextField(blank=True, null=True)),
                ('text_en', models.TextField(blank=True, null=True)),
                ('email', models.EmailField(blank=True, max_length=254, null=True)),
            ],
        ),
        # Multi-table inheritance from the contrib.auth Permission model.
        migrations.CreateModel(
            name='InheritedPermission',
            fields=[
                (
                    'permission_ptr',
                    models.OneToOneField(
                        auto_created=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        parent_link=True,
                        primary_key=True,
                        serialize=False,
                        to='auth.permission',
                    ),
                ),
                ('translated_var', models.CharField(max_length=255)),
                ('translated_var_de', models.CharField(max_length=255, null=True)),
                ('translated_var_en', models.CharField(max_length=255, null=True)),
            ],
            bases=('auth.permission',),
            managers=[
                ('objects', django.contrib.auth.models.PermissionManager()),
            ],
        ),
        migrations.CreateModel(
            name='ManagerTestModel',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
                    ),
                ),
                ('title', models.CharField(max_length=255, verbose_name='title')),
                ('title_de', models.CharField(max_length=255, null=True, verbose_name='title')),
                ('title_en', models.CharField(max_length=255, null=True, verbose_name='title')),
                ('visits', models.IntegerField(default=0, verbose_name='visits')),
                ('visits_de', models.IntegerField(default=0, null=True, verbose_name='visits')),
                ('visits_en', models.IntegerField(default=0, null=True, verbose_name='visits')),
                ('description', models.CharField(max_length=255, null=True)),
                ('description_de', models.CharField(max_length=255, null=True)),
                ('description_en', models.CharField(max_length=255, null=True)),
            ],
            options={
                'ordering': ('-visits',),
            },
        ),
        migrations.CreateModel(
            name='ModelX',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
                    ),
                ),
                ('name', models.CharField(max_length=255)),
                ('name_de', models.CharField(max_length=255, null=True)),
                ('name_en', models.CharField(max_length=255, null=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        # Through-model for the ModelY.xs many-to-many; the model_y FK is
        # attached later via AddField once ModelY exists.
        migrations.CreateModel(
            name='ModelXY',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
                    ),
                ),
                (
                    'model_x',
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE, to='tests.modelx'
                    ),
                ),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='MultitableConflictModelA',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
                    ),
                ),
                ('title_de', models.IntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='MultitableModelA',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
                    ),
                ),
                ('titlea', models.CharField(max_length=255, verbose_name='title a')),
                ('titlea_de', models.CharField(max_length=255, null=True, verbose_name='title a')),
                ('titlea_en', models.CharField(max_length=255, null=True, verbose_name='title a')),
            ],
        ),
        migrations.CreateModel(
            name='NameModel',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
                    ),
                ),
                ('firstname', models.CharField(max_length=50)),
                ('firstname_de', models.CharField(max_length=50, null=True)),
                ('firstname_en', models.CharField(max_length=50, null=True)),
                ('lastname', models.CharField(max_length=50)),
                ('lastname_de', models.CharField(max_length=50, null=True)),
                ('lastname_en', models.CharField(max_length=50, null=True)),
                ('age', models.CharField(max_length=50)),
                ('slug', models.SlugField(max_length=100)),
                ('slug2', models.SlugField(max_length=100)),
                ('slug2_de', models.SlugField(max_length=100, null=True)),
                ('slug2_en', models.SlugField(max_length=100, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='NonTranslated',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
                    ),
                ),
                ('title', models.CharField(max_length=255, verbose_name='title')),
            ],
        ),
        migrations.CreateModel(
            name='OtherFieldsModel',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
                    ),
                ),
                (
                    'int',
                    models.PositiveIntegerField(
                        default=42, validators=[django.core.validators.MinValueValidator(0)]
                    ),
                ),
                (
                    'int_de',
                    models.PositiveIntegerField(
                        default=42,
                        null=True,
                        validators=[django.core.validators.MinValueValidator(0)],
                    ),
                ),
                (
                    'int_en',
                    models.PositiveIntegerField(
                        default=42,
                        null=True,
                        validators=[django.core.validators.MinValueValidator(0)],
                    ),
                ),
                ('boolean', models.BooleanField(default=False)),
                ('boolean_de', models.BooleanField(default=False)),
                ('boolean_en', models.BooleanField(default=False)),
                ('float', models.FloatField(blank=True, null=True)),
                ('float_de', models.FloatField(blank=True, null=True)),
                ('float_en', models.FloatField(blank=True, null=True)),
                (
                    'decimal',
                    models.DecimalField(blank=True, decimal_places=2, max_digits=5, null=True),
                ),
                (
                    'decimal_de',
                    models.DecimalField(blank=True, decimal_places=2, max_digits=5, null=True),
                ),
                (
                    'decimal_en',
                    models.DecimalField(blank=True, decimal_places=2, max_digits=5, null=True),
                ),
                ('date', models.DateField(blank=True, null=True)),
                ('date_de', models.DateField(blank=True, null=True)),
                ('date_en', models.DateField(blank=True, null=True)),
                ('datetime', models.DateTimeField(blank=True, null=True)),
                ('datetime_de', models.DateTimeField(blank=True, null=True)),
                ('datetime_en', models.DateTimeField(blank=True, null=True)),
                ('time', models.TimeField(blank=True, null=True)),
                ('time_de', models.TimeField(blank=True, null=True)),
                ('time_en', models.TimeField(blank=True, null=True)),
                ('genericip', models.GenericIPAddressField(blank=True, null=True)),
                ('genericip_de', models.GenericIPAddressField(blank=True, null=True)),
                ('genericip_en', models.GenericIPAddressField(blank=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Page',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
                    ),
                ),
                ('slug', models.CharField(max_length=255)),
                ('slug_de', models.CharField(max_length=255, null=True)),
                ('slug_en', models.CharField(max_length=255, null=True)),
                ('keywords', models.CharField(max_length=255)),
                ('keywords_de', models.CharField(max_length=255, null=True)),
                ('keywords_en', models.CharField(max_length=255, null=True)),
                ('title', models.CharField(max_length=255)),
                ('title_de', models.CharField(max_length=255, null=True)),
                ('title_en', models.CharField(max_length=255, null=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='PlainChildTestModel',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
                    ),
                ),
                ('needs_translation', models.BooleanField(default=False)),
                ('title', models.CharField(max_length=255, verbose_name='title')),
                ('title_de', models.CharField(max_length=255, null=True, verbose_name='title')),
                ('title_en', models.CharField(max_length=255, null=True, verbose_name='title')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='RequiredModel',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
                    ),
                ),
                ('non_req', models.CharField(blank=True, max_length=10)),
                ('non_req_de', models.CharField(blank=True, max_length=10, null=True)),
                ('non_req_en', models.CharField(blank=True, max_length=10, null=True)),
                ('req', models.CharField(max_length=10)),
                ('req_de', models.CharField(max_length=10, null=True)),
                ('req_en', models.CharField(max_length=10, null=True)),
                ('req_reg', models.CharField(max_length=10)),
                ('req_reg_de', models.CharField(max_length=10, null=True)),
                ('req_reg_en', models.CharField(max_length=10, null=True)),
                ('req_en_reg', models.CharField(max_length=10)),
                ('req_en_reg_de', models.CharField(max_length=10, null=True)),
                ('req_en_reg_en', models.CharField(max_length=10, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='TestModel',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
                    ),
                ),
                ('title', models.CharField(max_length=255, verbose_name='title')),
                ('title_de', models.CharField(max_length=255, null=True, verbose_name='title')),
                ('title_en', models.CharField(max_length=255, null=True, verbose_name='title')),
                ('text', models.TextField(blank=True, null=True)),
                ('text_de', models.TextField(blank=True, null=True)),
                ('text_en', models.TextField(blank=True, null=True)),
                ('url', models.URLField(blank=True, null=True)),
                ('url_de', models.URLField(blank=True, null=True)),
                ('url_en', models.URLField(blank=True, null=True)),
                ('email', models.EmailField(blank=True, max_length=254, null=True)),
                ('email_de', models.EmailField(blank=True, max_length=254, null=True)),
                ('email_en', models.EmailField(blank=True, max_length=254, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='ThirdPartyModel',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
                    ),
                ),
                ('name', models.CharField(max_length=20)),
            ],
        ),
        migrations.CreateModel(
            name='ThirdPartyRegisteredModel',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
                    ),
                ),
                ('name', models.CharField(max_length=20)),
                ('name_de', models.CharField(max_length=20, null=True)),
                ('name_en', models.CharField(max_length=20, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='UniqueNullableModel',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
                    ),
                ),
                ('title', models.CharField(max_length=255, null=True, unique=True)),
                ('title_de', models.CharField(max_length=255, null=True, unique=True)),
                ('title_en', models.CharField(max_length=255, null=True, unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='MultitableConflictModelB',
            fields=[
                (
                    'multitableconflictmodela_ptr',
                    models.OneToOneField(
                        auto_created=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        parent_link=True,
                        primary_key=True,
                        serialize=False,
                        to='tests.multitableconflictmodela',
                    ),
                ),
                ('title', models.CharField(max_length=255, verbose_name='title')),
            ],
            bases=('tests.multitableconflictmodela',),
        ),
        migrations.CreateModel(
            name='MultitableModelB',
            fields=[
                (
                    'multitablemodela_ptr',
                    models.OneToOneField(
                        auto_created=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        parent_link=True,
                        primary_key=True,
                        serialize=False,
                        to='tests.multitablemodela',
                    ),
                ),
                ('titleb', models.CharField(max_length=255, verbose_name='title b')),
                ('titleb_de', models.CharField(max_length=255, null=True, verbose_name='title b')),
                ('titleb_en', models.CharField(max_length=255, null=True, verbose_name='title b')),
            ],
            bases=('tests.multitablemodela',),
        ),
        migrations.CreateModel(
            name='RichTextPage',
            fields=[
                (
                    'page_ptr',
                    models.OneToOneField(
                        auto_created=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        parent_link=True,
                        primary_key=True,
                        serialize=False,
                        to='tests.page',
                    ),
                ),
                ('content', models.CharField(max_length=255)),
                ('content_de', models.CharField(max_length=255, null=True)),
                ('content_en', models.CharField(max_length=255, null=True)),
            ],
            options={
                'abstract': False,
            },
            bases=('tests.page', models.Model),
        ),
        migrations.CreateModel(
            name='OneToOneFieldModel',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
                    ),
                ),
                ('title', models.CharField(max_length=255, verbose_name='title')),
                ('title_de', models.CharField(max_length=255, null=True, verbose_name='title')),
                ('title_en', models.CharField(max_length=255, null=True, verbose_name='title')),
                (
                    'non',
                    models.OneToOneField(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name='test_o2o',
                        to='tests.nontranslated',
                    ),
                ),
                (
                    'non_de',
                    models.OneToOneField(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name='test_o2o',
                        to='tests.nontranslated',
                    ),
                ),
                (
                    'non_en',
                    models.OneToOneField(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name='test_o2o',
                        to='tests.nontranslated',
                    ),
                ),
                (
                    'optional',
                    models.OneToOneField(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        to='tests.testmodel',
                    ),
                ),
                (
                    'optional_de',
                    models.OneToOneField(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        to='tests.testmodel',
                    ),
                ),
                (
                    'optional_en',
                    models.OneToOneField(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        to='tests.testmodel',
                    ),
                ),
                (
                    'test',
                    models.OneToOneField(
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name='test_o2o',
                        to='tests.testmodel',
                    ),
                ),
                (
                    'test_de',
                    models.OneToOneField(
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name='test_o2o',
                        to='tests.testmodel',
                    ),
                ),
                (
                    'test_en',
                    models.OneToOneField(
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name='test_o2o',
                        to='tests.testmodel',
                    ),
                ),
            ],
        ),
        migrations.CreateModel(
            name='ModelY',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
                    ),
                ),
                ('title', models.CharField(max_length=255)),
                ('title_de', models.CharField(max_length=255, null=True)),
                ('title_en', models.CharField(max_length=255, null=True)),
                ('xs', models.ManyToManyField(through='tests.ModelXY', to='tests.ModelX')),
            ],
            options={
                'abstract': False,
            },
        ),
        # Complete the ModelXY through-model now that ModelY exists.
        migrations.AddField(
            model_name='modelxy',
            name='model_y',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tests.modely'),
        ),
        migrations.CreateModel(
            name='ForeignKeyModel',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
                    ),
                ),
                ('title', models.CharField(max_length=255, verbose_name='title')),
                ('title_de', models.CharField(max_length=255, null=True, verbose_name='title')),
                ('title_en', models.CharField(max_length=255, null=True, verbose_name='title')),
                (
                    'hidden',
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name='+',
                        to='tests.testmodel',
                    ),
                ),
                (
                    'hidden_de',
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name='+',
                        to='tests.testmodel',
                    ),
                ),
                (
                    'hidden_en',
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name='+',
                        to='tests.testmodel',
                    ),
                ),
                (
                    'non',
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name='test_fks',
                        to='tests.nontranslated',
                    ),
                ),
                (
                    'non_de',
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name='test_fks',
                        to='tests.nontranslated',
                    ),
                ),
                (
                    'non_en',
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name='test_fks',
                        to='tests.nontranslated',
                    ),
                ),
                (
                    'optional',
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        to='tests.testmodel',
                    ),
                ),
                (
                    'optional_de',
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        to='tests.testmodel',
                    ),
                ),
                (
                    'optional_en',
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        to='tests.testmodel',
                    ),
                ),
                (
                    'test',
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name='test_fks',
                        to='tests.testmodel',
                    ),
                ),
                (
                    'test_de',
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name='test_fks',
                        to='tests.testmodel',
                    ),
                ),
                (
                    'test_en',
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name='test_fks',
                        to='tests.testmodel',
                    ),
                ),
                (
                    'untrans',
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name='test_fks_un',
                        to='tests.testmodel',
                    ),
                ),
            ],
        ),
        migrations.CreateModel(
            name='ForeignKeyFilteredModel',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
                    ),
                ),
                ('title', models.CharField(max_length=255, verbose_name='title')),
                ('title_de', models.CharField(max_length=255, null=True, verbose_name='title')),
                ('title_en', models.CharField(max_length=255, null=True, verbose_name='title')),
                (
                    'test',
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name='test_fks',
                        to='tests.filteredtestmodel',
                    ),
                ),
            ],
        ),
        # Proxy model: no table of its own, shares TestModel's.
        migrations.CreateModel(
            name='ProxyTestModel',
            fields=[],
            options={
                'proxy': True,
                'indexes': [],
                'constraints': [],
            },
            bases=('tests.testmodel',),
        ),
        migrations.CreateModel(
            name='MultitableModelC',
            fields=[
                (
                    'multitablemodelb_ptr',
                    models.OneToOneField(
                        auto_created=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        parent_link=True,
                        primary_key=True,
                        serialize=False,
                        to='tests.multitablemodelb',
                    ),
                ),
                ('titlec', models.CharField(max_length=255, verbose_name='title c')),
                ('titlec_de', models.CharField(max_length=255, null=True, verbose_name='title c')),
                ('titlec_en', models.CharField(max_length=255, null=True, verbose_name='title c')),
            ],
            bases=('tests.multitablemodelb',),
        ),
        migrations.CreateModel(
            name='MultitableModelD',
            fields=[
                (
                    'multitablemodelb_ptr',
                    models.OneToOneField(
                        auto_created=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        parent_link=True,
                        primary_key=True,
                        serialize=False,
                        to='tests.multitablemodelb',
                    ),
                ),
                ('titled', models.CharField(max_length=255, verbose_name='title d')),
            ],
            bases=('tests.multitablemodelb',),
        ),
    ]
| {
"content_hash": "b4ca3abb0f1b94417596b2a0fc7d3e96",
"timestamp": "",
"source": "github",
"line_count": 986,
"max_line_length": 100,
"avg_line_length": 41.869168356997974,
"alnum_prop": 0.43155778407576967,
"repo_name": "deschler/django-modeltranslation",
"id": "98171623fd23836f79e5594fc094322a7baf2d8e",
"size": "41333",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modeltranslation/tests/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "4250"
},
{
"name": "JavaScript",
"bytes": "19541"
},
{
"name": "Makefile",
"bytes": "184"
},
{
"name": "Python",
"bytes": "309919"
}
],
"symlink_target": ""
} |
"""
Module to define matching template for checklist spreadsheet
"""
__author__ = "Graham Klyne (GK@ACM.ORG)"
__copyright__ = "Copyright 2011-2013, University of Oxford"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
from gridmatch import (
GridMatchReport, GridMatchError, GridMatch,
text, anyval, regexval, refval, intval, save, value, error, trace
)
# Start of a checklist match: records the template type in "matchtemplate".
checklist_start = value("matchtemplate", "checklist")

# One prefix row: empty first cell, a word bound to "prefix", then a URI reference.
# NOTE(review): '+' appears to sequence cells along a row and '//' to advance to
# the following row -- confirm against the gridmatch module.
prefix = text("") + regexval(r"\w+", "prefix") + refval("uri")

# "Prefixes:" section: scan down to the heading, then collect one or more
# prefix rows into a prefix->uri dictionary.
prefixes = (text("Prefixes:").skipdownto()
            // text("Prefixes:")
            // prefix.repeatdown("prefixes", min=1, dkey="prefix", dval="uri")
            )

# One checklist row: target URI template, purpose text and a model reference.
checklist = text("") + regexval(r".+", "target_urit") + anyval("purpose") + refval("model")

# "Checklists:" section: heading followed by one or more checklist rows.
checklists = (text("Checklists:").skipdownto()
              // text("Checklists:")
              // checklist.repeatdown("checklists", min=1)
              )
itemlevel = save("level") + (text("MUST") | text("SHOULD") | text("MAY"))
checkitem = anyval("seq") + itemlevel + refval("reqid")
model = ( text("Model:").skipdownto()
// (text("Model:") + refval("modelid"))
// text("Items:")
// checkitem.repeatdown("items")
)
models = model.repeatdown("models", min=1)
matchforeach = ( (text("ForEach:") + regexval(".+", "foreach"))
// (text("ResultMod:") + regexval(".+", "result_mod")).optional()
// (text("Exists:") + regexval(".+", "exists")).optional()
// (text("Aggregates:") + regexval(".+", "aggregates")).optional()
// (text("IsLive:") + regexval(".+", "islive")).optional()
// (text("Min:") + intval("min")).optional()
// (text("Max:") + intval("max")).optional()
)
matchexists = text("Exists:") + regexval(".+", "exists")
matchsoftware = ( (text("Command:") + anyval("command"))
// (text("Response:") + anyval("response"))
)
rulebody = ( matchforeach
| matchexists
| matchsoftware
| error("No rule body found")
)
collectvarlist = ( ( regexval("\?\w+", "collectvar") + text("as:") + regexval("\?\w+", "collectlist") )
| trace("collectvarlist not matched")
)
collectall = ( text("Collect:") + collectvarlist )
collectpass = ( text("CollectPass:") + collectvarlist )
collectfail = ( text("CollectFail:") + collectvarlist )
collectvars = ( collectall.repeatdown("collectall")
// collectpass.repeatdown("collectpass")
// collectfail.repeatdown("collectfail")
)
rulediag = ( (text("Pass:") + anyval("pass"))
// (text("Fail:") + anyval("fail"))
// (text("None:") + anyval("miss")).optional()
)
requirement = ( text("Rule:").skipdownto()
// (text("Rule:") + refval("reqid"))
// (text("") + (rulebody // collectvars // rulediag))
)
requirements = requirement.repeatdown("requirements", min=1)
checklist_end = text("End:").skipdownto() // text("End:")
checklist = ( checklist_start
// prefixes
// checklists
// models
// requirements
// checklist_end
)
# Example data matched by the above:
#
# Prefixes:,Prefix,URI,,,@@NOTE: there is a shortcoming in the present Minim model and implementation that means there is no way to add new prefixes to those predefined in the minim evaluation code. Noted as technical debt fix.,
# ,rdf,http://www.w3.org/1999/02/22-rdf-syntax-ns#,,,,
# ,rdfs,http://www.w3.org/2000/01/rdf-schema#,,,,
# ,owl,http://www.w3.org/2002/07/owl#,,,,
# ,xsd,http://www.w3.org/2001/XMLSchema#,,,,
# ,xml,http://www.w3.org/XML/1998/namespace,,,,
# ,rdfg,http://www.w3.org/2004/03/trix/rdfg-1/,,,,
# ,ore,http://www.openarchives.org/ore/terms/,,,,
# ,ao,http://purl.org/ao/,,,,
# ,dcterms,http://purl.org/dc/terms/,,,,
# ,foaf,http://xmlns.com/foaf/0.1/,,,,
# ,ro,http://purl.org/wf4ever/ro#,,,,
# ,wfprov,http://purl.org/wf4ever/wfprov#,,,,
# ,wfdesc,http://purl.org/wf4ever/wfdesc#,,,,
# ,wf4ever,http://purl.org/wf4ever/wf4ever#,,,,
# ,minim,http://purl.org/minim/minim#,,,,
# Checklists:,Target,Purpose,Model,,Description,
# ,{+targetro},ready-to-release,#experiment_complete_model,,Checklist to be satisfied if the target RO is to be considered a complete and fully-described workflow experiment.,
# ,{+targetro},wf-accessible,#wf_accessible_model,,Checklist to test workflow accessible item in isolation,
# ,,,,,,
# Model:,#experiment_complete_model,,,,This model defines information that must be satisfied by the target RO for the target RO to be considered a complete and fully-described workflow experiment.,
# Items:,Level,Rule,,,,
# 010,SHOULD,#RO_has_hypothesys,,,RO should contain a resource describing the hypothesis the experiment is intended to test,
# 020,SHOULD,#RO_has_sketch,,,RO should contain a resource that is a high level sketch of the workflow that is used to test the hypothesys,
# 030,MUST,#WF_accessible,,,The RO must contain an accessible workflow definition,
# 040,MUST,#WF_services_accessible,,,All services used by the workflow must be live,
# 050,MUST,#RO_has_inputdata,,,The RO must specify input data that is used by the workflow,
# 060,SHOULD,#RO_has_conclusion,,,The RO should contain a resource that describes outcomes and conclusions obtained by running the workflow. ,
# Model:,#wf_accessible_model,,,,Model to test workflow accessible item in isolation,
# Items:,Level,Rule,,,,
# 030,MUST,#WF_accessible,,,The RO must contain an accessible workflow definition
# Define rules to test individual requirements,,,,,
# Rule:,#RO_has_hypothesys,,,,
# ,Exists:,?hypothesis rdf:type roterms:Hypothesis,,,
# ,Pass:,Experiment hypothesis is present,,,
# ,Fail:,Experiment hypothesis is not present,,,
# Rule:,#RO_has_sketch,,,,
# ,Exists:,?sketch rdf:type roterms:Sketch,,,
# ,Pass:,Workflow design sketch is present,,,
# ,Fail:,Workflow design sketch is not present,,,
# Rule:,#WF_accessible,,,,
# ,ForEach:,"?wf rdf:type wfdesc:Workflow ;
# rdfs:label ?wflab ;
# wfdesc:hasWorkflowDefinition ?wfdef",,,
# ,IsLive:,{+wfdef},,,
# ,Pass:,All workflow definitions are accessible,,,
# ,Fail:,The definition for workflow <i>%(wflab)s</i> is not accessible,,,
# ,None:,No workflow definitions are present,,,
# Rule:,#WF_services_accessible,,,,
# ,ForEach:,"?pr rdf:type wfdesc:Process ;
# rdfs:label ?prlab .
# { ?pr wf4ever:serviceURI ?pruri }
# UNION
# { ?pr wf4ever:wsdlURI ?pruri }",,,
# ,IsLive:,{+pruri},,,
# ,Pass:,All web services used by workflows are accessible,,,
# ,Fail:,"One or more web services used by a workflow are inaccessible, including <a href=""%(pruri)s""><i>%(prlab)s</i></a>",,,
# ,None:,No web services are referenced by any workflow,,,
# Rule:,#RO_has_inputdata,,,,
# ,Exists:,?wfbundle roterms:inputSelected ?inputdata,,,
# ,Pass:,Input data is present,,,
# ,Fail:,Input data is not present,,,
# Rule:,#RO_has_conclusion,,,,
# ,Exists:,?conclusion rdf:type roterms:Conclusions,,,
# ,Pass:,Experiment conclusions are present,,,
# ,Fail:,Experiment conclusions are not present,,,
# End:,,,,,
# End.
| {
"content_hash": "07e8acba4cf60a57cf70536be4522c27",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 229,
"avg_line_length": 38.21546961325967,
"alnum_prop": 0.6567876246927858,
"repo_name": "ninebynine/sds",
"id": "0701a473deb9409d52c1e4ae6be4f4a91e0139f6",
"size": "6940",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/sds/grid/checklist_template.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "201527"
},
{
"name": "JavaScript",
"bytes": "167422"
},
{
"name": "Python",
"bytes": "57519"
}
],
"symlink_target": ""
} |
'''Unit tests for the admin template gatherer.'''
from __future__ import print_function
import os
import sys
# When run directly from a source checkout, make the repository root
# importable so the grit package imports below resolve.
if __name__ == '__main__':
  sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import unittest
from six import StringIO
from grit.gather import admin_template
from grit import util
from grit import grd_reader
from grit import grit_runner
from grit.tool import build
class AdmGathererUnittest(unittest.TestCase):
  """Tests for the ADM (admin template) gatherer.

  Uses assertTrue/assertEqual/assertGreater instead of the long-deprecated
  ``failUnless`` alias, which was removed outright in Python 3.12.
  """

  def testParsingAndTranslating(self):
    """Parses an inline ADM snippet and round-trips its translation."""
    pseudofile = StringIO(
        'bingo bongo\n'
        'ding dong\n'
        '[strings] \n'
        'whatcha="bingo bongo"\n'
        'gotcha = "bingolabongola "the wise" fingulafongula" \n')
    gatherer = admin_template.AdmGatherer(pseudofile)
    gatherer.Parse()
    self.assertEqual(2, len(gatherer.GetCliques()))
    self.assertEqual('bingolabongola "the wise" fingulafongula',
                     gatherer.GetCliques()[1].GetMessage().GetRealContent())
    translation = gatherer.Translate('en')
    self.assertEqual(gatherer.GetText().strip(), translation)

  def testErrorHandling(self):
    """An ADM snippet with no [strings] section must be rejected."""
    pseudofile = StringIO(
        'bingo bongo\n'
        'ding dong\n'
        'whatcha="bingo bongo"\n'
        'gotcha = "bingolabongola "the wise" fingulafongula" \n')
    gatherer = admin_template.AdmGatherer(pseudofile)
    self.assertRaises(admin_template.MalformedAdminTemplateException,
                      gatherer.Parse)

  # Leading translatable strings expected from grit/testdata/GoogleDesktop.adm.
  _TRANSLATABLES_FROM_FILE = (
      'Google', 'Google Desktop', 'Preferences',
      'Controls Google Desktop preferences',
      'Indexing and Capture Control',
      'Controls what files, web pages, and other content will be indexed by Google Desktop.',
      'Prevent indexing of email',
      # there are lots more but we don't check any further
  )

  def VerifyCliquesFromAdmFile(self, cliques):
    """Checks the cliques start with the expected translatable strings."""
    self.assertGreater(len(cliques), 20)
    for clique, expected in zip(cliques, self._TRANSLATABLES_FROM_FILE):
      text = clique.GetMessage().GetRealContent()
      self.assertEqual(expected, text)

  def testFromFile(self):
    """Gathers cliques from the on-disk test ADM file."""
    fname = util.PathFromRoot('grit/testdata/GoogleDesktop.adm')
    gatherer = admin_template.AdmGatherer(fname)
    gatherer.Parse()
    cliques = gatherer.GetCliques()
    self.VerifyCliquesFromAdmFile(cliques)

  def MakeGrd(self):
    """Builds and gathers a .grd resource tree referencing the ADM testdata."""
    grd = grd_reader.Parse(StringIO('''<?xml version="1.0" encoding="UTF-8"?>
      <grit latest_public_release="2" source_lang_id="en-US" current_release="3">
        <release seq="3">
          <structures>
            <structure type="admin_template" name="IDAT_GOOGLE_DESKTOP_SEARCH"
              file="GoogleDesktop.adm" exclude_from_rc="true" />
            <structure type="txt" name="BINGOBONGO"
              file="README.txt" exclude_from_rc="true" />
          </structures>
        </release>
        <outputs>
          <output filename="de_res.rc" type="rc_all" lang="de" />
        </outputs>
      </grit>'''), util.PathFromRoot('grit/testdata'))
    grd.SetOutputLanguage('en')
    grd.RunGatherers()
    return grd

  def testInGrd(self):
    """ADM cliques are reachable through the parsed .grd tree."""
    grd = self.MakeGrd()
    cliques = grd.children[0].children[0].children[0].GetCliques()
    self.VerifyCliquesFromAdmFile(cliques)

  def testFileIsOutput(self):
    """Processing the .grd writes the translated ADM and txt files."""
    grd = self.MakeGrd()
    dirname = util.TempDir({})
    try:
      tool = build.RcBuilder()
      tool.o = grit_runner.Options()
      tool.output_directory = dirname.GetPath()
      tool.res = grd
      tool.Process()
      self.assertTrue(os.path.isfile(dirname.GetPath('de_GoogleDesktop.adm')))
      self.assertTrue(os.path.isfile(dirname.GetPath('de_README.txt')))
    finally:
      # Always remove the temp dir, even when Process() raises.
      dirname.CleanUp()
# Standard unittest entry point when the file is executed directly.
if __name__ == '__main__':
  unittest.main()
| {
"content_hash": "203941ae0f45dd34b0c1bb0e3e61fae4",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 91,
"avg_line_length": 33.127272727272725,
"alnum_prop": 0.6599890230515917,
"repo_name": "chromium/chromium",
"id": "61755b37582be52eefcd1d9eb810ee939d012f0c",
"size": "3808",
"binary": false,
"copies": "8",
"ref": "refs/heads/main",
"path": "tools/grit/grit/gather/admin_template_unittest.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import uuid
import mock
from mock import patch
from oslo_utils import encodeutils
from oslo_utils import units
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
from six.moves import range
from glance.common import exception
from glance.common import store_utils
import glance.quota
from glance.tests.unit import utils as unit_test_utils
from glance.tests import utils as test_utils
UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d'
class FakeContext(object):
    """Minimal request-context double: a fixed, non-admin owner."""

    is_admin = False
    owner = 'someone'
class FakeImage(object):
    """Minimal image double exposing only what the quota code touches.

    ``size``/``image_id``/``locations``/``tags`` are deliberately class-level
    defaults (as in the original fake); tests overwrite them per instance.
    """

    size = None
    image_id = 'someid'
    locations = [{'url': 'file:///not/a/path', 'metadata': {}}]
    tags = set([])

    def __init__(self, **kwargs):
        self.extra_properties = kwargs.get('extra_properties', {})

    def set_data(self, data, size=None):
        # Mirrors the real API: the ``size`` hint is accepted but the stored
        # size is always the byte count actually consumed from ``data``.
        self.size = sum(len(chunk) for chunk in data)
class TestImageQuota(test_utils.BaseTestCase):
    """Exercises user_storage_quota enforcement in glance.quota.ImageProxy.

    Covers byte quotas with and without unit suffixes (B/KB/MB/GB/TB),
    uploads of known, unknown and mis-declared size, and quota checks on
    every mutation path of an image's location list (append/insert/extend/
    += /assignment), including images still in the 'queued' state.
    """

    def setUp(self):
        super(TestImageQuota, self).setUp()

    def tearDown(self):
        super(TestImageQuota, self).tearDown()

    def _get_image(self, location_count=1, image_size=10):
        # Build an ImageProxy over a FakeImage and register a matching DB
        # record carrying ``location_count`` active locations, so that
        # per-location quota accounting sees consistent state.
        context = FakeContext()
        db_api = unit_test_utils.FakeDB()
        store_api = unit_test_utils.FakeStoreAPI()
        store = unit_test_utils.FakeStoreUtils(store_api)
        base_image = FakeImage()
        base_image.image_id = 'xyz'
        base_image.size = image_size
        image = glance.quota.ImageProxy(base_image, context, db_api, store)
        locations = []
        for i in range(location_count):
            locations.append({'url': 'file:///g/there/it/is%d' % i,
                              'metadata': {}, 'status': 'active'})
        image_values = {'id': 'xyz', 'owner': context.owner,
                        'status': 'active', 'size': image_size,
                        'locations': locations}
        db_api.image_create(context, image_values)
        return image

    def test_quota_allowed(self):
        # Uploading exactly the quota'd number of bytes must succeed.
        quota = 10
        self.config(user_storage_quota=str(quota))
        context = FakeContext()
        db_api = unit_test_utils.FakeDB()
        store_api = unit_test_utils.FakeStoreAPI()
        store = unit_test_utils.FakeStoreUtils(store_api)
        base_image = FakeImage()
        base_image.image_id = 'id'
        image = glance.quota.ImageProxy(base_image, context, db_api, store)
        data = '*' * quota
        base_image.set_data(data, size=None)
        image.set_data(data)
        self.assertEqual(quota, base_image.size)

    def _test_quota_allowed_unit(self, data_length, config_quota):
        # Helper: ``data_length`` bytes must fit under ``config_quota``,
        # which may carry a unit suffix such as '1KB' or '1GB'.
        self.config(user_storage_quota=config_quota)
        context = FakeContext()
        db_api = unit_test_utils.FakeDB()
        store_api = unit_test_utils.FakeStoreAPI()
        store = unit_test_utils.FakeStoreUtils(store_api)
        base_image = FakeImage()
        base_image.image_id = 'id'
        image = glance.quota.ImageProxy(base_image, context, db_api, store)
        data = '*' * data_length
        base_image.set_data(data, size=None)
        image.set_data(data)
        self.assertEqual(data_length, base_image.size)

    def test_quota_allowed_unit_b(self):
        self._test_quota_allowed_unit(10, '10B')

    def test_quota_allowed_unit_kb(self):
        self._test_quota_allowed_unit(10, '1KB')

    def test_quota_allowed_unit_mb(self):
        self._test_quota_allowed_unit(10, '1MB')

    def test_quota_allowed_unit_gb(self):
        self._test_quota_allowed_unit(10, '1GB')

    def test_quota_allowed_unit_tb(self):
        self._test_quota_allowed_unit(10, '1TB')

    def _quota_exceeded_size(self, quota, data,
                             deleted=True, size=None):
        # Helper: set_data(...) must raise StorageQuotaFull.  When
        # ``deleted`` is true, first exercise the (patched) backend-delete
        # path so no real deletion happens.
        self.config(user_storage_quota=quota)
        context = FakeContext()
        db_api = unit_test_utils.FakeDB()
        store_api = unit_test_utils.FakeStoreAPI()
        store = unit_test_utils.FakeStoreUtils(store_api)
        base_image = FakeImage()
        base_image.image_id = 'id'
        image = glance.quota.ImageProxy(base_image, context, db_api, store)
        if deleted:
            with patch.object(store_utils, 'safe_delete_from_backend'):
                store_utils.safe_delete_from_backend(
                    context,
                    image.image_id,
                    base_image.locations[0])

        self.assertRaises(exception.StorageQuotaFull,
                          image.set_data,
                          data,
                          size=size)

    def test_quota_exceeded_no_size(self):
        quota = 10
        data = '*' * (quota + 1)
        # NOTE(jbresnah) When the image size is None it means that it is
        # not known. In this case the only time we will raise an
        # exception is when there is no room left at all, thus we know
        # it will not fit.
        # That's why 'get_remaining_quota' is mocked with return_value = 0.
        with patch.object(glance.api.common, 'get_remaining_quota',
                          return_value=0):
            self._quota_exceeded_size(str(quota), data)

    def test_quota_exceeded_with_right_size(self):
        quota = 10
        data = '*' * (quota + 1)
        self._quota_exceeded_size(str(quota), data, size=len(data),
                                  deleted=False)

    def test_quota_exceeded_with_right_size_b(self):
        quota = 10
        data = '*' * (quota + 1)
        self._quota_exceeded_size('10B', data, size=len(data),
                                  deleted=False)

    def test_quota_exceeded_with_right_size_kb(self):
        quota = units.Ki
        data = '*' * (quota + 1)
        self._quota_exceeded_size('1KB', data, size=len(data),
                                  deleted=False)

    def test_quota_exceeded_with_lie_size(self):
        # Declared size is under quota but the actual data is over it:
        # the quota check must still fire.
        quota = 10
        data = '*' * (quota + 1)
        self._quota_exceeded_size(str(quota), data, deleted=False,
                                  size=quota - 1)

    def test_append_location(self):
        new_location = {'url': 'file:///a/path', 'metadata': {},
                        'status': 'active'}
        image = self._get_image()
        pre_add_locations = image.locations[:]
        image.locations.append(new_location)
        pre_add_locations.append(new_location)
        self.assertEqual(image.locations, pre_add_locations)

    def test_insert_location(self):
        new_location = {'url': 'file:///a/path', 'metadata': {},
                        'status': 'active'}
        image = self._get_image()
        pre_add_locations = image.locations[:]
        image.locations.insert(0, new_location)
        pre_add_locations.insert(0, new_location)
        self.assertEqual(image.locations, pre_add_locations)

    def test_extend_location(self):
        new_location = {'url': 'file:///a/path', 'metadata': {},
                        'status': 'active'}
        image = self._get_image()
        pre_add_locations = image.locations[:]
        image.locations.extend([new_location])
        pre_add_locations.extend([new_location])
        self.assertEqual(image.locations, pre_add_locations)

    def test_iadd_location(self):
        new_location = {'url': 'file:///a/path', 'metadata': {},
                        'status': 'active'}
        image = self._get_image()
        pre_add_locations = image.locations[:]
        image.locations += [new_location]
        pre_add_locations += [new_location]
        self.assertEqual(image.locations, pre_add_locations)

    def test_set_location(self):
        new_location = {'url': 'file:///a/path', 'metadata': {},
                        'status': 'active'}
        image = self._get_image()
        image.locations = [new_location]
        self.assertEqual(image.locations, [new_location])

    def _make_image_with_quota(self, image_size=10, location_count=2):
        # Quota exactly equals current usage, so any addition exceeds it.
        quota = image_size * location_count
        self.config(user_storage_quota=str(quota))
        return self._get_image(image_size=image_size,
                               location_count=location_count)

    def test_exceed_append_location(self):
        image = self._make_image_with_quota()
        self.assertRaises(exception.StorageQuotaFull,
                          image.locations.append,
                          {'url': 'file:///a/path', 'metadata': {},
                           'status': 'active'})

    def test_exceed_insert_location(self):
        image = self._make_image_with_quota()
        self.assertRaises(exception.StorageQuotaFull,
                          image.locations.insert,
                          0,
                          {'url': 'file:///a/path', 'metadata': {},
                           'status': 'active'})

    def test_exceed_extend_location(self):
        image = self._make_image_with_quota()
        self.assertRaises(exception.StorageQuotaFull,
                          image.locations.extend,
                          [{'url': 'file:///a/path', 'metadata': {},
                            'status': 'active'}])

    def test_set_location_under(self):
        image = self._make_image_with_quota(location_count=1)
        image.locations = [{'url': 'file:///a/path', 'metadata': {},
                            'status': 'active'}]

    def test_set_location_exceed(self):
        image = self._make_image_with_quota(location_count=1)
        try:
            image.locations = [{'url': 'file:///a/path', 'metadata': {},
                                'status': 'active'},
                               {'url': 'file:///a/path2', 'metadata': {},
                                'status': 'active'}]
            self.fail('Should have raised the quota exception')
        except exception.StorageQuotaFull:
            pass

    def test_iadd_location_exceed(self):
        image = self._make_image_with_quota(location_count=1)
        try:
            image.locations += [{'url': 'file:///a/path', 'metadata': {},
                                 'status': 'active'}]
            self.fail('Should have raised the quota exception')
        except exception.StorageQuotaFull:
            pass

    # NOTE(review): the tests below use self.stubs.Set, which appears to be
    # the mox-style stub helper provided by test_utils.BaseTestCase — confirm
    # before porting to plain mock.

    def test_append_location_for_queued_image(self):
        # A queued image has size None; location size is fetched from the
        # (stubbed) backend instead.
        context = FakeContext()
        db_api = unit_test_utils.FakeDB()
        store_api = unit_test_utils.FakeStoreAPI()
        store = unit_test_utils.FakeStoreUtils(store_api)
        base_image = FakeImage()
        base_image.image_id = str(uuid.uuid4())
        image = glance.quota.ImageProxy(base_image, context, db_api, store)
        self.assertIsNone(image.size)

        self.stubs.Set(store_api, 'get_size_from_backend',
                       unit_test_utils.fake_get_size_from_backend)
        image.locations.append({'url': 'file:///fake.img.tar.gz',
                                'metadata': {}})
        self.assertIn({'url': 'file:///fake.img.tar.gz', 'metadata': {}},
                      image.locations)

    def test_insert_location_for_queued_image(self):
        context = FakeContext()
        db_api = unit_test_utils.FakeDB()
        store_api = unit_test_utils.FakeStoreAPI()
        store = unit_test_utils.FakeStoreUtils(store_api)
        base_image = FakeImage()
        base_image.image_id = str(uuid.uuid4())
        image = glance.quota.ImageProxy(base_image, context, db_api, store)
        self.assertIsNone(image.size)

        self.stubs.Set(store_api, 'get_size_from_backend',
                       unit_test_utils.fake_get_size_from_backend)
        image.locations.insert(0,
                               {'url': 'file:///fake.img.tar.gz',
                                'metadata': {}})
        self.assertIn({'url': 'file:///fake.img.tar.gz', 'metadata': {}},
                      image.locations)

    def test_set_location_for_queued_image(self):
        context = FakeContext()
        db_api = unit_test_utils.FakeDB()
        store_api = unit_test_utils.FakeStoreAPI()
        store = unit_test_utils.FakeStoreUtils(store_api)
        base_image = FakeImage()
        base_image.image_id = str(uuid.uuid4())
        image = glance.quota.ImageProxy(base_image, context, db_api, store)
        self.assertIsNone(image.size)

        self.stubs.Set(store_api, 'get_size_from_backend',
                       unit_test_utils.fake_get_size_from_backend)
        image.locations = [{'url': 'file:///fake.img.tar.gz', 'metadata': {}}]
        self.assertEqual([{'url': 'file:///fake.img.tar.gz', 'metadata': {}}],
                         image.locations)

    def test_iadd_location_for_queued_image(self):
        context = FakeContext()
        db_api = unit_test_utils.FakeDB()
        store_api = unit_test_utils.FakeStoreAPI()
        store = unit_test_utils.FakeStoreUtils(store_api)
        base_image = FakeImage()
        base_image.image_id = str(uuid.uuid4())
        image = glance.quota.ImageProxy(base_image, context, db_api, store)
        self.assertIsNone(image.size)

        self.stubs.Set(store_api, 'get_size_from_backend',
                       unit_test_utils.fake_get_size_from_backend)
        image.locations += [{'url': 'file:///fake.img.tar.gz', 'metadata': {}}]
        self.assertIn({'url': 'file:///fake.img.tar.gz', 'metadata': {}},
                      image.locations)
class TestImagePropertyQuotas(test_utils.BaseTestCase):
    """Exercises the image_property_quota limit on repo add/save.

    The key invariant: once an image already exceeds a (lowered) quota,
    modifying or deleting existing properties must still be allowed — only
    net additions are rejected.
    """

    def setUp(self):
        super(TestImagePropertyQuotas, self).setUp()
        self.base_image = FakeImage()
        self.image = glance.quota.ImageProxy(self.base_image,
                                             mock.Mock(),
                                             mock.Mock(),
                                             mock.Mock())

        self.image_repo_mock = mock.Mock()
        self.image_repo_mock.add.return_value = self.base_image
        self.image_repo_mock.save.return_value = self.base_image

        self.image_repo_proxy = glance.quota.ImageRepoProxy(
            self.image_repo_mock,
            mock.Mock(),
            mock.Mock(),
            mock.Mock())

    def test_save_image_with_image_property(self):
        # At the quota limit: save is forwarded to the wrapped repo.
        self.config(image_property_quota=1)
        self.image.extra_properties = {'foo': 'bar'}
        self.image_repo_proxy.save(self.image)

        self.image_repo_mock.save.assert_called_once_with(self.base_image,
                                                          from_state=None)

    def test_save_image_too_many_image_properties(self):
        self.config(image_property_quota=1)
        self.image.extra_properties = {'foo': 'bar', 'foo2': 'bar2'}
        exc = self.assertRaises(exception.ImagePropertyLimitExceeded,
                                self.image_repo_proxy.save, self.image)
        self.assertIn("Attempted: 2, Maximum: 1",
                      encodeutils.exception_to_unicode(exc))

    def test_save_image_unlimited_image_properties(self):
        # A quota of -1 disables the property limit entirely.
        self.config(image_property_quota=-1)
        self.image.extra_properties = {'foo': 'bar'}
        self.image_repo_proxy.save(self.image)

        self.image_repo_mock.save.assert_called_once_with(self.base_image,
                                                          from_state=None)

    def test_add_image_with_image_property(self):
        self.config(image_property_quota=1)
        self.image.extra_properties = {'foo': 'bar'}
        self.image_repo_proxy.add(self.image)

        self.image_repo_mock.add.assert_called_once_with(self.base_image)

    def test_add_image_too_many_image_properties(self):
        self.config(image_property_quota=1)
        self.image.extra_properties = {'foo': 'bar', 'foo2': 'bar2'}
        exc = self.assertRaises(exception.ImagePropertyLimitExceeded,
                                self.image_repo_proxy.add, self.image)
        self.assertIn("Attempted: 2, Maximum: 1",
                      encodeutils.exception_to_unicode(exc))

    def test_add_image_unlimited_image_properties(self):
        self.config(image_property_quota=-1)
        self.image.extra_properties = {'foo': 'bar'}
        self.image_repo_proxy.add(self.image)

        self.image_repo_mock.add.assert_called_once_with(self.base_image)

    def _quota_exceed_setup(self):
        # Start with two properties under a quota of 2; callers then lower
        # the quota to 1 so the image is already over the limit.
        self.config(image_property_quota=2)
        self.base_image.extra_properties = {'foo': 'bar', 'spam': 'ham'}
        self.image = glance.quota.ImageProxy(self.base_image,
                                             mock.Mock(),
                                             mock.Mock(),
                                             mock.Mock())

    def test_modify_image_properties_when_quota_exceeded(self):
        # Changing values of existing properties is allowed over-quota.
        self._quota_exceed_setup()
        self.config(image_property_quota=1)
        self.image.extra_properties = {'foo': 'frob', 'spam': 'eggs'}
        self.image_repo_proxy.save(self.image)
        self.image_repo_mock.save.assert_called_once_with(self.base_image,
                                                          from_state=None)
        self.assertEqual('frob', self.base_image.extra_properties['foo'])
        self.assertEqual('eggs', self.base_image.extra_properties['spam'])

    def test_delete_image_properties_when_quota_exceeded(self):
        # Deleting properties is always allowed, even over-quota.
        self._quota_exceed_setup()
        self.config(image_property_quota=1)
        del self.image.extra_properties['foo']
        self.image_repo_proxy.save(self.image)
        self.image_repo_mock.save.assert_called_once_with(self.base_image,
                                                          from_state=None)
        self.assertNotIn('foo', self.base_image.extra_properties)
        self.assertEqual('ham', self.base_image.extra_properties['spam'])

    def test_invalid_quota_config_parameter(self):
        # A non-numeric user_storage_quota surfaces as InvalidOptionValue
        # at the first quota-checked operation.
        self.config(user_storage_quota='foo')
        location = {"url": "file:///fake.img.tar.gz", "metadata": {}}
        self.assertRaises(exception.InvalidOptionValue,
                          self.image.locations.append, location)

    def test_exceed_quota_during_patch_operation(self):
        # A patch may transiently exceed the quota as long as the final
        # state (after the deletes) is back within it.
        self._quota_exceed_setup()
        self.image.extra_properties['frob'] = 'baz'
        self.image.extra_properties['lorem'] = 'ipsum'
        self.assertEqual('bar', self.base_image.extra_properties['foo'])
        self.assertEqual('ham', self.base_image.extra_properties['spam'])
        self.assertEqual('baz', self.base_image.extra_properties['frob'])
        self.assertEqual('ipsum', self.base_image.extra_properties['lorem'])

        del self.image.extra_properties['frob']
        del self.image.extra_properties['lorem']

        self.image_repo_proxy.save(self.image)
        call_args = mock.call(self.base_image, from_state=None)
        self.assertEqual(call_args, self.image_repo_mock.save.call_args)
        self.assertEqual('bar', self.base_image.extra_properties['foo'])
        self.assertEqual('ham', self.base_image.extra_properties['spam'])
        self.assertNotIn('frob', self.base_image.extra_properties)
        self.assertNotIn('lorem', self.base_image.extra_properties)

    def test_quota_exceeded_after_delete_image_properties(self):
        # Deleting down to a count still above the new quota must save fine.
        self.config(image_property_quota=3)
        self.base_image.extra_properties = {'foo': 'bar',
                                            'spam': 'ham',
                                            'frob': 'baz'}
        self.image = glance.quota.ImageProxy(self.base_image,
                                             mock.Mock(),
                                             mock.Mock(),
                                             mock.Mock())

        self.config(image_property_quota=1)
        del self.image.extra_properties['foo']
        self.image_repo_proxy.save(self.image)
        self.image_repo_mock.save.assert_called_once_with(self.base_image,
                                                          from_state=None)
        self.assertNotIn('foo', self.base_image.extra_properties)
        self.assertEqual('ham', self.base_image.extra_properties['spam'])
        self.assertEqual('baz', self.base_image.extra_properties['frob'])
class TestImageTagQuotas(test_utils.BaseTestCase):
    """Checks tag-count quota enforcement on the image proxy."""

    def setUp(self):
        super(TestImageTagQuotas, self).setUp()
        self.base_image = mock.Mock()
        self.base_image.tags = set([])
        self.base_image.extra_properties = {}
        self.image = glance.quota.ImageProxy(
            self.base_image, mock.Mock(), mock.Mock(), mock.Mock())
        self.image_repo_mock = mock.Mock()
        self.image_repo_proxy = glance.quota.ImageRepoProxy(
            self.image_repo_mock, mock.Mock(), mock.Mock(), mock.Mock())

    def test_replace_image_tag(self):
        """Replacing the tag set within quota succeeds."""
        self.config(image_tag_quota=1)
        self.image.tags = ['foo']
        self.assertEqual(1, len(self.image.tags))

    def test_replace_too_many_image_tags(self):
        """Replacing with more tags than allowed raises and sets none."""
        self.config(image_tag_quota=0)
        exc = self.assertRaises(exception.ImageTagLimitExceeded,
                                setattr, self.image, 'tags', ['foo', 'bar'])
        self.assertIn('Attempted: 2, Maximum: 0',
                      encodeutils.exception_to_unicode(exc))
        self.assertEqual(0, len(self.image.tags))

    def test_replace_unlimited_image_tags(self):
        """A quota of -1 disables the limit for replacement."""
        self.config(image_tag_quota=-1)
        self.image.tags = ['foo']
        self.assertEqual(1, len(self.image.tags))

    def test_add_image_tag(self):
        """Adding a single tag at the limit succeeds."""
        self.config(image_tag_quota=1)
        self.image.tags.add('foo')
        self.assertEqual(1, len(self.image.tags))

    def test_add_too_many_image_tags(self):
        """Adding past the limit raises with a descriptive message."""
        self.config(image_tag_quota=1)
        self.image.tags.add('foo')
        exc = self.assertRaises(exception.ImageTagLimitExceeded,
                                self.image.tags.add, 'bar')
        self.assertIn('Attempted: 2, Maximum: 1',
                      encodeutils.exception_to_unicode(exc))

    def test_add_unlimited_image_tags(self):
        """A quota of -1 disables the limit for add()."""
        self.config(image_tag_quota=-1)
        self.image.tags.add('foo')
        self.assertEqual(1, len(self.image.tags))

    def test_remove_image_tag_while_over_quota(self):
        """Removal must still work after the quota shrinks below usage."""
        self.config(image_tag_quota=1)
        self.image.tags.add('foo')
        self.assertEqual(1, len(self.image.tags))
        self.config(image_tag_quota=0)
        self.image.tags.remove('foo')
        self.assertEqual(0, len(self.image.tags))
class TestQuotaImageTagsProxy(test_utils.BaseTestCase):
    """Direct unit tests of the QuotaImageTagsProxy set wrapper."""

    def setUp(self):
        super(TestQuotaImageTagsProxy, self).setUp()

    def test_add(self):
        """add() stores a tag that is then visible via ``in``."""
        tags = glance.quota.QuotaImageTagsProxy(set())
        tags.add('foo')
        self.assertIn('foo', tags)

    def test_add_too_many_tags(self):
        """add() past the configured quota raises with counts in message."""
        self.config(image_tag_quota=0)
        tags = glance.quota.QuotaImageTagsProxy(set())
        exc = self.assertRaises(exception.ImageTagLimitExceeded,
                                tags.add, 'bar')
        self.assertIn('Attempted: 1, Maximum: 0',
                      encodeutils.exception_to_unicode(exc))

    def test_equals(self):
        """The proxy compares equal to a plain set with the same members."""
        tags = glance.quota.QuotaImageTagsProxy(set())
        self.assertEqual(set(), tags)

    def test_not_equals(self):
        """The proxy does not compare equal to unrelated objects."""
        tags = glance.quota.QuotaImageTagsProxy(set())
        self.assertNotEqual('foo', tags)

    def test_contains(self):
        tags = glance.quota.QuotaImageTagsProxy({'foo'})
        self.assertIn('foo', tags)

    def test_len(self):
        tags = glance.quota.QuotaImageTagsProxy({'foo', 'bar', 'baz', 'niz'})
        self.assertEqual(4, len(tags))

    def test_iter(self):
        """Iteration yields each member exactly once."""
        expected = {'foo', 'bar', 'baz', 'niz'}
        tags = glance.quota.QuotaImageTagsProxy(expected.copy())
        self.assertEqual(4, len(expected))
        for tag in tags:
            expected.remove(tag)
        self.assertEqual(0, len(expected))
class TestImageMemberQuotas(test_utils.BaseTestCase):
    """Checks the member-count quota applied by ImageMemberFactoryProxy."""

    def setUp(self):
        super(TestImageMemberQuotas, self).setUp()
        db_api = unit_test_utils.FakeDB()
        store_api = unit_test_utils.FakeStoreAPI()
        store = unit_test_utils.FakeStoreUtils(store_api)
        context = FakeContext()
        self.image = mock.Mock()
        self.base_image_member_factory = mock.Mock()
        self.image_member_factory = glance.quota.ImageMemberFactoryProxy(
            self.base_image_member_factory, context, db_api, store)

    def test_new_image_member(self):
        """Within quota, the call is forwarded to the wrapped factory."""
        self.config(image_member_quota=1)
        self.image_member_factory.new_image_member(self.image, 'fake_id')
        base_new = self.base_image_member_factory.new_image_member
        base_new.assert_called_once_with(self.image, 'fake_id')

    def test_new_image_member_unlimited_members(self):
        """A quota of -1 disables the member limit entirely."""
        self.config(image_member_quota=-1)
        self.image_member_factory.new_image_member(self.image, 'fake_id')
        base_new = self.base_image_member_factory.new_image_member
        base_new.assert_called_once_with(self.image, 'fake_id')

    def test_new_image_member_too_many_members(self):
        """A zero quota rejects any new member."""
        self.config(image_member_quota=0)
        self.assertRaises(exception.ImageMemberLimitExceeded,
                          self.image_member_factory.new_image_member,
                          self.image, 'fake_id')
class TestImageLocationQuotas(test_utils.BaseTestCase):
    """Exercises the image_location_quota limit on the locations list."""

    def setUp(self):
        super(TestImageLocationQuotas, self).setUp()
        self.base_image = mock.Mock()
        self.base_image.locations = []
        self.base_image.size = 1
        self.base_image.extra_properties = {}
        self.image = glance.quota.ImageProxy(self.base_image,
                                             mock.Mock(),
                                             mock.Mock(),
                                             mock.Mock())

        self.image_repo_mock = mock.Mock()
        self.image_repo_proxy = glance.quota.ImageRepoProxy(
            self.image_repo_mock,
            mock.Mock(),
            mock.Mock(),
            mock.Mock())

    def test_replace_image_location(self):
        # Replacing the list within quota succeeds.
        self.config(image_location_quota=1)
        self.image.locations = [{"url": "file:///fake.img.tar.gz",
                                 "metadata": {}
                                 }]
        self.assertEqual(1, len(self.image.locations))

    def test_replace_too_many_image_locations(self):
        # Over-quota replacement raises and leaves the original list intact.
        self.config(image_location_quota=1)
        self.image.locations = [{"url": "file:///fake.img.tar.gz",
                                 "metadata": {}}
                                ]
        locations = [
            {"url": "file:///fake1.img.tar.gz", "metadata": {}},
            {"url": "file:///fake2.img.tar.gz", "metadata": {}},
            {"url": "file:///fake3.img.tar.gz", "metadata": {}}
        ]
        exc = self.assertRaises(exception.ImageLocationLimitExceeded,
                                setattr, self.image, 'locations', locations)
        self.assertIn('Attempted: 3, Maximum: 1',
                      encodeutils.exception_to_unicode(exc))
        self.assertEqual(1, len(self.image.locations))

    def test_replace_unlimited_image_locations(self):
        # A quota of -1 disables the location limit.
        self.config(image_location_quota=-1)
        self.image.locations = [{"url": "file:///fake.img.tar.gz",
                                 "metadata": {}}
                                ]
        self.assertEqual(1, len(self.image.locations))

    def test_add_image_location(self):
        self.config(image_location_quota=1)
        location = {"url": "file:///fake.img.tar.gz", "metadata": {}}
        self.image.locations.append(location)
        self.assertEqual(1, len(self.image.locations))

    def test_add_too_many_image_locations(self):
        self.config(image_location_quota=1)
        location1 = {"url": "file:///fake1.img.tar.gz", "metadata": {}}
        self.image.locations.append(location1)
        location2 = {"url": "file:///fake2.img.tar.gz", "metadata": {}}
        exc = self.assertRaises(exception.ImageLocationLimitExceeded,
                                self.image.locations.append, location2)
        self.assertIn('Attempted: 2, Maximum: 1',
                      encodeutils.exception_to_unicode(exc))

    def test_add_unlimited_image_locations(self):
        self.config(image_location_quota=-1)
        location1 = {"url": "file:///fake1.img.tar.gz", "metadata": {}}
        self.image.locations.append(location1)
        self.assertEqual(1, len(self.image.locations))

    def test_remove_image_location_while_over_quota(self):
        # Removal must still work after the quota shrinks below usage.
        self.config(image_location_quota=1)
        location1 = {"url": "file:///fake1.img.tar.gz", "metadata": {}}
        self.image.locations.append(location1)
        self.assertEqual(1, len(self.image.locations))
        self.config(image_location_quota=0)
        self.image.locations.remove(location1)
        self.assertEqual(0, len(self.image.locations))
| {
"content_hash": "3b2f0a5f1dad5101eb30240f61545e37",
"timestamp": "",
"source": "github",
"line_count": 700,
"max_line_length": 79,
"avg_line_length": 41.488571428571426,
"alnum_prop": 0.5661800151504718,
"repo_name": "stevelle/glance",
"id": "22bd39561390c92b038587705750ca2abf7aca02",
"size": "29671",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "glance/tests/unit/test_quota.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3867110"
},
{
"name": "Shell",
"bytes": "7860"
}
],
"symlink_target": ""
} |
import datetime
import pymongo
import os
import sys
from pymongo import MongoClient
from subprocess import call
def update(args):
    """Apply every migration script newer than the last recorded run.

    args: CLI arguments after the sub-command. Supports ``-h <host>`` and
    ``-p <port>`` for the MongoDB connection (defaults: localhost:27017).
    Each successfully applied script is recorded in ``dev_db.migrations``;
    processing stops (sys.exit) on the first failing script.
    """
    print('Beginning migrations...')
    host = 'localhost'
    port = 27017
    for index, arg in enumerate(args):
        # Bounds check fixes an IndexError when a flag is the last argument.
        if arg == '-h' and index + 1 < len(args):
            host = args[index + 1]
        if arg == '-p' and index + 1 < len(args):
            port = int(args[index + 1])
    client = MongoClient(host, port)
    dev_db = client.dev_db
    # Most recent applied migration; None when none has ever been applied.
    doc = dev_db.migrations.find_one(sort=[("timestamp", -1)])
    timestamp = int(doc['timestamp']) if doc is not None else 0
    for script in get_files(timestamp):
        print('Migrating: ' + script)
        # Run the script through the mongo shell, exposing the connection
        # to the script as the variable `conn` (see new_file_setup template).
        status = call(['mongo', '{0}:{1}'.format(host, port), '--eval',
                       'var conn=new Mongo(\'{0}:{1}\')'.format(host, port),
                       script])
        if status == 0:
            dev_db.migrations.insert({'timestamp': file_time(script),
                                      'name': script})
        else:
            print('Migration: ' + script + ' failed. Stopping now!')
            sys.exit()
    print('All migrations have been ran')
# Template for a freshly generated migration script.
# {0} is the creation timestamp, {1} the human-readable title.
# Bug fix: a '\n' was missing after the `//connection ...` comment, so the
# generated `var db = ...` statement was swallowed into the JS comment line
# and never executed.
new_file_setup = '//Created at: {0}\n' \
                 '//Title: {1}\n' \
                 'print("Running migration: {1}");\n\n' \
                 'load("include.js");\n\n' \
                 '//connection stored in variable "conn"\n' \
                 'var db = conn.getDB("Example_DB_here");\n'
def create(args):
    """Create a new, timestamped migration file named after the CLI args."""
    print('Creating migration file...')
    stamp = '{:%Y%m%d%H%M%S%f}'.format(datetime.datetime.now())
    title = "_".join(word.lower() for word in args)
    filename = '{0}-{1}.js'.format(title, stamp)
    print(filename)
    try:
        contents = new_file_setup.format(stamp, " ".join(args))
        with open(filename, 'w+') as handle:
            handle.write(contents)
        print('Migration file created: {0}'.format(filename))
    except Exception as e:
        print(e)
        print('Migrations file not created')
def file_time(filename):
    """Return the timestamp portion of a ``<title>-<timestamp>.js`` name.

    Everything after the last '-' is taken, minus the 3-character '.js'
    extension. If no '-' is present the whole name (minus '.js') is used.
    """
    _, _, tail = filename.rpartition('-')
    if not tail and '-' not in filename:
        tail = filename
    return tail[:-3]
def get_files(timestamp):
    """Return migration scripts newer than *timestamp*, oldest first.

    Scans the current directory for ``<title>-<timestamp>.js`` files.
    Bug fix: the original crashed with ValueError on any file that was not
    in the hard-coded exclusion list (e.g. a stray .pyc or editor backup);
    now only numeric-timestamped ``.js`` files are considered.
    """
    skip = {os.path.basename(__file__), "include.js", 'venv', 'README.md',
            'LICENSE', 'requirements.txt'}
    candidates = []
    for name in os.listdir('.'):
        if name in skip or not name.endswith('.js'):
            continue
        stamp = file_time(name)
        # Ignore .js files that don't follow the <title>-<timestamp>.js
        # naming convention used by create().
        if not stamp.isdigit():
            continue
        if int(stamp) > timestamp:
            candidates.append(name)
    # Timestamps are fixed-width (%Y%m%d%H%M%S%f), so string sort == numeric.
    candidates.sort(key=file_time)
    return candidates
if __name__ == '__main__':
    # Map CLI sub-command names to their handlers.
    # Bug fix: removed the unused `command_names = (i.__name__ for i in
    # commands)` generator -- iterating a dict yields key strings, so
    # consuming it would have raised AttributeError.
    commands = {
        'create': create,
        'update': update
    }
    if len(sys.argv) >= 2 and sys.argv[1] in commands:
        commands[sys.argv[1]](sys.argv[2:])
        sys.exit()
    print('Incorrect usage. You must provide 1 argument, either `create` or `update`')
| {
"content_hash": "a12699b3978eeb6d5da1496abb1b49ce",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 143,
"avg_line_length": 29.92,
"alnum_prop": 0.6439393939393939,
"repo_name": "jawm/mongo-migrate",
"id": "daefad9ed79b02d1edfb49d8091c0a5592834e2d",
"size": "2265",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "146"
},
{
"name": "Python",
"bytes": "2265"
}
],
"symlink_target": ""
} |
try:
set
except NameError:
from sets import Set as set # Python 2.3 fallback
from django.db import connection, transaction, IntegrityError
from django.db.models.fields import DateField
from django.db.models.query_utils import Q, select_related_descend
from django.db.models import signals, sql
from django.utils.datastructures import SortedDict
# Used to control how many objects are worked with at once in some cases (e.g.
# when deleting objects).
CHUNK_SIZE = 100
ITER_CHUNK_SIZE = CHUNK_SIZE
# Pull into this namespace for backwards compatibility.
EmptyResultSet = sql.EmptyResultSet
class CyclicDependency(Exception):
    """Raised when a collection of objects contains a circular dependency
    (i.e. when deleting multiple objects no valid processing order exists).
    """
class CollectedObjects(object):
    """
    A container that stores keys and lists of values along with remembering the
    parent objects for all the keys.
    This is used for the database object deletion routines so that we can
    calculate the 'leaf' objects which should be deleted first.
    """
    def __init__(self):
        # model class -> SortedDict of {pk: instance}.
        self.data = {}
        # parent model -> list of child models reached via non-nullable links.
        self.children = {}
    def add(self, model, pk, obj, parent_model, nullable=False):
        """
        Adds an item to the container.
        Arguments:
        * model - the class of the object being added.
        * pk - the primary key.
        * obj - the object itself.
        * parent_model - the model of the parent object that this object was
          reached through.
        * nullable - should be True if this relation is nullable.
        Returns True if the item already existed in the structure and
        False otherwise.
        """
        d = self.data.setdefault(model, SortedDict())
        retval = pk in d
        d[pk] = obj
        # Nullable relationships can be ignored -- they are nulled out before
        # deleting, and therefore do not affect the order in which objects
        # have to be deleted.
        if parent_model is not None and not nullable:
            self.children.setdefault(parent_model, []).append(model)
        return retval
    def __contains__(self, key):
        return self.data.__contains__(key)
    def __getitem__(self, key):
        return self.data[key]
    def __nonzero__(self):
        # Python 2 truth protocol (py3 would use __bool__).
        return bool(self.data)
    def iteritems(self):
        for k in self.ordered_keys():
            yield k, self[k]
    def items(self):
        return list(self.iteritems())
    def keys(self):
        return self.ordered_keys()
    def ordered_keys(self):
        """
        Returns the models in the order that they should be dealt with (i.e.
        models with no dependencies first).
        """
        dealt_with = SortedDict()
        # Start with items that have no children
        models = self.data.keys()
        while len(dealt_with) < len(models):
            found = False
            for model in models:
                if model in dealt_with:
                    # Bug fix: skip models that are already ordered.
                    # Re-processing them kept `found` True on every pass,
                    # causing an infinite loop whenever a dependency cycle
                    # coexisted with resolvable models.
                    continue
                children = self.children.setdefault(model, [])
                if len([c for c in children if c not in dealt_with]) == 0:
                    dealt_with[model] = None
                    found = True
            if not found:
                raise CyclicDependency(
                    "There is a cyclic dependency of items to be processed.")
        return dealt_with.keys()
    def unordered_keys(self):
        """
        Fallback for the case where is a cyclic dependency but we don't care.
        """
        return self.data.keys()
class QuerySet(object):
    """
    Represents a lazy database lookup for a set of objects.
    """
    def __init__(self, model=None, query=None):
        # model: the Model class this QuerySet selects.
        # query: an existing sql.Query to reuse; a fresh one is built if None.
        self.model = model
        self.query = query or sql.Query(self.model, connection)
        # _result_cache: list of fetched results once evaluation has started.
        self._result_cache = None
        # _iter: the in-progress database iterator, if evaluation is partial.
        self._iter = None
        self._sticky_filter = False
    ########################
    # PYTHON MAGIC METHODS #
    ########################
    def __getstate__(self):
        """
        Allows the QuerySet to be pickled.
        """
        # Force the cache to be fully populated.
        len(self)
        obj_dict = self.__dict__.copy()
        # Generators can't be pickled; the cache holds everything anyway.
        obj_dict['_iter'] = None
        return obj_dict
    def __repr__(self):
        return repr(list(self))
    def __len__(self):
        # Since __len__ is called quite frequently (for example, as part of
        # list(qs), we make some effort here to be as efficient as possible
        # whilst not messing up any existing iterators against the QuerySet.
        if self._result_cache is None:
            if self._iter:
                self._result_cache = list(self._iter)
            else:
                self._result_cache = list(self.iterator())
        elif self._iter:
            # Drain the remainder of a partially-consumed iterator.
            self._result_cache.extend(list(self._iter))
        return len(self._result_cache)
    def __iter__(self):
        if self._result_cache is None:
            self._iter = self.iterator()
            self._result_cache = []
        if self._iter:
            return self._result_iter()
        # Python's list iterator is better than our version when we're just
        # iterating over the cache.
        return iter(self._result_cache)
    def _result_iter(self):
        # Serve results from the cache, topping it up from the database
        # iterator in ITER_CHUNK_SIZE batches until it is exhausted.
        pos = 0
        while 1:
            upper = len(self._result_cache)
            while pos < upper:
                yield self._result_cache[pos]
                pos = pos + 1
            if not self._iter:
                raise StopIteration
            if len(self._result_cache) <= pos:
                self._fill_cache()
    def __nonzero__(self):
        # Python 2 truth protocol. Only fetches the first row when the
        # cache is empty, rather than evaluating the whole query.
        if self._result_cache is not None:
            return bool(self._result_cache)
        try:
            iter(self).next()
        except StopIteration:
            return False
        return True
    def __getitem__(self, k):
        """
        Retrieves an item or slice from the set of results.
        """
        if not isinstance(k, (slice, int, long)):
            raise TypeError
        assert ((not isinstance(k, slice) and (k >= 0))
                or (isinstance(k, slice) and (k.start is None or k.start >= 0)
                    and (k.stop is None or k.stop >= 0))), \
                "Negative indexing is not supported."
        if self._result_cache is not None:
            if self._iter is not None:
                # The result cache has only been partially populated, so we may
                # need to fill it out a bit more.
                if isinstance(k, slice):
                    if k.stop is not None:
                        # Some people insist on passing in strings here.
                        bound = int(k.stop)
                    else:
                        bound = None
                else:
                    bound = k + 1
                if len(self._result_cache) < bound:
                    self._fill_cache(bound - len(self._result_cache))
            return self._result_cache[k]
        if isinstance(k, slice):
            qs = self._clone()
            if k.start is not None:
                start = int(k.start)
            else:
                start = None
            if k.stop is not None:
                stop = int(k.stop)
            else:
                stop = None
            qs.query.set_limits(start, stop)
            # A step forces evaluation; otherwise the slice stays lazy as
            # LIMIT/OFFSET on the underlying query.
            return k.step and list(qs)[::k.step] or qs
        try:
            qs = self._clone()
            qs.query.set_limits(k, k + 1)
            return list(qs)[0]
        except self.model.DoesNotExist, e:
            # Indexing past the end maps the ORM miss onto IndexError.
            raise IndexError, e.args
    def __and__(self, other):
        self._merge_sanity_check(other)
        if isinstance(other, EmptyQuerySet):
            # X AND nothing == nothing.
            return other._clone()
        combined = self._clone()
        combined.query.combine(other.query, sql.AND)
        return combined
    def __or__(self, other):
        self._merge_sanity_check(other)
        combined = self._clone()
        if isinstance(other, EmptyQuerySet):
            # X OR nothing == X.
            return combined
        combined.query.combine(other.query, sql.OR)
        return combined
    ####################################
    # METHODS THAT DO DATABASE QUERIES #
    ####################################
    def iterator(self):
        """
        An iterator over the results from applying this QuerySet to the
        database.
        """
        fill_cache = self.query.select_related
        if isinstance(fill_cache, dict):
            # select_related was called with explicit fields.
            requested = fill_cache
        else:
            requested = None
        max_depth = self.query.max_depth
        # Extra-select columns come first in each result row.
        extra_select = self.query.extra_select.keys()
        index_start = len(extra_select)
        for row in self.query.results_iter():
            if fill_cache:
                obj, _ = get_cached_row(self.model, row, index_start,
                        max_depth, requested=requested)
            else:
                obj = self.model(*row[index_start:])
            for i, k in enumerate(extra_select):
                setattr(obj, k, row[i])
            yield obj
    def count(self):
        """
        Performs a SELECT COUNT() and returns the number of records as an
        integer.
        If the QuerySet is already fully cached this simply returns the length
        of the cached results set to avoid multiple SELECT COUNT(*) calls.
        """
        if self._result_cache is not None and not self._iter:
            return len(self._result_cache)
        return self.query.get_count()
    def get(self, *args, **kwargs):
        """
        Performs the query and returns a single object matching the given
        keyword arguments.
        """
        clone = self.filter(*args, **kwargs)
        num = len(clone)
        if num == 1:
            return clone._result_cache[0]
        if not num:
            raise self.model.DoesNotExist("%s matching query does not exist."
                    % self.model._meta.object_name)
        raise self.model.MultipleObjectsReturned("get() returned more than one %s -- it returned %s! Lookup parameters were %s"
                % (self.model._meta.object_name, num, kwargs))
    def create(self, **kwargs):
        """
        Creates a new object with the given kwargs, saving it to the database
        and returning the created object.
        """
        obj = self.model(**kwargs)
        obj.save()
        return obj
    def get_or_create(self, **kwargs):
        """
        Looks up an object with the given kwargs, creating one if necessary.
        Returns a tuple of (object, created), where created is a boolean
        specifying whether an object was created.
        """
        assert kwargs, \
                'get_or_create() must be passed at least one keyword argument'
        defaults = kwargs.pop('defaults', {})
        try:
            return self.get(**kwargs), False
        except self.model.DoesNotExist:
            try:
                # Lookup-style kwargs (containing '__') are only for get();
                # construct the new object from plain field kwargs + defaults.
                params = dict([(k, v) for k, v in kwargs.items() if '__' not in k])
                params.update(defaults)
                obj = self.model(**params)
                sid = transaction.savepoint()
                obj.save()
                transaction.savepoint_commit(sid)
                return obj, True
            except IntegrityError, e:
                # A concurrent insert may have won the race; retry the get
                # before giving up.
                transaction.savepoint_rollback(sid)
                try:
                    return self.get(**kwargs), False
                except self.model.DoesNotExist:
                    raise e
    def latest(self, field_name=None):
        """
        Returns the latest object, according to the model's 'get_latest_by'
        option or optional given field_name.
        """
        latest_by = field_name or self.model._meta.get_latest_by
        assert bool(latest_by), "latest() requires either a field_name parameter or 'get_latest_by' in the model"
        assert self.query.can_filter(), \
                "Cannot change a query once a slice has been taken."
        obj = self._clone()
        obj.query.set_limits(high=1)
        obj.query.add_ordering('-%s' % latest_by)
        return obj.get()
    def in_bulk(self, id_list):
        """
        Returns a dictionary mapping each of the given IDs to the object with
        that ID.
        """
        assert self.query.can_filter(), \
                "Cannot use 'limit' or 'offset' with in_bulk"
        assert isinstance(id_list, (tuple, list)), \
                "in_bulk() must be provided with a list of IDs."
        if not id_list:
            return {}
        qs = self._clone()
        qs.query.add_filter(('pk__in', id_list))
        return dict([(obj._get_pk_val(), obj) for obj in qs.iterator()])
    def delete(self):
        """
        Deletes the records in the current QuerySet.
        """
        assert self.query.can_filter(), \
                "Cannot use 'limit' or 'offset' with delete."
        del_query = self._clone()
        # Disable non-supported fields.
        del_query.query.select_related = False
        del_query.query.clear_ordering()
        # Delete objects in chunks to prevent the list of related objects from
        # becoming too long.
        while 1:
            # Collect all the objects to be deleted in this chunk, and all the
            # objects that are related to the objects that are to be deleted.
            seen_objs = CollectedObjects()
            for object in del_query[:CHUNK_SIZE]:
                object._collect_sub_objects(seen_objs)
            if not seen_objs:
                break
            delete_objects(seen_objs)
        # Clear the result cache, in case this QuerySet gets reused.
        self._result_cache = None
    delete.alters_data = True
    def update(self, **kwargs):
        """
        Updates all elements in the current QuerySet, setting all the given
        fields to the appropriate values.
        """
        assert self.query.can_filter(), \
                "Cannot update a query once a slice has been taken."
        query = self.query.clone(sql.UpdateQuery)
        query.add_update_values(kwargs)
        rows = query.execute_sql(None)
        transaction.commit_unless_managed()
        self._result_cache = None
        return rows
    update.alters_data = True
    def _update(self, values):
        """
        A version of update that accepts field objects instead of field names.
        Used primarily for model saving and not intended for use by general
        code (it requires too much poking around at model internals to be
        useful at that level).
        """
        assert self.query.can_filter(), \
                "Cannot update a query once a slice has been taken."
        query = self.query.clone(sql.UpdateQuery)
        query.add_update_fields(values)
        self._result_cache = None
        return query.execute_sql(None)
    _update.alters_data = True
    ##################################################
    # PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS #
    ##################################################
    def values(self, *fields):
        return self._clone(klass=ValuesQuerySet, setup=True, _fields=fields)
    def values_list(self, *fields, **kwargs):
        flat = kwargs.pop('flat', False)
        if kwargs:
            raise TypeError('Unexpected keyword arguments to values_list: %s'
                    % (kwargs.keys(),))
        if flat and len(fields) > 1:
            raise TypeError("'flat' is not valid when values_list is called with more than one field.")
        return self._clone(klass=ValuesListQuerySet, setup=True, flat=flat,
                _fields=fields)
    def dates(self, field_name, kind, order='ASC'):
        """
        Returns a list of datetime objects representing all available dates for
        the given field_name, scoped to 'kind'.
        """
        assert kind in ("month", "year", "day"), \
                "'kind' must be one of 'year', 'month' or 'day'."
        assert order in ('ASC', 'DESC'), \
                "'order' must be either 'ASC' or 'DESC'."
        return self._clone(klass=DateQuerySet, setup=True,
                _field_name=field_name, _kind=kind, _order=order)
    def none(self):
        """
        Returns an empty QuerySet.
        """
        return self._clone(klass=EmptyQuerySet)
    ##################################################################
    # PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
    ##################################################################
    def all(self):
        """
        Returns a new QuerySet that is a copy of the current one. This allows a
        QuerySet to proxy for a model manager in some cases.
        """
        return self._clone()
    def filter(self, *args, **kwargs):
        """
        Returns a new QuerySet instance with the args ANDed to the existing
        set.
        """
        return self._filter_or_exclude(False, *args, **kwargs)
    def exclude(self, *args, **kwargs):
        """
        Returns a new QuerySet instance with NOT (args) ANDed to the existing
        set.
        """
        return self._filter_or_exclude(True, *args, **kwargs)
    def _filter_or_exclude(self, negate, *args, **kwargs):
        if args or kwargs:
            assert self.query.can_filter(), \
                    "Cannot filter a query once a slice has been taken."
        clone = self._clone()
        if negate:
            clone.query.add_q(~Q(*args, **kwargs))
        else:
            clone.query.add_q(Q(*args, **kwargs))
        return clone
    def complex_filter(self, filter_obj):
        """
        Returns a new QuerySet instance with filter_obj added to the filters.
        filter_obj can be a Q object (or anything with an add_to_query()
        method) or a dictionary of keyword lookup arguments.
        This exists to support framework features such as 'limit_choices_to',
        and usually it will be more natural to use other methods.
        """
        if isinstance(filter_obj, Q) or hasattr(filter_obj, 'add_to_query'):
            clone = self._clone()
            clone.query.add_q(filter_obj)
            return clone
        else:
            return self._filter_or_exclude(None, **filter_obj)
    def select_related(self, *fields, **kwargs):
        """
        Returns a new QuerySet instance that will select related objects.
        If fields are specified, they must be ForeignKey fields and only those
        related objects are included in the selection.
        """
        depth = kwargs.pop('depth', 0)
        if kwargs:
            raise TypeError('Unexpected keyword arguments to select_related: %s'
                    % (kwargs.keys(),))
        obj = self._clone()
        if fields:
            if depth:
                raise TypeError('Cannot pass both "depth" and fields to select_related()')
            obj.query.add_select_related(fields)
        else:
            obj.query.select_related = True
        if depth:
            obj.query.max_depth = depth
        return obj
    def dup_select_related(self, other):
        """
        Copies the related selection status from the QuerySet 'other' to the
        current QuerySet.
        """
        self.query.select_related = other.query.select_related
    def order_by(self, *field_names):
        """
        Returns a new QuerySet instance with the ordering changed.
        """
        assert self.query.can_filter(), \
                "Cannot reorder a query once a slice has been taken."
        obj = self._clone()
        obj.query.clear_ordering()
        obj.query.add_ordering(*field_names)
        return obj
    def distinct(self, true_or_false=True):
        """
        Returns a new QuerySet instance that will select only distinct results.
        """
        obj = self._clone()
        obj.query.distinct = true_or_false
        return obj
    def extra(self, select=None, where=None, params=None, tables=None,
            order_by=None, select_params=None):
        """
        Adds extra SQL fragments to the query.
        """
        assert self.query.can_filter(), \
                "Cannot change a query once a slice has been taken"
        clone = self._clone()
        clone.query.add_extra(select, select_params, where, params, tables, order_by)
        return clone
    def reverse(self):
        """
        Reverses the ordering of the QuerySet.
        """
        clone = self._clone()
        clone.query.standard_ordering = not clone.query.standard_ordering
        return clone
    ###################
    # PRIVATE METHODS #
    ###################
    def _clone(self, klass=None, setup=False, **kwargs):
        # Returns a copy of this QuerySet (optionally as a different
        # subclass), with extra attributes applied from kwargs.
        if klass is None:
            klass = self.__class__
        query = self.query.clone()
        if self._sticky_filter:
            query.filter_is_sticky = True
        c = klass(model=self.model, query=query)
        c.__dict__.update(kwargs)
        if setup and hasattr(c, '_setup_query'):
            c._setup_query()
        return c
    def _fill_cache(self, num=None):
        """
        Fills the result cache with 'num' more entries (or until the results
        iterator is exhausted).
        """
        if self._iter:
            try:
                for i in range(num or ITER_CHUNK_SIZE):
                    self._result_cache.append(self._iter.next())
            except StopIteration:
                self._iter = None
    def _next_is_sticky(self):
        """
        Indicates that the next filter call and the one following that should
        be treated as a single filter. This is only important when it comes to
        determining when to reuse tables for many-to-many filters. Required so
        that we can filter naturally on the results of related managers.
        This doesn't return a clone of the current QuerySet (it returns
        "self"). The method is only used internally and should be immediately
        followed by a filter() that does create a clone.
        """
        self._sticky_filter = True
        return self
    def _merge_sanity_check(self, other):
        """
        Checks that we are merging two comparable QuerySet classes. By default
        this does nothing, but see the ValuesQuerySet for an example of where
        it's useful.
        """
        pass
class ValuesQuerySet(QuerySet):
    """A QuerySet that yields dictionaries (field name -> value) instead of
    model instances; the implementation behind QuerySet.values()."""
    def __init__(self, *args, **kwargs):
        super(ValuesQuerySet, self).__init__(*args, **kwargs)
        # select_related isn't supported in values(). (FIXME -#3358)
        self.query.select_related = False
        # QuerySet.clone() will also set up the _fields attribute with the
        # names of the model fields to select.
    def iterator(self):
        if (not self.extra_names and
            len(self.field_names) != len(self.model._meta.fields)):
            self.query.trim_extra_select(self.extra_names)
        # Extra-select columns always precede the model fields in each row.
        names = self.query.extra_select.keys() + self.field_names
        for row in self.query.results_iter():
            yield dict(zip(names, row))
    def _setup_query(self):
        """
        Constructs the field_names list that the values query will be
        retrieving.
        Called by the _clone() method after initializing the rest of the
        instance.
        """
        self.extra_names = []
        if self._fields:
            if not self.query.extra_select:
                field_names = list(self._fields)
            else:
                # Split requested names into extra(select=...) aliases and
                # real model fields.
                field_names = []
                for f in self._fields:
                    if self.query.extra_select.has_key(f):
                        self.extra_names.append(f)
                    else:
                        field_names.append(f)
        else:
            # Default to all fields.
            field_names = [f.attname for f in self.model._meta.fields]
        self.query.add_fields(field_names, False)
        self.query.default_cols = False
        self.field_names = field_names
    def _clone(self, klass=None, setup=False, **kwargs):
        """
        Cloning a ValuesQuerySet preserves the current fields.
        """
        c = super(ValuesQuerySet, self)._clone(klass, **kwargs)
        c._fields = self._fields[:]
        c.field_names = self.field_names
        c.extra_names = self.extra_names
        if setup and hasattr(c, '_setup_query'):
            c._setup_query()
        return c
    def _merge_sanity_check(self, other):
        super(ValuesQuerySet, self)._merge_sanity_check(other)
        if (set(self.extra_names) != set(other.extra_names) or
            set(self.field_names) != set(other.field_names)):
            raise TypeError("Merging '%s' classes must involve the same values in each case."
                    % self.__class__.__name__)
class ValuesListQuerySet(ValuesQuerySet):
    """A ValuesQuerySet that yields tuples (or bare values when flat=True);
    the implementation behind QuerySet.values_list()."""
    def iterator(self):
        self.query.trim_extra_select(self.extra_names)
        if self.flat and len(self._fields) == 1:
            # flat=True with a single field yields the raw values.
            for row in self.query.results_iter():
                yield row[0]
        elif not self.query.extra_select:
            for row in self.query.results_iter():
                yield tuple(row)
        else:
            # When extra(select=...) is involved, the extra cols come are
            # always at the start of the row, so we need to reorder the fields
            # to match the order in self._fields.
            names = self.query.extra_select.keys() + self.field_names
            for row in self.query.results_iter():
                data = dict(zip(names, row))
                yield tuple([data[f] for f in self._fields])
    def _clone(self, *args, **kwargs):
        clone = super(ValuesListQuerySet, self)._clone(*args, **kwargs)
        # `flat` is set via kwargs on the first clone (see values_list());
        # propagate it to subsequent clones.
        clone.flat = self.flat
        return clone
class DateQuerySet(QuerySet):
    """A QuerySet that returns datetime objects for a date field rather than
    model instances; the implementation behind QuerySet.dates()."""
    def iterator(self):
        return self.query.results_iter()
    def _setup_query(self):
        """
        Sets up any special features of the query attribute.
        Called by the _clone() method after initializing the rest of the
        instance.
        """
        self.query = self.query.clone(klass=sql.DateQuery, setup=True)
        self.query.select = []
        field = self.model._meta.get_field(self._field_name, many_to_many=False)
        # Bug fix: the original interpolated the undefined name `field_name`,
        # so a non-DateField raised NameError instead of this assertion.
        assert isinstance(field, DateField), "%r isn't a DateField." \
                % self._field_name
        self.query.add_date_select(field, self._kind, self._order)
        if field.null:
            self.query.add_filter(('%s__isnull' % field.name, False))
    def _clone(self, klass=None, setup=False, **kwargs):
        c = super(DateQuerySet, self)._clone(klass, False, **kwargs)
        c._field_name = self._field_name
        c._kind = self._kind
        # Bug fix: _order was not propagated to clones, so a cloned
        # DateQuerySet would fail in _setup_query (missing attribute).
        if hasattr(self, '_order'):
            c._order = self._order
        if setup and hasattr(c, '_setup_query'):
            c._setup_query()
        return c
class EmptyQuerySet(QuerySet):
    """A QuerySet that always evaluates to no results; returned by
    QuerySet.none()."""
    def __init__(self, model=None, query=None):
        super(EmptyQuerySet, self).__init__(model, query)
        # Pre-populate the cache so no database query is ever issued.
        self._result_cache = []
    def __and__(self, other):
        # nothing AND X == nothing.
        return self._clone()
    def __or__(self, other):
        # nothing OR X == X.
        return other._clone()
    def count(self):
        return 0
    def delete(self):
        # Nothing to delete.
        pass
    def _clone(self, klass=None, setup=False, **kwargs):
        c = super(EmptyQuerySet, self)._clone(klass, **kwargs)
        c._result_cache = []
        return c
    def iterator(self):
        # This slightly odd construction is because we need an empty generator
        # (it raises StopIteration immediately).
        yield iter([]).next()
def get_cached_row(klass, row, index_start, max_depth=0, cur_depth=0,
        requested=None):
    """
    Helper function that recursively returns an object with the specified
    related attributes already populated.

    klass: model class to instantiate from the row slice.
    row: full result row; this model's columns begin at index_start.
    max_depth/cur_depth: recursion limits for depth-based select_related.
    requested: dict of explicitly requested related fields, or None.
    Returns (instance_or_None, index_past_this_model's_columns) -- or a bare
    None when the depth limit stops recursion.
    """
    if max_depth and requested is None and cur_depth > max_depth:
        # We've recursed deeply enough; stop now.
        return None
    restricted = requested is not None
    index_end = index_start + len(klass._meta.fields)
    fields = row[index_start:index_end]
    if not [x for x in fields if x is not None]:
        # If we only have a list of Nones, there was not related object.
        return None, index_end
    obj = klass(*fields)
    for f in klass._meta.fields:
        if not select_related_descend(f, restricted, requested):
            continue
        if restricted:
            next = requested[f.name]
        else:
            next = None
        # Recurse into the related model's columns, which follow ours.
        cached_row = get_cached_row(f.rel.to, row, index_end, max_depth,
                cur_depth+1, next)
        if cached_row:
            rel_obj, index_end = cached_row
            setattr(obj, f.get_cache_name(), rel_obj)
    return obj, index_end
def delete_objects(seen_objs):
    """
    Iterate through a list of seen classes, and remove any instances that are
    referred to.

    Two passes: first send pre_delete signals, delete related rows and null
    out nullable references; then delete the rows themselves, null the
    instances' fields/pks in memory and send post_delete signals.
    """
    try:
        ordered_classes = seen_objs.keys()
    except CyclicDependency:
        # If there is a cyclic dependency, we cannot in general delete the
        # objects. However, if an appropriate transaction is set up, or if the
        # database is lax enough, it will succeed. So for now, we go ahead and
        # try anyway.
        ordered_classes = seen_objs.unordered_keys()
    obj_pairs = {}
    for cls in ordered_classes:
        items = seen_objs[cls].items()
        items.sort()
        obj_pairs[cls] = items
        # Pre-notify all instances to be deleted.
        for pk_val, instance in items:
            signals.pre_delete.send(sender=cls, instance=instance)
        pk_list = [pk for pk,instance in items]
        del_query = sql.DeleteQuery(cls, connection)
        del_query.delete_batch_related(pk_list)
        update_query = sql.UpdateQuery(cls, connection)
        for field, model in cls._meta.get_fields_with_model():
            # Null out nullable FKs pointing at objects in this deletion set,
            # so row deletion order doesn't violate constraints.
            if (field.rel and field.null and field.rel.to in seen_objs and
                    filter(lambda f: f.column == field.column,
                    field.rel.to._meta.fields)):
                if model:
                    sql.UpdateQuery(model, connection).clear_related(field,
                            pk_list)
                else:
                    update_query.clear_related(field, pk_list)
    # Now delete the actual data.
    for cls in ordered_classes:
        items = obj_pairs[cls]
        # Delete in reverse pk order within each model.
        items.reverse()
        pk_list = [pk for pk,instance in items]
        del_query = sql.DeleteQuery(cls, connection)
        del_query.delete_batch(pk_list)
        # Last cleanup; set NULLs where there once was a reference to the
        # object, NULL the primary key of the found objects, and perform
        # post-notification.
        for pk_val, instance in items:
            for field in cls._meta.fields:
                if field.rel and field.null and field.rel.to in seen_objs:
                    setattr(instance, field.attname, None)
            signals.post_delete.send(sender=cls, instance=instance)
            setattr(instance, cls._meta.pk.attname, None)
    transaction.commit_unless_managed()
def insert_query(model, values, return_id=False, raw_values=False):
    """
    Inserts a new record for the given model. This provides an interface to
    the InsertQuery class and is how Model.save() is implemented. It is not
    part of the public API.
    """
    query = sql.InsertQuery(model, connection)
    query.insert_values(values, raw_values)
    # When return_id is True the new primary key value is returned.
    return query.execute_sql(return_id)
| {
"content_hash": "11420ec277ec0673b75d63324e04ecd5",
"timestamp": "",
"source": "github",
"line_count": 884,
"max_line_length": 127,
"avg_line_length": 35.3235294117647,
"alnum_prop": 0.5668993787228591,
"repo_name": "Shrews/PyGerrit",
"id": "2ff1c26344f0b279e2f9bba8a0026acb9cbfd11d",
"size": "31226",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webapp/django/db/models/query.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "157968"
},
{
"name": "JavaScript",
"bytes": "181665"
},
{
"name": "Python",
"bytes": "3224616"
},
{
"name": "Shell",
"bytes": "6903"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
class FirstParameterGroup(Model):
    """Parameter group shared by several parameter-grouping operations,
    e.g. parameterGrouping_postMultiParamGroups and
    parameterGrouping_postSharedParameterGroupObject.

    :param header_one: optional header value.
    :type header_one: str
    :param query_one: query parameter with a default of 30.
    :type query_one: int
    """
    def __init__(self, header_one=None, query_one=30):
        # NOTE(review): mirrors the generated model -- Model.__init__ is
        # deliberately not called, matching the original code.
        self.query_one = query_one
        self.header_one = header_one
| {
"content_hash": "42512bf922a74d73c36dadaefaaf991d",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 71,
"avg_line_length": 29.166666666666668,
"alnum_prop": 0.6952380952380952,
"repo_name": "csmengwan/autorest",
"id": "172a4349ff36cb3683804a5ac5075f95dbbd1193",
"size": "999",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "AutoRest/Generators/Python/Azure.Python.Tests/Expected/AcceptanceTests/AzureParameterGrouping/autorestparametergroupingtestservice/models/first_parameter_group.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "13761"
},
{
"name": "C#",
"bytes": "10517556"
},
{
"name": "CSS",
"bytes": "110"
},
{
"name": "HTML",
"bytes": "274"
},
{
"name": "Java",
"bytes": "4684473"
},
{
"name": "JavaScript",
"bytes": "4658203"
},
{
"name": "PowerShell",
"bytes": "5703"
},
{
"name": "Python",
"bytes": "2237671"
},
{
"name": "Ruby",
"bytes": "232025"
},
{
"name": "Shell",
"bytes": "142"
},
{
"name": "TypeScript",
"bytes": "179577"
}
],
"symlink_target": ""
} |
import collections
from supriya import CalculationRate
from supriya.ugens.UGen import UGen
class Wrap(UGen):
    """
    Wraps a signal outside given thresholds.
    ::
        >>> source = supriya.ugens.SinOsc.ar()
        >>> wrap = supriya.ugens.Wrap.ar(
        ...     maximum=0.9,
        ...     minimum=0.1,
        ...     source=source,
        ...     )
        >>> wrap
        Wrap.ar()
    """
    ### CLASS VARIABLES ###
    # Documentation section this UGen is grouped under.
    __documentation_section__ = "Trigger Utility UGens"
    # Ordered inputs; the values are presumably per-input defaults
    # (source=0, minimum=0, maximum=1) -- confirm against the UGen base class.
    _ordered_input_names = collections.OrderedDict(
        [("source", 0), ("minimum", 0), ("maximum", 1)]
    )
    # Rates at which this UGen may be instantiated (.ar / .kr / .ir).
    _valid_calculation_rates = (
        CalculationRate.AUDIO,
        CalculationRate.CONTROL,
        CalculationRate.SCALAR,
    )
| {
"content_hash": "2684a1d3bc7a26f83c2bc88ee14dba68",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 55,
"avg_line_length": 20.86111111111111,
"alnum_prop": 0.5499334221038615,
"repo_name": "Pulgama/supriya",
"id": "0d744e20b5cd6965c297ea8079aceb68964192b8",
"size": "751",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "supriya/ugens/Wrap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "6712"
},
{
"name": "CSS",
"bytes": "446"
},
{
"name": "HTML",
"bytes": "1083"
},
{
"name": "JavaScript",
"bytes": "6163"
},
{
"name": "Makefile",
"bytes": "6775"
},
{
"name": "Python",
"bytes": "2790612"
},
{
"name": "Shell",
"bytes": "569"
}
],
"symlink_target": ""
} |
"""
make-release
~~~~~~~~~~~~
Helper script that performs a release. Does pretty much everything
automatically for us.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
import os
import re
from datetime import datetime, date
from subprocess import Popen, PIPE
_date_clean_re = re.compile(r'(\d+)(st|nd|rd|th)')
def installed_libraries():
    """Return the raw output of ``pip freeze`` (one ``name==version`` per line)."""
    return Popen(['pip', 'freeze'], stdout=PIPE).communicate()[0]
def has_library_installed(library):
    """Return True when *library* appears pinned in ``pip freeze`` output.

    NOTE(review): on Python 3 ``installed_libraries()`` returns bytes, so
    this ``str in bytes`` test would raise TypeError — this script is
    Python-2 only (see the ``print >>`` statements further down).
    """
    return library + '==' in installed_libraries()
def parse_changelog():
    """Extract (version, release datetime) of the newest entry in CHANGES.

    Scans for the first ``Version X.Y`` heading, validates the hyphen
    underline beneath it, then looks for a ``Released <date>`` line.
    Returns None implicitly when no version heading is found.
    """
    with open('CHANGES') as f:
        lineiter = iter(f)
        for line in lineiter:
            match = re.search('^Version\s+(.*)', line.strip())
            if match is None:
                continue
            version = match.group(1).strip()
            # Python-2-only iterator protocol: ``.next()`` (py3 is ``next()``).
            # The underline must be exactly as long as the heading.
            if lineiter.next().count('-') != len(line.strip()):
                fail('Invalid hyphen count below version line: %s', line.strip())
            # Skip blank lines until the "Released ..." line.
            while 1:
                released = lineiter.next().strip()
                if released:
                    break
            match = re.search(r'Released (\w+\s+\d+\w+\s+\d+)', released)
            if match is None:
                fail('Could not find release date in version %s' % version)
            datestr = parse_date(match.group(1).strip())
            # Only the first (newest) changelog entry is parsed.
            return version, datestr
def bump_version(version):
    """Return *version* with its last numeric component incremented.

    E.g. ``'0.3.2'`` -> ``'0.3.3'``.  Calls :func:`fail` (which exits the
    process) when any dotted component is not an integer.
    """
    try:
        # list() so the result is subscriptable on Python 3 too, where
        # map() returns a lazy iterator (the original Python-2 code
        # indexed the map result directly, which breaks on 3.x).
        parts = list(map(int, version.split('.')))
    except ValueError:
        fail('Current version is not numeric')
    parts[-1] += 1
    return '.'.join(map(str, parts))
def parse_date(string):
    """Parse a human date such as ``'July 27th 2015'`` into a datetime."""
    # Drop ordinal suffixes (1st, 2nd, 3rd, 27th, ...) so that strptime's
    # plain %d day field can match.
    cleaned = re.sub(r'(\d+)(st|nd|rd|th)', r'\1', string)
    return datetime.strptime(cleaned, '%B %d %Y')
def set_filename_version(filename, version_number, pattern):
    """Rewrite the ``<pattern> = '<value>'`` assignment in *filename*.

    Substitutes *version_number* for the currently assigned value and
    calls :func:`fail` (which exits) when no matching assignment exists.
    """
    changed = []

    def inject_version(match):
        # Record that a substitution actually happened so we can detect
        # a missing pattern afterwards.
        before, old, after = match.groups()
        changed.append(True)
        return before + version_number + after

    with open(filename) as f:
        # Inline flags moved to the FRONT of the pattern: ``(?sm)`` at any
        # other position is deprecated since Python 3.6 and a re.error
        # since 3.11.
        contents = re.sub(r"(?sm)^(\s*%s\s*=\s*')(.+?)(')" % pattern,
                          inject_version, f.read())
    if not changed:
        fail('Could not find %s in %s', pattern, filename)
    with open(filename, 'w') as f:
        f.write(contents)
def set_init_version(version):
    """Write *version* into ``flask_rq.py``'s ``__version__`` assignment."""
    info('Setting flask_rq.py version to %s', version)
    set_filename_version('flask_rq.py', version, '__version__')


def set_setup_version(version):
    """Write *version* into ``setup.py``'s ``version`` assignment."""
    info('Setting setup.py version to %s', version)
    set_filename_version('setup.py', version, 'version')


def set_docs_version(version):
    """Write *version* into ``docs/conf.py``'s ``version`` assignment."""
    info('Setting docs/conf.py version to %s', version)
    set_filename_version('docs/conf.py', version, 'version')
def build_and_upload():
    """Build the sdist and Sphinx docs, then upload both to PyPI (blocks until done)."""
    Popen([sys.executable, 'setup.py', 'sdist', 'build_sphinx', 'upload', 'upload_sphinx']).wait()
def fail(message, *args):
    """Print ``Error: message % args`` to stderr and abort with status 1."""
    # sys.stderr.write() instead of the Python-2-only ``print >> sys.stderr``
    # statement, which is a SyntaxError on Python 3; output is identical.
    sys.stderr.write('Error: ' + message % args + '\n')
    sys.exit(1)
def info(message, *args):
    """Print a progress message (``message % args``) to stderr."""
    # sys.stderr.write() instead of the Python-2-only ``print >>`` statement.
    sys.stderr.write(message % args + '\n')
def get_git_tags():
    """Return the set of existing git tag names in the current repo."""
    return set(Popen(['git', 'tag'], stdout=PIPE).communicate()[0].splitlines())


def git_is_clean():
    """Return True when ``git diff --quiet`` reports no unstaged changes."""
    return Popen(['git', 'diff', '--quiet']).wait() == 0


def make_git_commit(message, *args):
    """Commit all tracked changes, using ``message % args`` as the message."""
    message = message % args
    Popen(['git', 'commit', '-am', message]).wait()


def make_git_tag(tag):
    """Create annotated tag *tag* and push all tags to the default remote."""
    info('Tagging "%s"', tag)
    Popen(['git', 'tag', '-a', tag, '-m', '%s release' % tag]).wait()
    Popen(['git', 'push', '--tags']).wait()
def update_version(version):
    """Propagate *version* to the module, setup.py and docs config files."""
    for f in [set_init_version, set_setup_version, set_docs_version]:
        f(version)


def get_branches():
    """Return the raw lines of ``git branch`` output as a set."""
    return set(Popen(['git', 'branch'], stdout=PIPE).communicate()[0].splitlines())


def branch_is(branch):
    """Return True when *branch* is the currently checked-out branch.

    Relies on git prefixing the current branch with ``* `` in its listing.
    """
    return '* ' + branch in get_branches()
def main():
    """Run the whole release: sanity checks, version bump, tag and upload."""
    # The script lives in scripts/, the project root is one level up.
    os.chdir(os.path.join(os.path.dirname(__file__), '..'))
    rv = parse_changelog()
    if rv is None:
        fail('Could not parse changelog')
    version, release_date = rv
    tags = get_git_tags()
    # Both packages are required by the build_sphinx / upload_sphinx
    # steps invoked later from build_and_upload().
    for lib in ['Sphinx', 'Sphinx-PyPI-upload']:
        if not has_library_installed(lib):
            fail('Build requires that %s be installed', lib)
    if version in tags:
        fail('Version "%s" is already tagged', version)
    if release_date.date() != date.today():
        fail('Release date is not today')
    if not branch_is('master'):
        fail('You are not on the master branch')
    if not git_is_clean():
        fail('You have uncommitted changes in git')
    info('Releasing %s (release date %s)',
         version, release_date.strftime('%d/%m/%Y'))
    update_version(version)
    make_git_commit('Bump version number to %s', version)
    make_git_tag(version)
    build_and_upload()


if __name__ == '__main__':
    main()
| {
"content_hash": "2407c029533a856dd1e57217f0964c22",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 98,
"avg_line_length": 25.331550802139038,
"alnum_prop": 0.5913025121384843,
"repo_name": "xen/flask-rq",
"id": "54fe0a57ea87d4399d642db7ef23f4648e610993",
"size": "4783",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/release.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24171"
}
],
"symlink_target": ""
} |
"""
This is the module that defines the invoke namespace.
It is imported by :func:`atelier.invlib.setup_from_tasks` which passes
it to :func:`invoke.Collection.from_module`.
"""
import os
import sys
import glob
import time
import datetime
from datetime import timedelta
import shutil
from atelier.utils import i2d
from babel.dates import format_date
import rstgen
from atelier.projects import load_projects
from pathlib import Path
try:
from invoke import ctask as task # , tasks
# before version 0.13 (see http://www.pyinvoke.org/changelog.html)
except ImportError:
from invoke import task # , tasks
from invoke.exceptions import Exit
from invoke import run
import atelier
from atelier.utils import confirm, cd
from .utils import must_confirm
LASTREL_INFO = "Last PyPI release was %(filename)s \
(%(upload_time)s, %(downloads)d downloads)."
RELEASE_CONFIRM = """
This is going to officially release %(name)s %(version)s to PyPI.
It will fail if version %(version)s of %(name)s has previously been released.
Your `docs/changes.rst` should have a section about this version.
Your working directory should be clean (otherwise answer 'n' and run `inv ci`).
Are you sure?"""
def local(*args, **kwargs):  # probably no longer used
    """Call :func:`invoke.run` with `pty=True
    <http://www.pyinvoke.org/faq.html#why-is-my-command-behaving-differently-under-invoke-versus-being-run-by-hand>`_.
    This is useful e.g. to get colors in a terminal.
    """
    # Force a pseudo-terminal; any pty value the caller passed is overridden.
    kwargs.update(pty=True)
    # kwargs.update(encoding='utf-8')
    run(*args, **kwargs)
def get_current_date(today=None):
    """Return *today* coerced to a date via :func:`atelier.utils.i2d`,
    or the actual current date when *today* is None.
    """
    if today is not None:
        return i2d(today)
    return datetime.date.today()
def rmtree_after_confirm(p, batch=False):
if not p.exists():
return
if batch or confirm(
"OK to remove %s and everything under it?" % p.absolute()):
shutil.rmtree(p)
def cleanup_pyc(p, batch=False): # no longer used
"""Thanks to oddthinking on http://stackoverflow.com/questions/2528283
"""
for root, dirs, files in os.walk(p):
pyc_files = [filename for filename in files if filename.endswith(".pyc")]
py_files = set([filename for filename in files if filename.endswith(".py")])
excess_pyc_files = [pyc_filename for pyc_filename in pyc_files if pyc_filename[:-1] not in py_files]
for excess_pyc_file in excess_pyc_files:
full_path = os.path.join(root, excess_pyc_file)
if batch or confirm("Remove excess file %s:" % full_path):
os.remove(full_path)
def sphinx_clean(ctx, batch=False):
"""Delete all generated Sphinx files.
"""
for b in atelier.current_project.get_doc_trees():
if b.src_path:
rmtree_after_confirm(b.src_path / '.doctrees', batch)
rmtree_after_confirm(b.out_path, batch)
def py_clean(ctx, batch=False):
"""
Delete :xfile:`.pyc` files, :xfile:`.eggs` and :xfile:`__cache__`
directories under the project's root direcotory.
"""
paths = []
for root, dirs, files in os.walk(ctx.root_dir):
p = Path(root) / '__pycache__'
if p.exists():
paths.append(p)
if len(paths):
if batch or confirm(
"Remove {0} __pycache__ directories".format(len(paths))):
for p in paths:
rmtree_after_confirm(p, True)
for root, dirs, files in os.walk(ctx.root_dir):
for fn in files:
if fn.endswith(".pyc"):
full_path = os.path.join(root, fn)
if batch or confirm("Remove file %s:" % full_path):
os.remove(full_path)
# cleanup_pyc(ctx.root_dir, batch)
# if atelier.current_project.main_package is not None:
# try:
# p = Path(atelier.current_project.main_package.__file__).parent
# cleanup_pyc(atelier.current_project.root_dir, batch)
# except AttributeError:
# # happened 20170310 in namespace package:
# # $ pywhich commondata
# # Traceback (most recent call last):
# # File "<string>", line 1, in <module>
# # AttributeError: 'module' object has no attribute '__file__'
# pass
p = ctx.root_dir / '.eggs'
if p.exists():
rmtree_after_confirm(p, batch)
files = []
for pat in ctx.cleanable_files:
for p in glob.glob(os.path.join(ctx.root_dir, pat)):
files.append(p)
if len(files):
if batch or confirm(
"Remove {0} cleanable files".format(len(files))):
for p in files:
os.remove(p)
class RstFile(object):
    """A blog-entry source file together with its published URL.

    ``path`` is the ``.rst`` file below *local_root*; ``url`` is the
    corresponding ``.html`` page below *url_root*.
    """

    def __init__(self, local_root, url_root, parts):
        rel = "/".join(parts)
        self.path = local_root / (rel + '.rst')
        self.url = url_root + "/" + rel + '.html'
class MissingConfig(Exception):
    """Raised when a required setting is absent from the project's ``tasks.py``."""

    def __init__(self, name):
        super(MissingConfig, self).__init__(
            "Must set `config.{0}` in `tasks.py`!".format(name))
@task(name='test')
def run_tests(ctx):
"""Run the test suite of this project."""
# assert os.environ['COVERAGE_PROCESS_START']
cmd = ctx.test_command
if cmd:
print("Run test command {0} :".format(cmd))
with ctx.cd(ctx.root_dir):
ctx.run(cmd, pty=True)
# if (ctx.root_dir / 'tox.ini').exists():
# ctx.run("REQ_VERSION=local tox", pty=True)
# elif (ctx.root_dir / 'setup.py').exists():
# ctx.run(sys.executable + ' setup.py -q test', pty=True)
@task(name='readme')
def write_readme(ctx):
"""Generate or update `README.txt` or `README.rst` file from `SETUP_INFO`. """
if not atelier.current_project.main_package:
return
atelier.current_project.load_info()
info = atelier.current_project.SETUP_INFO
if not info.get('long_description'):
return
# if len(ctx.doc_trees) == 0:
# # when there are no docs, then the README file is manually maintained
# return
if ctx.revision_control_system == 'git':
readme = ctx.root_dir / 'README.rst'
else:
readme = ctx.root_dir / 'README.txt'
# for k in ('name', 'description', 'long_description', 'url'):
# if k not in env.current_project.SETUP_INFO:
# msg = "SETUP_INFO for {0} has no key '{1}'"
# raise Exception(msg.format(env.current_project, k))
title = rstgen.header(1, "The ``{}`` package".format(info['name']))
txt = """\
{title}
{long_description}
""".format(title=title, **info)
if readme.exists() and readme.read_text() == txt:
return
must_confirm("Overwrite %s" % readme.absolute())
readme.write_text(txt)
docs_index = ctx.root_dir / 'docs' / 'index.rst'
if docs_index.exists():
mtime = time.time()
os.utime(docs_index, (mtime, mtime))
@task(write_readme, name='bd')
def build_docs(ctx, only=None):
"""Build docs. Build all Sphinx HTML doctrees for this project. """
# print("Build docs for {}".format(atelier.current_project))
cmd = ctx.build_docs_command
if cmd:
print("-" * 80)
print("Run build_docs_command {0} :".format(cmd))
ctx.run(cmd, pty=True)
else:
for tree in atelier.current_project.get_doc_trees():
if tree.src_path:
if only is None or tree.rel_path == only:
tree.build_docs(ctx)
@task(name='clean')
def clean(ctx, batch=False):
"""Remove temporary and generated files."""
# def clean(ctx, *cmdline_args):
sphinx_clean(ctx, batch)
py_clean(ctx, batch)
# clean_demo_caches()
@task(name='sdist')
def setup_sdist(ctx):
"Create a source distribution."
atelier.current_project.load_info()
info = atelier.current_project.SETUP_INFO
if not info.get('version'):
return
if not info.get('name'):
return
show_pypi_status(ctx, False)
dist_dir = ctx.sdist_dir.format(prj=info.get('name'))
args = [sys.executable, "setup.py"]
args += ["sdist", "--formats=gztar"]
args += ["--dist-dir", dist_dir]
ctx.run(' '.join(args), pty=True)
@task(name='release', help={
'branch': "Create a version branch."})
def pypi_release(ctx, branch=False):
"""
Publish a new version to PyPI.
See http://atelier.lino-framework.org/invlib.html for details.
"""
atelier.current_project.load_info()
info = atelier.current_project.SETUP_INFO
if not info.get('version'):
return
version = info['version']
# dist_dir = Path(ctx.sdist_dir) / info['name']
dist_dir = ctx.sdist_dir.format(prj=info.get('name'))
dist_dir += "/{name}-{version}.tar.gz".format(**info)
show_revision_status(ctx)
show_pypi_status(ctx, True)
must_confirm(RELEASE_CONFIRM % info)
# args = [sys.executable, "setup.py"]
# args += ["sdist", "--formats=gztar"]
# args += ["--dist-dir", dist_dir]
# args += ["upload"]
# sdist_cmd = ' '.join(args)
args = ["twine", "upload"]
# args +=["--repository-url",""]
args += [dist_dir]
sdist_cmd = ' '.join(args)
if ctx.revision_control_system == 'git' and branch:
msg = "You might want to ignore this and manually run:\n{}".format(
sdist_cmd)
tag_name = "v{}".format(version)
args = ["git", "branch", tag_name]
# args = ["git", "tag"]
# args += ["-a", tag_name]
# args += ["-m", "'Release %(name)s %(version)s.'" % info]
res = ctx.run(' '.join(args), pty=True, warn=True)
if res.exited:
print(msg)
return
args = ["git", "push", "origin", tag_name]
res = ctx.run(' '.join(args), pty=True, warn=True)
if res.exited:
print(msg)
return
# pypi_register(ctx)
ctx.run(sdist_cmd, pty=True)
@task(name='test_sdist')
def test_sdist(ctx):
"""Install a previously created sdist into a temporary virtualenv and
run test suite.
"""
info = atelier.current_project.SETUP_INFO
if not info.get('version'):
return
# from atelier.projects import load_projects
# projects = [p for p in load_projects() if p.SETUP_INFO.get('version') and p['name'] != info['name']]
with cd(ctx.root_dir):
ctx.run("rm -Rf tmp/tmp", pty=True)
ctx.run("virtualenv tmp/tmp", pty=True)
activate = ". tmp/tmp/bin/activate"
def vrun(cmd):
cmd = activate + ';' + cmd
ctx.run(cmd, pty=True)
vrun("pip install --download {0} {1}".format(ctx.pypi_dir, info['name']))
# DEPRECATION: pip install --download has been deprecated and will be removed in the future. Pip now has a download command that should be used instead.
# vrun("pip download {0}".format(info['name']))
vrun("pip install --no-allow-external --no-index --no-cache-dir -f {} -f {} {}".format(
ctx.sdist_dir, ctx.pypi_dir, info['name']))
# vrun("pip install -f {0} {1}".format(ctx.sdist_dir, info['name'])
vrun("inv test")
@task(name='mm')
def make_messages(ctx):
"Extract messages, then initialize and update all catalogs."
extract_messages(ctx)
init_catalog_code(ctx)
update_catalog_code(ctx)
for tree in atelier.current_project.get_doc_trees():
tree.make_messages(ctx)
# if False:
# pass
# extract_messages_userdocs()
# setup_babel_userdocs('init_catalog')
# setup_babel_userdocs('update_catalog')
@task(name='register')
def pypi_register(ctx):
"""Register this project (and its current version) to PyPI. """
args = [sys.executable, "setup.py"]
args += ["register"]
ctx.run(' '.join(args), pty=True)
@task(name='ci')
def checkin(ctx, today=None):
"""Checkin and push to repository, using today's blog entry as commit message."""
if ctx.revision_control_system is None:
return
if ctx.revision_control_system == 'git':
from git import Repo
repo = Repo(ctx.root_dir)
if not repo.is_dirty():
print("No changes to commit in {0}.".format(ctx.root_dir))
return
show_revision_status(ctx)
today = get_current_date(today)
entry = get_blog_entry(ctx, today)
if not entry.path.exists():
quit("%s does not exist!" % entry.path.absolute())
msg = entry.url
if not confirm("OK to checkin %s %s?" % (ctx.project_name, msg)):
return
# ~ puts("Commit message refers to %s" % entry.absolute())
if ctx.revision_control_system == 'hg':
args = ["hg", "ci"]
else:
args = ["git", "commit", "-a"]
args += ['-m', msg]
cmd = ' '.join(args)
ctx.run(cmd, pty=True)
if ctx.revision_control_system == 'hg':
ctx.run("hg push %s" % ctx.project_name, pty=True)
else:
ctx.run("git push", pty=True)
@task(name='blog')
def edit_blog_entry(ctx, today=None):
"""Edit today's blog entry, create an empty file if it doesn't yet exist.
:today: Useful when a working day lasted longer than midnight, or
when you start some work in the evening, knowing that you
won't commit it before the next morning. Note that you
must specify the date using the YYYYMMDD format.
Usage example::
$ inv blog -t 20150727
"""
if not ctx.editor_command:
raise MissingConfig("editor_command")
today = get_current_date(today)
entry = get_blog_entry(ctx, today)
if not entry.path.exists():
if ctx.languages is None:
# txt = today.strftime(ctx.long_date_format)
lng = 'en'
else:
lng = ctx.languages[0]
txt = format_date(today, format='full', locale=lng)
txt = txt[0].upper() + txt[1:] # estonian weekdays
content = rstgen.header(1, txt)
content = ":date: {0}\n\n".format(today) + content
msg = "{0}\nCreate file {1}?".format(content, entry.path)
if not confirm(msg):
return
# for every year we create a new directory.
yd = entry.path.parent
if not yd.exists():
if not confirm("Happy New Year! Create directory %s?" % yd):
return
yd.mkdir()
txt = ".. blogger_year::\n"
(yd / 'index.rst').write_text(txt)
entry.path.write_text(content)
# touch it for Sphinx:
# (entry.path.parent / 'index.rst').set_times()
args = [ctx.editor_command.format(entry.path)]
args += [str(entry.path)]
# raise Exception("20160324 %s", args)
ctx.run(' '.join(args), pty=False)
@task(name='pd')
def publish(ctx, only=None):
"""Publish docs. Upload docs to public web server. """
if not ctx.docs_rsync_dest:
raise MissingConfig("docs_rsync_dest")
for tree in atelier.current_project.get_doc_trees():
if tree.src_path:
if only is None or tree.rel_path == only:
tree.publish_docs(ctx)
def show_revision_status(ctx):
if ctx.revision_control_system == 'hg':
args = ["hg", "st"]
elif ctx.revision_control_system == 'git':
args = ["git", "status"]
else:
print("Invalid revision_control_system %r !" %
ctx.revision_control_system)
return
print("-" * 80)
ctx.run(' '.join(args), pty=True)
print("-" * 80)
def show_pypi_status(ctx, severe=True):
"""Show project status on PyPI before doing a release.
"""
info = atelier.current_project.SETUP_INFO
version = info['version']
name = info['name']
assert name and version
from xmlrpc.client import ServerProxy
# thanks to https://github.com/pypa/warehouse/issues/8753
class RateLimitedServerProxy(ServerProxy):
def __getattr__(self, name):
time.sleep(1)
return super(RateLimitedServerProxy, self).__getattr__(name)
client = RateLimitedServerProxy('https://pypi.python.org/pypi')
released_versions = client.package_releases(name)
if len(released_versions) == 0:
print("No PyPI release of %(name)s has been done so far." % info)
else:
urls = client.release_urls(name, released_versions[-1])
if len(urls) == 0:
msg = "Last PyPI release was {0} (no files available)."
msg = msg.format(released_versions[-1])
print(msg)
else:
lastrel = urls[-1]
# dt = lastrel['upload_time']
# lastrel['upload_time'] = dt.ISO()
print(LASTREL_INFO % lastrel)
if severe and version in released_versions:
raise Exit(
"ABORT: %(name)s %(version)s has already been "
"released." % info)
def get_blog_entry(ctx, today):
"""Return an RstFile object representing the blog entry for that date
in the current project.
"""
parts = ('blog', str(today.year), today.strftime("%m%d"))
return RstFile(Path(ctx.blog_root), ctx.blogref_url, parts)
def get_locale_dir(ctx):
    """Return ``ctx.locale_dir`` as a Path, or None when not configured."""
    configured = ctx.locale_dir
    if configured is None:
        return None
    return Path(configured)
# @task(name='bh')
# def build_help_texts(ctx):
# """Build help_texts.py file for this project."""
# if not ctx.help_texts_source:
# return
# src_dir = ctx.help_texts_source
# dest_dir = Path(ctx.build_dir_name)
# args = ["sphinx-build", "-b", "help_texts"]
# args += [src_dir, dest_dir]
# cmd = ' '.join(args)
# ctx.run(cmd, pty=True)
def extract_messages(ctx):
"""Extract messages from source files to `django.pot` file"""
ld = get_locale_dir(ctx)
if not ld:
return
args = [sys.executable, "setup.py"]
args += ["extract_messages"]
args += ["-o", str(ld / "django.pot")]
cmd = ' '.join(args)
# ~ must_confirm(cmd)
ctx.run(cmd, pty=True)
def init_catalog_code(ctx):
"""Create code .po files if necessary."""
from lino.core.site import to_locale
ld = get_locale_dir(ctx)
if not ld:
return
for loc in ctx.languages:
if loc != 'en':
f = ld / (loc + '/LC_MESSAGES/django.po')
if f.exists():
print("Skip {} because file exists.".format(f))
else:
args = [sys.executable, "setup.py"]
args += ["init_catalog"]
args += ["--domain django"]
args += ["-l", to_locale(loc)]
args += ["-d", str(ld)]
# ~ args += [ "-o" , f ]
args += ["-i", str(ld / 'django.pot')]
cmd = ' '.join(args)
must_confirm(cmd)
ctx.run(cmd, pty=True)
def update_catalog_code(ctx):
"""Update .po files from .pot file."""
from lino.core.site import to_locale
ld = get_locale_dir(ctx)
if not ld:
return
for loc in ctx.languages:
if loc != ctx.languages[0]:
args = [sys.executable, "setup.py"]
args += ["update_catalog"]
args += ["--domain django"]
args += ["-o", str(ld / to_locale(loc) / 'LC_MESSAGES' / 'django.po')]
args += ["-i", str(ld / "django.pot")]
args += ["-l", to_locale(loc)]
cmd = ' '.join(args)
# ~ must_confirm(cmd)
ctx.run(cmd, pty=True)
# @task(name='ls')
# def list_projects(ctx, *cmdline_args):
# """List your projects."""
def git_projects():
for prj in load_projects():
prj.load_info()
if prj.config['revision_control_system'] == 'git':
yield prj
@task(name='ct')
def commited_today(ctx, today=None):
"""Print all today's commits to stdout."""
from git import Repo
list_options = dict()
if True:
today = get_current_date(today)
ONEDAY = timedelta(days=1)
yesterday = today - ONEDAY
tomorrow = today + ONEDAY
list_options.update(
after=yesterday.strftime("%Y-%m-%d"),
before=tomorrow.strftime("%Y-%m-%d"))
if False:
list_options.update(max_count=5)
rows = []
def load(prj):
# prj.load_info()
# repo = Repo(cfg['root_dir'])
repo = Repo(prj.root_dir)
it = list(repo.iter_commits(**list_options))
if len(it) == 0:
# print("20160816 no commits in {}".format(prj.nickname))
return
def fmtcommit(c):
url = repo.remotes.origin.url
if url.startswith("git@github.com"):
url = "https://github.com/" + url[15:-4] \
+ "/commit/" + c.hexsha
elif url.startswith("git+ssh://git@github.com"):
url = "https://github.com/" + url[25:-4] \
+ "/commit/" + c.hexsha
s = "`{0} <{1}>`__".format(c.hexsha[-7:], url)
# if c.message and not c.message.startswith("http://"):
s += "\n({})".format(c.message.strip())
return s
# url = prj.SETUP_INFO.get('url', "oops")
# desc = "`%s <%s>`__" % (prj.name, url)
desc = "*{}*".format(prj.nickname)
for c in it:
# ts = time.strftime("%H:%M", time.gmtime(c.committed_date))
ts = time.strftime("%Y-%m-%d %H:%M", time.localtime(c.committed_date))
rows.append([ts, desc, fmtcommit(c)])
for p in git_projects():
load(p)
rows.sort(key=lambda a: a[0])
print(rstgen.ul(["{0} in {1}:\n{2}".format(*row) for row in rows]))
# print rstgen.table(headers, rows)
# @task(name='pull')
# def git_pull(ctx):
# """Run git pull if it is a git project."""
# from git import Repo
# for p in git_projects():
# with cd(p.root_dir):
from importlib import import_module
def run_in_demo_projects(ctx, py_cmd, cov=False, bare=False):
"""
Run the given Python command line `py_cmd` in each demo project.
See also :attr:`ctx.demo_projects`.
"""
for dpname in ctx.demo_projects:
dpmodule = import_module(dpname)
pth = Path(dpmodule.__file__).parent
# join each demo project to root_dir to avoid failure when
# `inv prep` is invoked from a subdir of root.
with cd(pth):
if cov:
cmd = "coverage run --append " + py_cmd
datacovfile = ctx.root_dir / '.coverage'
if not datacovfile.exists():
print('No .coverage file in {0}'.format(ctx.project_name))
os.environ['COVERAGE_FILE'] = datacovfile
else:
cmd = sys.executable + ' ' + py_cmd
if not bare:
print("-" * 80)
print("Run in demo project {0}\n$ {1} :".format(dpname, cmd))
ctx.run(cmd, pty=True)
@task(name='install')
def configure(ctx):
"""Run `manage.py install` on every demo project."""
cmd = 'manage.py install --noinput'
run_in_demo_projects(ctx, cmd, bare=True)
@task(name='prep')
def prep(ctx, cov=False):
"""
Run preparation tasks that need to run before testing, but only once for all
tests.
"""
# if cov:
# covfile = ctx.root_dir / '.coveragerc'
# if not covfile.exists():
# raise Exception('No .coveragerc file in {0}'.format(
# ctx.project_name))
# # os.environ['COVERAGE_PROCESS_START'] = covfile
# ctx.run('coverage erase', pty=True)
cmd = ctx.prep_command
if cmd:
print("-" * 80)
print("Run main prep command {0} :".format(cmd))
ctx.run(cmd, pty=True)
cmd = ctx.demo_prep_command
if cmd:
run_in_demo_projects(ctx, cmd, cov=cov)
# @task(name='cov', pre=[run_tests])
@task(name='cov')
def run_tests_coverage(ctx, html=True, html_cov_dir='htmlcov'):
"""Run all tests and create a coverage report.
If there a directory named :xfile:`htmlcov` in your project's
`root_dir`, then it will write a html report into this directory
(overwriting any files without confirmation).
"""
# covfile = ctx.root_dir / '.coveragerc'
# if not covfile.exists():
# print('No .coveragerc file in {0}'.format(ctx.project_name))
# return
if not 'COVERAGE_PROCESS_START' in os.environ:
msg = "You must set COVERAGE_PROCESS_START before running `inv cov`!"
raise Exit(msg)
# test whether this Python installation is configured for coverage
from coverage import process_startup
if getattr(process_startup, "coverage", None) is None:
try:
import sitecustomize
fn = os.path.realpath(sitecustomize.__file__)
msg = "Please add the following to your {} file:".format(str(fn))
except ImportError:
msg = 'Please create a sitecustomize.py file with the following content:'
msg += """
try:
import coverage
except ImportError:
pass
else:
coverage.process_startup()"""
msg += "\nSee also https://coverage.readthedocs.io/en/coverage-4.3.4/subprocess.html"
raise Exit(msg)
ctx.run('coverage erase', pty=True)
print("Running {0} in {1} within coverage...".format(
ctx.coverage_command, ctx.project_name))
ctx.run('coverage run --parallel-mode {}'.format(
ctx.coverage_command), pty=True)
if False:
ctx.run('coverage combine', pty=True)
ctx.run('coverage report', pty=True)
if html:
pth = ctx.root_dir / html_cov_dir
print("Writing html report to {}".format(pth))
ctx.run('coverage html -d {}'.format(pth), pty=True)
if False:
ctx.run('open {}/index.html'.format(pth), pty=True)
print('{}/index.html has been generated.'.format(pth))
ctx.run('coverage erase', pty=True)
| {
"content_hash": "2a8f05e528f44dca250b79afd180986e",
"timestamp": "",
"source": "github",
"line_count": 810,
"max_line_length": 160,
"avg_line_length": 32.18641975308642,
"alnum_prop": 0.5769629089793257,
"repo_name": "lsaffre/atelier",
"id": "a557fd9a70ceb26e8fa983eed6cc4d2c2e480694",
"size": "26178",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "atelier/invlib/tasks.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "584"
},
{
"name": "Python",
"bytes": "163112"
}
],
"symlink_target": ""
} |
from taiga.requestmaker import RequestMaker
from taiga.models import Role, Roles
import unittest
from mock import patch
class TestRoles(unittest.TestCase):
    """Unit tests for the taiga ``Roles`` list resource."""

    @patch('taiga.models.base.ListResource._new_resource')
    def test_create_role(self, mock_new_resource):
        # The RequestMaker points at a fake host; no HTTP traffic occurs
        # because _new_resource is patched out above.
        rm = RequestMaker('/api/v1', 'fakehost', 'faketoken')
        mock_new_resource.return_value = Role(rm)
        sv = Roles(rm).create(1, 'RL 1')
        # create(project_id, name) must forward both as the payload.
        mock_new_resource.assert_called_with(
            payload={'project': 1, 'name': 'RL 1'}
        )
| {
"content_hash": "e0c7fdb166e002ec8c8b05761623d66b",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 61,
"avg_line_length": 31.176470588235293,
"alnum_prop": 0.6622641509433962,
"repo_name": "jespino/python-taiga",
"id": "f1319755eacb0f68efa96d6834494233f256ac8d",
"size": "530",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/test_roles.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "102181"
},
{
"name": "Shell",
"bytes": "85"
}
],
"symlink_target": ""
} |
from functional import seq
# Characters unsafe in file names on common filesystems: all ASCII control
# characters plus Windows/POSIX-reserved punctuation, spaces and newlines.
ascii_control_chars = ''.join([chr(x) for x in range(0, 32)])
invalid_filename_chars = set(":*?\"<>|/\\ \n" + ascii_control_chars)


def sanitize_anki_deck_name(name: str, replace_char: str = '_') -> str:
    """
    Get name that conforms to fs standards from deck name.

    Every character in ``invalid_filename_chars`` is replaced with
    *replace_char*; all other characters are kept unchanged.
    """
    # A generator expression with str.join does the same single-pass
    # character map as the third-party ``functional.seq`` pipeline,
    # without the extra dependency or the ``list(name)`` materialisation.
    return ''.join(replace_char if c in invalid_filename_chars else c
                   for c in name)
| {
"content_hash": "11ca7a5c154fb954a0ef0a062a2aac45",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 76,
"avg_line_length": 32.53846153846154,
"alnum_prop": 0.6288416075650118,
"repo_name": "Stvad/CrowdAnki",
"id": "3f40c1397ea411b827dde2de2c73062e0d3bff6a",
"size": "423",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crowd_anki/utils/filesystem/name_sanitizer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "5495"
},
{
"name": "Python",
"bytes": "152637"
},
{
"name": "Shell",
"bytes": "777"
}
],
"symlink_target": ""
} |
# Demo script: seaborn linear-model (regression) plots on the "tips" dataset.
import seaborn as sns
import matplotlib.pyplot as plt

# Data Set
tips = sns.load_dataset('tips')
# total_bill | tip | sex | smoker | day | time | size
# 16.99 | 1.01 | Female | No | Sun | Dinner | 2

# Linear Model Plot
# hue splits by sex with distinct markers; scatter_kws adjusts marker size.
sns.lmplot(x='total_bill', y='tip', data=tips, hue='sex', markers=['o','v'],
scatter_kws={'s':100})
# Create additional plots by column and/or row based on the categorical data selected
sns.lmplot(x='total_bill', y='tip', data=tips, col='sex',row='time')
# Aspect parameter is L x W
# NOTE(review): lmplot's `size` kwarg was renamed to `height` in seaborn 0.9;
# this call only works on older seaborn — confirm the installed version.
sns.lmplot(x='total_bill', y='tip', data=tips, col='day',hue='sex',
aspect=0.4, size=7)
plt.show()
"content_hash": "5ac2793907b5887ba63383e8cccdc482",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 85,
"avg_line_length": 32.23809523809524,
"alnum_prop": 0.654357459379616,
"repo_name": "leon-lei/learning-materials",
"id": "5aa4a4a766225ed71f23d3627e67bbab5a89cf1a",
"size": "677",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data_science/seaborn_tutorials/seaborn_practice4_linear_model_plot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "748"
},
{
"name": "HTML",
"bytes": "2372434"
},
{
"name": "JavaScript",
"bytes": "9461"
},
{
"name": "Python",
"bytes": "98761"
}
],
"symlink_target": ""
} |
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
def configure_cache_dir(cache_dir):
r"""Setup menpobench by configuring a directory for caching datasets and
methods. datasets are generally large, so you will want to choose a
directory on a volume with a fairly large capacity (we recommend 20GB+).
Your preference will be saved at '~/.menpobenchrc', and used for future
uses of menpobench.
"""
from menpobench.config import save_custom_config
save_custom_config({'cache_dir': cache_dir})
def configure_matlab_bin_path(matlab_bin_path):
r"""Setup menpobench by configuring the path to the Matlab executable.
Your preference will be saved at '~/.menpobenchrc', and used for future
uses of menpobench.
"""
from menpobench.config import save_custom_config
save_custom_config({'matlab_bin_path': matlab_bin_path})
from menpobench.utils import memoize
@memoize
def menpobench_dir():
r"""The path to the top of the menpobench Python package.
Useful for locating where the data folder is stored.
Returns
-------
path : ``pathlib.Path``
The full path to the top of the menpobench package
"""
from pathlib import Path # to avoid cluttering the menpo.base namespace
import os
return Path(os.path.abspath(__file__)).parent
del memoize # clean up namespace
def predefined_dir():
return menpobench_dir() / 'predefined'
from .base import invoke_benchmark
| {
"content_hash": "9244b308b12b76d3436274b798b2d9bc",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 76,
"avg_line_length": 28.433962264150942,
"alnum_prop": 0.7073656270736562,
"repo_name": "nontas/menpobench",
"id": "272a2622ad297a6204f762784b2e2e2192cac978",
"size": "1507",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "menpobench/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "145"
},
{
"name": "Matlab",
"bytes": "19203"
},
{
"name": "Python",
"bytes": "170827"
},
{
"name": "Shell",
"bytes": "119"
}
],
"symlink_target": ""
} |
"""
fileiobase provides general purpose NMR file IO functions and classes
used by multiple nmrglue.fileio modules.
"""
from __future__ import division
import os
import string
import sys
import itertools
from functools import reduce
import numpy as np
def create_blank_udic(ndim):
    """
    Create a blank universal dictionary for a spectrum of dimension ndim.
    """
    udic = {"ndim": ndim}
    for axis in range(ndim):
        axis_dic = {
            "sw": 999.99,       # spectral width in Hz
            "complex": True,    # quadrature, True when dimension is complex
            "obs": 999.99,      # observation frequency in MHz
            "car": 999.99,      # carrier frequency in Hz
            # Number of points in dimension based on the shape of the data
            # array.  As such the direct dimension (-1) size is R|I, all
            # indirect dimensions are R+I.
            "size": 1,
            "label": ["X", "Y", "Z", "A"][axis],  # name of dimension
        }
        # encoding of dimension (states, tppi, etc.); the direct dimension
        # is always listed as "direct"
        axis_dic["encoding"] = "direct" if axis == ndim - 1 else "states"
        # time and frequency domain flags for the dimension
        axis_dic["time"] = True
        axis_dic["freq"] = False
        udic[axis] = axis_dic
    return udic
class unit_conversion():
    """
    Provides methods to convert between common NMR units

    Parameters
    ----------
    size : int
        Number of points in dimension (R|I).
    cplx : bool
        True if dimension is complex, False if real.
    sw : float
        Spectral width in Hz.
    obs : float
        Observation frequency in MHz.
    car : float
        Carrier frequency in Hz.
    """

    def __init__(self, size, cplx, sw, obs, car):
        """
        Create and set up a unit_conversion object.
        """
        # fundamental units
        self._size = int(size)
        self._cplx = bool(cplx)
        self._sw = float(sw)
        self._obs = float(obs)
        self._car = float(car)
        # derived units (these are in ppm)
        self._delta = -self._sw / (self._size * self._obs)
        self._first = self._car / self._obs - self._delta * self._size / 2.

    # elementary converters, one pair per unit (points <-> unit)
    def __percent2pts(self, percent):
        return percent * (self._size - 1) / 100.0

    def __pts2percent(self, pts):
        return pts * 100 / (self._size - 1.0)

    def __hz2pts(self, hz):
        return ((hz / self._obs) - self._first) / self._delta

    def __pts2hz(self, pts):
        return (pts * self._delta + self._first) * self._obs

    def __ppm2pts(self, ppm):
        return (ppm - self._first) / self._delta

    def __pts2ppm(self, pts):
        return (pts * self._delta) + self._first

    # time based units: seconds, milliseconds (ms) and microseconds (us)
    def __sec2pts(self, sec):
        return sec * self._sw

    def __pts2sec(self, pts):
        return pts * 1. / self._sw

    def __ms2pts(self, ms):
        return ms * self._sw / 1.e3

    def __pts2ms(self, pts):
        return pts * 1.e3 / self._sw

    def __us2pts(self, us):
        return us * self._sw / 1.e6

    def __pts2us(self, pts):
        return pts * 1.e6 / self._sw

    # routers
    def __unit2pnt(self, val, units):
        """
        Convert a value in the given unit to points.
        """
        # dispatch on the normalized unit name; note that no quadrature
        # correction is applied for complex data
        to_pts = {
            "PPM": self.__ppm2pts,
            "HZ": self.__hz2pts,
            "%": self.__percent2pts,
            "PERCENT": self.__percent2pts,
            "SEC": self.__sec2pts,
            "SECOND": self.__sec2pts,
            "S": self.__sec2pts,
            "MS": self.__ms2pts,
            "US": self.__us2pts,
        }.get(units.upper())
        if to_pts is None:
            raise ValueError("invalid unit type")
        return to_pts(val)

    def __pnt2unit(self, val, units):
        """
        Convert points to the given unit.
        """
        from_pts = {
            "PPM": self.__pts2ppm,
            "HZ": self.__pts2hz,
            "%": self.__pts2percent,
            "PERCENT": self.__pts2percent,
            "SEC": self.__pts2sec,
            "SECOND": self.__pts2sec,
            "S": self.__pts2sec,
            "MS": self.__pts2ms,
            "US": self.__pts2us,
        }.get(units.upper())
        if from_pts is None:
            raise ValueError("invalid units")
        return from_pts(val)

    def __str2pnt(self, s):
        """
        Convert a string with units, e.g. "5.0 ppm", to points.
        """
        # the unit is what remains after stripping the numeric characters,
        # the value after stripping the alphabetic ones
        unit_part = s.strip(string.digits + string.whitespace + "." + "-").upper()
        value_part = float(s.strip(string.ascii_letters + string.whitespace + "%"))
        return self.__unit2pnt(value_part, unit_part)

    def __convert(self, val, unit=None):
        """
        Convert a string or a value/unit pair to points.
        """
        if isinstance(val, str):
            return self.__str2pnt(val)
        if unit is None:
            raise ValueError("invalid unit type")
        return self.__unit2pnt(val, unit)

    # User functions
    def f(self, val, unit=None):
        """
        Convert string or value/unit pair to a float number of points.
        """
        return self.__convert(val, unit)

    def i(self, val, unit=None):
        """
        Convert string or value/unit pair to the nearest integer point.
        """
        return int(round(self.__convert(val, unit)))

    def ppm(self, val):
        """
        Convert points to ppm.
        """
        return self.__pnt2unit(val, "PPM")

    def hz(self, val):
        """
        Convert points to Hz.
        """
        return self.__pnt2unit(val, "HZ")

    def percent(self, val):
        """
        Convert points to percent.
        """
        return self.__pnt2unit(val, "PERCENT")

    def seconds(self, val):
        """
        Convert points to seconds.
        """
        return self.__pnt2unit(val, "SEC")

    def sec(self, val):
        """
        Convert points to seconds.
        """
        return self.__pnt2unit(val, "SEC")

    def ms(self, val):
        """
        Convert points to milliseconds (ms).
        """
        return self.__pnt2unit(val, "MS")

    def us(self, val):
        """
        Convert points to microseconds (us).
        """
        return self.__pnt2unit(val, "US")

    def unit(self, val, unit):
        """
        Convert val points to the given unit.
        """
        return self.__pnt2unit(val, unit)

    # limits and scales
    def percent_limits(self):
        """
        Return tuple of left and right edges in percent.
        """
        return 0.0, 100.0

    def percent_scale(self):
        """
        Return array of percent values, one per point.
        """
        return np.linspace(0.0, 100.0, self._size)

    def ppm_limits(self):
        """
        Return tuple of left and right edges in ppm.
        """
        return self.ppm(0), self.ppm(self._size - 1)

    def ppm_scale(self):
        """
        Return array of ppm values, one per point.
        """
        left, right = self.ppm_limits()
        return np.linspace(left, right, self._size)

    def hz_limits(self):
        """
        Return tuple of left and right edges in Hz.
        """
        return self.hz(0), self.hz(self._size - 1)

    def hz_scale(self):
        """
        Return array of Hz values, one per point.
        """
        left, right = self.hz_limits()
        return np.linspace(left, right, self._size)

    def sec_limits(self):
        """
        Return tuple of left and right edges in seconds.
        """
        return self.sec(0), self.sec(self._size - 1)

    def sec_scale(self):
        """
        Return array of seconds values, one per point.
        """
        left, right = self.sec_limits()
        return np.linspace(left, right, self._size)

    def ms_limits(self):
        """
        Return tuple of left and right edges in milliseconds.
        """
        return self.ms(0), self.ms(self._size - 1)

    def ms_scale(self):
        """
        Return array of millisecond values, one per point.
        """
        left, right = self.ms_limits()
        return np.linspace(left, right, self._size)

    def us_limits(self):
        """
        Return tuple of left and right edges in microseconds.
        """
        return self.us(0), self.us(self._size - 1)

    def us_scale(self):
        """
        Return array of microsecond values, one per point.
        """
        left, right = self.us_limits()
        return np.linspace(left, right, self._size)

    __call__ = i  # calling the object x is the same as x.i
def uc_from_udic(udic, dim=-1):
    """
    Create a unit conversion object from a universal dictionary.

    Parameters
    ----------
    udic : dict
        Universal dictionary of spectral parameters.
    dim : int, optional
        Dimension number to create the unit conversion object for.
        The default (-1) selects the last dimension.

    Returns
    -------
    uc : unit_conversion object.
        Unit conversion object for the given dimension.
    """
    if dim == -1:
        dim = udic['ndim'] - 1  # last (direct) dimension
    dim_dic = udic[dim]
    return unit_conversion(dim_dic['size'], dim_dic['complex'],
                           dim_dic['sw'], dim_dic['obs'], dim_dic['car'])
def uc_from_freqscale(scale, obs, unit='ppm'):
    """
    Create a unit conversion object from a spectrum frequency scale axis.

    Parameters
    ----------
    scale : array like
        Array of spectrum axis values.
    obs : float
        Observation frequency in MHz.
    unit : {'ppm', 'hz', 'khz'}
        The unit of the scale axis.

    Returns
    -------
    uc : unit_conversion object.
        Unit conversion object for the given axis.
    """
    scale = np.array(scale)
    size = len(scale)
    if unit not in ('ppm', 'hz', 'khz'):
        mesg = '{} is not a supported unit.'.format(unit)
        raise ValueError(mesg)
    lo = scale.min()
    hi = scale.max()
    # The scale needs to be corrected by extending each extremum by half
    # the bin width (to convert from bin centers to bin edges).
    dx = abs(scale[1] - scale[0])
    width = ((hi + dx / 2.0) - (lo - dx / 2.0))
    center = (lo - dx / 2.0 + (hi - lo) / 2.0)
    if unit == 'ppm':
        sw = width * obs
        car = center * obs
    elif unit == 'hz':
        sw = width
        car = center
    else:
        # unit is 'khz'
        sw = width / 1.e3
        car = center / 1.e3
    return unit_conversion(size, False, sw, obs, car)
def open_towrite(filename, overwrite=False, mode='wb'):
    """
    Open filename for writing and return the file object.

    Raises IOError if the file already exists and overwrite is False.
    Missing parent directories are created as needed.
    """
    # refuse to clobber an existing file unless explicitly allowed
    if os.path.exists(filename) and (overwrite is False):
        raise IOError("File exists, recall with overwrite=True")
    dirname = os.path.dirname(filename)
    # create any missing parent directories
    if dirname != '' and not os.path.exists(dirname):
        os.makedirs(dirname)
    return open(filename, mode)
################################################
# numpy ndarray emulation and helper functions #
################################################
# iterators for ND array
def ndfrom_iter(shape, slices):
    """Iterate over the source (file) indices selected by *slices*."""
    selected = (range(dim_len)[dim_slice]
                for dim_len, dim_slice in zip(shape, slices))
    return itertools.product(*selected)
def ndto_iter(shape, slices):
    """Iterate over the destination (output) indices for the selection."""
    out_ranges = []
    for dim_len, dim_slice in zip(shape, slices):
        out_ranges.append(range(len(range(dim_len)[dim_slice])))
    return itertools.product(*out_ranges)
def ndtofrom_iter(shape, slices):
    """Iterate over (output index, file index) pairs for the selection."""
    selected = []
    out_ranges = []
    for dim_len, dim_slice in zip(shape, slices):
        picked = range(dim_len)[dim_slice]
        selected.append(picked)
        out_ranges.append(range(len(picked)))
    return zip(itertools.product(*out_ranges), itertools.product(*selected))
def size_and_ndtofrom_iter(shape, slices):
    """Return the selection sizes and an (output, file) index pair iterator."""
    selected = [range(dim_len)[dim_slice]
                for dim_len, dim_slice in zip(shape, slices)]
    sizes = [len(picked) for picked in selected]
    out_ranges = [range(n) for n in sizes]
    pair_iter = zip(itertools.product(*out_ranges), itertools.product(*selected))
    return sizes, pair_iter
# index2trace and trace2index functions
def index2trace_flat(shape, index):
    """
    Calculate trace number from shape and index of all indirect dimensions
    assuming a flat structure.
    """
    # Row-major (C order) flattening:
    # index[0]*shape[1]*...*shape[-1] + index[1]*shape[2]*...*shape[-1]
    # + ... + index[-2]*shape[-1] + index[-1]
    # Accumulate strides from the fastest-varying dimension outwards.
    ntrace = index[-1]
    stride = 1
    for dim_len, idx in zip(shape[:0:-1], index[-2::-1]):
        stride *= dim_len
        ntrace += idx * stride
    return ntrace
def trace2index_flat(shape, ntrace):
    """
    Calculate the index of a trace assuming a flat structure.
    """
    # take quotient/remainder by each size from the fastest-varying
    # dimension inwards; the final quotient is the slowest index
    remaining = ntrace
    index = []
    for dim_len in reversed(shape[1:]):
        remaining, pos = divmod(remaining, dim_len)
        index.append(pos)
    index.append(remaining)
    return tuple(reversed(index))
def index2trace_opp(shape, index):
    """
    Calculate trace number from shape and index of all indirect dimensions
    assuming a phase ordering opposite the time increments.
    """
    ndim = len(shape)
    # split off the phase (even/odd) component of each index, reversed
    # relative to the time-increment ordering
    phases = [i % 2 for i in index]
    nphase = index2trace_flat([2] * ndim, phases[::-1])
    # flatten the remaining time-increment component
    reduced_index = [i // 2 for i in index]
    reduced_shape = [s // 2 for s in shape]
    nbase = index2trace_flat(reduced_shape, reduced_index)
    return nbase * 2 ** ndim + nphase
def trace2index_opp(shape, ntrace):
    """
    Calculate the index of a trace assuming opposite phase/time increment
    ordering.
    """
    ndim = len(shape)
    # split the trace number into time-increment and phase components
    nbase, nphase = divmod(ntrace, 2 ** ndim)
    phases = list(trace2index_flat([2] * ndim, nphase))[::-1]
    reduced_shape = [s // 2 for s in shape]
    base = list(trace2index_flat(reduced_shape, nbase))
    return tuple(2 * b + p for b, p in zip(base, phases))
def index2trace_reg(shape, index):
    """
    Calculate trace number from shape and index of all indirect dimensions
    assuming the same phase and time ordering.
    """
    ndim = len(shape)
    # split off the phase (even/odd) component of each index
    phases = [i % 2 for i in index]
    nphase = index2trace_flat([2] * ndim, phases)
    # flatten the remaining time-increment component
    reduced_index = [i // 2 for i in index]
    reduced_shape = [s // 2 for s in shape]
    nbase = index2trace_flat(reduced_shape, reduced_index)
    return nbase * 2 ** ndim + nphase
def trace2index_reg(shape, ntrace):
    """
    Calculate the index of a trace assuming the same phase/time increment
    ordering.
    """
    ndim = len(shape)
    # split the trace number into time-increment and phase components
    nbase, nphase = divmod(ntrace, 2 ** ndim)
    phases = list(trace2index_flat([2] * ndim, nphase))
    reduced_shape = [s // 2 for s in shape]
    base = list(trace2index_flat(reduced_shape, nbase))
    return tuple(2 * b + p for b, p in zip(base, phases))
#
# data_nd class
#
# inherited classes should define:
#
# __init__ which sets up the object and defines at minimum
#
# self.fshape shape of data on disk (shape when order = (0, 1, 2, ...))
# self.order order of axes, default is (0,1,2,...)
# self.dtype
#
# self.__setdimandshape__ can be called to set self.dim and self.shape
# if they are not set by __init__
#
# __fgetitem__ which takes well formatted tuples of slices
# and returns ndarray objects
#
# __fcopy__ which creates a copy provided only self and order parameters
#
class data_nd(object):
    """
    Base class for building objects which emulate ndarray objects without
    loading data into memory. These objects have the following properties:

    * slicing operations return ndarray objects
    * can iterate over with expected results
    * transpose and swapaxes functions create a new data_nd object with the
      new axes ordering
    * has ndim, shape, and dtype attributes.

    Notes
    -----
    Classes which use this class as a base should define the following
    methods:

    __init__ which must set up the object and defines at minimum:

        self.fshape : tuple
            Shape of the data on disk, the shape when order = (0, 1, 2, ..)
        self.order : tuple
            Ordering of the axes
        self.dtype : dtype
            Dtype of the emulated ndarray

        self.__setdimandshape__ should be called if self.ndim and self.shape
        are not set up in __init__.

    __fgetitem__ which takes a well formatted tuple of slices and returns a
    ndarray object with the selected data

    __fcopy__(self, order) which creates a copy of the object with the new
    order
    """

    def __init__(self, order):
        pass

    def __setdimandshape__(self):
        """ Set the ndim and shape attributes from fshape and order. """
        self.ndim = len(self.fshape)
        self.shape = tuple([self.fshape[i] for i in self.order])

    def __copy__(self):
        """
        Create a copy with the same axis ordering.
        """
        # fixed: previously called the undefined name __fcopy, which raised
        # NameError; delegate to the subclass-provided __fcopy__ method.
        return self.__fcopy__(self.order)

    def __getitem__(self, key):
        """
        x.__getitem__(y) <==> x[y]
        """
        # normalize the key into a list of per-dimension indexers
        if not isinstance(key, tuple):
            rlist = [key]
        else:
            rlist = list(key)

        # expand any Ellipsis into the required number of full slices
        while Ellipsis in rlist:
            i = rlist.index(Ellipsis)
            rlist.pop(i)
            for j in range(self.ndim - len(rlist)):
                rlist.insert(i, slice(None))

        if len(rlist) > self.ndim:
            raise IndexError("invalid index")

        # replace integer indexes with single-element slices so that
        # __fgetitem__ only ever sees slice objects
        for i, v in enumerate(rlist):
            if not isinstance(v, slice):
                # check for out of range indexes
                if v >= self.shape[i]:
                    raise IndexError(
                        "index(%s) out of range(0 <= index < %s) "
                        "in dimension %s" % (v, self.shape[i] - 1, i))
                if v <= (-1 * self.shape[i] - 1):
                    raise IndexError(
                        "index(%s) out of range(0 <= index < %s) "
                        "in dimension %s" % (v, self.shape[i] - 1, i))
                if v < 0:
                    w = self.shape[i] + v   # negative index wraps around
                    rlist[i] = slice(w, w + 1, 1)
                else:
                    rlist[i] = slice(v, v + 1, 1)

        # pad the list with full slices for unspecified trailing dimensions
        for i in range(len(rlist), self.ndim):
            rlist.append(slice(None))

        # reorder the slices into file (on-disk) order
        frlist = [rlist[self.order.index(i)] for i in range(self.ndim)]

        # read the selected data from disk
        data = self.__fgetitem__(tuple(frlist))

        # re-order the data into display order and drop length-1 dimensions
        if data.shape != (0,):
            return np.squeeze(data.transpose(self.order))
        else:
            return data

    def __len__(self):
        """
        x.__len__() <==> len(x)
        """
        return self.shape[0]

    def __iter__(self):
        """ x.__iter__() <==> iter(x) """
        for index in range(0, self.shape[0]):
            yield self[index]

    def swapaxes(self, axis1, axis2):
        """
        Return object with `axis1` and `axis2` interchanged.
        """
        axis1, axis2 = int(axis1), int(axis2)
        # negative axes count from the end (numpy convention)
        # fixed: previously computed self.ndim - axis, which maps -1 to
        # ndim + 1 and always raised; adding is correct.
        if axis1 < 0:
            axis1 = self.ndim + axis1
        if axis2 < 0:
            axis2 = self.ndim + axis2
        if not 0 <= axis1 < self.ndim:
            raise ValueError("bad axis1 argument to swapaxes")
        if not 0 <= axis2 < self.ndim:
            raise ValueError("bad axis2 argument to swapaxes")
        order = list(self.order)
        order[axis1], order[axis2] = order[axis2], order[axis1]
        return self.__fcopy__(order=order)

    def transpose(self, *axes):
        """
        Return object with axes transposed.

        Parameters
        ----------
        axes : None, tuple of ints, or `n` ints
            * None or no arguments: reverse order of the axes
            * tuple of ints: `i` in the `j`-th place in the tuple means the
              `i`-th axis becomes the new object's `j`-th axis.
            * `n` ints: same as an n-tuple.

        Returns
        -------
        out : data_nd object
            Object whose axes are permuted.
        """
        if axes == ():  # default is to reverse the order of the axes
            axes = range(self.ndim)[::-1]
        if len(axes) == 1:  # if a single tuple is given unpack it
            axes = axes[0]
        try:    # convert to integers
            axes = [int(i) for i in axes]
        except (TypeError, ValueError):
            # non-iterable argument or elements that are not integer-like
            raise TypeError("an integer is required")
        if len(axes) != self.ndim:  # check for too few/many axes
            raise ValueError("axes don't match array")
        # replace negative axes values with positive equivalents
        for i, v in enumerate(axes):
            if v < 0:
                axes[i] = self.ndim + v
        # check for invalid axes
        for v in axes:
            if v >= self.ndim:
                raise ValueError("invalid axis for this array")
        # check for repeated axes
        if len(set(axes)) != self.ndim:
            raise ValueError("repeated axis in transpose")
        # create a new data_nd object with transposed order
        return self.__fcopy__(order=tuple([self.order[i] for i in axes]))
| {
"content_hash": "c92500c0a5945123d49c320d459f0a9c",
"timestamp": "",
"source": "github",
"line_count": 754,
"max_line_length": 79,
"avg_line_length": 28.419098143236074,
"alnum_prop": 0.5440545081202165,
"repo_name": "kaustubhmote/nmrglue",
"id": "3c7f38e32e5281a90624626865c430bfc642e245",
"size": "21428",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nmrglue/fileio/fileiobase.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "978743"
},
{
"name": "Shell",
"bytes": "35758"
}
],
"symlink_target": ""
} |
from euca2ools.commands.iam import IAMRequest, AS_ACCOUNT
from requestbuilder import Arg
class DeleteUserPolicy(IAMRequest):
    """IAM request that removes a named inline policy from a user."""
    DESCRIPTION = 'Remove a policy from a user'
    # command-line arguments; dest names match the IAM request parameters
    ARGS = [Arg('-u', '--user-name', dest='UserName', metavar='USER',
                required=True,
                help='user the policy is attached to (required)'),
            Arg('-p', '--policy-name', dest='PolicyName', metavar='POLICY',
                required=True, help='name of the policy to delete (required)'),
            # standard option to act on behalf of another account
            AS_ACCOUNT]
| {
"content_hash": "e3c82ffaa52912d427929dfc331337dc",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 79,
"avg_line_length": 43.583333333333336,
"alnum_prop": 0.621414913957935,
"repo_name": "Juniper/euca2ools",
"id": "0288680e4c745f4c88dbd6bc80e3f94005e6585a",
"size": "1870",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "euca2ools/commands/iam/deleteuserpolicy.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [],
"symlink_target": ""
} |
"""
NaiveBayes
"""
__author__ = "Manan Kalra"
__email__ = "manankalr29@gmail.com"
import nltk
import random
from nltk.corpus import movie_reviews
# # training-data: one (word list, category label) pair per movie review
documents = [(list(movie_reviews.words(fileid)), category)
             for category in movie_reviews.categories()
             for fileid in movie_reviews.fileids(category)]
# shuffle so the train/test split below is not ordered by category
random.shuffle(documents)

# # list of all the words; like everything in our data-set (lower-cased)
all_words = []
for w in movie_reviews.words():
    all_words.append(w.lower())

# # counting the frequency of each word
all_words = nltk.FreqDist(all_words)  # <FreqDist with 39768 samples and 1583820 outcomes>
# print(all_words.most_common(15))
# print(all_words["<random_word>"])

# # list of all the words (non-repeated), top 3000
# NOTE(review): this takes the first 3000 keys in dict order, not the 3000
# most common words (that would be all_words.most_common(3000)) — TODO confirm
# whether this ordering is intended.
word_features = list(all_words.keys())[:3000]
def find_features(document, feature_words=None):
    """Return a boolean feature dict for a document.

    Parameters
    ----------
    document : iterable of str
        The words of the document.
    feature_words : iterable of str, optional
        Vocabulary to test for; defaults to the module-level
        ``word_features`` list (the selected top-3000 words).

    Returns
    -------
    dict
        Maps each feature word to True if it occurs in *document*,
        False otherwise.
    """
    if feature_words is None:
        feature_words = word_features  # module-level vocabulary
    words = set(document)  # set gives O(1) membership tests
    return {w: (w in words) for w in feature_words}
# # transforming documents
feature_sets = [(find_features(rev), category) for (rev, category) in documents]
training_set = feature_sets[:1900]
testing_set = feature_sets[1900:]
# # Naive-Bayes (posterior = (prior occurrences * likelihood) / evidence)
classifier = nltk.NaiveBayesClassifier.train(training_set)
accuracy = nltk.classify.accuracy(classifier, testing_set) * 100
print("Original NB Accuracy: ", accuracy)
classifier.show_most_informative_features()
'''
# save
save_classifier = open("naivebayes.pickle", "wb")
pickle.dump(classifier, save_classifier)
save_classifier.close()
# load
temp = open("naivebayes.pickle", "rb")
myclassifier = pickle.load(temp)
myclassifier.close()
'''
| {
"content_hash": "c9d3bae1aa740d62c3b807b8a646cd8e",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 118,
"avg_line_length": 27.060606060606062,
"alnum_prop": 0.7082866741321389,
"repo_name": "manankalra/Twitter-Sentiment-Analysis",
"id": "f9e53fee5c636afc70d39e849d1e26d821257be4",
"size": "1809",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main/naive_bayes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32184"
}
],
"symlink_target": ""
} |
"""
.. _basic-mat-mult:
Simple Matrix Multiply
======================
**Author**: `Thierry Moreau <https://homes.cs.washington.edu/~moreau/>`_
In this tutorial, we will build on top of the :ref:`vta-get-started` tutorial
and introduce additional concepts required to implement matrix multiplication
on VTA with the TVM workflow.
"""
######################################################################
# RPC Setup
# ---------
# We start by programming the Pynq's FPGA and building its RPC runtime
# as we did in the VTA introductory tutorial.
from __future__ import absolute_import, print_function
import os
import tvm
import vta
import numpy as np
from tvm import rpc
from tvm.contrib import util
from vta.testing import simulator
# Load VTA parameters from the vta/config/vta_config.json file
env = vta.get_env()

# We read the Pynq RPC host IP address and port number from the OS
# environment (the defaults target a Pynq board on a local network)
host = os.environ.get("VTA_PYNQ_RPC_HOST", "192.168.2.99")
port = int(os.environ.get("VTA_PYNQ_RPC_PORT", "9091"))

# We configure both the bitstream and the runtime system on the Pynq
# to match the VTA configuration specified by the vta_config.json file.
if env.TARGET == "pynq":

    # Make sure that TVM was compiled with RPC=1
    assert tvm.module.enabled("rpc")
    remote = rpc.connect(host, port)

    # Reconfigure the JIT runtime
    vta.reconfig_runtime(remote)

    # Program the FPGA with a pre-compiled VTA bitstream.
    # You can program the FPGA with your own custom bitstream
    # by passing the path to the bitstream file instead of None.
    vta.program_fpga(remote, bitstream=None)

# In simulation mode, host the RPC server locally.
elif env.TARGET in ["sim", "tsim"]:
    remote = rpc.LocalSession()
######################################################################
# Computation Declaration
# -----------------------
# In this example we describe a simple matrix multiplication addition, which
# requires multiple computation stages, as shown in the dataflow diagram below.
# First we describe the input tensors :code:`A` and :code:`B` that are living
# in main memory.
# Second, we need to declare intermediate tensors :code:`A_buf` and
# :code:`B_buf`, which will live in VTA's on-chip buffers.
# Having this extra computational stage allows us to explicitly
# stage cached reads and writes.
# Third, we describe the matrix multiplication computation over
# :code:`A_buf` and :code:`B_buf` to produce the product matrix :code:`C_buf`.
# The last operation is a cast and copy back to DRAM, into results tensor
# :code:`C`.
#
# .. image:: https://raw.githubusercontent.com/uwsaml/web-data/master/vta/tutorial/gemm_dataflow.png
# :align: center
######################################################################
# Data Layout
# ~~~~~~~~~~~
# We describe the placeholder tensors :code:`A`, and :code:`B` in a tiled data
# format to match the data layout requirements imposed by the VTA tensor core.
######################################################################
# .. note::
#
# **Data Tiling**
#
# One source of complexity when targeting accelerators is to make sure
# that the data layout matches the layout imposed by the accelerator design.
# VTA is designed around a *tensor core* that performs, one matrix-matrix
# operation per cycle between an activation matrix and a weight matrix,
# adding the result matrix to an accumulator matrix, as shown in the
# figure below.
#
# .. image:: https://raw.githubusercontent.com/uwsaml/web-data/master/vta/tutorial/tensor_core.png
# :align: center
# :width: 480px
#
# The dimensions of that matrix-matrix multiplication are specified in
# the :code:`vta_config.json` configuration file.
# The activation matrix has a :code:`(BATCH, BLOCK_IN)` shape
# and the transposed weight matrix has a :code:`(BLOCK_OUT, BLOCK_IN)` shape,
# thus inferring that the resulting output matrix has a
# :code:`(BATCH, BLOCK_OUT)` shape.
# Consequently input and output tensors processed by VTA need to be
# tiled according to these aforementioned dimension.
#
# The diagram below shows the impact of data tiling on a matrix that is
# originally of shape (4, 8).
# Tiling by a (2, 2) tile shape ensures that data within each tile is
# contiguous.
# The resulting tiled tensor has a shape of (2, 4, 2, 2).
#
# .. image:: https://raw.githubusercontent.com/uwsaml/web-data/master/vta/tutorial/data_tiling.png
# :align: center
# :width: 480px
#
# We first define the variables :code:`m`, :code:`n`, :code:`o` to represent
# the shape of the matrix multiplication. These variables are multiplicative
# factors over the :code:`BLOCK_OUT`, :code:`BLOCK_IN`, and :code:`BATCH`
# tensor dimensions respectively.
# By default, the configuration file sets :code:`BATCH`, :code:`BLOCK_IN`, and
# :code:`BLOCK_OUT` to be 1, 16 and 16 respectively (:code:`BATCH` being set to
# 1 implies that our compute building block is vector-matrix multiply).
#
######################################################################
# .. note::
#
# **Data Types**
#
# It's important to not only match the inner-tile
# dimension of VTA's tensor core, but also to match the specific data types
# expected by VTA.
# VTA for now only supports fixed point data types, which integer width is
# specified in the :code:`vta_config.json` file by :code:`INP_WIDTH` and
# :code:`WGT_WIDTH` for the activations and weights data types respectively.
# In addition, the accumulator data type integer width is specified by
# :code:`ACC_WIDTH`.
#
# By default, the configuration file sets :code:`INP_WIDTH`
# and :code:`WGT_WIDTH` to 8.
# The accumulator width :code:`ACC_WIDTH` is set to 32, in order to avoid
# overflow during accumulation.
# As a result, :code:`env.inp_dtype` and :code:`env.wgt_dtype` are all
# narrow 8-bit integers, while :code:`env.acc_dtype` is a standard 32-bit
# integer.
# Output channel factor m - total 16x16=256 output channels
m = 16
# Input channel factor n - total 16x16=256 input channels
n = 16
# Batch factor o (we use single batch inference)
o = 1

# A placeholder tensor in tiled data format, shape (o, n, BATCH, BLOCK_IN)
A = tvm.placeholder((o, n, env.BATCH, env.BLOCK_IN), name="A", dtype=env.inp_dtype)
# B placeholder tensor in tiled data format, shape (m, n, BLOCK_OUT, BLOCK_IN)
B = tvm.placeholder((m, n, env.BLOCK_OUT, env.BLOCK_IN), name="B", dtype=env.wgt_dtype)

# A copy buffer: identity compute stage later mapped to on-chip input SRAM
A_buf = tvm.compute((o, n, env.BATCH, env.BLOCK_IN), lambda *i: A(*i), "A_buf")
# B copy buffer: identity compute stage later mapped to on-chip weight SRAM
B_buf = tvm.compute((m, n, env.BLOCK_OUT, env.BLOCK_IN), lambda *i: B(*i), "B_buf")
######################################################################
# Matrix Multiplication
# ~~~~~~~~~~~~~~~~~~~~~
# Now we're ready to describe the matrix multiplication result tensor :code:`C`,
# with another compute operation.
# The compute function takes the shape of the tensor, as well as a lambda
# function that describes the computation rule for each position of the tensor.
#
# In order to implement matrix multiplication, the lambda function needs to
# include a reduction formula over the input channel dimension axes.
# To create a reduction formula, we can declare a reduction axis using
# :code:`tvm.reduce_axis`, which takes in the range of reductions.
# :code:`tvm.sum` takes in the expression to be reduced as well as
# the reduction axes to compute the sum of value over all k in the declared
# ranges.
#
# Note that the reduction needs to be performed over 32-bit :code:`env.acc_dtype`
# accumulator data types.
#
# No computation happens during this phase, as we are only declaring how
# the computation should be done.
# Outer input feature reduction axis
ko = tvm.reduce_axis((0, n), name="ko")
# Inner input feature reduction axis
ki = tvm.reduce_axis((0, env.BLOCK_IN), name="ki")
# Describe the in-VTA matrix multiplication: accumulate products of A_buf
# and B_buf over both reduction axes in the wide accumulator dtype
C_buf = tvm.compute(
    (o, m, env.BATCH, env.BLOCK_OUT),
    lambda bo, co, bi, ci:
        tvm.sum(A_buf[bo, ko, bi, ki].astype(env.acc_dtype) *
                B_buf[co, ko, ci, ki].astype(env.acc_dtype),
                axis=[ko, ki]),
    name="C_buf")
######################################################################
# Casting the Results
# ~~~~~~~~~~~~~~~~~~~
# After the computation is done, we'll need to send the results computed by VTA
# back to main memory.
######################################################################
# .. note::
#
# **Memory Store Restrictions**
#
# One specificity of VTA is that it only supports DRAM stores in the narrow
# :code:`env.inp_dtype` data type format.
# This lets us reduce the data footprint for memory transfers, but also lets
# us quantize the wide accumulator data type down to a data format that
# matches the input activation data type.
# This means that in the context of neural network inference, the outputs
# of a given layer after activation can be consumed directly by the next
# layer.
#
# We perform one last typecast operation to the narrow
# input activation data format.
# Cast the wide accumulator down to the narrow output type, and send the
# result tensor back to main memory
C = tvm.compute(
    (o, m, env.BATCH, env.BLOCK_OUT),
    lambda *i: C_buf(*i).astype(env.inp_dtype),
    name="C")
######################################################################
# This concludes the computation declaration part of this tutorial.
######################################################################
# Scheduling the Computation
# --------------------------
# While the above lines describes the computation rule, we can obtain
# :code:`C` in many ways.
# TVM asks the user to provide an implementation of the computation called
# *schedule*.
#
# A schedule is a set of transformations to an original computation that
# transforms the implementation of the computation without affecting
# correctness.
# This simple VTA programming tutorial aims to demonstrate basic schedule
# transformations that will map the original schedule down to VTA hardware
# primitives.
######################################################################
# Default Schedule
# ~~~~~~~~~~~~~~~~
# After we construct the schedule, by default the schedule computes
# :code:`C` in the following way:
# Create the default schedule and take a look at the generated loop nest
s = tvm.create_schedule(C.op)
print(tvm.lower(s, [A, B, C], simple_mode=True))
######################################################################
# Although this schedule makes sense, it won't compile to VTA.
# In order to obtain correct code generation, we need to apply scheduling
# primitives and code annotation that will transform the schedule into
# one that can be directly lowered onto VTA hardware intrinsics.
# Those include:
#
# - DMA copy operations which will take globally-scoped tensors and copy
# those into locally-scoped tensors.
# - Tensor operations that will perform the matrix multiplication.
######################################################################
# Buffer Scopes
# ~~~~~~~~~~~~~
# First, we set the scope of the buffers to tell TVM that these buffers
# will be living in the VTA's on-chip SRAM caches.
# Below, we tell TVM that :code:`A_buf`, :code:`B_buf`, :code:`C_buf`
# will respectively live in VTA's on-chip input, weight and accumulator
# memory.
######################################################################
# .. note::
#
# **VTA's On-Chip SRAMs**
#
# VTA has three different memory scopes, each corresponding to different
# on-chip SRAM buffers.
#
# - :code:`env.inp_scope`: Input buffer, which is a read-only SRAM buffer
# that stores input matrices of shape :code:`(env.BATCH, env.BLOCK_IN)`
# of type :code:`env.inp_dtype`. The input buffer contains
# `2 ^ LOG_INP_BUFF_SIZE` matrix elements (as specified in the
# :code:`vta_config.json` file).
# - :code:`env.wgt_scope`: Weight buffer, which is a read-only SRAM buffer
# that stores weight matrices of shape :code:`(env.BLOCK_OUT, env.BLOCK_IN)`
# of type :code:`env.wgt_dtype`. The weight buffer contains
# `2 ^ LOG_WGT_BUFF_SIZE` matrix elements.
# - :code:`env.acc_scope`: Accumulator buffer, which is a read/write SRAM
# buffer that stores accumulator matrices of shape
# :code:`(env.BATCH, env.BLOCK_OUT)` of type :code:`env.acc_dtype`.
# The accumulator buffer is VTA's general purpose register file: it holds
# both intermediate results of convolutions and matrix multiplications
# as well as intermediate results of pooling, batch normalization, and
# activation layers. The accumulator buffer contains
# `2 ^ LOG_ACC_BUFF_SIZE` matrix elements.
# Set the intermediate tensor's scope to VTA's on-chip buffers
s[A_buf].set_scope(env.inp_scope)
s[B_buf].set_scope(env.wgt_scope)
s[C_buf].set_scope(env.acc_scope)
######################################################################
# DMA Transfers
# ~~~~~~~~~~~~~
# We need to schedule DMA transfers to move data living in DRAM to
# and from the VTA on-chip buffers.
# This can be achieved using the :code:`compute_at` schedule primitive
# which nests the copying of the buffers into the computation loop
# that performs the matrix multiplication.
#
# We insert :code:`dma_copy` pragmas to indicate to the compiler
# that the copy operations will be performed in bulk via DMA,
# which is common in hardware accelerators.
# Finally, we print the temporary schedule to observe the effects of
# moving the copy operations into the matrix multiplication loop.
# Move buffer copy into matrix multiply loop
s[A_buf].compute_at(s[C_buf], ko)
s[B_buf].compute_at(s[C_buf], ko)
# Tag the buffer copies with the DMA pragma to insert a DMA transfer
s[A_buf].pragma(s[A_buf].op.axis[0], env.dma_copy)
s[B_buf].pragma(s[B_buf].op.axis[0], env.dma_copy)
s[C].pragma(s[C].op.axis[0], env.dma_copy)
# Let's take a look at the transformed schedule
print(tvm.lower(s, [A, B, C], simple_mode=True))
######################################################################
# Tensorization
# ~~~~~~~~~~~~~
# The last step of the schedule transformation consists in applying
# *tensorization* to our schedule.
# Tensorization is analogous to vectorization, but extends the concept
# to a higher-dimensional unit of computation.
# Consequently, tensorization imposes data layout constraints as discussed
# earlier when declaring the data layout input placeholders.
# We've already arranged our tensors in a tiled format, so the next thing
# we need to perform is loop reordering to accommodate for tensorization.
#
# Here we choose to move the outermost reduction axis all the way out.
# This dictates that we first iterate over input channels, then batch
# dimensions, and finally output channels.
# Lastly, we apply the tensorization scheduling primitive :code:`tensorize`
# along the outer axis of the inner-most matrix matrix multiplication tensor
# block.
# We print the finalized schedule that is ready for code-generation
# by the VTA runtime JIT compiler.
s[C_buf].reorder(
ko,
s[C_buf].op.axis[0],
s[C_buf].op.axis[1],
s[C_buf].op.axis[2],
s[C_buf].op.axis[3],
ki)
s[C_buf].tensorize(s[C_buf].op.axis[2], env.gemm)
# Let's take a look at the finalized schedule
print(vta.lower(s, [A, B, C], simple_mode=True))
######################################################################
# This concludes the scheduling portion of this tutorial.
######################################################################
# TVM Compilation
# ---------------
# After we have finished specifying the schedule, we can compile it
# into a TVM function.
# Build GEMM VTA kernel
my_gemm = vta.build(s, [A, B, C], "ext_dev", env.target_host, name="my_gemm")
# Write the compiled module into an object file.
temp = util.tempdir()
my_gemm.save(temp.relpath("gemm.o"))
# Send the executable over RPC
remote.upload(temp.relpath("gemm.o"))
# Load the compiled module
f = remote.load_module("gemm.o")
######################################################################
# Running the Function
# --------------------
# The compiled TVM function uses a concise C API and can be invoked from
# code language.
#
# TVM provides an array API in python to aid quick testing and prototyping.
# The array API is based on `DLPack <https://github.com/dmlc/dlpack>`_ standard.
#
# - We first create a remote context (for remote execution on the Pynq).
# - Then :code:`tvm.nd.array` formats the data accordingly.
# - :code:`f()` runs the actual computation.
# - :code:`asnumpy()` copies the result array back in a format that can be
# interpreted.
#
# Get the remote device context
ctx = remote.ext_dev(0)
# Initialize the A and B arrays randomly in the int range of (-128, 128]
A_orig = np.random.randint(
-128, 128, size=(o * env.BATCH, n * env.BLOCK_IN)).astype(A.dtype)
B_orig = np.random.randint(
-128, 128, size=(m * env.BLOCK_OUT, n * env.BLOCK_IN)).astype(B.dtype)
# Apply packing to the A and B arrays from a 2D to a 4D packed layout
A_packed = A_orig.reshape(
o, env.BATCH, n, env.BLOCK_IN).transpose((0, 2, 1, 3))
B_packed = B_orig.reshape(
m, env.BLOCK_OUT, n, env.BLOCK_IN).transpose((0, 2, 1, 3))
# Format the input/output arrays with tvm.nd.array to the DLPack standard
A_nd = tvm.nd.array(A_packed, ctx)
B_nd = tvm.nd.array(B_packed, ctx)
C_nd = tvm.nd.array(np.zeros((o, m, env.BATCH, env.BLOCK_OUT)).astype(C.dtype), ctx)
# Clear stats
if env.TARGET in ["sim", "tsim"]:
simulator.clear_stats()
# Invoke the module to perform the computation
f(A_nd, B_nd, C_nd)
######################################################################
# Verifying Correctness
# ---------------------
# Compute the reference result with numpy and assert that the output of the
# matrix multiplication indeed is correct
# Compute reference result with numpy
C_ref = np.dot(A_orig.astype(env.acc_dtype),
B_orig.T.astype(env.acc_dtype)).astype(C.dtype)
C_ref = C_ref.reshape(
o, env.BATCH, m, env.BLOCK_OUT).transpose((0, 2, 1, 3))
np.testing.assert_equal(C_ref, C_nd.asnumpy())
# Print stats
if env.TARGET in ["sim", "tsim"]:
sim_stats = simulator.stats()
print("Execution statistics:")
for k, v in sim_stats.items():
print("\t{:<16}: {:>16}".format(k, v))
print("Successful matrix multiply test!")
######################################################################
# Summary
# -------
# This tutorial showcases the TVM workflow to implement a simple matrix
# multiplication example on VTA.
# The general workflow includes:
#
# - Programming the FPGA with the VTA bitstream over RPC.
# - Describing matrix multiplication via a series of computations.
# - Describing how we want to perform the computation using schedule primitives.
# - Compiling the function to the VTA target.
# - Running the compiled module and verifying it against a numpy implementation.
#
| {
"content_hash": "e81c63dab8f8c52b15fc462a5e5d1d74",
"timestamp": "",
"source": "github",
"line_count": 465,
"max_line_length": 100,
"avg_line_length": 40.39354838709677,
"alnum_prop": 0.6532502795080658,
"repo_name": "mlperf/training_results_v0.7",
"id": "70a899bb534fd7c2123e84dd01ecb33e27da5c2f",
"size": "19568",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Fujitsu/benchmarks/resnet/implementations/implementation_open/mxnet/3rdparty/tvm/vta/tutorials/matrix_multiply.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Awk",
"bytes": "14530"
},
{
"name": "Batchfile",
"bytes": "13130"
},
{
"name": "C",
"bytes": "172914"
},
{
"name": "C++",
"bytes": "13037795"
},
{
"name": "CMake",
"bytes": "113458"
},
{
"name": "CSS",
"bytes": "70255"
},
{
"name": "Clojure",
"bytes": "622652"
},
{
"name": "Cuda",
"bytes": "1974745"
},
{
"name": "Dockerfile",
"bytes": "149523"
},
{
"name": "Groovy",
"bytes": "160449"
},
{
"name": "HTML",
"bytes": "171537"
},
{
"name": "Java",
"bytes": "189275"
},
{
"name": "JavaScript",
"bytes": "98224"
},
{
"name": "Julia",
"bytes": "430755"
},
{
"name": "Jupyter Notebook",
"bytes": "11091342"
},
{
"name": "Lua",
"bytes": "17720"
},
{
"name": "MATLAB",
"bytes": "34903"
},
{
"name": "Makefile",
"bytes": "215967"
},
{
"name": "Perl",
"bytes": "1551186"
},
{
"name": "PowerShell",
"bytes": "13906"
},
{
"name": "Python",
"bytes": "36943114"
},
{
"name": "R",
"bytes": "134921"
},
{
"name": "Raku",
"bytes": "7280"
},
{
"name": "Ruby",
"bytes": "4930"
},
{
"name": "SWIG",
"bytes": "140111"
},
{
"name": "Scala",
"bytes": "1304960"
},
{
"name": "Shell",
"bytes": "1312832"
},
{
"name": "Smalltalk",
"bytes": "3497"
},
{
"name": "Starlark",
"bytes": "69877"
},
{
"name": "TypeScript",
"bytes": "243012"
}
],
"symlink_target": ""
} |
import six
from mod_pywebsocket import msgutil
def web_socket_do_extra_handshake(request):
    """Accept every incoming handshake; no extra validation is performed."""
    return None
def web_socket_transfer_data(request):
    """Echo every received frame back to the client until b'exit' arrives."""
    while True:
        message = msgutil.receive_message(request)
        if message == b'exit':
            return
        if message is None:
            continue
        # Text frames arrive as str; encode so the raw write always sends bytes.
        payload = message.encode() if isinstance(message, six.string_types) else message
        request.connection.write(payload)
| {
"content_hash": "5e2b1c1d337c5337d145cf1920470818",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 50,
"avg_line_length": 26.9375,
"alnum_prop": 0.6148491879350348,
"repo_name": "youtube/cobalt_sandbox",
"id": "0d5de62c1cb5f9f8d8444b587612077c384e159c",
"size": "450",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "third_party/web_platform_tests/websockets/handlers/echo_raw_wsh.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""
Simple Training CLI.
"""
import argparse
import os
import shutil
import sys
import tempfile
from contextlib import ExitStack
from typing import Any, cast, Optional, Dict, List, Tuple
import mxnet as mx
from . import arguments
from . import checkpoint_decoder
from . import constants as C
from . import convolution
from . import coverage
from . import data_io
from . import decoder
from . import encoder
from . import initializer
from . import loss
from . import lr_scheduler
from . import model
from . import rnn
from . import rnn_attention
from . import training
from . import transformer
from . import utils
from . import vocab
from .config import Config
from .log import setup_main_logger
from .optimizers import OptimizerConfig
from .utils import check_condition
# Temporary logger, the real one (logging to a file probably, will be created in the main function)
logger = setup_main_logger(__name__, file_logging=False, console=True)
def none_if_negative(val):
    """Map a negative value to None; return any other value unchanged."""
    if val < 0:
        return None
    return val
def _list_to_tuple(v):
"""Convert v to a tuple if it is a list."""
if isinstance(v, list):
return tuple(v)
return v
def _dict_difference(dict1: Dict, dict2: Dict):
diffs = set()
for k, v in dict1.items():
# Note: A list and a tuple with the same values is considered equal
# (this is due to json deserializing former tuples as list).
if k not in dict2 or _list_to_tuple(dict2[k]) != _list_to_tuple(v):
diffs.add(k)
return diffs
def check_arg_compatibility(args: argparse.Namespace):
    """
    Check if some arguments are incompatible with each other.
    Delegates to check_condition, which aborts with the given message when a
    constraint is violated (exact failure behavior defined in sockeye.utils).
    :param args: Arguments as returned by argparse.
    """
    check_condition(args.optimized_metric == C.BLEU or args.optimized_metric in args.metrics,
                    "Must optimize either BLEU or one of tracked metrics (--metrics)")
    # Transformer ties the model size to the embedding size on each side:
    # index 0 of the paired args is the source/encoder, index 1 the target/decoder.
    if args.encoder == C.TRANSFORMER_TYPE:
        check_condition(args.transformer_model_size[0] == args.num_embed[0],
                        "Source embedding size must match transformer model size: %s vs. %s"
                        % (args.transformer_model_size, args.num_embed[0]))
        total_source_factor_size = sum(args.source_factors_num_embed)
        if total_source_factor_size > 0:
            # Source factor embeddings are concatenated onto the word embedding,
            # so the effective encoder model size grows by their total width.
            adjusted_transformer_encoder_model_size = args.num_embed[0] + total_source_factor_size
            check_condition(adjusted_transformer_encoder_model_size % 2 == 0 and
                            adjusted_transformer_encoder_model_size % args.transformer_attention_heads[0] == 0,
                            "Sum of source factor sizes, i.e. num-embed plus source-factors-num-embed, (%d) "
                            "has to be even and a multiple of encoder attention heads (%d)" % (
                                adjusted_transformer_encoder_model_size, args.transformer_attention_heads[0]))
    if args.decoder == C.TRANSFORMER_TYPE:
        check_condition(args.transformer_model_size[1] == args.num_embed[1],
                        "Target embedding size must match transformer model size: %s vs. %s"
                        % (args.transformer_model_size, args.num_embed[1]))
    if args.lhuc is not None:
        # Actually this check is a bit too strict
        check_condition(args.encoder != C.CONVOLUTION_TYPE or args.decoder != C.CONVOLUTION_TYPE,
                        "LHUC is not supported for convolutional models yet.")
        check_condition(args.decoder != C.TRANSFORMER_TYPE or C.LHUC_STATE_INIT not in args.lhuc,
                        "The %s options only applies to RNN models" % C.LHUC_STATE_INIT)
def check_resume(args: argparse.Namespace, output_folder: str) -> bool:
    """
    Check if we should resume a broken training run.
    Creates the output folder when it does not exist, wipes and recreates it
    when --overwrite-output is set, and calls sys.exit(1) when resuming is
    requested with incompatible arguments or the folder holds a trained model.
    :param args: Arguments as returned by argparse.
    :param output_folder: Main output folder for the model.
    :return: Flag signaling if we are resuming training.
    """
    resume_training = False
    training_state_dir = os.path.join(output_folder, C.TRAINING_STATE_DIRNAME)
    if os.path.exists(output_folder):
        if args.overwrite_output:
            logger.info("Removing existing output folder %s.", output_folder)
            shutil.rmtree(output_folder)
            os.makedirs(output_folder)
        elif os.path.exists(training_state_dir):
            # A training state exists: only resume if the saved args match the
            # current ones (modulo the tolerated differences below).
            old_args = vars(arguments.load_args(os.path.join(output_folder, C.ARGS_STATE_NAME)))
            arg_diffs = _dict_difference(vars(args), old_args) | _dict_difference(old_args, vars(args))
            # Remove args that may differ without affecting the training.
            arg_diffs -= set(C.ARGS_MAY_DIFFER)
            # allow different device-ids provided their total count is the same
            if 'device_ids' in arg_diffs and len(old_args['device_ids']) == len(vars(args)['device_ids']):
                arg_diffs.discard('device_ids')
            if not arg_diffs:
                resume_training = True
            else:
                # We do not have the logger yet
                logger.error("Mismatch in arguments for training continuation.")
                logger.error("Differing arguments: %s.", ", ".join(arg_diffs))
                sys.exit(1)
        elif os.path.exists(os.path.join(output_folder, C.PARAMS_BEST_NAME)):
            logger.error("Refusing to overwrite model folder %s as it seems to contain a trained model.", output_folder)
            sys.exit(1)
        else:
            logger.info("The output folder %s already exists, but no training state or parameter file was found. "
                        "Will start training from scratch.", output_folder)
    else:
        os.makedirs(output_folder)
    return resume_training
def determine_context(args: argparse.Namespace, exit_stack: ExitStack) -> List[mx.Context]:
    """
    Determine the context we should run on (CPU or GPU).
    :param args: Arguments as returned by argparse.
    :param exit_stack: An ExitStack from contextlib; GPU locks acquired here
        are registered on it so they are released when the stack unwinds.
    :return: A list with the context(s) to run on.
    """
    if args.use_cpu:
        logger.info("Training Device: CPU")
        context = [mx.cpu()]
    else:
        num_gpus = utils.get_num_gpus()
        check_condition(num_gpus >= 1,
                        "No GPUs found, consider running on the CPU with --use-cpu "
                        "(note: check depends on nvidia-smi and this could also mean that the nvidia-smi "
                        "binary isn't on the path).")
        # Device ids are either taken verbatim or acquired through lock files
        # (utils.acquire_gpus) to avoid clashing with other jobs on the host.
        if args.disable_device_locking:
            context = utils.expand_requested_device_ids(args.device_ids)
        else:
            context = exit_stack.enter_context(utils.acquire_gpus(args.device_ids, lock_dir=args.lock_dir))
        if args.batch_type == C.BATCH_TYPE_SENTENCE:
            check_condition(args.batch_size % len(context) == 0, "When using multiple devices the batch size must be "
                                                                 "divisible by the number of devices. Choose a batch "
                                                                 "size that is a multiple of %d." % len(context))
        logger.info("Training Device(s): GPU %s", context)
        # `context` holds integer GPU ids up to this point; convert to mx.Context.
        context = [mx.gpu(gpu_id) for gpu_id in context]
    return context
def create_checkpoint_decoder(args: argparse.Namespace,
                              exit_stack: ExitStack,
                              train_context: List[mx.Context]) -> Optional[checkpoint_decoder.CheckpointDecoder]:
    """
    Returns a checkpoint decoder or None.
    :param args: Arguments as returned by argparse.
    :param exit_stack: An ExitStack from contextlib; used to register GPU locks.
    :param train_context: Context for training.
    :return: A CheckpointDecoder if --decode-and-evaluate != 0, else None.
    """
    sample_size = args.decode_and_evaluate
    # Optimizing for BLEU implies decoding during training; -1 means "use the
    # full validation set" downstream.
    if args.optimized_metric == C.BLEU and sample_size == 0:
        logger.info("You chose BLEU as the optimized metric, will turn on BLEU monitoring during training. "
                    "To control how many validation sentences are used for calculating bleu use "
                    "the --decode-and-evaluate argument.")
        sample_size = -1
    if sample_size == 0:
        return None
    if args.use_cpu or args.decode_and_evaluate_use_cpu:
        context = mx.cpu()
    elif args.decode_and_evaluate_device_id is not None:
        # decode device is defined from the commandline
        num_gpus = utils.get_num_gpus()
        check_condition(num_gpus >= 1,
                        "No GPUs found, consider running on the CPU with --use-cpu "
                        "(note: check depends on nvidia-smi and this could also mean that the nvidia-smi "
                        "binary isn't on the path).")
        if args.disable_device_locking:
            context = utils.expand_requested_device_ids([args.decode_and_evaluate_device_id])
        else:
            context = exit_stack.enter_context(utils.acquire_gpus([args.decode_and_evaluate_device_id],
                                                                  lock_dir=args.lock_dir))
        context = mx.gpu(context[0])
    else:
        # default decode context is the last training device
        context = train_context[-1]
    return checkpoint_decoder.CheckpointDecoder(context=context,
                                                inputs=[args.validation_source] + args.validation_source_factors,
                                                references=args.validation_target,
                                                model=args.output,
                                                sample_size=sample_size)
def use_shared_vocab(args: argparse.Namespace) -> bool:
    """
    Return True when the arguments entail one vocabulary shared by source and
    target, either requested explicitly or forced by src+trg weight tying.
    :param args: Arguments as returned by argparse.
    """
    shared = args.shared_vocab
    ties_source_and_target = (args.weight_tying
                              and C.WEIGHT_TYING_SRC in args.weight_tying_type
                              and C.WEIGHT_TYING_TRG in args.weight_tying_type)
    if ties_source_and_target:
        if not shared:
            logger.info("A shared source/target vocabulary will be used as weight tying source/target weight tying "
                        "is enabled")
        shared = True
    return shared
def create_data_iters_and_vocabs(args: argparse.Namespace,
                                 max_seq_len_source: int,
                                 max_seq_len_target: int,
                                 shared_vocab: bool,
                                 resume_training: bool,
                                 output_folder: str) -> Tuple['data_io.BaseParallelSampleIter',
                                                              'data_io.BaseParallelSampleIter',
                                                              'data_io.DataConfig',
                                                              List[vocab.Vocab], vocab.Vocab]:
    """
    Create the data iterators and the vocabularies.
    Handles two mutually exclusive input modes: a prepared data folder
    (--prepared-data) or a raw parallel corpus (--source/--target).
    :param args: Arguments as returned by argparse.
    :param max_seq_len_source: Source maximum sequence length.
    :param max_seq_len_target: Target maximum sequence length.
    :param shared_vocab: Whether to create a shared vocabulary.
    :param resume_training: Whether to resume training.
    :param output_folder: Output folder.
    :return: The data iterators (train, validation, config_data) as well as the source and target vocabularies.
    """
    num_words_source, num_words_target = args.num_words
    word_min_count_source, word_min_count_target = args.word_min_count
    # NOTE(review): a negative device id entry appears to stand for "-di
    # auto-acquired devices", hence it contributes -di to the device count —
    # confirm against the device-id handling in determine_context/arguments.
    batch_num_devices = 1 if args.use_cpu else sum(-di if di < 0 else 1 for di in args.device_ids)
    batch_by_words = args.batch_type == C.BATCH_TYPE_WORD
    validation_sources = [args.validation_source] + args.validation_source_factors
    validation_sources = [str(os.path.abspath(source)) for source in validation_sources]
    either_raw_or_prepared_error_msg = "Either specify a raw training corpus with %s and %s or a preprocessed corpus " \
                                       "with %s." % (C.TRAINING_ARG_SOURCE,
                                                     C.TRAINING_ARG_TARGET,
                                                     C.TRAINING_ARG_PREPARED_DATA)
    if args.prepared_data is not None:
        utils.check_condition(args.source is None and args.target is None, either_raw_or_prepared_error_msg)
        if not resume_training:
            utils.check_condition(args.source_vocab is None and args.target_vocab is None,
                                  "You are using a prepared data folder, which is tied to a vocabulary. "
                                  "To change it you need to rerun data preparation with a different vocabulary.")
        train_iter, validation_iter, data_config, source_vocabs, target_vocab = data_io.get_prepared_data_iters(
            prepared_data_dir=args.prepared_data,
            validation_sources=validation_sources,
            validation_target=str(os.path.abspath(args.validation_target)),
            shared_vocab=shared_vocab,
            batch_size=args.batch_size,
            batch_by_words=batch_by_words,
            batch_num_devices=batch_num_devices,
            fill_up=args.fill_up)
        # One vocabulary per source factor plus one for the surface words.
        check_condition(len(source_vocabs) == len(args.source_factors_num_embed) + 1,
                        "Data was prepared with %d source factors, but only provided %d source factor dimensions." % (
                            len(source_vocabs), len(args.source_factors_num_embed) + 1))
        if resume_training:
            # resuming training. Making sure the vocabs in the model and in the prepared data match up
            model_source_vocabs = vocab.load_source_vocabs(output_folder)
            for i, (v, mv) in enumerate(zip(source_vocabs, model_source_vocabs)):
                utils.check_condition(vocab.are_identical(v, mv),
                                      "Prepared data and resumed model source vocab %d do not match." % i)
            model_target_vocab = vocab.load_target_vocab(output_folder)
            utils.check_condition(vocab.are_identical(target_vocab, model_target_vocab),
                                  "Prepared data and resumed model target vocabs do not match.")
        check_condition(len(args.source_factors) == len(args.validation_source_factors),
                        'Training and validation data must have the same number of factors: %d vs. %d.' % (
                            len(args.source_factors), len(args.validation_source_factors)))
        return train_iter, validation_iter, data_config, source_vocabs, target_vocab
    else:
        utils.check_condition(args.prepared_data is None and args.source is not None and args.target is not None,
                              either_raw_or_prepared_error_msg)
        if resume_training:
            # Load the existing vocabs created when starting the training run.
            source_vocabs = vocab.load_source_vocabs(output_folder)
            target_vocab = vocab.load_target_vocab(output_folder)
            # Recover the vocabulary path from the data info file:
            data_info = cast(data_io.DataInfo, Config.load(os.path.join(output_folder, C.DATA_INFO)))
            source_vocab_paths = data_info.source_vocabs
            target_vocab_path = data_info.target_vocab
        else:
            # Load or create vocabs
            source_vocab_paths = [args.source_vocab] + [None] * len(args.source_factors)
            target_vocab_path = args.target_vocab
            source_vocabs, target_vocab = vocab.load_or_create_vocabs(
                source_paths=[args.source] + args.source_factors,
                target_path=args.target,
                source_vocab_paths=source_vocab_paths,
                target_vocab_path=target_vocab_path,
                shared_vocab=shared_vocab,
                num_words_source=num_words_source,
                num_words_target=num_words_target,
                word_min_count_source=word_min_count_source,
                word_min_count_target=word_min_count_target)
        check_condition(len(args.source_factors) == len(args.source_factors_num_embed),
                        "Number of source factor data (%d) differs from provided source factor dimensions (%d)" % (
                            len(args.source_factors), len(args.source_factors_num_embed)))
        sources = [args.source] + args.source_factors
        sources = [str(os.path.abspath(source)) for source in sources]
        train_iter, validation_iter, config_data, data_info = data_io.get_training_data_iters(
            sources=sources,
            target=os.path.abspath(args.target),
            validation_sources=validation_sources,
            validation_target=os.path.abspath(args.validation_target),
            source_vocabs=source_vocabs,
            target_vocab=target_vocab,
            source_vocab_paths=source_vocab_paths,
            target_vocab_path=target_vocab_path,
            shared_vocab=shared_vocab,
            batch_size=args.batch_size,
            batch_by_words=batch_by_words,
            batch_num_devices=batch_num_devices,
            fill_up=args.fill_up,
            max_seq_len_source=max_seq_len_source,
            max_seq_len_target=max_seq_len_target,
            bucketing=not args.no_bucketing,
            bucket_width=args.bucket_width)
        # Persist the data info so a later resume can recover vocabulary paths.
        data_info_fname = os.path.join(output_folder, C.DATA_INFO)
        logger.info("Writing data config to '%s'", data_info_fname)
        data_info.save(data_info_fname)
        return train_iter, validation_iter, config_data, source_vocabs, target_vocab
def create_encoder_config(args: argparse.Namespace,
                          max_seq_len_source: int,
                          max_seq_len_target: int,
                          config_conv: Optional[encoder.ConvolutionalEmbeddingConfig]) -> Tuple[encoder.EncoderConfig,
                                                                                                int]:
    """
    Create the encoder config.
    Dispatches on args.encoder: transformer (with or without convolutional
    embeddings), convolutional, or RNN (the fallback branch).
    :param args: Arguments as returned by argparse.
    :param max_seq_len_source: Maximum source sequence length.
    :param max_seq_len_target: Maximum target sequence length.
    :param config_conv: The config for the convolutional encoder (optional).
    :return: The encoder config and the number of hidden units of the encoder.
    """
    # Paired args hold (encoder, decoder) values; index/unpack position 0 here.
    encoder_num_layers, _ = args.num_layers
    num_embed_source, _ = args.num_embed
    config_encoder = None  # type: Optional[Config]
    if args.encoder in (C.TRANSFORMER_TYPE, C.TRANSFORMER_WITH_CONV_EMBED_TYPE):
        encoder_transformer_preprocess, _ = args.transformer_preprocess
        encoder_transformer_postprocess, _ = args.transformer_postprocess
        encoder_transformer_model_size = args.transformer_model_size[0]
        total_source_factor_size = sum(args.source_factors_num_embed)
        if total_source_factor_size > 0:
            # Source factor embeddings are concatenated to the word embedding,
            # so the transformer model size must grow accordingly.
            logger.info("Encoder transformer-model-size adjusted to account source factor embeddings: %d -> %d" % (
                encoder_transformer_model_size, num_embed_source + total_source_factor_size))
            encoder_transformer_model_size = num_embed_source + total_source_factor_size
        config_encoder = transformer.TransformerConfig(
            model_size=encoder_transformer_model_size,
            attention_heads=args.transformer_attention_heads[0],
            feed_forward_num_hidden=args.transformer_feed_forward_num_hidden[0],
            act_type=args.transformer_activation_type,
            num_layers=encoder_num_layers,
            dropout_attention=args.transformer_dropout_attention,
            dropout_act=args.transformer_dropout_act,
            dropout_prepost=args.transformer_dropout_prepost,
            positional_embedding_type=args.transformer_positional_embedding_type,
            preprocess_sequence=encoder_transformer_preprocess,
            postprocess_sequence=encoder_transformer_postprocess,
            max_seq_len_source=max_seq_len_source,
            max_seq_len_target=max_seq_len_target,
            conv_config=config_conv,
            lhuc=args.lhuc is not None and (C.LHUC_ENCODER in args.lhuc or C.LHUC_ALL in args.lhuc))
        encoder_num_hidden = encoder_transformer_model_size
    elif args.encoder == C.CONVOLUTION_TYPE:
        cnn_kernel_width_encoder, _ = args.cnn_kernel_width
        cnn_config = convolution.ConvolutionConfig(kernel_width=cnn_kernel_width_encoder,
                                                   num_hidden=args.cnn_num_hidden,
                                                   act_type=args.cnn_activation_type,
                                                   weight_normalization=args.weight_normalization)
        cnn_num_embed = num_embed_source + sum(args.source_factors_num_embed)
        config_encoder = encoder.ConvolutionalEncoderConfig(num_embed=cnn_num_embed,
                                                            max_seq_len_source=max_seq_len_source,
                                                            cnn_config=cnn_config,
                                                            num_layers=encoder_num_layers,
                                                            positional_embedding_type=args.cnn_positional_embedding_type)
        encoder_num_hidden = args.cnn_num_hidden
    else:
        # Default: recurrent encoder.
        encoder_rnn_dropout_inputs, _ = args.rnn_dropout_inputs
        encoder_rnn_dropout_states, _ = args.rnn_dropout_states
        encoder_rnn_dropout_recurrent, _ = args.rnn_dropout_recurrent
        config_encoder = encoder.RecurrentEncoderConfig(
            rnn_config=rnn.RNNConfig(cell_type=args.rnn_cell_type,
                                     num_hidden=args.rnn_num_hidden,
                                     num_layers=encoder_num_layers,
                                     dropout_inputs=encoder_rnn_dropout_inputs,
                                     dropout_states=encoder_rnn_dropout_states,
                                     dropout_recurrent=encoder_rnn_dropout_recurrent,
                                     residual=args.rnn_residual_connections,
                                     first_residual_layer=args.rnn_first_residual_layer,
                                     forget_bias=args.rnn_forget_bias,
                                     lhuc=args.lhuc is not None and (C.LHUC_ENCODER in args.lhuc or C.LHUC_ALL in args.lhuc)),
            conv_config=config_conv,
            reverse_input=args.rnn_encoder_reverse_input)
        encoder_num_hidden = args.rnn_num_hidden
    return config_encoder, encoder_num_hidden
def create_decoder_config(args: argparse.Namespace, encoder_num_hidden: int,
                          max_seq_len_source: int, max_seq_len_target: int) -> decoder.DecoderConfig:
    """
    Create the config for the decoder.
    Dispatches on args.decoder: transformer, convolutional, or RNN with
    attention (the fallback branch).
    :param args: Arguments as returned by argparse.
    :param encoder_num_hidden: Number of hidden units of the Encoder.
    :param max_seq_len_source: Maximum source sequence length.
    :param max_seq_len_target: Maximum target sequence length.
    :return: The config for the decoder.
    """
    # Paired args hold (encoder, decoder) values; index/unpack position 1 here.
    _, decoder_num_layers = args.num_layers
    _, num_embed_target = args.num_embed
    config_decoder = None  # type: Optional[Config]
    if args.decoder == C.TRANSFORMER_TYPE:
        _, decoder_transformer_preprocess = args.transformer_preprocess
        _, decoder_transformer_postprocess = args.transformer_postprocess
        config_decoder = transformer.TransformerConfig(
            model_size=args.transformer_model_size[1],
            attention_heads=args.transformer_attention_heads[1],
            feed_forward_num_hidden=args.transformer_feed_forward_num_hidden[1],
            act_type=args.transformer_activation_type,
            num_layers=decoder_num_layers,
            dropout_attention=args.transformer_dropout_attention,
            dropout_act=args.transformer_dropout_act,
            dropout_prepost=args.transformer_dropout_prepost,
            positional_embedding_type=args.transformer_positional_embedding_type,
            preprocess_sequence=decoder_transformer_preprocess,
            postprocess_sequence=decoder_transformer_postprocess,
            max_seq_len_source=max_seq_len_source,
            max_seq_len_target=max_seq_len_target,
            conv_config=None,
            lhuc=args.lhuc is not None and (C.LHUC_DECODER in args.lhuc or C.LHUC_ALL in args.lhuc))
    elif args.decoder == C.CONVOLUTION_TYPE:
        _, cnn_kernel_width_decoder = args.cnn_kernel_width
        convolution_config = convolution.ConvolutionConfig(kernel_width=cnn_kernel_width_decoder,
                                                           num_hidden=args.cnn_num_hidden,
                                                           act_type=args.cnn_activation_type,
                                                           weight_normalization=args.weight_normalization)
        config_decoder = decoder.ConvolutionalDecoderConfig(cnn_config=convolution_config,
                                                            max_seq_len_target=max_seq_len_target,
                                                            num_embed=num_embed_target,
                                                            encoder_num_hidden=encoder_num_hidden,
                                                            num_layers=decoder_num_layers,
                                                            positional_embedding_type=args.cnn_positional_embedding_type,
                                                            project_qkv=args.cnn_project_qkv,
                                                            hidden_dropout=args.cnn_hidden_dropout)
    else:
        # Default: recurrent decoder with attention; attention size falls back
        # to the RNN hidden size when not given explicitly.
        rnn_attention_num_hidden = args.rnn_num_hidden if args.rnn_attention_num_hidden is None else args.rnn_attention_num_hidden
        config_coverage = None
        if args.rnn_attention_type == C.ATT_COV:
            config_coverage = coverage.CoverageConfig(type=args.rnn_attention_coverage_type,
                                                      num_hidden=args.rnn_attention_coverage_num_hidden,
                                                      layer_normalization=args.layer_normalization)
        config_attention = rnn_attention.AttentionConfig(type=args.rnn_attention_type,
                                                         num_hidden=rnn_attention_num_hidden,
                                                         input_previous_word=args.rnn_attention_use_prev_word,
                                                         source_num_hidden=encoder_num_hidden,
                                                         query_num_hidden=args.rnn_num_hidden,
                                                         layer_normalization=args.layer_normalization,
                                                         config_coverage=config_coverage,
                                                         num_heads=args.rnn_attention_mhdot_heads,
                                                         is_scaled=args.rnn_scale_dot_attention)
        _, decoder_rnn_dropout_inputs = args.rnn_dropout_inputs
        _, decoder_rnn_dropout_states = args.rnn_dropout_states
        _, decoder_rnn_dropout_recurrent = args.rnn_dropout_recurrent
        config_decoder = decoder.RecurrentDecoderConfig(
            max_seq_len_source=max_seq_len_source,
            rnn_config=rnn.RNNConfig(cell_type=args.rnn_cell_type,
                                     num_hidden=args.rnn_num_hidden,
                                     num_layers=decoder_num_layers,
                                     dropout_inputs=decoder_rnn_dropout_inputs,
                                     dropout_states=decoder_rnn_dropout_states,
                                     dropout_recurrent=decoder_rnn_dropout_recurrent,
                                     residual=args.rnn_residual_connections,
                                     first_residual_layer=args.rnn_first_residual_layer,
                                     forget_bias=args.rnn_forget_bias,
                                     lhuc=args.lhuc is not None and (C.LHUC_DECODER in args.lhuc or C.LHUC_ALL in args.lhuc)),
            attention_config=config_attention,
            hidden_dropout=args.rnn_decoder_hidden_dropout,
            state_init=args.rnn_decoder_state_init,
            context_gating=args.rnn_context_gating,
            layer_normalization=args.layer_normalization,
            attention_in_upper_layers=args.rnn_attention_in_upper_layers,
            state_init_lhuc=args.lhuc is not None and (C.LHUC_STATE_INIT in args.lhuc or C.LHUC_ALL in args.lhuc),
            enc_last_hidden_concat_to_embedding=args.rnn_enc_last_hidden_concat_to_embedding)
    return config_decoder
def check_encoder_decoder_args(args) -> None:
    """
    Check possible encoder-decoder argument conflicts.

    Warns when embedding dropout and RNN input dropout are both enabled on the
    same side (they stack into two consecutive dropout layers), and requires
    LSTM cells whenever recurrent dropout is requested.

    :param args: Arguments as returned by argparse.
    """
    encoder_embed_dropout, decoder_embed_dropout = args.embed_dropout
    encoder_rnn_dropout_inputs, decoder_rnn_dropout_inputs = args.rnn_dropout_inputs
    encoder_rnn_dropout_states, decoder_rnn_dropout_states = args.rnn_dropout_states
    if encoder_embed_dropout > 0 and encoder_rnn_dropout_inputs > 0:
        logger.warning("Setting encoder RNN AND source embedding dropout > 0 leads to "
                       "two dropout layers on top of each other.")
    if decoder_embed_dropout > 0 and decoder_rnn_dropout_inputs > 0:
        # Fixed copy-paste bug: this branch checks the *decoder* dropouts, but the
        # message previously said "encoder RNN AND source embedding".
        logger.warning("Setting decoder RNN AND target embedding dropout > 0 leads to "
                       "two dropout layers on top of each other.")
    encoder_rnn_dropout_recurrent, decoder_rnn_dropout_recurrent = args.rnn_dropout_recurrent
    if encoder_rnn_dropout_recurrent > 0 or decoder_rnn_dropout_recurrent > 0:
        check_condition(args.rnn_cell_type == C.LSTM_TYPE,
                        "Recurrent dropout without memory loss only supported for LSTMs right now.")
def create_model_config(args: argparse.Namespace,
                        source_vocab_sizes: List[int],
                        target_vocab_size: int,
                        max_seq_len_source: int,
                        max_seq_len_target: int,
                        config_data: data_io.DataConfig) -> model.ModelConfig:
    """
    Create a ModelConfig from the argument given in the command line.

    :param args: Arguments as returned by argparse.
    :param source_vocab_sizes: The size of the source vocabulary (and source factors).
    :param target_vocab_size: The size of the target vocabulary.
    :param max_seq_len_source: Maximum source sequence length.
    :param max_seq_len_target: Maximum target sequence length.
    :param config_data: Data config.
    :return: The model configuration.
    """
    num_embed_source, num_embed_target = args.num_embed
    embed_dropout_source, embed_dropout_target = args.embed_dropout
    # First entry is the primary source vocabulary; remaining entries (if any)
    # belong to additional source factors.
    source_vocab_size, *source_factor_vocab_sizes = source_vocab_sizes

    # Warns / fails fast on conflicting dropout and cell-type combinations.
    check_encoder_decoder_args(args)

    # Optional convolutional embedding layer in front of the encoder.
    config_conv = None
    if args.encoder == C.RNN_WITH_CONV_EMBED_NAME:
        config_conv = encoder.ConvolutionalEmbeddingConfig(num_embed=num_embed_source,
                                                           max_filter_width=args.conv_embed_max_filter_width,
                                                           num_filters=args.conv_embed_num_filters,
                                                           pool_stride=args.conv_embed_pool_stride,
                                                           num_highway_layers=args.conv_embed_num_highway_layers,
                                                           dropout=args.conv_embed_dropout)
    if args.encoder == C.TRANSFORMER_WITH_CONV_EMBED_TYPE:
        # Same as above, but additionally projects the output back to the
        # source embedding dimension (output_dim=num_embed_source).
        config_conv = encoder.ConvolutionalEmbeddingConfig(num_embed=num_embed_source,
                                                           output_dim=num_embed_source,
                                                           max_filter_width=args.conv_embed_max_filter_width,
                                                           num_filters=args.conv_embed_num_filters,
                                                           pool_stride=args.conv_embed_pool_stride,
                                                           num_highway_layers=args.conv_embed_num_highway_layers,
                                                           dropout=args.conv_embed_dropout)

    config_encoder, encoder_num_hidden = create_encoder_config(args, max_seq_len_source, max_seq_len_target,
                                                               config_conv)
    config_decoder = create_decoder_config(args, encoder_num_hidden, max_seq_len_source, max_seq_len_target)

    # One embedding config per source factor, only when factors are present.
    source_factor_configs = None
    if len(source_vocab_sizes) > 1:
        source_factor_configs = [encoder.FactorConfig(size, dim) for size, dim in zip(source_factor_vocab_sizes,
                                                                                     args.source_factors_num_embed)]

    config_embed_source = encoder.EmbeddingConfig(vocab_size=source_vocab_size,
                                                  num_embed=num_embed_source,
                                                  dropout=embed_dropout_source,
                                                  factor_configs=source_factor_configs)
    config_embed_target = encoder.EmbeddingConfig(vocab_size=target_vocab_size,
                                                  num_embed=num_embed_target,
                                                  dropout=embed_dropout_target)

    config_loss = loss.LossConfig(name=args.loss,
                                  vocab_size=target_vocab_size,
                                  normalization_type=args.loss_normalization_type,
                                  label_smoothing=args.label_smoothing)

    model_config = model.ModelConfig(config_data=config_data,
                                     vocab_source_size=source_vocab_size,
                                     vocab_target_size=target_vocab_size,
                                     config_embed_source=config_embed_source,
                                     config_embed_target=config_embed_target,
                                     config_encoder=config_encoder,
                                     config_decoder=config_decoder,
                                     config_loss=config_loss,
                                     weight_tying=args.weight_tying,
                                     weight_tying_type=args.weight_tying_type if args.weight_tying else None,
                                     weight_normalization=args.weight_normalization,
                                     lhuc=args.lhuc is not None)
    return model_config
def create_training_model(config: model.ModelConfig,
                          context: List[mx.Context],
                          output_dir: str,
                          train_iter: data_io.BaseParallelSampleIter,
                          args: argparse.Namespace) -> training.TrainingModel:
    """
    Create a training model and load the parameters from disk if needed.

    :param config: The configuration for the model.
    :param context: The context(s) to run on.
    :param output_dir: Output folder.
    :param train_iter: The training data iterator.
    :param args: Arguments as returned by argparse.
    :return: The training model.
    """
    # Collect the constructor arguments first; data/label shapes and the
    # default bucket key are taken straight from the training iterator.
    model_kwargs = dict(config=config,
                        context=context,
                        output_dir=output_dir,
                        provide_data=train_iter.provide_data,
                        provide_label=train_iter.provide_label,
                        default_bucket_key=train_iter.default_bucket_key,
                        bucketing=not args.no_bucketing,
                        gradient_compression_params=gradient_compression_params(args),
                        fixed_param_names=args.fixed_param_names)
    return training.TrainingModel(**model_kwargs)
def gradient_compression_params(args: argparse.Namespace) -> Optional[Dict[str, Any]]:
    """
    Build the gradient-compression settings from CLI arguments.

    :param args: Arguments as returned by argparse.
    :return: Gradient compression parameters or None when compression is disabled.
    """
    compression_type = args.gradient_compression_type
    if compression_type is None:
        # Compression disabled on the command line.
        return None
    return {'type': compression_type,
            'threshold': args.gradient_compression_threshold}
def create_optimizer_config(args: argparse.Namespace, source_vocab_sizes: List[int],
                            extra_initializers: List[Tuple[str, mx.initializer.Initializer]] = None) -> OptimizerConfig:
    """
    Returns an OptimizerConfig.

    :param args: Arguments as returned by argparse.
    :param source_vocab_sizes: Source vocabulary sizes.
    :param extra_initializers: extra initializer to pass to `get_initializer`.
    :return: The optimizer type and its parameters as well as the kvstore.
    """
    optimizer_params = {'wd': args.weight_decay,
                        "learning_rate": args.initial_learning_rate}

    # A negative CLI threshold means "no clipping" (none_if_negative -> None).
    gradient_clipping_threshold = none_if_negative(args.gradient_clipping_threshold)
    if gradient_clipping_threshold is None:
        logger.info("Gradient clipping threshold set to negative value. Will not perform gradient clipping.")
        gradient_clipping_type = C.GRADIENT_CLIPPING_TYPE_NONE
    else:
        gradient_clipping_type = args.gradient_clipping_type

    # Note: for 'abs' we use the implementation inside of MXNet's optimizer and 'norm_*' we implement ourselves
    # inside the TrainingModel.
    if gradient_clipping_threshold is not None and gradient_clipping_type == C.GRADIENT_CLIPPING_TYPE_ABS:
        optimizer_params["clip_gradient"] = gradient_clipping_threshold
    if args.momentum is not None:
        optimizer_params["momentum"] = args.momentum
    if args.loss_normalization_type == C.LOSS_NORM_VALID:
        # When we normalize by the number of non-PAD symbols in a batch we need to disable rescale_grad.
        optimizer_params["rescale_grad"] = 1.0
    elif args.loss_normalization_type == C.LOSS_NORM_BATCH:
        # Making MXNet module API's default scaling factor explicit
        optimizer_params["rescale_grad"] = 1.0 / args.batch_size
    # Manually specified params
    if args.optimizer_params:
        optimizer_params.update(args.optimizer_params)

    # Embedding init sigma scales with the primary source vocabulary size
    # (vocab_size ** -0.5).
    weight_init = initializer.get_initializer(default_init_type=args.weight_init,
                                              default_init_scale=args.weight_init_scale,
                                              default_init_xavier_rand_type=args.weight_init_xavier_rand_type,
                                              default_init_xavier_factor_type=args.weight_init_xavier_factor_type,
                                              embed_init_type=args.embed_weight_init,
                                              embed_init_sigma=source_vocab_sizes[0] ** -0.5,
                                              rnn_init_type=args.rnn_h2h_init,
                                              extra_initializers=extra_initializers)

    lr_sched = lr_scheduler.get_lr_scheduler(args.learning_rate_scheduler_type,
                                             args.checkpoint_frequency,
                                             none_if_negative(args.learning_rate_half_life),
                                             args.learning_rate_reduce_factor,
                                             args.learning_rate_reduce_num_not_improved,
                                             args.learning_rate_schedule,
                                             args.learning_rate_warmup)

    config = OptimizerConfig(name=args.optimizer,
                             params=optimizer_params,
                             kvstore=args.kvstore,
                             initializer=weight_init,
                             gradient_clipping_type=gradient_clipping_type,
                             gradient_clipping_threshold=gradient_clipping_threshold)
    config.set_lr_scheduler(lr_sched)
    logger.info("Optimizer: %s", config)
    logger.info("Gradient Compression: %s", gradient_compression_params(args))
    return config
def main():
    """CLI entry point: parse the training arguments and launch training."""
    arg_parser = arguments.ConfigArgumentParser(description='Train Sockeye sequence-to-sequence models.')
    arguments.add_train_cli_args(arg_parser)
    train(arg_parser.parse_args())
def train(args: argparse.Namespace):
    """Run the full Sockeye training pipeline for the given CLI arguments."""
    if args.dry_run:
        # Modify arguments so that we write to a temporary directory and
        # perform 0 training iterations
        temp_dir = tempfile.TemporaryDirectory()  # Will be automatically removed
        args.output = temp_dir.name
        args.max_updates = 0

    utils.seedRNGs(args.seed)

    check_arg_compatibility(args)
    output_folder = os.path.abspath(args.output)
    resume_training = check_resume(args, output_folder)

    # Re-bind the module-level logger so subsequent logging also goes to a
    # file inside the output folder.
    global logger
    logger = setup_main_logger(__name__,
                               file_logging=True,
                               console=not args.quiet, path=os.path.join(output_folder, C.LOG_NAME))
    utils.log_basic_info(args)
    arguments.save_args(args, os.path.join(output_folder, C.ARGS_STATE_NAME))

    max_seq_len_source, max_seq_len_target = args.max_seq_len
    # The maximum length is the length before we add the BOS/EOS symbols
    max_seq_len_source = max_seq_len_source + C.SPACE_FOR_XOS
    max_seq_len_target = max_seq_len_target + C.SPACE_FOR_XOS
    logger.info("Adjusting maximum length to reserve space for a BOS/EOS marker. New maximum length: (%d, %d)",
                max_seq_len_source, max_seq_len_target)

    with ExitStack() as exit_stack:
        context = determine_context(args, exit_stack)

        train_iter, eval_iter, config_data, source_vocabs, target_vocab = create_data_iters_and_vocabs(
            args=args,
            max_seq_len_source=max_seq_len_source,
            max_seq_len_target=max_seq_len_target,
            shared_vocab=use_shared_vocab(args),
            resume_training=resume_training,
            output_folder=output_folder)
        # The data config's lengths are taken as canonical from here on.
        # NOTE(review): they may differ from the values computed above (e.g.
        # when resuming or using prepared data) — confirm against
        # create_data_iters_and_vocabs.
        max_seq_len_source = config_data.max_seq_len_source
        max_seq_len_target = config_data.max_seq_len_target

        # Dump the vocabularies if we're just starting up
        if not resume_training:
            vocab.save_source_vocabs(source_vocabs, output_folder)
            vocab.save_target_vocab(target_vocab, output_folder)
        source_vocab_sizes = [len(v) for v in source_vocabs]
        target_vocab_size = len(target_vocab)
        logger.info('Vocabulary sizes: source=[%s] target=%d',
                    '|'.join([str(size) for size in source_vocab_sizes]),
                    target_vocab_size)

        model_config = create_model_config(args=args,
                                           source_vocab_sizes=source_vocab_sizes, target_vocab_size=target_vocab_size,
                                           max_seq_len_source=max_seq_len_source, max_seq_len_target=max_seq_len_target,
                                           config_data=config_data)
        model_config.freeze()

        training_model = create_training_model(config=model_config,
                                               context=context,
                                               output_dir=output_folder,
                                               train_iter=train_iter,
                                               args=args)

        # Handle options that override training settings
        min_updates = args.min_updates
        max_updates = args.max_updates
        min_samples = args.min_samples
        max_samples = args.max_samples
        max_num_checkpoint_not_improved = args.max_num_checkpoint_not_improved
        min_epochs = args.min_num_epochs
        max_epochs = args.max_num_epochs
        if min_epochs is not None and max_epochs is not None:
            check_condition(min_epochs <= max_epochs,
                            "Minimum number of epochs must be smaller than maximum number of epochs")
        # Fixed training schedule always runs for a set number of updates
        if args.learning_rate_schedule:
            # Total updates = sum of the per-phase update counts; all other
            # stopping criteria are disabled.
            min_updates = None
            max_updates = sum(num_updates for (_, num_updates) in args.learning_rate_schedule)
            max_num_checkpoint_not_improved = -1
            min_samples = None
            max_samples = None
            min_epochs = None
            max_epochs = None

        trainer = training.EarlyStoppingTrainer(model=training_model,
                                                optimizer_config=create_optimizer_config(args, source_vocab_sizes),
                                                max_params_files_to_keep=args.keep_last_params,
                                                source_vocabs=source_vocabs,
                                                target_vocab=target_vocab)

        trainer.fit(train_iter=train_iter,
                    validation_iter=eval_iter,
                    early_stopping_metric=args.optimized_metric,
                    metrics=args.metrics,
                    checkpoint_frequency=args.checkpoint_frequency,
                    max_num_not_improved=max_num_checkpoint_not_improved,
                    min_samples=min_samples,
                    max_samples=max_samples,
                    min_updates=min_updates,
                    max_updates=max_updates,
                    min_epochs=min_epochs,
                    max_epochs=max_epochs,
                    lr_decay_param_reset=args.learning_rate_decay_param_reset,
                    lr_decay_opt_states_reset=args.learning_rate_decay_optimizer_states_reset,
                    decoder=create_checkpoint_decoder(args, exit_stack, context),
                    mxmonitor_pattern=args.monitor_pattern,
                    mxmonitor_stat_func=args.monitor_stat_func,
                    allow_missing_parameters=args.allow_missing_params or model_config.lhuc,
                    existing_parameters=args.params)
if __name__ == "__main__":
main()
| {
"content_hash": "7b60f123d593ff5a47a611e66886e8b4",
"timestamp": "",
"source": "github",
"line_count": 867,
"max_line_length": 130,
"avg_line_length": 53.5121107266436,
"alnum_prop": 0.5802564931565901,
"repo_name": "mlperf/training_results_v0.6",
"id": "85393915011ea3f0e24ba23be15808e7c3cbe442",
"size": "46961",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Fujitsu/benchmarks/resnet/implementations/mxnet/sockeye/sockeye/train.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Batchfile",
"bytes": "13941"
},
{
"name": "C",
"bytes": "208630"
},
{
"name": "C++",
"bytes": "10999411"
},
{
"name": "CMake",
"bytes": "129712"
},
{
"name": "CSS",
"bytes": "64767"
},
{
"name": "Clojure",
"bytes": "396764"
},
{
"name": "Cuda",
"bytes": "2272433"
},
{
"name": "Dockerfile",
"bytes": "67820"
},
{
"name": "Groovy",
"bytes": "62557"
},
{
"name": "HTML",
"bytes": "19753082"
},
{
"name": "Java",
"bytes": "166294"
},
{
"name": "JavaScript",
"bytes": "71846"
},
{
"name": "Julia",
"bytes": "408765"
},
{
"name": "Jupyter Notebook",
"bytes": "2713169"
},
{
"name": "Lua",
"bytes": "4430"
},
{
"name": "MATLAB",
"bytes": "34903"
},
{
"name": "Makefile",
"bytes": "115694"
},
{
"name": "Perl",
"bytes": "1535873"
},
{
"name": "Perl 6",
"bytes": "7280"
},
{
"name": "PowerShell",
"bytes": "6150"
},
{
"name": "Python",
"bytes": "24905683"
},
{
"name": "R",
"bytes": "351865"
},
{
"name": "Roff",
"bytes": "293052"
},
{
"name": "Scala",
"bytes": "1189019"
},
{
"name": "Shell",
"bytes": "794096"
},
{
"name": "Smalltalk",
"bytes": "3497"
},
{
"name": "TypeScript",
"bytes": "361164"
}
],
"symlink_target": ""
} |
__revision__ = "test/python-version.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"

"""
Verify the behavior of our check for unsupported or deprecated versions
of Python.
"""

import os
import re

import TestCmd
import TestSCons

# match_re_dotall: compare stderr as a dotall regex.  ignore_python_version=0
# because the Python version check is exactly what is under test here.
test = TestSCons.TestSCons(match = TestCmd.match_re_dotall, ignore_python_version=0)

test.write('SConstruct', "\n")

test.write('SetOption-deprecated', "SetOption('warn', 'no-deprecated')\n")

test.write('SetOption-python', "SetOption('warn', ['no-python-version'])\n")

if TestSCons.unsupported_python_version():
    # Raw string: '\*' and '\S' are regex escapes, not string escapes.
    # (Non-raw, they are invalid escape sequences and trigger SyntaxWarnings
    # on modern Python; the resulting string value is unchanged.)
    error = r"scons: \*\*\* SCons version \S+ does not run under Python version %s."
    error = error % re.escape(TestSCons.python_version_string()) + "\n"
    test.run(arguments = '-Q', status = 1, stderr = error)

else:

    if TestSCons.deprecated_python_version():
        test.run(arguments = '-Q', stderr = TestSCons.deprecated_python_expr)
    else:
        test.run(arguments = '-Q')

    # Each of these must run cleanly with the deprecation warning suppressed,
    # either via the command line or via SetOption in the SConscript.
    test.run(arguments = '-Q --warn=no-deprecated')

    test.run(arguments = '-f SetOption-deprecated -Q')

    test.run(arguments = '-f SetOption-python -Q')

test.pass_test()

# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| {
"content_hash": "6230c47fb92261af48e2b9b5b3e3272d",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 96,
"avg_line_length": 24.84,
"alnum_prop": 0.6843800322061192,
"repo_name": "EmanueleCannizzaro/scons",
"id": "a1b19540741c4c6214c641c573c0a83b802e4b63",
"size": "2377",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/python-version.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2491"
},
{
"name": "C",
"bytes": "659"
},
{
"name": "C++",
"bytes": "598"
},
{
"name": "CSS",
"bytes": "18502"
},
{
"name": "D",
"bytes": "1997"
},
{
"name": "HTML",
"bytes": "817651"
},
{
"name": "Java",
"bytes": "6860"
},
{
"name": "JavaScript",
"bytes": "215495"
},
{
"name": "Makefile",
"bytes": "3795"
},
{
"name": "Perl",
"bytes": "29978"
},
{
"name": "Python",
"bytes": "7510453"
},
{
"name": "Roff",
"bytes": "556545"
},
{
"name": "Ruby",
"bytes": "11074"
},
{
"name": "Shell",
"bytes": "52682"
},
{
"name": "XSLT",
"bytes": "7567242"
}
],
"symlink_target": ""
} |
__author__ = 'yuens'
################################### PART1 IMPORT ######################################
import MySQLdb
import logging
import time
################################### PART2 CLASS && FUNCTION ###########################
class CreateDatabaseTable(object):
    """Set up the MySQL database and tables used by the web spider.

    Configures file + console logging and opens a connection to the local
    MySQL server on construction; the connection is closed in ``__del__``.
    NOTE(review): the module targets Python 2 (``except ... , e`` syntax,
    ``xrange``, ``time.clock``).
    """

    def __init__(self):
        # Timer start; __del__ logs the elapsed time of the instance.
        self.start = time.clock()
        # Root-logger setup: append records to ./spider.log ...
        logging.basicConfig(level = logging.INFO,
                            format = '%(asctime)s %(levelname)5s %(filename)19s[line:%(lineno)3d] %(funcName)s %(message)s',
                            datefmt = '%y-%m-%d %H:%M:%S',
                            filename = './spider.log',
                            filemode = 'a')
        # ... and mirror the same format to the console.
        console = logging.StreamHandler()
        console.setLevel(logging.INFO)
        formatter = logging.Formatter('%(asctime)s %(levelname)5s %(filename)19s[line:%(lineno)3d] %(funcName)s %(message)s')
        console.setFormatter(formatter)
        logging.getLogger('').addHandler(console)
        logging.info("START CLASS {class_name}.".format(class_name = CreateDatabaseTable.__name__))
        try:
            # NOTE(review): hard-coded root credentials; consider moving them
            # to configuration.
            self.con = MySQLdb.connect(host='localhost', user='root', passwd='931209', charset='utf8')
            logging.info("Success in connecting MySQL.")
        except MySQLdb.Error, e:
            logging.error("Fail in connecting MySQL.")
            logging.error("MySQL Error {error_num}: {error_info}.".format(error_num = e.args[0], error_info = e.args[1]))

    def __del__(self):
        # Close the MySQL connection and log how long the instance lived.
        try:
            self.con.close()
            logging.info("Success in quiting MySQL.")
        except MySQLdb.Error, e:
            self.con.rollback()
            logging.error("Fail in quiting MySQL.")
            logging.error("MySQL Error {error_num}: {error_info}.".format(error_num = e.args[0], error_info = e.args[1]))
        logging.info("END CLASS {class_name}.".format(class_name = CreateDatabaseTable.__name__))
        self.end = time.clock()
        logging.info("The class {class_name} run time is : {delta_time} seconds".format(class_name = CreateDatabaseTable.__name__, delta_time = self.end - self.start))

    def create_database(self, database_name):
        # Create `database_name`; errors are logged and rolled back, not raised.
        logging.info("database name: {database_name}".format(database_name = database_name))
        cursor = self.con.cursor()
        # Statement index 1 ('SELECT VERSION()') is only used to log the server version.
        sqls = ['SET NAMES UTF8', 'SELECT VERSION()', "CREATE DATABASE {database_name}".format(database_name = database_name)]
        for sql_idx in xrange(len(sqls)):
            sql = sqls[sql_idx]
            try:
                cursor.execute(sql)
                if sql_idx == 1:
                    result = cursor.fetchall()[0]
                    mysql_version = result[0]
                    logging.info("MySQL VERSION: {mysql_version}".format(mysql_version = mysql_version))
                self.con.commit()
                logging.info("Success in creating database {database_name}.".format(database_name = database_name))
            except MySQLdb.Error, e:
                self.con.rollback()
                logging.error("Fail in creating database {database_name}.".format(database_name = database_name))
                logging.error("MySQL Error {error_num}: {error_info}.".format(error_num = e.args[0], error_info = e.args[1]))
        cursor.close()

    def create_table(self, database_name, link_table_name, page_table_name):
        # Create the page table and the page-to-page link table inside
        # `database_name` (IF NOT EXISTS, so re-running is safe).
        cursor = self.con.cursor()
        sqls = ["USE {database_name}".format(database_name = database_name), 'SET NAMES UTF8']
        sqls.append("ALTER DATABASE {database_name} DEFAULT CHARACTER SET 'utf8'".format(database_name = database_name))
        # Create node_table_name
        sqls.append("""CREATE TABLE IF NOT EXISTS {page_table_name}(
                    page_id INT(11) AUTO_INCREMENT PRIMARY KEY,
                    page_title VARCHAR(200),
                    page_url VARCHAR(200),
                    page_html_doc TEXT,
                    page_content TEXT,
                    page_link_num INT(11),
                    UNIQUE (page_url))""".format(page_table_name = page_table_name))
        #sqls.append("""CREATE INDEX page_id_idx ON {page_table_name}(page_id)""".format(page_table_name = page_table_name))
        # Create connection_table_name
        sqls.append("""CREATE TABLE IF NOT EXISTS {link_table_name}(
                    link_id INT(11) AUTO_INCREMENT PRIMARY KEY,
                    page1_url VARCHAR(200),
                    page1_id INT(11),
                    page1_title VARCHAR(200),
                    page2_url VARCHAR(200),
                    page2_id INT(11),
                    page2_title VARCHAR(200),
                    UNIQUE (link_id),
                    CONSTRAINT link_record_id UNIQUE (page1_url, page2_url))""".format(link_table_name = link_table_name))
        #sqls.append("""CREATE INDEX link_id_idx ON {link_table_name}(link_id)""".format(link_table_name = link_table_name))
        for sql_idx in range(len(sqls)):
            sql = sqls[sql_idx]
            try:
                cursor.execute(sql)
                self.con.commit()
                logging.info("Success in creating table.")
            except MySQLdb.Error, e:
                self.con.rollback()
                logging.error("Fail in creating table.")
                logging.error("MySQL Error {error_num}: {error_info}.".format(error_num = e.args[0], error_info = e.args[1]))
                logging.error("Error SQL:{sql}".format(sql = sql))
        cursor.close()
################################### PART3 CLASS TEST ##################################
"""
# initial parameters
database_name = "WebDB"
page_table_name = "page_table"
link_table_name = "link_table"
Creater = CreateDatabaseTable()
Creater.create_database(database_name = database_name)
Creater.create_table(database_name = database_name,\
link_table_name = link_table_name,\
page_table_name = page_table_name)
""" | {
"content_hash": "df534974c715d8b17c1ecc2d79cd0211",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 167,
"avg_line_length": 46.38167938931298,
"alnum_prop": 0.5380184331797235,
"repo_name": "ysh329/spider",
"id": "d929e45c4e66fb7854890a66b4960e63d9c75999",
"size": "6354",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spider/class_create_database_table.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38228"
}
],
"symlink_target": ""
} |
from azure.identity import DefaultAzureCredential
from azure.mgmt.synapse import SynapseManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-synapse
# USAGE
python get_sql_pool_metadata_sync_config.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Fetch and print the metadata sync config of one Synapse SQL pool."""
    # Credentials come from the AZURE_* environment variables (see module docstring).
    credential = DefaultAzureCredential()
    client = SynapseManagementClient(
        credential=credential,
        subscription_id="01234567-89ab-4def-0123-456789abcdef",
    )

    sync_config = client.sql_pool_metadata_sync_configs.get(
        resource_group_name="ExampleResourceGroup",
        workspace_name="ExampleWorkspace",
        sql_pool_name="ExampleSqlPool",
    )
    print(sync_config)
# x-ms-original-file: specification/synapse/resource-manager/Microsoft.Synapse/stable/2021-06-01/examples/GetSqlPoolMetadataSyncConfig.json
if __name__ == "__main__":
main()
| {
"content_hash": "297fa0cb2f8b8fd2ca13409f46109487",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 139,
"avg_line_length": 34.73529411764706,
"alnum_prop": 0.7366638441998307,
"repo_name": "Azure/azure-sdk-for-python",
"id": "b7033687e925989b197446f6a05b23129becfb71",
"size": "1649",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/synapse/azure-mgmt-synapse/generated_samples/get_sql_pool_metadata_sync_config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import sys, os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# Put the repository root (the parent of this docs directory) on sys.path so
# that autodoc can import the package and we can read its version below.
cwd = os.getcwd()
parent = os.path.dirname(cwd)
sys.path.insert(0, parent)

import grinderpy
# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'grinderpy'
copyright = u'2015, Jakub Jarosz'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# Both values are read from the package itself (imported above), so the docs
# always match the code.
#
# The short X.Y version.
version = grinderpy.__version__
# The full version, including alpha/beta/rc tags.
release = grinderpy.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'grinderpydoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'grinderpy.tex', u'grinderpy Documentation',
u'Jakub Jarosz', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'grinderpy', u'grinderpy Documentation',
[u'Jakub Jarosz'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'grinderpy', u'grinderpy Documentation',
u'Jakub Jarosz', 'grinderpy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| {
"content_hash": "64b6cafe409cb82d1827afd02a585b59",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 80,
"avg_line_length": 31.925311203319502,
"alnum_prop": 0.7039251364699766,
"repo_name": "qba73/grinderpy",
"id": "2d551fbcf356c0fde84feb6bddbc64d795750191",
"size": "8115",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1305"
},
{
"name": "Python",
"bytes": "2114"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema migration: creates the ``Clients`` table."""

    # First migration in this app's history.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Clients',
            fields=[
                # Auto-generated surrogate primary key.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=100)),
                ('last_name', models.CharField(max_length=100)),
                # IBANs are at most 34 characters long (ISO 13616).
                ('iban', models.CharField(max_length=34)),
                # Optional country code; both blank form input and NULL allowed.
                ('country', models.CharField(blank=True, max_length=10, null=True)),
            ],
        ),
    ]
| {
"content_hash": "e2da7ce41b2f41893826082a3ab907f8",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 114,
"avg_line_length": 28.916666666666668,
"alnum_prop": 0.5662824207492796,
"repo_name": "rexhepberlajolli/RHChallenge",
"id": "930d73f92b8b860f52f09689c3b354ecf6a45e0a",
"size": "767",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/user_administration/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "8650"
},
{
"name": "Python",
"bytes": "22125"
}
],
"symlink_target": ""
} |
from datetime import timedelta
from icalendar import Calendar as ICalendar
import requests
from .models import EventLocation, Event, OccurringRule
from .utils import extract_date_or_datetime
# Resolution of a bare DATE value (one day) vs. a DATETIME value; used to
# detect all-day calendar entries.
# NOTE(review): TIME_RESOLUTION is unused in this module.
DATE_RESOLUTION = timedelta(1)
TIME_RESOLUTION = timedelta(0, 0, 1)


class ICSImporter:
    """Imports VEVENT entries from an iCalendar (.ics) feed into
    Event/OccurringRule database records."""

    def __init__(self, calendar):
        # The Calendar model instance imported events are attached to.
        self.calendar = calendar

    def import_occurrence(self, event, event_data):
        """Create or update the OccurringRule (time span) for an imported event."""
        # Django will already convert to datetime by setting the time to 0:00,
        # but won't add any timezone information. We will convert them to
        # aware datetime objects manually.
        dt_start = extract_date_or_datetime(event_data['DTSTART'].dt)
        dt_end = extract_date_or_datetime(event_data['DTEND'].dt)
        # Let's mark those occurrences as 'all-day'.
        # A bare DATE has day resolution, a datetime has finer resolution.
        # NOTE(review): assumes extract_date_or_datetime preserves .resolution
        # for all-day (date) values — confirm against .utils.
        all_day = (
            dt_start.resolution == DATE_RESOLUTION or
            dt_end.resolution == DATE_RESOLUTION
        )
        # iCalendar DTEND is exclusive for all-day events, so pull it back a day.
        defaults = {
            'dt_start': dt_start,
            'dt_end': dt_end - timedelta(days=1) if all_day else dt_end,
            'all_day': all_day
        }
        OccurringRule.objects.update_or_create(event=event, defaults=defaults)

    def import_event(self, event_data):
        """Upsert a single Event (keyed by its iCalendar UID) and its occurrence."""
        uid = event_data['UID']
        title = event_data['SUMMARY']
        description = event_data['DESCRIPTION']
        location, _ = EventLocation.objects.get_or_create(
            calendar=self.calendar,
            name=event_data['LOCATION']
        )
        defaults = {
            'title': title,
            'description': description,
            'description_markup_type': 'html',
            'venue': location,
            'calendar': self.calendar,
        }
        event, _ = Event.objects.update_or_create(uid=uid, defaults=defaults)
        self.import_occurrence(event, event_data)

    def fetch(self, url):
        """Download and return the raw ICS payload.

        NOTE(review): no HTTP status/error handling — a failed request returns
        the error body as if it were calendar data.
        """
        response = requests.get(url)
        return response.content

    def import_events(self, url=None):
        """Fetch and import all events; defaults to the calendar's own URL."""
        if url is None:
            url = self.calendar.url
        ical = self.fetch(url)
        return self.import_events_from_text(ical)

    def get_events(self, ical):
        """Parse ICS text and return all VEVENT components."""
        ical = ICalendar.from_ical(ical)
        return ical.walk('VEVENT')

    def import_events_from_text(self, ical):
        """Import every VEVENT found in the given ICS text."""
        events = self.get_events(ical)
        for event in events:
            self.import_event(event)
| {
"content_hash": "e3f9d2b51efbe2337713970961cbc909",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 78,
"avg_line_length": 32.72222222222222,
"alnum_prop": 0.6103565365025467,
"repo_name": "proevo/pythondotorg",
"id": "847394fa04691ed63194c123e99040f63f1a8fa5",
"size": "2356",
"binary": false,
"copies": "3",
"ref": "refs/heads/dependabot/pip/django-allauth-0.51.0",
"path": "events/importer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "711916"
},
{
"name": "JavaScript",
"bytes": "314514"
},
{
"name": "Makefile",
"bytes": "6811"
},
{
"name": "Python",
"bytes": "1448691"
},
{
"name": "Ruby",
"bytes": "218314"
},
{
"name": "Shell",
"bytes": "6730"
}
],
"symlink_target": ""
} |
"""Workflow execution engines."""
from resolwe.flow.engine import BaseEngine
class BaseExecutionEngine(BaseEngine):
    """A workflow execution engine."""

    def evaluate(self, data):
        """Return the code needed to compute a given Data object."""
        # Concrete execution engines must implement this.
        raise NotImplementedError

    def get_expression_engine(self, name):
        """Return an expression engine by its name."""
        # Delegates to the manager this engine was registered with.
        return self.manager.get_expression_engine(name)

    def get_output_schema(self, process):
        """Return any additional output schema for the process."""
        # Default: no extra output fields are contributed.
        return []

    def discover_process(self, path):
        """Perform process discovery in given path.
        This method will be called during process registration and
        should return a list of dictionaries with discovered process
        schemas.
        """
        # Default: engines without discovery support contribute nothing.
        return []
| {
"content_hash": "18b580f95db695981a5b58a09174c8d7",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 68,
"avg_line_length": 31.444444444444443,
"alnum_prop": 0.6631330977620731,
"repo_name": "genialis/resolwe",
"id": "8da5fc63c798aa66ccb89a13fe7428cf8b0112cb",
"size": "849",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "resolwe/flow/execution_engines/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PLpgSQL",
"bytes": "21533"
},
{
"name": "Python",
"bytes": "1813118"
},
{
"name": "Shell",
"bytes": "6244"
}
],
"symlink_target": ""
} |
from django.test import RequestFactory
from test_plus.test import TestCase
from ..views import (
ComparisonCreateView,
ComparisonUpdateView,
ComparisonDetailView,
ComparisonListView,
ComparisonItemCreateView
)
class BaseComparisonTestCase(TestCase):
    """Shared fixture base for comparison view tests.

    Provides a generated user and a RequestFactory for subclasses to build
    view requests with.
    """

    def setUp(self):
        # make_user() is provided by test_plus.test.TestCase.
        self.user = self.make_user()
        self.factory = RequestFactory()
| {
"content_hash": "33a96dad63534bfb289ca9d5650cf4d9",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 39,
"avg_line_length": 23.1875,
"alnum_prop": 0.7358490566037735,
"repo_name": "EricZaporzan/compare",
"id": "84b95353e7a0136de96640b52664bf82d8141903",
"size": "371",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "compare/comparisons/tests/test_views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2677"
},
{
"name": "HTML",
"bytes": "29381"
},
{
"name": "JavaScript",
"bytes": "3142"
},
{
"name": "Nginx",
"bytes": "1095"
},
{
"name": "Python",
"bytes": "64298"
},
{
"name": "Shell",
"bytes": "5317"
}
],
"symlink_target": ""
} |
"""
Copyright 2016 Jeffrey D. Walter
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# 14 Sep 2016, Len Shustek: Added Logout()
# 17 Jul 2017, Andreas Jakl: Port to Python 3 (https://www.andreasjakl.com/using-netgear-arlo-security-cameras-for-periodic-recording/)
# Import helper classes that are part of this library.
try:
import Queue as queue
except ImportError:
import queue as queue
from request import Request
from eventstream import EventStream
# Import all of the other stuff.
from six import string_types, text_type
from datetime import datetime
import sys
import base64
import calendar
import json
#import logging
import math
import os
import pickle
import random
import re
import requests
import signal
import time
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build
#logging.basicConfig(level=logging.DEBUG,format='[%(levelname)s] (%(threadName)-10s) %(message)s',)
class Arlo(object):
BASE_URL = 'my.arlo.com'
AUTH_URL = 'ocapi-app.arlo.com'
TRANSID_PREFIX = 'web'
def __init__(self, username, password, google_credential_file=None):
# signals only work in main thread
try:
signal.signal(signal.SIGINT, self.interrupt_handler)
except:
pass
self.event_stream = None
self.request = None
if google_credential_file:
self.LoginMFA(username, password, google_credential_file)
else:
self.Login(username, password)
def interrupt_handler(self, signum, frame):
print("Caught Ctrl-C, exiting.")
os._exit(1)
def to_timestamp(self, dt):
if sys.version[0] == '2':
epoch = datetime.utcfromtimestamp(0)
return int((dt - epoch).total_seconds() * 1e3)
else:
return int(dt.timestamp() * 1e3)
    def genTransId(self, trans_type=TRANSID_PREFIX):
        """Build a transaction id of the form ``<type>!<random hex>!<ms timestamp>``,
        mirroring the format the Arlo web client generates for /notify calls.

        Args:
            trans_type: prefix for the id; defaults to TRANSID_PREFIX ('web').
        """
        def float2hex(f):
            # Render a non-negative float as a hex string (no '0x' prefix),
            # keeping at most MAXHEXADECIMALS fractional hex digits.
            MAXHEXADECIMALS = 15
            w = f // 1
            d = f % 1
            # Do the whole:
            if w == 0: result = '0'
            else: result = ''
            while w:
                w, r = divmod(w, 16)
                r = int(r)
                # Digits above 9 become 'A'..'F' (55 == ord('A') - 10).
                if r > 9: r = chr(r+55)
                else: r = str(r)
                result = r + result
            # And now the part:
            if d == 0: return result
            result += '.'
            count = 0
            while d:
                d = d * 16
                w, d = divmod(d, 1)
                w = int(w)
                if w > 9: w = chr(w+55)
                else: w = str(w)
                result += w
                count += 1
                if count > MAXHEXADECIMALS: break
            return result
        # Random 32-bit value rendered as lowercase hex, plus the current
        # wall-clock time in milliseconds.
        now = datetime.today()
        return trans_type+"!" + float2hex(random.random() * math.pow(2, 32)).lower() + "!" + str(int((time.mktime(now.timetuple())*1e3 + now.microsecond/1e3)))
    def Login(self, username, password):
        """
        Authenticate against the OCAPI auth service (non-MFA flow) and store
        the session token for subsequent requests.

        This call returns the following:
        {
        "userId":"XXX-XXXXXXX",
        "email":"user@example.com",
        "token":"2_5HicFJMXXXXX-S_7IuK2EqOUHXXXXXXXXXXX1CXKWTThgU18Va_XXXXXX5S00hUafv3PV_if_Bl_rhiFsDHYwhxI3CxlVnR5f3q2XXXXXX-Wnt9F7D82uN1f4cXXXXX-FMUsWF_6tMBqwn6DpzOaIB7ciJrnr2QJyKewbQouGM6",
        "paymentId":"XXXXXXXX",
        "authenticated":1472961381,
        "accountStatus":"registered",
        "serialNumber":"XXXXXXXXXXXXX",
        "countryCode":"US",
        "tocUpdate":false,
        "policyUpdate":false,
        "validEmail":true
        }
        """
        self.username = username
        self.password = password
        self.request = Request()
        # CORS preflight, mirroring what the browser client sends.
        headers = {
            'Access-Control-Request-Headers': 'content-type,source,x-user-device-id,x-user-device-name,x-user-device-type',
            'Access-Control-Request-Method': 'POST',
            'Origin': f'https://{self.BASE_URL}',
            'Referer': f'https://{self.BASE_URL}/',
            'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_1_2 like Mac OS X) AppleWebKit/604.3.5 (KHTML, like Gecko) Mobile/15B202 NETGEAR/v1 (iOS Vuezone)',
        }
        self.request.options(f'https://{self.AUTH_URL}/api/auth', headers=headers)
        headers = {
            'DNT': '1',
            'schemaVersion': '1',
            'Auth-Version': '2',
            'Content-Type': 'application/json; charset=UTF-8',
            'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_1_2 like Mac OS X) AppleWebKit/604.3.5 (KHTML, like Gecko) Mobile/15B202 NETGEAR/v1 (iOS Vuezone)',
            'Origin': f'https://{self.BASE_URL}',
            'Referer': f'https://{self.BASE_URL}/',
            'Source': 'arloCamWeb',
        }
        #body = self.request.post(f'https://{self.BASE_URL}/hmsweb/login/v2', {'email': self.username, 'password': self.password}, headers=headers)
        # The password is base64-encoded (not encrypted), as the web client does.
        body = self.request.post(
            f'https://{self.AUTH_URL}/api/auth',
            params={
                'email': self.username,
                'password': str(base64.b64encode(self.password.encode('utf-8')), 'utf-8'),
                'language': 'en',
                'EnvSource': 'prod'
            },
            headers=headers
        )
        # Every subsequent request carries the returned token.
        headers['Authorization'] = body['token']
        self.request.session.headers.update(headers)
        self.user_id = body['userId']
        return body
def LoginMFA(self, username, password, google_credential_file):
self.username = username
self.password = password
self.google_credentials = pickle.load(open(google_credential_file, 'rb'))
self.request = Request()
# request MFA token
request_start_time = int(time.time())
headers = {
'DNT': '1',
'schemaVersion': '1',
'Auth-Version': '2',
'Content-Type': 'application/json; charset=UTF-8',
'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_1_2 like Mac OS X) AppleWebKit/604.3.5 (KHTML, like Gecko) Mobile/15B202 NETGEAR/v1 (iOS Vuezone)',
'Origin': f'https://{self.BASE_URL}',
'Referer': f'https://{self.BASE_URL}/',
'Source': 'arloCamWeb',
'TE': 'Trailers',
}
# Authenticate
auth_body = self.request.post(
f'https://{self.AUTH_URL}/api/auth',
params={
'email': self.username,
'password': str(base64.b64encode(self.password.encode('utf-8')), 'utf-8'),
'language': 'en',
'EnvSource': 'prod'
},
headers=headers,
raw=True
)
self.user_id = auth_body['data']['userId']
self.request.session.headers.update({'Authorization': base64.b64encode(auth_body['data']['token'].encode('utf-8'))})
# Retrieve email factor id
factors_body = self.request.get(
f'https://{self.AUTH_URL}/api/getFactors',
params={'data': auth_body['data']['issued']},
headers=headers,
raw=True
)
email_factor_id = next(i for i in factors_body['data']['items'] if i['factorType'] == 'EMAIL' and i['factorRole'] == "PRIMARY")['factorId']
if email_factor_id is None:
email_factor_id = next(i for i in factors_body['data']['items'] if i['factorType'] == 'EMAIL' and i['factorRole'] == "SECONDARY")['factorId']
# Start factor auth
start_auth_body = self.request.post(
f'https://{self.AUTH_URL}/api/startAuth',
{'factorId': email_factor_id},
headers=headers,
raw=True
)
factor_auth_code = start_auth_body['data']['factorAuthCode']
# search for MFA token in latest emails
pattern = r'\d{6}'
code = None
service = build('gmail', 'v1', credentials = self.google_credentials)
for i in range(0, 10):
time.sleep(5)
messages = service.users().messages().list(
userId='me',
q=f'from:do_not_reply@arlo.com after:{request_start_time}'
).execute()
if messages['resultSizeEstimate'] == 0:
print('no matching emails found')
continue
# only check the latest message
message = service.users().messages().get(userId='me', id=messages['messages'][0]['id']).execute()
search = re.search(pattern, message['snippet'])
if not search:
print('no matching code in email found')
continue
code = search.group(0)
break
"""
code = input("Enter MFA code:\n")
print("CODE", factor_auth_code)
"""
# Complete auth
finish_auth_body = self.request.post(
f'https://{self.AUTH_URL}/api/finishAuth',
{
'factorAuthCode': factor_auth_code,
'otp': code
},
headers=headers,
raw=True
)
# Update Authorization code with new code
headers = {
'Auth-Version': '2',
'Authorization': finish_auth_body['data']['token'].encode('utf-8'),
'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_1_2 like Mac OS X) AppleWebKit/604.3.5 (KHTML, like Gecko) Mobile/15B202 NETGEAR/v1 (iOS Vuezone)',
}
self.request.session.headers.update(headers)
self.BASE_URL = 'myapi.arlo.com'
def Logout(self):
self.Unsubscribe()
return self.request.put(f'https://{self.BASE_URL}/hmsweb/logout')
def Subscribe(self, basestation):
"""
Arlo uses the EventStream interface in the browser to do pub/sub style messaging.
Unfortunately, this appears to be the only way Arlo communicates these messages.
This function makes the initial GET request to /subscribe, which returns the EventStream socket.
Once we have that socket, the API requires a POST request to /notify with the "subscriptionsresource.
This call "registersthe device (which should be the basestation) so that events will be sent to the EventStream
when subsequent calls to /notify are made.
Since this interface is asynchronous, and this is a quick and dirty hack to get this working, I'm using a thread
to listen to the EventStream. This thread puts events into a queue. Some polling is required (see NotifyAndGetResponse()) because
the event messages aren't guaranteed to be delivered in any specific order, but I wanted to maintain a synchronous style API.
You generally shouldn't need to call Subscribe() directly, although I'm leaving it "publicfor now.
"""
basestation_id = basestation.get('deviceId')
def Register(self):
if self.event_stream and self.event_stream.connected and not self.event_stream.registered:
self.Notify(basestation, {"action":"set","resource":"subscriptions/"+self.user_id+"_web","publishResponse":False,"properties":{"devices":[basestation_id]}})
event = self.event_stream.Get()
if event is None or self.event_stream.event_stream_stop_event.is_set():
return None
elif event:
self.event_stream.Register()
return event
def QueueEvents(self, event_stream, stop_event):
for event in event_stream:
if event is None or stop_event.is_set():
return None
response = json.loads(event.data)
if self.event_stream and self.event_stream.connected:
if response.get('action') == 'logout':
self.event_stream.Disconnect()
return None
else:
self.event_stream.queue.put(response)
elif response.get('status') == 'connected':
self.event_stream.Connect()
def Heartbeat(self, stop_event):
while not stop_event.wait(30.0):
try:
self.Ping(basestation)
except:
pass
if not self.event_stream or not self.event_stream.connected:
self.event_stream = EventStream(QueueEvents, Heartbeat, args=(self, ))
self.event_stream.Start()
while not self.event_stream.connected and not self.event_stream.event_stream_stop_event.is_set():
time.sleep(0.5)
if not self.event_stream.registered:
Register(self)
def Unsubscribe(self):
""" This method stops the EventStream subscription and removes it from the event_stream collection. """
if self.event_stream and self.event_stream.connected:
self.request.get(f'https://{self.BASE_URL}/hmsweb/client/unsubscribe')
self.event_stream.Disconnect()
self.event_stream = None
    def Notify(self, basestation, body):
        """
        Send a raw pub/sub message to a basestation and return its transaction id.

        The following are examples of the json you would need to pass in the body of the Notify() call to interact with Arlo:

        ##############################################################################################################################
        ##############################################################################################################################
        NOTE: While you can call Notify() directly, responses from these notify calls are sent to the EventStream (see Subscribe()),
        and so it's better to use the Get/Set methods that are implemented using the NotifyAndGetResponse() method.
        ##############################################################################################################################
        ##############################################################################################################################

        Set System Mode (Armed, Disarmed) - {"from":"XXX-XXXXXXX_web","to":"XXXXXXXXXXXXX","action":"set","resource":"modes","transId":"web!XXXXXXXX.XXXXXXXXXXXXXXXXXXXX","publishResponse":true,"properties":{"active":"mode0"}}
        Set System Mode (Calendar) - {"from":"XXX-XXXXXXX_web","to":"XXXXXXXXXXXXX","action":"set","resource":"schedule","transId":"web!XXXXXXXX.XXXXXXXXXXXXXXXXXXXX","publishResponse":true,"properties":{"active":true}}
        Configure The Schedule (Calendar) - {"from":"XXX-XXXXXXX_web","to":"XXXXXXXXXXXXX","action":"set","resource":"schedule","transId":"web!XXXXXXXX.XXXXXXXXXXXXXXXXXXXX","publishResponse":true,"properties":{"schedule":[{"modeId":"mode0","startTime":0},{"modeId":"mode2","startTime":28800000},{"modeId":"mode0","startTime":64800000},{"modeId":"mode0","startTime":86400000},{"modeId":"mode2","startTime":115200000},{"modeId":"mode0","startTime":151200000},{"modeId":"mode0","startTime":172800000},{"modeId":"mode2","startTime":201600000},{"modeId":"mode0","startTime":237600000},{"modeId":"mode0","startTime":259200000},{"modeId":"mode2","startTime":288000000},{"modeId":"mode0","startTime":324000000},{"modeId":"mode0","startTime":345600000},{"modeId":"mode2","startTime":374400000},{"modeId":"mode0","startTime":410400000},{"modeId":"mode0","startTime":432000000},{"modeId":"mode0","startTime":518400000}]}
        Create Mode -
            {"from":"XXX-XXXXXXX_web","to":"XXXXXXXXXXXXX","action":"add","resource":"rules","transId":"web!XXXXXXXX.XXXXXXXXXXXXXXXXXXXX","publishResponse":true,"properties":{"name":"Record video on Camera 1 if Camera 1 detects motion","id":"ruleNew","triggers":[{"type":"pirMotionActive","deviceId":"XXXXXXXXXXXXX","sensitivity":80}],"actions":[{"deviceId":"XXXXXXXXXXXXX","type":"recordVideo","stopCondition":{"type":"timeout","timeout":15}},{"type":"sendEmailAlert","recipients":["__OWNER_EMAIL__"]},{"type":"pushNotification"}]}}
            {"from":"XXX-XXXXXXX_web","to":"XXXXXXXXXXXXX","action":"add","resource":"modes","transId":"web!XXXXXXXX.XXXXXXXXXXXXXXXXXXXX","publishResponse":true,"properties":{"name":"Test","rules":["rule3"]}}
        Delete Mode - {"from":"XXX-XXXXXXX_web","to":"XXXXXXXXXXXXX","action":"delete","resource":"modes/mode3","transId":"web!XXXXXXXX.XXXXXXXXXXXXXXXXXXXX","publishResponse":true}
        Camera Off - {"from":"XXX-XXXXXXX_web","to":"XXXXXXXXXXXXX","action":"set","resource":"cameras/XXXXXXXXXXXXX","transId":"web!XXXXXXXX.XXXXXXXXXXXXXXXXXXXX","publishResponse":true,"properties":{"privacyActive":false}}
        Night Vision On - {"from":"XXX-XXXXXXX_web","to":"XXXXXXXXXXXXX","action":"set","resource":"cameras/XXXXXXXXXXXXX","transId":"web!XXXXXXXX.XXXXXXXXXXXXXXXXXXXX","publishResponse":true,"properties":{"zoom":{"topleftx":0,"toplefty":0,"bottomrightx":1280,"bottomrighty":720},"mirror":true,"flip":true,"nightVisionMode":1,"powerSaveMode":2}}
        Motion Detection Test - {"from":"XXX-XXXXXXX_web","to":"XXXXXXXXXXXXX","action":"set","resource":"cameras/XXXXXXXXXXXXX","transId":"web!XXXXXXXX.XXXXXXXXXXXXXXXXXXXX","publishResponse":true,"properties":{"motionSetupModeEnabled":true,"motionSetupModeSensitivity":80}}

        device_id = locations.data.uniqueIds

        System Properties: ("resource":"modes")
            active (string) - Mode Selection (mode2 = All Motion On, mode1 = Armed, mode0 = Disarmed, etc.)

        System Properties: ("resource":"schedule")
            active (bool) - Mode Selection (true = Calendar)

        Camera Properties: ("resource":"cameras/{id}")
            privacyActive (bool) - Camera On/Off
            zoom (topleftx (int), toplefty (int), bottomrightx (int), bottomrighty (int)) - Camera Zoom Level
            mirror (bool) - Mirror Image (left-to-right or right-to-left)
            flip (bool) - Flip Image Vertically
            nightVisionMode (int) - Night Mode Enabled/Disabled (1, 0)
            powerSaveMode (int) - PowerSaver Mode (3 = Best Video, 2 = Optimized, 1 = Best Battery Life)
            motionSetupModeEnabled (bool) - Motion Detection Setup Enabled/Disabled
            motionSetupModeSensitivity (int 0-100) - Motion Detection Sensitivity
        """
        basestation_id = basestation.get('deviceId')
        # Stamp the message with a fresh transaction id and routing fields.
        # Note: this mutates the caller's dict in place.
        body['transId'] = self.genTransId()
        body['from'] = self.user_id+'_web'
        body['to'] = basestation_id
        # The xcloudId header routes the request to the right basestation cloud.
        self.request.post(f'https://{self.BASE_URL}/hmsweb/users/devices/notify/'+body['to'], body, headers={"xcloudId":basestation.get('xCloudId')})
        return body.get('transId')
    def NotifyAndGetResponse(self, basestation, body, timeout=120):
        """Send a /notify message and block until its reply arrives on the event stream.

        Replies are matched by transaction id. Events that belong to other
        web-originated transactions are re-queued so their own waiters can
        pick them up; returns None if the stream stops or disconnects.
        """
        basestation_id = basestation.get('deviceId')
        self.Subscribe(basestation)
        if self.event_stream and self.event_stream.connected and self.event_stream.registered:
            transId = self.Notify(basestation, body)
            event = self.event_stream.Get(timeout=timeout)
            if event is None or self.event_stream.event_stream_stop_event.is_set():
                return None
            # Poll the queue until we see our transaction id (delivery order
            # is not guaranteed).
            while self.event_stream.connected and self.event_stream.registered:
                tid = event.get('transId', '')
                if tid != transId:
                    # Someone else's reply: requeue web-originated events for
                    # their waiter, then keep polling for ours.
                    if tid.startswith(self.TRANSID_PREFIX):
                        self.event_stream.queue.put(event)
                    event = self.event_stream.Get(timeout=timeout)
                    if event is None or self.event_stream.event_stream_stop_event.is_set():
                        return None
                else: break
            return event
def Ping(self, basestation):
basestation_id = basestation.get('deviceId')
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"subscriptions/"+self.user_id+"_web","publishResponse":False,"properties":{"devices":[basestation_id]}})
def SubscribeToMotionEvents(self, basestation, callback, timeout=120):
"""
Use this method to subscribe to motion events. You must provide a callback function which will get called once per motion event.
The callback function should have the following signature:
def callback(self, event)
This is an example of handling a specific event, in reality, you'd probably want to write a callback for HandleEvents()
that has a big switch statement in it to handle all the various events Arlo produces.
"""
def callbackwrapper(self, event):
if event.get('properties', {}).get('motionDetected'):
callback(self, event)
self.HandleEvents(basestation, callbackwrapper, timeout)
    def HandleEvents(self, basestation, callback, timeout=120):
        """
        Use this method to subscribe to the event stream and provide a callback that will be called for every event received.
        This function will allow you to potentially write a callback that can handle all of the events received from the event stream.

        Returns the first non-None value the callback returns, or None when the
        stream stops.
        """
        if not callable(callback):
            raise Exception('The callback(self, event) should be a callable function.')
        basestation_id = basestation.get('deviceId')
        self.Subscribe(basestation)
        if self.event_stream and self.event_stream.connected and self.event_stream.registered:
            while self.event_stream.connected:
                event = self.event_stream.Get(timeout=timeout)
                if event is None or self.event_stream.event_stream_stop_event.is_set():
                    return None
                # If this event is of resource type "subscriptions", then it's a ping reply event.
                # For now, these types of events will be requeued, since they are generated in response to and expected as a reply by the Ping() method.
                # HACK: Take a quick nap here to give the Ping() method's thread a chance to get the queued event.
                if event.get('resource', '').startswith('subscriptions'):
                    self.event_stream.queue.put(event)
                    time.sleep(0.05)
                else:
                    response = callback(self, event)
                    # NOTE: Not ideal, but this allows you to look for a specific event and break if you want to return it.
                    if response is not None:
                        return response
def TriggerAndHandleEvent(self, basestation, trigger, callback, timeout=120):
"""
Use this method to subscribe to the event stream and provide a callback that will be called for event event received.
This function will allow you to potentially write a callback that can handle all of the events received from the event stream.
NOTE: Use this function if you need to run some code after subscribing to the eventstream, but before your callback to handle the events runs.
"""
if not callable(trigger):
raise Exception('The trigger(self, camera) should be a callable function.')
if not callable(callback):
raise Exception('The callback(self, event) should be a callable function.')
self.Subscribe(basestation)
trigger(self)
# NOTE: Calling HandleEvents() calls Subscribe() again, which basically turns into a no-op. Hackie I know, but it cleans up the code a bit.
return self.HandleEvents(basestation, callback, timeout)
def GetBaseStationState(self, basestation):
return self.NotifyAndGetResponse(basestation, {"action":"get","resource":"basestation","publishResponse":False})
def GetCameraState(self, basestation):
return self.NotifyAndGetResponse(basestation, {"action":"get","resource":"cameras","publishResponse":False})
def GetRules(self, basestation):
return self.NotifyAndGetResponse(basestation, {"action":"get","resource":"rules","publishResponse":False})
def GetSmartFeatures(self):
return self.request.get(f'https://{self.BASE_URL}/hmsweb/users/subscription/smart/features')
def GetSmartAlerts(self, camera):
return self.request.get(f'https://{self.BASE_URL}/hmsweb/users/devices/'+camera.get('uniqueId')+'/smartalerts')
def GetAutomationActivityZones(self, camera):
return self.request.get(f'https://{self.BASE_URL}/hmsweb/users/devices/'+camera.get('uniqueId')+'/activityzones')
def RestartBasestation(self, basestation):
return self.request.post(f'https://{self.BASE_URL}/hmsweb/users/devices/restart', {"deviceId":basestation.get('deviceId')})
def SetAutomationActivityZones(self, camera, zone, coords, color):
"""
An activity zone is the area you draw in your video in the UI to tell Arlo what part of the scene to "watch".
This method takes 4 arguments.
camera: the camera you want to set an activity zone for.
name: "Zone 1" - the name of your activity zone.
coords: [{"x":0.37946943483275664,"y":0.3790983606557377},{"x":0.8685121107266436,"y":0.3790983606557377},{"x":0.8685121107266436,"y":1},{"x":0.37946943483275664,"y":1}] - these coordinates are the bonding box for the activity zone.
color: 45136 - the color for your bounding box.
"""
return self.request.post(f'https://{self.BASE_URL}/hmsweb/users/devices/'+camera.get('uniqueId')+'/activityzones', {"name": zone,"coords": coords, "color": color})
def GetAutomationDefinitions(self):
return self.request.get(f'https://{self.BASE_URL}/hmsweb/users/automation/definitions', {'uniqueIds':'all'})
def GetCalendar(self, basestation):
return self.NotifyAndGetResponse(basestation, {"action":"get","resource":"schedule","publishResponse":False})
def DeleteMode(self, device, mode):
""" device can be any object that has parentId == deviceId. i.e., not a camera """
parentId = device.get('parentId', None)
if device.get('deviceType') == 'arlobridge':
return self.request.delete(f'https://{self.BASE_URL}/hmsweb/users/locations/'+device.get('uniqueId')+'/modes/'+mode)
elif not parentId or device.get('deviceId') == parentId:
return self.NotifyAndGetResponse(device, {"action":"delete","resource":"modes/"+mode,"publishResponse":True})
else:
raise Exception('Only parent device modes and schedules can be deleted.')
def GetModes(self, basestation):
""" DEPRECATED: This is the older API for getting the "mode". It still works, but GetModesV2 is the way the Arlo software does it these days. """
return self.NotifyAndGetResponse(basestation, {"action":"get","resource":"modes","publishResponse":False})
def GetModesV2(self):
"""
This is the newer API for getting the "mode". This method also returns the schedules.
Set a non-schedule mode to be active: {"activeAutomations":[{"deviceId":"XXXXXXXXXXXXX","timestamp":1532015622105,"activeModes":["mode1"],"activeSchedules":[]}]}
Set a schedule to be active: {"activeAutomations":[{"deviceId":"XXXXXXXXXXXXX","timestamp":1532015790139,"activeModes":[],"activeSchedules":["schedule.1"]}]}
"""
return self.request.get(f'https://{self.BASE_URL}/hmsweb/users/devices/automation/active')
def CustomMode(self, device, mode, schedules=[]):
""" device can be any object that has parentId == deviceId. i.e., not a camera """
parentId = device.get('parentId', None)
if device.get('deviceType') == 'arlobridge':
return self.request.post(f'https://{self.BASE_URL}/hmsweb/users/devices/automation/active', {'activeAutomations':[{'deviceId':device.get('deviceId'),'timestamp':self.to_timestamp(datetime.now()),'activeModes':[mode],'activeSchedules':schedules}]})
elif not parentId or device.get('deviceId') == parentId:
return self.NotifyAndGetResponse(device, {"from":self.user_id+"_web", "to": device.get("parentId"), "action":"set","resource":"modes", "transId": self.genTransId(),"publishResponse":True,"properties":{"active":mode}})
else:
raise Exception('Only parent device modes and schedules can be modified.')
def Arm(self, device):
return self.CustomMode(device, "mode1")
def Disarm(self, device):
return self.CustomMode(device, "mode0")
def Calendar(self, basestation, active=True):
"""
DEPRECATED: This API appears to still do stuff, but I don't see it called in the web UI anymore when switching the mode to a schedule.
NOTE: The Arlo API seems to disable calendar mode when switching to other modes, if it's enabled.
You should probably do the same, although, the UI reflects the switch from calendar mode to say armed mode without explicitly setting calendar mode to inactive.
"""
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"schedule","publishResponse":True,"properties":{"active":active}})
def SetSchedule(self, basestation, schedule):
"""
The following json is what was sent to the API when I edited my schedule. It contains all of the data necessary to configure a whole week. It's a little convoluted, but you can just play around with the scheduler in Chrome and watch the schema that gets sent.
{
"schedule": [
{
"duration": 600,
"startActions": {
"disableModes": [
"mode0"
],
"enableModes": [
"mode1"
]
},
"days": [
"Mo",
"Tu",
"We",
"Th",
"Fr",
"Sa",
"Su"
],
"startTime": 0,
"type": "weeklyAction",
"endActions": {
"disableModes": [
"mode1"
],
"enableModes": [
"mode0"
]
}
},
{
"duration": 360,
"startActions": {
"disableModes": [
"mode0"
],
"enableModes": [
"mode2"
]
},
"days": [
"Mo",
"Tu",
"We",
"Th",
"Fr",
"Sa",
"Su"
],
"startTime": 1080,
"type": "weeklyAction",
"endActions": {
"disableModes": [
"mode2"
],
"enableModes": [
"mode0"
]
}
},
{
"duration": 480,
"startActions": {
"disableModes": [
"mode0"
],
"enableModes": [
"mode3"
]
},
"days": [
"Tu"
],
"startTime": 600,
"type": "weeklyAction",
"endActions": {
"disableModes": [
"mode3"
],
"enableModes": [
"mode0"
]
}
}
],
"name": "",
"id": "schedule.1",
"enabled": true
}
"""
return self.request.post(f'https://{self.BASE_URL}/hmsweb/users/locations/'+basestation.get('uniqueId')+'/schedules', )
def AdjustBrightness(self, basestation, camera, brightness=0):
"""
NOTE: Brightness is between -2 and 2 in increments of 1 (-2, -1, 0, 1, 2).
Setting it to an invalid value has no effect.
Returns:
{
"action": "is",
"from": "XXXXXXXXXXXXX",
"properties": {
"brightness": -2
},
"resource": "cameras/XXXXXXXXXXXXX",
"to": "336-XXXXXXX_web",
"transId": "web!XXXXXXXX.389518!1514956240683"
}
"""
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"cameras/"+camera.get('deviceId'),"publishResponse":True,"properties":{"brightness":brightness}})
def ToggleCamera(self, basestation, camera, active=True):
"""
active: True - Camera is off.
active: False - Camera is on.
"""
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"cameras/"+camera.get('deviceId'),"publishResponse":True,"properties":{"privacyActive":active}})
def PushToTalk(self, camera):
return self.request.get(f'https://{self.BASE_URL}/hmsweb/users/devices/'+camera.get('uniqueId')+'/pushtotalk')
""" General alert toggles """
def SetMotionAlertsOn(self, basestation, sensitivity=5):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"cameras/"+basestation.get('deviceId'),"publishResponse":True,"properties":{"motionDetection":{"armed":True,"sensitivity":sensitivity,"zones":[]}}})
def SetMotionAlertsOff(self, basestation, sensitivity=5):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"cameras/"+basestation.get('deviceId'),"publishResponse":True,"properties":{"motionDetection":{"armed":False,"sensitivity":sensitivity,"zones":[]}}})
def SetAudioAlertsOn(self, basestation, sensitivity=3):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"cameras/"+basestation.get('deviceId'),"publishResponse":True,"properties":{"audioDetection":{"armed":True,"sensitivity":sensitivity}}})
def SetAudioAlertsOff(self, basestation, sensitivity=3):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"cameras/"+basestation.get('deviceId'),"publishResponse":True,"properties":{"audioDetection":{"armed":False,"sensitivity":sensitivity}}})
def AlertNotificationMethods(self, basestation, action="disabled", email=False, push=False):
""" action : disabled OR recordSnapshot OR recordVideo """
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"cameras/"+basestation.get('deviceId'),"publishResponse":True,"properties":{"eventAction":{"actionType":action,"stopType":"timeout","timeout":15,"emailNotification":{"enabled":email,"emailList":["__OWNER_EMAIL__"]},"pushNotification":push}}})
""" Arlo Baby Audio Control """
def GetAudioPlayback(self, basestation):
return self.NotifyAndGetResponse(basestation, {"action":"get","resource":"audioPlayback","publishResponse":False})
def PlayTrack(self, basestation, track_id="2391d620-e491-4412-99f6-e9a40d6046ed", position=0):
"""
Defaulting to 'hugh little baby', which is a supplied track. I hope the ID is the same for all
"""
return self.Notify(basestation, {"action":"playTrack","resource":"audioPlayback/player","properties":{"trackId":track_id,"position":position}})
def PauseTrack(self, basestation):
return self.Notify(basestation, {"action":"pause","resource":"audioPlayback/player"})
def UnPauseTrack(self, basestation):
return self.Notify(basestation, {"action":"play","resource":"audioPlayback/player"})
def SkipTrack(self, basestation):
return self.Notify(basestation, {"action":"nextTrack","resource":"audioPlayback/player"})
def SetSleepTimerOn(self, basestation, time=calendar.timegm(time.gmtime()) + 300, timediff=0):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"audioPlayback/config","publishResponse":True,"properties":{"config":{"sleepTime":time,"sleepTimeRel":timediff}}})
def SetSleepTimerOff(self, basestation, time=0, timediff=300):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"audioPlayback/config","publishResponse":True,"properties":{"config":{"sleepTime": time,"sleepTimeRel":timediff}}})
def SetLoopBackModeContinuous(self, basestation):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"audioPlayback/config","publishResponse":True,"properties":{"config":{"loopbackMode":"continuous"}}})
def SetLoopBackModeSingleTrack(self, basestation):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"audioPlayback/config","publishResponse":True,"properties":{"config":{"loopbackMode":"singleTrack"}}})
def SetShuffleOn(self, basestation):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"audioPlayback/config","publishResponse":True,"properties":{"config":{"shuffleActive":True}}})
def SetShuffleOff(self, basestation):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"audioPlayback/config","publishResponse":True,"properties":{"config":{"shuffleActive":False}}})
def SetVolume(self, basestation, mute=False, volume=50):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"cameras/"+basestation.get('deviceId'),"publishResponse":True,"properties":{"speaker":{"mute":mute,"volume":volume}}})
""" Baby Arlo Nightlight, (current state is in the arlo.GetCameraState(cameras[0]["properties"][0]["nightLight"]) """
def SetNightLightOn(self, basestation):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"cameras/"+basestation.get('deviceId'),"publishResponse":True,"properties":{"nightLight":{"enabled":True}}})
def SetNightLightOff(self, basestation):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"cameras/"+basestation.get('deviceId'),"publishResponse":True,"properties":{"nightLight":{"enabled":False}}})
def SetNightLightBrightness(self, basestation, level=200):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"cameras/"+basestation.get('deviceId'),"publishResponse":True,"properties":{"nightLight":{"brightness":level}}})
def SetNightLightMode(self, basestation, mode="rainbow"):
""" mode: rainbow or rgb. """
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"cameras/"+basestation.get('deviceId'),"publishResponse":True,"properties":{"nightLight":{"mode":mode}}})
def SetNightLightColor(self, basestation, red=255, green=255, blue=255):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"cameras/"+basestation.get('deviceId'),"publishResponse":True,"properties":{"nightLight":{"rgb":{"blue":blue,"green":green,"red":red}}}})
def SetNightLightTimerOn(self, basestation, time=calendar.timegm(time.gmtime()) + 300, timediff=0):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"cameras/"+basestation.get('deviceId'),"publishResponse":True,"properties":{"nightLight":{"sleepTime":time,"sleepTimeRel":timediff}}})
def SetNightLightTimerOff(self, basestation, time=0, timediff=300):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"cameras/"+basestation.get('deviceId'),"publishResponse":True,"properties":{"nightLight":{"sleepTime":time,"sleepTimeRel":timediff}}})
""" Baby Arlo Sensors """
def GetCameraTempReading(self, basestation):
return self.NotifyAndGetResponse(basestation, {"action":"get","resource":"cameras/"+basestation.get('deviceId')+"/ambientSensors/history","publishResponse":False})
def GetSensorConfig(self, basestation):
return self.NotifyAndGetResponse(basestation, {"action":"get","resource":"cameras/"+basestation.get('deviceId')+"/ambientSensors/config","publishResponse":False})
def SetAirQualityAlertOn(self, basestation):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"cameras/"+basestation.get('deviceId')+"/ambientSensors/config","publishResponse":True,"properties":{"airQuality":{"alertsEnabled":True}}})
def SetAirQualityAlertOff(self, basestation):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"cameras/"+basestation.get('deviceId')+"/ambientSensors/config","publishResponse":True,"properties":{"airQuality":{"alertsEnabled":False}}})
def SetAirQualityAlertThresholdMin(self, basestation, number=400):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"cameras/"+basestation.get('deviceId')+"/ambientSensors/config","publishResponse":True,"properties":{"airQuality":{"minThreshold":number}}})
def SetAirQualityAlertThresholdMax(self, basestation, number=700):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"cameras/"+basestation.get('deviceId')+"/ambientSensors/config","publishResponse":True,"properties":{"airQuality":{"maxThreshold":number}}})
def SetAirQualityRecordingOn(self, basestation):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"cameras/"+basestation.get('deviceId')+"/ambientSensors/config","publishResponse":True,"properties":{"airQuality":{"recordingEnabled":True}}})
def SetAirQualityRecordingOff(self, basestation):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"cameras/"+basestation.get('deviceId')+"/ambientSensors/config","publishResponse":True,"properties":{"airQuality":{"recordingEnabled":False}}})
def SetHumidityAlertOn(self, basestation):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"cameras/"+basestation.get('deviceId')+"/ambientSensors/config","publishResponse":True,"properties":{"humidity":{"alertsEnabled":True}}})
def SetHumidityAlertOff(self, basestation):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"cameras/"+basestation.get('deviceId')+"/ambientSensors/config","publishResponse":True,"properties":{"humidity":{"alertsEnabled":False}}})
def SetHumidityAlertThresholdMin(self, basestation, number=400):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"cameras/"+basestation.get('deviceId')+"/ambientSensors/config","publishResponse":True,"properties":{"humidity":{"minThreshold":number}}})
def SetHumidityAlertThresholdMax(self, basestation, number=800):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"cameras/"+basestation.get('deviceId')+"/ambientSensors/config","publishResponse":True,"properties":{"humidity":{"maxThreshold":number}}})
def SetHumidityRecordingOn(self, basestation):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"cameras/"+basestation.get('deviceId')+"/ambientSensors/config","publishResponse":True,"properties":{"humidity":{"recordingEnabled":True}}})
def SetHumidityRecordingOff(self, basestation):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"cameras/"+basestation.get('deviceId')+"/ambientSensors/config","publishResponse":True,"properties":{"humidity":{"recordingEnabled":False}}})
def SetTempAlertOn(self, basestation):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"cameras/"+basestation.get('deviceId')+"/ambientSensors/config","publishResponse":True,"properties":{"temperature":{"alertsEnabled":True}}})
def SetTempAlertOff(self, basestation):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"cameras/"+basestation.get('deviceId')+"/ambientSensors/config","publishResponse":True,"properties":{"temperature":{"alertsEnabled":False}}})
def SetTempAlertThresholdMin(self, basestation, number=200):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"cameras/"+basestation.get('deviceId')+"/ambientSensors/config","publishResponse":True,"properties":{"temperature":{"minThreshold":number}}})
def SetTempAlertThresholdMax(self, basestation, number=240):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"cameras/"+basestation.get('deviceId')+"/ambientSensors/config","publishResponse":True,"properties":{"temperature":{"maxThreshold":number}}})
def SetTempRecordingOn(self, basestation):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"cameras/"+basestation.get('deviceId')+"/ambientSensors/config","publishResponse":True,"properties":{"temperature":{"recordingEnabled":True}}})
def SetTempRecordingOff(self, basestation):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"cameras/"+basestation.get('deviceId')+"/ambientSensors/config","publishResponse":True,"properties":{"temperature":{"recordingEnabled":False}}})
def SetTempUnit(self, uniqueId, unit="C"):
return self.request.post(f'https://{self.BASE_URL}/hmsweb/users/devices/'+uniqueId+'/tempUnit', {"tempUnit":unit})
def SirenOn(self, basestation):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"siren","publishResponse":True,"properties":{"sirenState":"on","duration":300,"volume":8,"pattern":"alarm"}})
def SirenOff(self, basestation):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"siren","publishResponse":True,"properties":{"sirenState":"off","duration":300,"volume":8,"pattern":"alarm"}})
def Reset(self):
return self.request.get(f'https://{self.BASE_URL}/hmsweb/users/library/reset')
def GetServiceLevelSettings(self):
return self.request.get(f'https://{self.BASE_URL}/hmsweb/users/serviceLevel/settings')
def GetServiceLevel(self):
return self.request.get(f'https://{self.BASE_URL}/hmsweb/users/serviceLevel')
def GetServiceLevelV2(self):
""" DEPRECATED: This API still works, but I don't see it being called in the web UI anymore. """
return self.request.get(f'https://{self.BASE_URL}/hmsweb/users/serviceLevel/v2')
def GetServiceLevelV3(self):
""" DEPRECATED: This API still works, but I don't see it being called in the web UI anymore. """
return self.request.get(f'https://{self.BASE_URL}/hmsweb/users/serviceLevel/v3')
def GetServiceLevelV4(self):
return self.request.get(f'https://{self.BASE_URL}/hmsweb/users/serviceLevel/v4')
def GetUpdateFeatures(self):
return self.request.get(f'https://{self.BASE_URL}/hmsweb/users/devices/updateFeatures/feature')
def GetPaymentBilling(self):
return self.request.get(f'https://{self.BASE_URL}/hmsweb/users/payment/billing/'+self.user_id)
def GetPaymentOffers(self):
""" DEPRECATED: This API still works, but I don't see it being called in the web UI anymore. """
return self.request.get(f'https://{self.BASE_URL}/hmsweb/users/payment/offers')
def GetPaymentOffersV2(self):
""" DEPRECATED: This API still works, but I don't see it being called in the web UI anymore. """
return self.request.get(f'https://{self.BASE_URL}/hmsweb/users/payment/offers/v2')
def GetPaymentOffersV3(self):
""" DEPRECATED: This API still works, but I don't see it being called in the web UI anymore. """
return self.request.get(f'https://{self.BASE_URL}/hmsweb/users/payment/offers/v3')
def GetPaymentOffersV4(self):
return self.request.get(f'https://{self.BASE_URL}/hmsweb/users/payment/offers/v4')
def SetOCProfile(self, firstName, lastName, country='United States', language='en', spam_me=0):
return self.request.post(f'https://{self.BASE_URL}/hmsweb/users/ocprofile', {"firstName":"Jeffrey","lastName":"Walter","country":country,"language":language,"mailProgram":spam_me})
def GetOCProfile(self):
return self.request.get(f'https://{self.BASE_URL}/hmsweb/users/ocprofile')
def GetProfile(self):
"""
This call returns the following:
{
"data": {
"_type": "User",
"firstName": "Joe",
"lastName": "Bloggs",
"language": "en",
"country": "GB",
"acceptedPolicy": 1,
"currentPolicy": 1,
"validEmail": true
},
"success": true
}
"""
return self.request.get(f'https://{self.BASE_URL}/hmsweb/users/profile')
def GetAccount(self):
"""
This call returns the following:
{
"data": {
"userId": "XXX-XXXXXXX",
"email": "joe.bloggs@gmail.com",
"dateCreated": 1585157000819,
"dateDeviceRegistered": 1585161139527,
"countryCode": "GB",
"language": "en-gb",
"firstName": "Joe",
"lastName": "Bloggs",
"s3StorageId": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
"tosVersion": "5",
"tosAgreeDate": 1593126066795,
"tosShownVersion": "5",
"lastModified": 1585161137898,
"accountStatus": "registered",
"paymentId": "xxxxxxxx",
"serialNumber": "xxxxxxxxxxxxx",
"mobilePushData": {
"mobilePushOsMap": {
"android": [
{
"token": "xxxxxxxxxxxxxxxxxxx",
"endpoint": "arn:aws:sns:eu-west-1:xxxxxxxxxxxx:endpoint/GCM/Arlo_Android_Prod/xxxxxxxxxxxxxxxxxxxxxx",
"createdDate": "20201310_0622",
"iosDebugModeFlag": false
},
{
"token": "xxxxxxxxxxxxxxxxxxxx",
"endpoint": "arn:aws:sns:eu-west-1:xxxxxxxxxxxx:endpoint/GCM/Arlo_Android_Prod/xxxxxxxxxxxxxxxxxxxxxxx",
"createdDate": "20210801_0335",
"iosDebugModeFlag": false
}
]
}
},
"recycleBinQuota": 0,
"favoriteQuota": 0,
"validEmail": true,
"locationCreated": false,
"readyToClose": false,
"lastMessageTimeToBS": 1608375685602
},
"success": true
}
"""
return self.request.get(f'https://{self.BASE_URL}/hmsweb/users/account')
def GetSession(self):
"""
Returns something like the following:
{
"userId": "XXX-XXXXXXX",
"email": "jeffreydwalter@gmail.com",
"token": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
"paymentId": "XXXXXXXX",
"accountStatus": "registered",
"serialNumber": "XXXXXXXXXXXXXX",
"countryCode": "US",
"tocUpdate": false,
"policyUpdate": false,
"validEmail": true,
"arlo": true,
"dateCreated": 1463975008658
}
"""
return self.request.get(f'https://{self.BASE_URL}/hmsweb/users/session')
def GetSessionV2(self):
"""
Returns something like the following:
{
"userId": "XXX-XXXXXXX",
"email": "jeffreydwalter@gmail.com",
"token": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
"paymentId": "XXXXXXXX",
"accountStatus": "registered",
"serialNumber": "XXXXXXXXXXXXXX",
"countryCode": "US",
"tocUpdate": false,
"policyUpdate": false,
"validEmail": true,
"arlo": true,
"dateCreated": 1463975008658
}
"""
return self.request.get(f'https://{self.BASE_URL}/hmsweb/users/session/v2')
def GetFriends(self):
return self.request.get(f'https://{self.BASE_URL}/hmsweb/users/friends')
def GetLocations(self):
"""
This call returns the following:
{
"id":"XXX-XXXXXXX_20160823042047",
"name":"Home",
"ownerId":"XXX-XXXXXXX",
"longitude":X.XXXXXXXXXXXXXXXX,
"latitude":X.XXXXXXXXXXXXXXXX,
"address":"123 Middle Of Nowhere Bumbfuck, EG, 12345",
"homeMode":"schedule",
"awayMode":"mode1",
"geoEnabled":false,
"geoRadius":150.0,
"uniqueIds":[
"XXX-XXXXXXX_XXXXXXXXXXXXX"
],
"smartDevices":[
"XXXXXXXXXX",
"XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
],
"pushNotifyDevices":[
"XXXXXXXXXX"
]
}
"""
return self.request.get(f'https://{self.BASE_URL}/hmsweb/users/locations')
def GetEmergencyLocations(self):
return self.request.get(f'https://{self.BASE_URL}/hmsweb/users/emergency/locations')
def Geofencing(self, location_id, active=True):
"""
Get location_id is the id field from the return of GetLocations()
NOTE: The Arlo API seems to disable geofencing mode when switching to other modes, if it's enabled.
You should probably do the same, although, the UI reflects the switch from calendar mode to say armed mode without explicitly setting calendar mode to inactive.
"""
return self.request.put(f'https://{self.BASE_URL}/hmsweb/users/locations/'+location_id, {'geoEnabled':active})
def GetDevice(self, device_name):
def is_device(device):
return device.get('deviceName') == device_name
return list(filter(is_device, self.GetDevices()))[0]
def GetDevices(self, device_type=None, filter_provisioned=None):
"""
This method returns an array that contains the basestation, cameras, etc. and their metadata.
If you pass in a valid device type, as a string or a list, this method will return an array of just those devices that match that type. An example would be ['basestation', 'camera']
To filter provisioned or unprovisioned devices pass in a True/False value for filter_provisioned. By default both types are returned.
"""
devices = self.request.get(f'https://{self.BASE_URL}/hmsweb/users/devices')
if device_type:
devices = [ device for device in devices if device.get('deviceType') in device_type]
if filter_provisioned is not None:
if filter_provisioned:
devices = [ device for device in devices if device.get("state") == 'provisioned']
else:
devices = [ device for device in devices if device.get("state") != 'provisioned']
return devices
def GetDeviceSupport(self):
"""
DEPRECATED: This API still works, but I don't see it being called in the web UI anymore.
This API looks like it's mainly used by the website, but I'm including it for completeness sake.
It returns something like the following:
{
"devices": [
{
"deviceType": "arloq",
"urls": {
"troubleshoot": "https://vzs3-prod-common.s3.amazonaws.com/static/html/en/pc_troubleshoot.html",
"plugin": "https://vzs3-prod-common.s3.amazonaws.com/static/html/en/pc_plugin.html",
"connection": "https://vzs3-prod-common.s3.amazonaws.com/static/html/en/pc_connection.html",
"connectionFailed": "https://vzs3-prod-common.s3.amazonaws.com/static/html/en/pc_connection_fail.html",
"press_sync": "https://vzs3-prod-common.s3. amazonaws.com/static/html/en/pc_press_sync.html",
"resetDevice": "https://vzs3-prod-common.s3.amazonaws.com/static/html/en/reset_arloq.html",
"qr_how_to": "https://vzs3-prod-common.s3.amazonaws.com/static/html/en/pc_qr_how_to.html"
}
},
{
"deviceType": "basestation",
"urls": {
"troubleshoot": "https://vzs3-prod-common.s3.amazonaws.com/static/html/en/bs_troubleshoot.html",
"connection": "https://vzs3-prod-common.s3.amazonaws.com/static/html/en/bs_connection.html",
"sync": "https://vzs3-prod-common.s3.amazonaws.com/static/html/en/bs_sync_camera.html"
}
},
{
"deviceType": "arloqs",
"urls": {
"ethernetSetup": "https://vzs3-prod-common.s3.amazonaws.com/static/html/en/arloqs/ethernet_setup.html",
"plugin": "https:// vzs3-prod-common.s3.amazonaws.com/static/html/en/arloqs/aqp_plugin.html",
"connectionWiFi": "https://vzs3-prod-common.s3.amazonaws.com/static/html/en/arloqs/connection_in_progress_wifi.html",
"poeSetup": "https://vzs3-prod-common.s3. amazonaws.com/static/html/en/arloqs/poe_setup.html",
"connection": "https://vzs3-prod-common.s3.amazonaws.com/static/html/en/arloqs/connection_in_progress.html",
"connectionFailed": "https://vzs3-prod-common.s3.amazonaws.com/static/html/en/arloqs/connection_fail.html",
"press_sync": "https://vzs3-prod-common.s3.amazonaws.com/static/html/en/arloqs/press_sync.html",
"connectionType": "https://vzs3-prod-common.s3.amazonaws.com/static/html/en/arloqs/connection_type.html",
"resetDevice": "https://vzs3-prod-common.s3.amazonaws.com/static/html/en/arloqs/reset_device.html",
"qr_how_to": "https://vzs3-prod-common.s3.amazonaws.com/static/html/en/arloqs/qr_how_to.html"
}
}
]
}
"""
return self.request.get(f'https://{self.BASE_URL}/hmsweb/devicesupport')
def GetDeviceSupportv2(self):
"""
DEPRECATED: This API still works, but I don't see it being called in the web UI anymore.
It returns something like the following:
{
"devices": [
{
"deviceType": "arloq",
"modelId": [
"VMC3040"
],
"urls": {
"troubleshoot": "arloq/troubleshoot.html",
"plugin": "arloq/plugin.html",
"qrHowTo": "arloq/qrHowTo.html",
"connection": "arloq/connection.html",
"connectionInProgress": "arloq/connectionInProgress.html",
"connectionFailed": "arloq/connectionFailed.html",
"pressSync": "arloq/pressSync.html",
"resetDevice": "arloq/resetDevice.html"
}
},
{
"deviceType": "basestation",
"modelId": [
"VMB3010",
"VMB3010r2",
"VMB3500",
"VMB4000",
"VMB4500",
"VZB3010"
],
"urls": {
"troubleshoot": "basestation/troubleshoot.html",
"plugin": "basestation/plugin.html",
"sync3": "basestation/sync3.html",
"troubleshootBS": "basestation/troubleshootBS.html",
"connection": "basestation/connection.html",
"connectionInProgress": "basestation/connectionInProgress.html",
"sync2": "basestation/sync2.html",
"connectionFailed": "basestation/connectionFailed.html",
"sync1": "basestation/sync1.html",
"resetDevice": "basestation/resetDevice.html",
"syncComplete": "basestation/syncComplete.html"
}
},
{
"deviceType": "arlobaby",
"modelId": [
"ABC1000"
],
"urls": {
"bleSetupError": "arlobaby/bleSetupError.html",
"troubleshoot": "arlobaby/troubleshoot.html",
"homekitCodeInstruction": "arlobaby/homekitCodeInstruction.html",
"connectionInProgress": "arlobaby/connectionInProgress.html",
"connectionFailed": "arlobaby/connectionFailed.html",
"resetDevice": "arlobaby/resetDevice.html",
"plugin": "arlobaby/plugin.html",
"qrHowTo": "arlobaby/qrHowTo.html",
"warning": "arlobaby/warning.html",
"connection": "arlobaby/connection.html",
"pressSync": "arlobaby/pressSync.html",
"bleInactive": "arlobaby/bleInactive.html",
"pluginIOS": "arlobaby/pluginIOS.html",
"homekitSetup": "arlobaby/homekitSetup.html"
}
},
{
"deviceType": "lteCamera",
"modelId": [
"VML4030"
],
"urls": {
"troubleshoot": "lteCamera/troubleshoot.html",
"resetHowTo": "lteCamera/resetHowTo.html",
"plugin": "lteCamera/plugin.html",
"qrHowTo": "lteCamera/qrHowTo.html",
"connectionInProgress": "lteCamera/connectionInProgress.html",
"connectionFailed": "lteCamera/connectionFailed.html",
"resetDevice": "lteCamera/resetHowTo.html",
"resetComplete": "lteCamera/resetComplete.html",
"syncComplete": "lteCamera/syncComplete.html"
}
},
{
"deviceType": "arloqs",
"modelId": [
"VMC3040S"
],
"urls": {
"ethernetSetup": "arloqs/ethernetSetup.html",
"troubleshoot": "arloqs/troubleshoot.html",
"plugin": "arloqs/plugin.html",
"poeSetup": "arloqs/poeSetup.html",
"connectionInProgressWiFi": "arloqs/connectionInProgressWifi.html",
"qrHowTo": "arloqs/qrHowTo.html",
"connectionInProgress": "arloqs/connectionInProgress.html",
"connectionFailed": "arloqs/connectionFailed.html",
"pressSync": "arloqs/pressSync.html",
"connectionType": "arloqs/connectionType.html",
"resetDevice": "arloqs/resetDevice.html"
}
},
{
"deviceType": "bridge",
"modelId": [
"ABB1000"
],
"urls": {
"troubleshoot": "bridge/troubleshoot.html",
"fwUpdateInProgress": "bridge/fwUpdateInProgress.html",
"qrHowToUnplug": "bridge/qrHowToUnplug.html",
"fwUpdateDone": "bridge/fwUpdateDone.html",
"fwUpdateAvailable": "bridge/fwUpdateAvailable.html",
"needHelp": "https://www.arlo.com/en-us/support/#support_arlo_light",
"wifiError": "bridge/wifiError.html",
"bleAndroid": "bridge/bleInactiveAND.html",
"bleIOS": "bridge/bleInactiveIOS.html",
"connectionInProgress": "bridge/connectionInProgress.html",
"connectionFailed": "bridge/connectionFailed.html",
"manualPair": "bridge/manualPairing.html",
"resetDevice": "bridge/resetDevice.html",
"lowPower": "bridge/lowPowerZoneSetup.html",
"fwUpdateFailed": "bridge/fwUpdateFailed.html",
"fwUpdateCheckFailed": "bridge/fwUpdateCheckFailed.html",
"plugin": "bridge/plugin.html",
"qrHowTo": "bridge/qrHowTo.html",
"pressSync": "bridge/pressSync.html",
"pluginNoLED": "bridge/pluginNoLED.html",
"fwUpdateCheck": "bridge/fwUpdateCheck.html"
}
},
{
"deviceType": "lights",
"modelId": [
"AL1101"
],
"urls": {
"troubleshoot": "lights/troubleshoot.html",
"needHelp": "https://kb.netgear.com/000053159/Light-discovery-failed.html",
"bleInactiveAND": "lights/bleInactiveAND.html",
"connectionInProgress": "lights/connectionInProgress.html",
"connectionFailed": "lights/connectionFailed.html",
"addBattery": "lights/addBattery.html",
"tutorial1": "lights/tutorial1.html",
"plugin": "lights/plugin.html",
"tutorial2": "lights/tutorial2.html",
"tutorial3": "lights/tutorial3.html",
"configurationInProgress": "lights/configurationInProgress.html",
"qrHowTo": "lights/qrHowTo.html",
"pressSync": "lights/pressSync.html",
"bleInactiveIOS": "lights/bleInactiveIOS.html",
"syncComplete": "lights/syncComplete.html"
}
},
{
"deviceType": "routerM1",
"modelId": [
"MR1100"
],
"urls": {
"troubleshoot": "routerM1/troubleshoot.html",
"help": "routerM1/help.html",
"pairingFailed": "routerM1/pairingFailed.html",
"needHelp": "https://acupdates.netgear.com/help/redirect.aspx?url=m1arlo-kbb",
"plugin": "routerM1/plugin.html",
"pairing": "routerM1/pairing.html",
"connectionInProgress": "routerM1/connectionInProgress.html",
"sync2": "routerM1/sync2.html",
"connectionFailed": "routerM1/connectionFailed.html",
"sync1": "routerM1/sync1.html",
"sync": "routerM1/sync.html",
"syncComplete": "routerM1/syncComplete.html"
}
}
],
"selectionUrls": {
"addDevice": "addDeviceBsRuAqAqpLteAbcMrBgLt.html",
"selectBasestation": "selectBsMr.html",
"deviceSelection": "deviceBsAqAqpLteAbcMrLtSelection.html",
"selectLights": "selectBgLt.html"
},
"baseUrl": "https://vzs3-prod-common.s3.amazonaws.com/static/v2/html/en/"
}
"""
return self.request.get(f'https://{self.BASE_URL}/hmsweb/devicesupport/v2')
def GetDeviceSupportV3(self):
"""
This is the latest version of the device support api.
It returns something like the following:
{
"data": {
"devices": {
"camera": {
"modelIds": [
"VMC3010",
"VMC3030",
"VMC4030",
"VMC4030P",
"VMC5040",
"VZC3010",
"VZC3030"
],
"connectionTypes": {
"WPS": true,
"BLE": true
},
"kbArticles": {
"insertBatteries": "https://kb.arlo.com/980150/Safety-Rules-for-Arlo-Wire-Free-Camera-Batteries",
"syncBasestation": "https://kb.arlo.com/987/How-do-I-set-up-and-sync-my-Arlo-Wire-Free-cameras",
"sync": "https://kb.arlo.com/987/How-do-I-set-up-and-sync-my-Arlo-Wire-Free-camera",
"firmwareUpdate": "https://kb.arlo.com/4736/How-do-I-update-my-Arlo-firmware-manually"
}
},
"arloq": {
"modelIds": [
"VMC3040",
"VMC3040S"
],
"kbArticles": {
"power": "https://kb.arlo.com/1001944/How-do-I-set-up-Arlo-Q-on-iOS",
"qrCode": "https://kb.arlo.com/1001944/How-do-I-set-up-Arlo-Q-on-iOS",
"power_android": "https://kb.arlo.com/1002006/How-do-I-set-up-Arlo-Q-on-Android",
"qrCode_android": "https://kb.arlo.com/1002006/How-do-I-set-up-Arlo-Q-on-Android"
}
},
"basestation": {
"modelIds": [
"VMB3010",
"VMB4000",
"VMB3010r2",
"VMB3500",
"VZB3010",
"VMB4500",
"VMB5000"
],
"smartHubs": [
"VMB5000"
],
"kbArticles": {
"pluginNetworkCable": "https://kb.arlo.com/1179139/How-do-I-connect-my-Arlo-or-Arlo-Pro-base-station-to-the-Internet",
"power": "https://kb.arlo.com/1179139/How-do-I-connect-my-Arlo-or-Arlo-Pro-base-station-to-the-Internet",
"led": "https://kb.arlo.com/1179139/How-do-I-connect-my-Arlo-or-Arlo-Pro-base-station-to-the-Internet",
"learnMore": "https://kb.arlo.com/000062124/How-do-I-record-4K-videos-to-a-microSD-card"
}
},
"arlobaby": {
"modelIds": [
"ABC1000"
],
"kbArticles": {
"power": "https://kb.arlo.com/1282682/How-do-I-power-cycle-my-Arlo-Baby-camera",
"qrCode": "https://kb.arlo.com/1282700/How-do-I-set-up-my-Arlo-Baby-camera"
}
},
"lteCamera":{
"modelIds":[
"VML4030"
],
"kbArticles":{
"servicePlan":"https://kb.arlo.com/1286865/What-Arlo-Mobile-service-plans-are-available",
"simActivation":"https://kb.arlo.com/1286865/What-Arlo-Mobile-service-plans-are-available",
"qrCode":"https://kb.arlo.com/1201822/How-do-I-set-up-my-Arlo-Go-camera"
}
},
"bridge": {
"modelIds": [
"ABB1000"
],
"kbArticles": {
"power": "https://kb.arlo.com/000062047",
"sync": "https://kb.arlo.com/000062037",
"qrCode": "https://kb.arlo.com/000061886",
"factoryReset": "https://kb.arlo.com/000061837"
}
},
"lights": {
"modelIds": [
"AL1101"
],
"kbArticles": {
"sync": "https://kb.arlo.com/000062005",
"insertBatteries": "https://kb.arlo.com/000061952",
"qrCode": "https://kb.arlo.com/000061886"
}
},
"routerM1":{
"modelIds":[
"MR1100"
],
"kbArticles":{
"lookupFailed":"https://kb.arlo.com/1179130/Arlo-can-t-discover-my-base-station-during-installation-what-do-I-do"
}
},
"chime": {
"modelIds": [
"AC1001"
],
"kbArticles": {
"ledNotBlinking":"https://kb.arlo.com/000061924",
"led":"https://kb.arlo.com/000061847",
"factoryReset":"https://kb.arlo.com/000061879",
"connectionFailed":"https://kb.arlo.com/000061880"
}
},
"doorbell": {
"modelIds": [
"AAD1001"
],
"kbArticles": {
"led":"https://kb.arlo.com/000061847",
"factoryReset":"https://kb.arlo.com/000061842",
"pairCamera":"https://kb.arlo.com/000061897",
"existingChime":"https://kb.arlo.com/000061856",
"noWiring":"https://kb.arlo.com/000061859",
"connectionFailed":"https://kb.arlo.com/000061868",
"pairCameraFailed":"https://kb.arlo.com/000061893",
"testChimeFailed":"https://kb.arlo.com/000061944"
},
"videos": {
"chimeType": "https://youtu.be/axytuF63VC0",
"wireDoorbell": "https://youtu.be/_5D2n3iPqW0",
"switchSetting": "https://youtu.be/BUmd4fik2RE"
},
"arloVideos": {
"chimeType": "https://vzs3-prod-common.s3.amazonaws.com/static/devicesupport/Arlo_Audio_Doorbell_Chime.mp4",
"wireDoorbell": "https://vzs3-prod-common.s3.amazonaws.com/static/devicesupport/Arlo_Audio_Doorbell_Wired.mp4",
"switchSetting": "https://vzs3-prod-common.s3.amazonaws.com/static/devicesupport/Arlo_Audio_Doorbell_Switch.mp4"
}
}
},
"arlosmart": {
"kbArticles": {
"e911": "https://www.arlo.com/en-us/landing/arlosmart/",
"callFriend": "https://www.arlo.com/en-us/landing/arlosmart/",
"4kAddOnPopup": "https://www.arlo.com/en-us/landing/arlosmart/",
"cloudRecording": "https://www.arlo.com/en-us/landing/arlosmart/",
"manageArloSmart": "https://kb.arlo.com/000062115",
"otherVideo": "https://kb.arlo.com/000062115",
"packageDetection": "https://kb.arlo.com/000062114",
"whereIsBasicSubscriptionGone": "https://kb.arlo.com/000062163"
}
}
},
"success":true
}
"""
return self.request.get(f'https://{self.BASE_URL}/hmsweb/devicesupport/v3')
def GetDeviceCapabilities(self, device):
    """Fetch the static capabilities JSON for a device's model and interface version."""
    model = device.get('modelId').lower()
    interface_version = device.get('interfaceVersion')
    url = f'https://{self.BASE_URL}/resources/capabilities/{model}/{model}_{interface_version}.json'
    return self.request.get(url, raw=True)
def GetLibraryMetaData(self, from_date, to_date):
    """Query metadata about library recordings between the two dates."""
    body = {'dateFrom': from_date, 'dateTo': to_date}
    return self.request.post(f'https://{self.BASE_URL}/hmsweb/users/library/metadata', body)
def UpdateProfile(self, first_name, last_name):
    """Set the first and last name on the account profile."""
    payload = {'firstName': first_name, 'lastName': last_name}
    return self.request.put(f'https://{self.BASE_URL}/hmsweb/users/profile', payload)
def UpdatePassword(self, password):
    """Change the account password and remember the new one on this client.

    NOTE(review): self.password is updated unconditionally after the call —
    presumably self.request.post raises on HTTP failure so this is safe;
    confirm that before relying on the cached value.
    """
    r = self.request.post(f'https://{self.BASE_URL}/hmsweb/users/changePassword', {'currentPassword':self.password,'newPassword':password})
    self.password = password
    return r
def UpdateFriend(self, body):
    """Update an existing friend (a user you have shared devices with).

    body is the full friend record as returned by the API, for example:
    {
        "firstName":"Some",
        "lastName":"Body",
        "devices":{
            "XXXXXXXXXXXXX":"Camera 1",
            "XXXXXXXXXXXXX":"Camera 2 ",
            "XXXXXXXXXXXXX":"Camera 3"
        },
        "lastModified":1463977440911,
        "adminUser":true,
        "email":"user@example.com",
        "id":"XXX-XXXXXXX"
    }
    """
    return self.request.put(f'https://{self.BASE_URL}/hmsweb/users/friends', body)
def RemoveFriend(self, email):
    """Revoke a previously-granted user's access.

    email: email address of the user whose access is being removed.
    """
    payload = {"email": email}
    return self.request.post(f'https://{self.BASE_URL}/hmsweb/users/friends/remove', payload)
def AddFriend(self, firstname, lastname, email, devices=None, admin=False):
    """Email an invitation granting the user access to the given devices.

    devices maps uniqueId (the XXX-XXXXXXX_XXXXXXXXXXXX field of a device
    object) to a display name, e.g.
    {"XXX-XXXXXXX_XXXXXXXXXXXX": "Camera1"}. admin grants admin rights.
    """
    # Fix: avoid the mutable-default-argument anti-pattern ({} shared
    # across calls); omitting devices still means "no devices".
    if devices is None:
        devices = {}
    return self.request.post(f'https://{self.BASE_URL}/hmsweb/users/friends', {"adminUser":admin,"firstName":firstname,"lastName":lastname,"email":email,"devices":devices})
def ResendFriendInvite(self, friend):
    """Resend the invitation email for a previously added friend.

    friend must be the full friend object obtained from GetFriends(),
    because it carries the token field this endpoint requires.
    """
    return self.request.post(f'https://{self.BASE_URL}/hmsweb/users/friends', friend)
def UpdateDeviceName(self, device, name):
    """Rename a device."""
    payload = {'deviceId': device.get('deviceId'), 'deviceName': name, 'parentId': device.get('parentId')}
    return self.request.put(f'https://{self.BASE_URL}/hmsweb/users/devices/renameDevice', payload)
def UpdateDisplayOrder(self, body):
    """Persist the display ordering of your devices in the UI.

    body maps each deviceId (from GetDevices()) to its 1-based position:
    {
        "devices":{
            "XXXXXXXXXXXXX":1,
            "XXXXXXXXXXXXX":2,
            "XXXXXXXXXXXXX":3
        }
    }
    """
    return self.request.post(f'https://{self.BASE_URL}/hmsweb/users/devices/displayOrder', body)
def GetLibrary(self, from_date, to_date):
    """List library recordings created between from_date and to_date.

    Each returned entry includes presignedContentUrl (the actual video in
    Amazon AWS) and presignedThumbnailUrl (its .jpg thumbnail), plus
    metadata such as deviceId, createdDate, reason, timeZone and
    mediaDuration.
    """
    body = {'dateFrom': from_date, 'dateTo': to_date}
    return self.request.post(f'https://{self.BASE_URL}/hmsweb/users/library', body)
def DeleteRecording(self, recording):
    """Delete a single video recording from Arlo.

    recording is one entry from the GetLibrary() response; its date fields
    and deviceId identify the video to recycle.
    """
    # Fix: utcCreatedDate previously re-sent the createdDate value instead
    # of the recording's own utcCreatedDate field.
    data = [{
        'createdDate': recording.get('createdDate'),
        'utcCreatedDate': recording.get('utcCreatedDate'),
        'deviceId': recording.get('deviceId'),
    }]
    return self.request.post(f'https://{self.BASE_URL}/hmsweb/users/library/recycle', {'data': data})
def BatchDeleteRecordings(self, recordings):
    """Delete a batch of video recordings from Arlo.

    recordings is a list of objects each carrying createdDate,
    utcCreatedDate and deviceId — the GetLibrary() response can be passed
    straight in. Returns None without issuing a request when the list is
    empty or falsy.
    [
        {
            "createdDate":"20160904",
            "utcCreatedDate":1473010280395,
            "deviceId":"XXXXXXXXXXXXX"
        }
    ]
    """
    if not recordings:
        return None
    return self.request.post(f'https://{self.BASE_URL}/hmsweb/users/library/recycle', {'data': recordings})
def GetRecording(self, url, chunk_size=4096):
    """Return the whole video from the presignedContentUrl as bytes.

    Fix: the accumulator was initialized to the str '' while
    requests' iter_content yields bytes, which raises TypeError on
    Python 3. The video is now assembled with a bytes join (also avoids
    quadratic concatenation).
    """
    r = requests.get(url, stream=True)
    r.raise_for_status()
    return b''.join(chunk for chunk in r.iter_content(chunk_size) if chunk)
def StreamRecording(self, url, chunk_size=4096):
    """Yield the video at the presignedContentUrl as a chunked stream.

    url: presignedContentUrl
    """
    response = requests.get(url, stream=True)
    response.raise_for_status()
    for piece in response.iter_content(chunk_size):
        yield piece
def DownloadRecording(self, url, to):
    """Write a video to a given local file path.

    url: presignedContentUrl
    to: path where the file should be written
    """
    stream = self.StreamRecording(url)
    # The with-statement closes the file on exit; the old explicit
    # fd.close() inside the block was redundant.
    with open(to, 'wb') as fd:
        for chunk in stream:
            fd.write(chunk)
def DownloadSnapshot(self, url, to, chunk_size=4096):
    """Write a snapshot to a given local file path.

    url: presignedContentUrl or presignedFullFrameSnapshotUrl
    to: path where the file should be written
    """
    r = Request().get(url, stream=True)
    # The with-statement closes the file on exit; the old explicit
    # fd.close() inside the block was redundant.
    with open(to, 'wb') as fd:
        for chunk in r.iter_content(chunk_size):
            fd.write(chunk)
def StartStream(self, basestation, camera):
    """
    This function returns the url of the rtsp video stream.
    This stream needs to be called within 30 seconds or else it becomes invalid.
    It can be streamed with: ffmpeg -re -i 'rtsps://<url>' -acodec copy -vcodec copy test.mp4
    The request to /users/devices/startStream returns: { url:rtsp://<url>:443/vzmodulelive?egressToken=b<xx>&userAgent=iOS&cameraId=<camid>}
    """
    # nonlocal variable hack for Python 2.x: a class attribute lets both
    # nested functions share mutable state.
    class nl:
        # Filled by trigger() with the startStream response dict.
        stream_url_dict = None
    def trigger(self):
        nl.stream_url_dict = self.request.post(f'https://{self.BASE_URL}/hmsweb/users/devices/startStream', {"to":camera.get('parentId'),"from":self.user_id+"_web","resource":"cameras/"+camera.get('deviceId'),"action":"set","responseUrl":"", "publishResponse":True,"transId":self.genTransId(),"properties":{"activityState":"startUserStream","cameraId":camera.get('deviceId')}}, headers={"xcloudId":camera.get('xCloudId')})
    def callback(self, event):
        # Wait for the basestation to report this camera's stream active,
        # then upgrade the returned rtsp:// url to rtsps://.
        if event.get("from") == basestation.get("deviceId") and event.get("resource") == "cameras/"+camera.get("deviceId") and event.get("properties", {}).get("activityState") == "userStreamActive":
            return nl.stream_url_dict['url'].replace("rtsp://", "rtsps://")
        return None
    return self.TriggerAndHandleEvent(basestation, trigger, callback)
def StopStream(self, basestation, camera):
    """Ask the basestation to stop the active user stream for a camera.

    NOTE(review): this looks copy-pasted from StartStream — trigger()
    discards the POST response, so nl.stream_url_dict stays None and the
    callback would raise TypeError on the None subscript if it ever
    matched; the callback also still waits for "userStreamActive" rather
    than a stopped state. Confirm the intended event/return value before
    relying on this method's result.
    """
    # nonlocal variable hack for Python 2.x.
    class nl:
        # Never assigned here — see the review note above.
        stream_url_dict = None
    def trigger(self):
        self.request.post(f'https://{self.BASE_URL}/hmsweb/users/devices/stopStream', {"to":camera.get('parentId'),"from":self.user_id+"_web","resource":"cameras/"+camera.get('deviceId'),"action":"set","responseUrl":"", "publishResponse":True,"transId":self.genTransId(),"properties":{"activityState":"stopUserStream","cameraId":camera.get('deviceId')}}, headers={"xcloudId": camera.get('xCloudId')})
    def callback(self, event):
        if event.get("from") == basestation.get("deviceId") and event.get("resource") == "cameras/"+camera.get("deviceId") and event.get("properties", {}).get("activityState") == "userStreamActive":
            return nl.stream_url_dict['url'].replace("rtsp://", "rtsps://")
        return None
    return self.TriggerAndHandleEvent(basestation, trigger, callback)
def TriggerStreamSnapshot(self, basestation, camera):
    """Snapshot the camera while it is streaming/recording.

    NOTE: You MUST call StartStream() before calling this function, and you
    must be consuming the stream, or streaming is cancelled and the
    snapshot may fail. If you only want a snapshot without streaming, use
    TriggerFullFrameSnapshot() instead.
    NOTE: Use DownloadSnapshot() to download the actual image file.
    """
    def trigger(self):
        self.request.post(f'https://{self.BASE_URL}/hmsweb/users/devices/takeSnapshot', {'xcloudId':camera.get('xCloudId'),'parentId':camera.get('parentId'),'deviceId':camera.get('deviceId'),'olsonTimeZone':camera.get('properties', {}).get('olsonTimeZone')}, headers={"xcloudId":camera.get('xCloudId')})
    def callback(self, event):
        # Guard clauses: ignore events for other devices or resources.
        if event.get("deviceId") != camera.get("deviceId"):
            return None
        if event.get("resource") != "mediaUploadNotification":
            return None
        # None (url not present yet) keeps the event loop waiting.
        return event.get("presignedContentUrl")
    return self.TriggerAndHandleEvent(basestation, trigger, callback)
def TriggerFullFrameSnapshot(self, basestation, camera):
    """
    This function causes the camera to record a fullframe snapshot.
    The presignedFullFrameSnapshotUrl url is returned.
    Use DownloadSnapshot() to download the actual image file.
    """
    def trigger(self):
        # Consistency fix: use the configurable self.BASE_URL like every
        # other endpoint in this client instead of hard-coding my.arlo.com.
        self.request.post(f'https://{self.BASE_URL}/hmsweb/users/devices/fullFrameSnapshot', {"to":camera.get("parentId"),"from":self.user_id+"_web","resource":"cameras/"+camera.get("deviceId"),"action":"set","publishResponse":True,"transId":self.genTransId(),"properties":{"activityState":"fullFrameSnapshot"}}, headers={"xcloudId":camera.get("xCloudId")})
    def callback(self, event):
        # The snapshot URL arrives in a later event from the basestation.
        if event.get("from") == basestation.get("deviceId") and event.get("resource") == "cameras/"+camera.get("deviceId") and event.get("action") == "fullFrameSnapshotAvailable":
            return event.get("properties", {}).get("presignedFullFrameSnapshotUrl")
        return None
    return self.TriggerAndHandleEvent(basestation, trigger, callback)
def StartRecording(self, basestation, camera):
    """Start recording on the camera and return the active stream url.

    You can get the timezone from GetDevices().
    """
    stream_url = self.StartStream(basestation, camera)
    body = {'xcloudId': camera.get('xcloudId') if False else camera.get('xCloudId'), 'parentId': camera.get('parentId'), 'deviceId': camera.get('deviceId'), 'olsonTimeZone': camera.get('properties', {}).get('olsonTimeZone')}
    self.request.post(f'https://{self.BASE_URL}/hmsweb/users/devices/startRecord', body, headers={"xcloudId": camera.get('xCloudId')})
    return stream_url
def StopRecording(self, camera):
    """Stop an in-progress recording on the camera.

    You can get the timezone from GetDevices().
    """
    body = {'xcloudId': camera.get('xCloudId'), 'parentId': camera.get('parentId'), 'deviceId': camera.get('deviceId'), 'olsonTimeZone': camera.get('properties', {}).get('olsonTimeZone')}
    return self.request.post(f'https://{self.BASE_URL}/hmsweb/users/devices/stopRecord', body, headers={"xcloudId": camera.get('xCloudId')})
def GetCvrPlaylist(self, camera, fromDate, toDate):
    """Download the CVR playlist file for the period fromDate to toDate."""
    unique_id = camera.get('uniqueId')
    url = f'https://{self.BASE_URL}/hmsweb/users/devices/{unique_id}/playlist?fromDate={fromDate}&toDate={toDate}'
    return self.request.get(url)
| {
"content_hash": "5696345cadfb3e1138ae58a4653b66c6",
"timestamp": "",
"source": "github",
"line_count": 1760,
"max_line_length": 910,
"avg_line_length": 50.055113636363636,
"alnum_prop": 0.5973529178065088,
"repo_name": "jeffreydwalter/arlo",
"id": "34ee1ffc88e1614bb6549fef5849801a3db26de3",
"size": "88097",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "arlo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1109"
},
{
"name": "Python",
"bytes": "137031"
}
],
"symlink_target": ""
} |
from model import model
import numpy as np
import tensorflow as tf
import os
import sys
import time
# Filename of the training CSV (first column label, remaining columns pixels).
TRAIN = 'train.csv'
# Command-line configuration (TF 1.x tf.app.flags API).
tf.app.flags.DEFINE_string('data_directory', './data/', 'directory containing the data sets')
tf.app.flags.DEFINE_float('validation_fraction', 0.2, 'fraction of training data set aside for validation')
tf.app.flags.DEFINE_integer('batch_size', 1000, 'batch size for training')
tf.app.flags.DEFINE_integer('num_epochs', 10, 'number of epochs trained')
tf.app.flags.DEFINE_float('learning_rate', 0.0001, 'learning rate in training')
tf.app.flags.DEFINE_string('model_name', 'mnist', 'name of the saved model')
tf.app.flags.DEFINE_string('models_directory', './models/', 'directory to save the tensorflow model')
tf.app.flags.DEFINE_string('logs_directory', './logs/', 'directory to save logs')
tf.app.flags.DEFINE_integer('run_id', 1, 'id of this run, affects logs')
FLAGS = tf.app.flags.FLAGS
def train(train_images, train_labels, validation_images=None, validation_labels=None):
    """Train the model, print progress/ETA, log summaries, save a checkpoint.

    Args:
        train_images: float32 array of shape (N, image_size, image_size).
        train_labels: int64 array of shape (N,) with class indices.
        validation_images: optional held-out images evaluated once per epoch.
        validation_labels: labels matching validation_images.

    Side effects: writes TensorBoard logs to FLAGS.logs_directory/run<id>
    and a checkpoint to FLAGS.models_directory.
    NOTE(review): uses pre-1.0 TensorFlow APIs (tf.merge_summary,
    tf.scalar_summary, tf.train.SummaryWriter) — requires an old TF release.
    """
    # Images must be square; infer the edge length from the data.
    assert train_images.shape[1] == train_images.shape[2]
    train_size, image_size, _ = train_images.shape
    # Placeholders for a batch of images/labels plus dropout keep-probability.
    images_ph = tf.placeholder(tf.float32, shape=(None, image_size, image_size))
    labels_ph = tf.placeholder(tf.int64, shape=(None,))
    keep_prob = tf.placeholder(tf.float32)
    logits = model(images_ph, keep_prob, reuse=None)
    loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits, labels_ph))
    accuracy = tf.contrib.metrics.accuracy(tf.argmax(logits, 1), labels_ph)
    optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
    train_op = optimizer.minimize(loss)
    init_op = tf.global_variables_initializer()
    # Scalar summaries evaluated on the validation set at the end of each epoch.
    merged_summary = tf.merge_summary([
        tf.scalar_summary('loss', loss),
        tf.scalar_summary('accuracy', accuracy)
    ])
    # Hyper-parameters are logged once so runs can be compared in TensorBoard.
    params_summary = tf.merge_summary([
        tf.scalar_summary('hyper-parameters/learning_rate', FLAGS.learning_rate),
        tf.scalar_summary('hyper-parameters/num_epochs', FLAGS.num_epochs),
        tf.scalar_summary('hyper-parameters/batch_size', FLAGS.batch_size)
    ])
    logs_path = os.path.join(FLAGS.logs_directory, 'run%d' % FLAGS.run_id)
    if not os.path.exists(logs_path):
        os.mkdir(logs_path)
    summary_writer = tf.train.SummaryWriter(logs_path)
    with tf.Session() as session:
        session.run(init_op)
        t0 = time.time()
        summary_writer.add_summary(session.run(params_summary))
        for e in range(FLAGS.num_epochs):
            for i in range(0, train_size, FLAGS.batch_size):
                batch_images = train_images[i:i + FLAGS.batch_size, :, :]
                batch_labels = train_labels[i:i + FLAGS.batch_size]
                step_loss, step_accuracy, _ = session.run([loss, accuracy, train_op], feed_dict={
                    images_ph: batch_images,
                    labels_ph: batch_labels,
                    keep_prob: 0.5
                })
                # Fraction of total training done, used for the ETA estimate.
                progress = ((e + (min(i + FLAGS.batch_size, train_size) / train_size)) / FLAGS.num_epochs)
                eta = (time.time() - t0) * (1 / progress - 1)
                # Carriage return + erase-line keeps progress on one console line.
                sys.stdout.write('\r')
                sys.stdout.write('\033[K')
                # NOTE(review): accuracy is multiplied by 100 but the format
                # label carries no '%' — confirm the intended display.
                sys.stdout.write('progress: %.2f%%, loss: %.3f, accuracy: %.3f, eta: %.3f' %
                                 (progress * 100, step_loss, 100 * step_accuracy, eta))
                sys.stdout.flush()
            if validation_images is not None:
                # Evaluate summaries on the validation set with dropout disabled.
                step_merged_summary = session.run(merged_summary, feed_dict={
                    images_ph: validation_images,
                    labels_ph: validation_labels,
                    keep_prob: 1.0
                })
                summary_writer.add_summary(step_merged_summary, e)
        sys.stdout.write('\n')
        saver = tf.train.Saver(write_version=tf.train.SaverDef.V2)
        save_path = os.path.join(FLAGS.models_directory, FLAGS.model_name + '.ckpt')
        saver.save(sess=session, save_path=save_path)
        print('saved tensorflow model at %s' % save_path, flush=True)
def main(_):
    """Load the training CSV, normalize/split/augment it, and run training."""
    train_path = os.path.join(FLAGS.data_directory, TRAIN)
    print('reading data from %s ...' % train_path, end='', flush=True)
    train_data = np.genfromtxt(train_path, dtype=np.uint8, delimiter=',', skip_header=1)
    print(' done', flush=True)
    # First CSV column is the label; the rest are flattened square images.
    labels = train_data[:, 0].astype(np.int64)
    image_size = int(np.sqrt(train_data.shape[1] - 1))
    images = train_data[:, 1:].reshape((-1, image_size, image_size)).astype(np.float32)
    # Rescale pixel values from [0, 255] to [-1, 1].
    images = np.array(images * 2.0 / 255 - 1, copy=False)
    train_size = int(np.round(images.shape[0] * (1 - FLAGS.validation_fraction)))
    train_images = images[:train_size, :, :]
    train_labels = labels[:train_size]
    # Augment by doubling the set with flipped copies. NOTE(review):
    # np.fliplr on a (N, H, W) array flips axis 1 (the rows), i.e. an
    # upside-down flip per image, not a left-right mirror — confirm intent.
    train_images = np.concatenate((train_images, np.fliplr(train_images)))
    train_labels = np.concatenate((train_labels, train_labels))
    # With validation_fraction == 0 there is no hold-out set.
    validation_images = images[train_size:, :, :] if train_size != images.shape[0] else None
    validation_labels = labels[train_size:] if train_size != images.shape[0] else None
    print('beginning training...', flush=True)
    train(train_images, train_labels, validation_images, validation_labels)
    print('training done', flush=True)
if __name__ == '__main__':
    # tf.app.run() parses the flags and then invokes main(_).
    tf.app.run()
| {
"content_hash": "c921832fc92f8bcdf11ecd183e91aaae",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 107,
"avg_line_length": 43.553719008264466,
"alnum_prop": 0.6335863377609108,
"repo_name": "priyathamkat/digit-recognizer",
"id": "931a72e6f52c9723b490d30781e7eb6f0c03b8f5",
"size": "5270",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "train.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12899"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.conf import settings
from django.template import Library
register = Library()
@register.inclusion_tag("aldryn_google_tag_manager/google_tag_manager.html")
def google_tag_manager(tag_id=''):
    """Render the GTM snippet, falling back to settings.GOOGLE_TAG_MANAGER_ID."""
    if not tag_id:
        # Using the current (falsy) tag_id as the getattr default keeps
        # behavior identical when the setting is absent.
        tag_id = getattr(settings, 'GOOGLE_TAG_MANAGER_ID', tag_id)
    return {'tag_id': tag_id}
| {
"content_hash": "b7e2d40463baff520795947d1e9e7bf7",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 76,
"avg_line_length": 26.933333333333334,
"alnum_prop": 0.7029702970297029,
"repo_name": "aldryn/aldryn-google-tag-manager",
"id": "9d308ff31e86188ae9f9176caaabb341d22fd9d2",
"size": "429",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aldryn_google_tag_manager/templatetags/google_tag_manager_tags.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "570"
},
{
"name": "Python",
"bytes": "2144"
}
],
"symlink_target": ""
} |
from ..schemas import Schema, fields
from ..utils import FrozenMixin
from ..enums import EnumField, CardTypeEnum
class CardSchema(Schema):
    """(De)serialization schema for payment card data."""
    # All card fields are plain optional strings.
    CardNumber = fields.String()
    ExpirationMonth = fields.String()
    ExpirationYear = fields.String()
    CVV = fields.String()
    # Raw magnetic-stripe track data (card-present transactions).
    Track1Data = fields.String()
    Track2Data = fields.String()
    PaypageRegistrationID = fields.String()
    AccountNumber = fields.String()
    # Card brand; serialized by the enum's value rather than its name.
    Type = EnumField(CardTypeEnum, by_value=True)
class Card(FrozenMixin):
    """Value object mirroring CardSchema; every field defaults to None.

    NOTE(review): FrozenMixin presumably prevents mutation after
    construction — confirm against the mixin's implementation.
    """
    __schema__ = CardSchema
    CardNumber = None
    ExpirationMonth = None
    ExpirationYear = None
    CVV = None
    Track1Data = None
    Track2Data = None
    PaypageRegistrationID = None
    AccountNumber = None
    Type = None
| {
"content_hash": "dbc2e2c1ce78cff00d90114e3776769a",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 49,
"avg_line_length": 25.344827586206897,
"alnum_prop": 0.6965986394557823,
"repo_name": "SambaDemon/python_vantiv",
"id": "bc7f34ad80f4fa638ce0778b164fc196540cdf1f",
"size": "735",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vantiv/request/model/card.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "92718"
}
],
"symlink_target": ""
} |
from typing import TYPE_CHECKING
from airflow.providers.google.cloud.links.base import BaseGoogleLink
if TYPE_CHECKING:
from airflow.utils.context import Context
# Google Cloud console URL templates for the extra-link helpers below. Each
# template's placeholders are filled from the dict the matching *Link class
# persists to XCom.
BASE_LINK = "https://console.cloud.google.com"
VERTEX_AI_BASE_LINK = BASE_LINK + "/vertex-ai"
VERTEX_AI_MODEL_LINK = (
    VERTEX_AI_BASE_LINK + "/locations/{region}/models/{model_id}/deploy?project={project_id}"
)
VERTEX_AI_MODEL_LIST_LINK = VERTEX_AI_BASE_LINK + "/models?project={project_id}"
# Model exports land in a GCS bucket, so this link points at the Storage browser.
VERTEX_AI_MODEL_EXPORT_LINK = (
    BASE_LINK + "/storage/browser/{bucket_name}/model-{model_id}?project={project_id}"
)
VERTEX_AI_TRAINING_LINK = (
    VERTEX_AI_BASE_LINK + "/locations/{region}/training/{training_id}/cpu?project={project_id}"
)
VERTEX_AI_TRAINING_PIPELINES_LINK = VERTEX_AI_BASE_LINK + "/training/training-pipelines?project={project_id}"
VERTEX_AI_DATASET_LINK = (
    VERTEX_AI_BASE_LINK + "/locations/{region}/datasets/{dataset_id}/analyze?project={project_id}"
)
VERTEX_AI_DATASET_LIST_LINK = VERTEX_AI_BASE_LINK + "/datasets?project={project_id}"
VERTEX_AI_HYPERPARAMETER_TUNING_JOB_LIST_LINK = (
    VERTEX_AI_BASE_LINK + "/training/hyperparameter-tuning-jobs?project={project_id}"
)
VERTEX_AI_BATCH_PREDICTION_JOB_LINK = (
    VERTEX_AI_BASE_LINK
    + "/locations/{region}/batch-predictions/{batch_prediction_job_id}?project={project_id}"
)
VERTEX_AI_BATCH_PREDICTION_JOB_LIST_LINK = VERTEX_AI_BASE_LINK + "/batch-predictions?project={project_id}"
VERTEX_AI_ENDPOINT_LINK = (
    VERTEX_AI_BASE_LINK + "/locations/{region}/endpoints/{endpoint_id}?project={project_id}"
)
VERTEX_AI_ENDPOINT_LIST_LINK = VERTEX_AI_BASE_LINK + "/endpoints?project={project_id}"
class VertexAIModelLink(BaseGoogleLink):
    """Link helper for the Vertex AI Model deploy page."""

    name = "Vertex AI Model"
    key = "model_conf"
    format_str = VERTEX_AI_MODEL_LINK

    @staticmethod
    def persist(context: "Context", task_instance, model_id: str):
        # Stash the template parameters in XCom so the UI can render the URL.
        link_params = {
            "model_id": model_id,
            "region": task_instance.region,
            "project_id": task_instance.project_id,
        }
        task_instance.xcom_push(context=context, key=VertexAIModelLink.key, value=link_params)
class VertexAIModelListLink(BaseGoogleLink):
    """Link helper for the Vertex AI model list page."""

    name = "Model List"
    key = "models_conf"
    format_str = VERTEX_AI_MODEL_LIST_LINK

    @staticmethod
    def persist(context: "Context", task_instance):
        # Only the project id is needed to build the list URL.
        link_params = {"project_id": task_instance.project_id}
        task_instance.xcom_push(context=context, key=VertexAIModelListLink.key, value=link_params)
class VertexAIModelExportLink(BaseGoogleLink):
    """Link helper for the GCS location of an exported Vertex AI model."""

    name = "Export Model"
    key = "export_conf"
    format_str = VERTEX_AI_MODEL_EXPORT_LINK

    @staticmethod
    def extract_bucket_name(config):
        """Return the output URI prefix with any leading 'gs://' stripped."""
        uri_prefix = config["artifact_destination"]["output_uri_prefix"]
        return uri_prefix.rpartition("gs://")[-1]

    @staticmethod
    def persist(context: "Context", task_instance):
        bucket = VertexAIModelExportLink.extract_bucket_name(task_instance.output_config)
        link_params = {
            "project_id": task_instance.project_id,
            "model_id": task_instance.model_id,
            "bucket_name": bucket,
        }
        task_instance.xcom_push(context=context, key=VertexAIModelExportLink.key, value=link_params)
class VertexAITrainingLink(BaseGoogleLink):
    """Link helper for a Vertex AI training job page."""

    name = "Vertex AI Training"
    key = "training_conf"
    format_str = VERTEX_AI_TRAINING_LINK

    @staticmethod
    def persist(context: "Context", task_instance, training_id: str):
        link_params = {
            "training_id": training_id,
            "region": task_instance.region,
            "project_id": task_instance.project_id,
        }
        task_instance.xcom_push(context=context, key=VertexAITrainingLink.key, value=link_params)
class VertexAITrainingPipelinesLink(BaseGoogleLink):
    """Link helper for the Vertex AI training-pipelines list page."""

    name = "Vertex AI Training Pipelines"
    key = "pipelines_conf"
    format_str = VERTEX_AI_TRAINING_PIPELINES_LINK

    @staticmethod
    def persist(context: "Context", task_instance):
        link_params = {"project_id": task_instance.project_id}
        task_instance.xcom_push(context=context, key=VertexAITrainingPipelinesLink.key, value=link_params)
class VertexAIDatasetLink(BaseGoogleLink):
    """Link helper for a Vertex AI dataset's analyze page."""

    name = "Dataset"
    key = "dataset_conf"
    format_str = VERTEX_AI_DATASET_LINK

    @staticmethod
    def persist(context: "Context", task_instance, dataset_id: str):
        link_params = {
            "dataset_id": dataset_id,
            "region": task_instance.region,
            "project_id": task_instance.project_id,
        }
        task_instance.xcom_push(context=context, key=VertexAIDatasetLink.key, value=link_params)
class VertexAIDatasetListLink(BaseGoogleLink):
    """Link helper for the Vertex AI dataset list page."""

    name = "Dataset List"
    key = "datasets_conf"
    format_str = VERTEX_AI_DATASET_LIST_LINK

    @staticmethod
    def persist(context: "Context", task_instance):
        link_params = {"project_id": task_instance.project_id}
        task_instance.xcom_push(context=context, key=VertexAIDatasetListLink.key, value=link_params)
class VertexAIHyperparameterTuningJobListLink(BaseGoogleLink):
    """Link helper for the Vertex AI hyperparameter-tuning-jobs list page."""

    name = "Hyperparameter Tuning Job List"
    key = "hyperparameter_tuning_jobs_conf"
    format_str = VERTEX_AI_HYPERPARAMETER_TUNING_JOB_LIST_LINK

    @staticmethod
    def persist(context: "Context", task_instance):
        link_params = {"project_id": task_instance.project_id}
        task_instance.xcom_push(
            context=context,
            key=VertexAIHyperparameterTuningJobListLink.key,
            value=link_params,
        )
class VertexAIBatchPredictionJobLink(BaseGoogleLink):
    """Link helper for a Vertex AI batch-prediction job page."""

    name = "Batch Prediction Job"
    key = "batch_prediction_job_conf"
    format_str = VERTEX_AI_BATCH_PREDICTION_JOB_LINK

    @staticmethod
    def persist(context: "Context", task_instance, batch_prediction_job_id: str):
        link_params = {
            "batch_prediction_job_id": batch_prediction_job_id,
            "region": task_instance.region,
            "project_id": task_instance.project_id,
        }
        task_instance.xcom_push(context=context, key=VertexAIBatchPredictionJobLink.key, value=link_params)
class VertexAIBatchPredictionJobListLink(BaseGoogleLink):
    """Link helper for the Vertex AI batch-predictions list page."""

    name = "Batch Prediction Job List"
    key = "batch_prediction_jobs_conf"
    format_str = VERTEX_AI_BATCH_PREDICTION_JOB_LIST_LINK

    @staticmethod
    def persist(context: "Context", task_instance):
        link_params = {"project_id": task_instance.project_id}
        task_instance.xcom_push(
            context=context,
            key=VertexAIBatchPredictionJobListLink.key,
            value=link_params,
        )
class VertexAIEndpointLink(BaseGoogleLink):
    """Link helper for a Vertex AI endpoint page."""

    name = "Endpoint"
    key = "endpoint_conf"
    format_str = VERTEX_AI_ENDPOINT_LINK

    @staticmethod
    def persist(context: "Context", task_instance, endpoint_id: str):
        link_params = {
            "endpoint_id": endpoint_id,
            "region": task_instance.region,
            "project_id": task_instance.project_id,
        }
        task_instance.xcom_push(context=context, key=VertexAIEndpointLink.key, value=link_params)
class VertexAIEndpointListLink(BaseGoogleLink):
    """Link helper for the Vertex AI endpoint list page."""

    name = "Endpoint List"
    key = "endpoints_conf"
    format_str = VERTEX_AI_ENDPOINT_LIST_LINK

    @staticmethod
    def persist(context: "Context", task_instance):
        link_params = {"project_id": task_instance.project_id}
        task_instance.xcom_push(context=context, key=VertexAIEndpointListLink.key, value=link_params)
| {
"content_hash": "ef7a1b34bc1d068c32912e2667c62af8",
"timestamp": "",
"source": "github",
"line_count": 306,
"max_line_length": 109,
"avg_line_length": 29.30065359477124,
"alnum_prop": 0.6127593129600714,
"repo_name": "Acehaidrey/incubator-airflow",
"id": "910049c81a5a07024102d60c3c6959e4493e0d64",
"size": "9752",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "airflow/providers/google/cloud/links/vertex_ai.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25785"
},
{
"name": "Dockerfile",
"bytes": "76693"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "164512"
},
{
"name": "JavaScript",
"bytes": "236992"
},
{
"name": "Jinja",
"bytes": "37155"
},
{
"name": "Jupyter Notebook",
"bytes": "2929"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "21727510"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "495253"
},
{
"name": "TypeScript",
"bytes": "326556"
}
],
"symlink_target": ""
} |
"""Main module."""
import time
import threading
class SingletonMixin(object):
    """Mixin giving a class a lazily-created shared instance via instance().

    Uses double-checked locking so the lock is only contended while the
    instance has not been created yet.
    """
    __singleton_lock = threading.Lock()
    __singleton_instance = None

    @classmethod
    def instance(cls):
        """Return the shared instance, creating it on first use."""
        if cls.__singleton_instance:
            return cls.__singleton_instance
        with cls.__singleton_lock:
            # Re-check inside the lock: another thread may have won the race.
            if not cls.__singleton_instance:
                cls.__singleton_instance = cls()
        return cls.__singleton_instance
class Counter(SingletonMixin):
    """Thread-safe incrementing counter (singleton via SingletonMixin)."""
    counter = 0
    __lock = threading.Lock()

    def incr(self):
        """Atomically increment and return the counter."""
        with self.__lock:
            self.counter += 1
            return self.counter


# Module-level counter supplying the sequence bits of generated ids.
counter = Counter()


def gen_guid(shard_id=1):
    """Build a 64-bit id: 41-bit ms timestamp | 13-bit shard | 10-bit sequence."""
    sequence = counter.incr() % 1024  # low 10 bits (2**10 == 1024)
    timestamp_ms = int(time.time() * 1000)
    return (timestamp_ms << 23) | (shard_id << 10) | sequence
| {
"content_hash": "e2c33b6c2df287732d51e2dc9c14ddb3",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 52,
"avg_line_length": 19.75609756097561,
"alnum_prop": 0.571604938271605,
"repo_name": "yoophi/guid-server",
"id": "096c442a8ca950681a1ae05e034b1ad1eac75c9f",
"size": "810",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "fluffy_id/fluffy_id.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3216"
}
],
"symlink_target": ""
} |
import sys
from collections import namedtuple
from avroknife.error import error
class EqualitySelection:
    """Specification of a desired value assigned to a key.

    The key can be nested, e.g. 'opt1.opt2'.
    """
    def __init__(self, string_):
        """
        Args:
            string_: a string in a form of "opt1.opt2=val"
        """
        # Fix: split on the first '=' only, so values that themselves
        # contain '=' (e.g. "k=a=b") no longer raise ValueError.
        key, value = string_.split('=', 1)
        self.__key = key
        self.__value = value

    def get_key_parts(self):
        """Returns:
            a list of all nesting components of the key
        """
        return self.__key.split('.')

    def get_value(self):
        """Returns:
            value assigned to the key
        """
        return self.__value
class PositionWrtRange(object):
    """Where a number sits relative to a Range."""
    SMALLER = 1
    INSIDE = 2
    LARGER = 3


class Range:
    """Numerical range parsed from a "lo-hi" style string."""

    def __init__(self, string):
        """
        Args:
            string: range given in format, e.g., 1-6, -6, 1-. The string
                can be also set to None, which means that range is
                (-infinity, infinity).
        """
        if string is None:
            self.range_ = [None, None]
            return
        parts = string.split("-")
        if len(parts) > 2:
            raise Exception(
                "Too many elements in range specification '{}'.".format(string))
        if len(parts) == 1:
            # A single number denotes the degenerate range [n, n].
            bound = int(parts[0])
            self.range_ = [bound, bound]
        else:
            # An empty side means that bound is unbounded (None).
            self.range_ = [int(part) if len(part) != 0 else None for part in parts]

    def get_position(self, number):
        """Return the PositionWrtRange of the given number."""
        lower, upper = self.range_
        if lower is not None and number < lower:
            return PositionWrtRange.SMALLER
        if upper is not None and number > upper:
            return PositionWrtRange.LARGER
        return PositionWrtRange.INSIDE
Record = namedtuple('Record', ['index', 'content'], verbose=False)
class RecordSelector:
    """Allows to iterate over records according to certain selection criteria

    NOTE(review): this class targets Python 2 (sys.maxint, unicode,
    StopIteration raised inside generators) — it will not run unmodified
    on Python 3.
    """
    def __init__(self, range_=None, selection=None, limit=None):
        """
        Args:
            range_: a Range object. It defines range of accepted record
                indexes.
            selection: an EqualitySelection object. It defines values of
                accepted properties of records.
            limit: specifies that only this number of records matching all other
                constraints should be returned.
        """
        if range_ is None:
            # No range means "accept every index": (-infinity, infinity).
            range_ = Range(None)
        self.__range = range_
        self.__selection = selection
        if limit is None:
            # Effectively unlimited (sys.maxint exists only on Python 2).
            limit = sys.maxint
        self.__limit = limit

    @staticmethod
    def __records_in_range(data_store, range_):
        """
        Generates records in the specified range from the given data store

        Args:
            data_store: a DataStore object with records
            range_: a Range object specifying the desired records
        Returns:
            records from the given range
        """
        for index, content in enumerate(data_store):
            position = range_.get_position(index)
            if position == PositionWrtRange.SMALLER:
                # Before the range: keep scanning.
                pass
            elif position == PositionWrtRange.INSIDE:
                yield Record(index, content)
            else: ## position == PositionWrtRange.LARGER:
                # Past the range: end the generator. NOTE(review): raising
                # StopIteration in a generator is a RuntimeError under
                # PEP 479 (Python 3.7+); valid on the Python 2 this targets.
                raise StopIteration

    @staticmethod
    def __record_fulfills_condition(record, selection):
        """Checks whether a field in Avro records fulfills a condition

        Args:
            record: an Avro record as a Python dictionary
            selection: an EqualitySelection object (optional)

        Returns:
            True if a record fulfill the condition
            False otherwise
        """
        if selection is None:
            return True
        key_parts = selection.get_key_parts()
        value = selection.get_value()
        content = record.content
        try:
            # Walk down the nested dictionaries, one key component at a time.
            for key_part in key_parts:
                content = content[key_part]
        except KeyError:
            # Report the bad key, then re-raise the KeyError for the caller.
            error("Specified key not found in the schema: {}"\
                .format(".".join(key_parts)))
            raise
        # A literal "null" selection value matches a None field.
        if content is None and value=="null":
            return True
        # Compare as text (unicode() is Python 2 only).
        if unicode(content) == unicode(value):
            return True
        else:
            return False

    def get_records(self, data_store):
        """
        Args:
            data_store: a DataStore object with records

        Returns:
            Record objects, at most `limit` of them, that are inside the
            index range and fulfill the selection condition.
        """
        records = self.__records_in_range(data_store, self.__range)
        count = 0
        for record in records:
            if count < self.__limit:
                if self.__record_fulfills_condition(record, self.__selection):
                    count = count+1
                    yield record
            else:
                # Limit reached (see PEP 479 note above about StopIteration).
                raise StopIteration
| {
"content_hash": "d6d0751ca6594205389671ba5174ebd7",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 84,
"avg_line_length": 31.594444444444445,
"alnum_prop": 0.519606119219272,
"repo_name": "CeON/avroknife",
"id": "ee65c38bdb332c72e71da5d95fda4efd15f8fcad",
"size": "6280",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "avroknife/record_selector.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2177"
},
{
"name": "Python",
"bytes": "87765"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds the `admin_kaeufe` TextField
    # (empty string default) to the `buechlein` model.
    dependencies = [
        ('Scholien', '0003_auto_20171027_1709'),
    ]
    operations = [
        migrations.AddField(
            model_name='buechlein',
            name='admin_kaeufe',
            field=models.TextField(default=''),
        ),
    ]
| {
"content_hash": "b9e8883051712fbbfc458c52745668a3",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 48,
"avg_line_length": 21.27777777777778,
"alnum_prop": 0.5900783289817232,
"repo_name": "valuehack/scholarium.at",
"id": "2839b18208338d7c99714d00d985e27224f5f364",
"size": "456",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Scholien/migrations/0004_buechlein_admin_kaeufe.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "6190"
},
{
"name": "CSS",
"bytes": "144109"
},
{
"name": "HTML",
"bytes": "109787"
},
{
"name": "JavaScript",
"bytes": "702"
},
{
"name": "Python",
"bytes": "216135"
}
],
"symlink_target": ""
} |
import logging
import unittest
class TestCloudLoggingHandler(unittest.TestCase):
    """Unit tests for CloudLoggingHandler's constructor and emit()."""
    PROJECT = "PROJECT"
    @staticmethod
    def _get_target_class():
        from google.cloud.logging.handlers.handlers import CloudLoggingHandler
        return CloudLoggingHandler
    def _make_one(self, *args, **kw):
        """Instantiate the class under test."""
        return self._get_target_class()(*args, **kw)
    def test_ctor(self):
        client = _Client(self.PROJECT)
        handler = self._make_one(client, transport=_Transport)
        self.assertEqual(handler.client, client)
    def test_emit(self):
        from google.cloud.logging.logger import _GLOBAL_RESOURCE
        client = _Client(self.PROJECT)
        handler = self._make_one(
            client, transport=_Transport, resource=_GLOBAL_RESOURCE
        )
        logname = "loggername"
        message = "hello world"
        # Fix: LogRecord's second argument is the numeric level; the module
        # object `logging` was previously passed here by mistake.
        record = logging.LogRecord(
            logname, logging.INFO, None, None, message, None, None
        )
        handler.emit(record)
        self.assertEqual(
            handler.transport.send_called_with,
            (record, message, _GLOBAL_RESOURCE, None),
        )
class TestSetupLogging(unittest.TestCase):
    """Tests for the setup_logging() helper."""
    def setUp(self):
        # Snapshot the root logger's handlers so each test starts clean.
        self._handlers_cache = logging.getLogger().handlers[:]
    def tearDown(self):
        # cleanup handlers: restore the snapshot taken in setUp().
        logging.getLogger().handlers = self._handlers_cache[:]
    def _call_fut(self, handler, excludes=None):
        from google.cloud.logging.handlers.handlers import setup_logging
        if excludes:
            return setup_logging(handler, excluded_loggers=excludes)
        return setup_logging(handler)
    def test_setup_logging(self):
        handler = _Handler(logging.INFO)
        self._call_fut(handler)
        self.assertIn(handler, logging.getLogger().handlers)
    def test_setup_logging_excludes(self):
        included_name = "includeme"
        excluded_name = "excludeme"
        handler = _Handler(logging.INFO)
        self._call_fut(handler, (excluded_name,))
        self.assertTrue(logging.getLogger(included_name).propagate)
        excluded_logger = logging.getLogger(excluded_name)
        self.assertNotIn(handler, excluded_logger.handlers)
        self.assertFalse(excluded_logger.propagate)
class _Handler(object):
    # Minimal stand-in for logging.Handler: stores a level and provides the
    # no-op lock methods the logging machinery calls.
    def __init__(self, level):
        self.level = level
    def acquire(self):
        pass # pragma: NO COVER
    def release(self):
        pass # pragma: NO COVER
class _Client(object):
    # Stub client that only records the project name it was created with.
    def __init__(self, project):
        self.project = project
class _Transport(object):
    # Stub transport capturing the arguments of the last send() call so tests
    # can assert on what the handler emitted.
    def __init__(self, client, name):
        pass
    def send(self, record, message, resource, labels=None):
        self.send_called_with = (record, message, resource, labels)
| {
"content_hash": "c3b725a8dc0f1e5434c2fcd90a68ff6a",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 85,
"avg_line_length": 28.44,
"alnum_prop": 0.6427566807313643,
"repo_name": "dhermes/google-cloud-python",
"id": "ff738046d892f952f0034f60c08767667b74ac40",
"size": "3419",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "logging/tests/unit/handlers/test_handlers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "936"
},
{
"name": "Makefile",
"bytes": "1779"
},
{
"name": "Python",
"bytes": "13118304"
},
{
"name": "Shell",
"bytes": "8606"
}
],
"symlink_target": ""
} |
"""Factorial Trailing Zeroes
Given an integer n, return the number of trailing zeroes in n!.
Note: Your solution should be in logarithmic time complexity.
Created on Tue Dec 30 23:17:20 2014
@author: shaotch
"""
class Solution:
    # @return an integer
    def trailingZeroes(self, n):
        """Count trailing zeroes of n! in O(log n) time.

        Sums floor(n/5) + floor(n/25) + ... (factors of 5 dominate factors
        of 2). Uses floor division so the result is correct on Python 3 as
        well: the original `n /= 5` becomes float division there and yields
        wrong results.
        """
        r = 0
        while n > 0:
            n //= 5
            r += n
        return r
# Basic smoke test; runs only when the file is executed directly.
if __name__ == "__main__":
    s = Solution()
assert 7 == s.trailingZeroes(32) | {
"content_hash": "3398a91ab1aa8afb8a037af69a03c989",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 63,
"avg_line_length": 19.375,
"alnum_prop": 0.5935483870967742,
"repo_name": "au9ustine/org.au9ustine.puzzles.leetcode",
"id": "24c24a91f05da65211e474ee495265ea71ffd7a0",
"size": "1075",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main/py/factorial-trailing-zeroes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "12365"
},
{
"name": "Java",
"bytes": "65445"
},
{
"name": "Python",
"bytes": "17702"
},
{
"name": "SQLPL",
"bytes": "1603"
},
{
"name": "Shell",
"bytes": "3142"
}
],
"symlink_target": ""
} |
from pathlib import Path
import pytest
from _pytest.tmpdir import TempPathFactory
from rasa.core.agent import Agent
from rasa.core.policies.policy import Policy
from rasa.engine.storage.local_model_storage import LocalModelStorage
from rasa.shared.nlu.training_data.formats import RasaYAMLReader
from rasa.utils.tensorflow.constants import EPOCHS
from typing import Any, Dict, List, Tuple, Text, Union, Optional
import rasa.model_training
import rasa.shared.utils.io
import rasa.engine.recipes.default_components
# Per-component config overrides applied when building test pipelines: train
# for a single epoch to keep tests cheap, and pin the LM featurizer to a
# fixed, known model so runs are reproducible.
COMPONENTS_TEST_PARAMS = {
    "DIETClassifier": {EPOCHS: 1},
    "ResponseSelector": {EPOCHS: 1},
    "LanguageModelFeaturizer": {
        "model_name": "bert",
        "model_weights": "bert-base-uncased",
    },
}
def get_test_params_for_component(component: Text) -> Dict[Text, Union[Text, int]]:
    """Return the test-specific config overrides for *component* ({} if none)."""
    # dict.get does a single lookup instead of a membership test followed by
    # a second lookup.
    return COMPONENTS_TEST_PARAMS.get(component, {})
def as_pipeline(*components) -> List[Dict[Text, Dict]]:
    """Normalize components to config dicts, merging in test overrides.

    Strings become ``{"name": <str>, **overrides}``; dicts pass through
    unchanged.
    """
    pipeline = []
    for component in components:
        if isinstance(component, str):
            entry = {"name": component}
            entry.update(get_test_params_for_component(component))
            pipeline.append(entry)
        else:
            pipeline.append(component)
    return pipeline
def pipelines_for_tests() -> List[Tuple[Text, List[Dict[Text, Any]]]]:
    """Return (language, pipeline) pairs used in train-persist-load tests."""
    # these pipelines really are just for testing
    # every component should be in here so train-persist-load-use cycle can be
    # tested they still need to be in a useful order - hence we can not simply
    # generate this automatically.
    # Create separate test pipelines for dense featurizers
    # because they can't co-exist in the same pipeline together,
    # as their tokenizers break the incoming message into different number of tokens.
    # first is language followed by list of components
    return [
        ("en", as_pipeline("KeywordIntentClassifier")),
        (
            "en",
            as_pipeline(
                "WhitespaceTokenizer",
                "RegexFeaturizer",
                "LexicalSyntacticFeaturizer",
                "CountVectorsFeaturizer",
                "CRFEntityExtractor",
                "DucklingEntityExtractor",
                "DIETClassifier",
                "ResponseSelector",
                "EntitySynonymMapper",
            ),
        ),
        (
            "en",
            as_pipeline(
                {"name": "SpacyNLP", "model": "en_core_web_md"},
                "SpacyTokenizer",
                "SpacyFeaturizer",
                "SpacyEntityExtractor",
                "SklearnIntentClassifier",
            ),
        ),
        (
            "en",
            as_pipeline(
                "WhitespaceTokenizer", "LanguageModelFeaturizer", "DIETClassifier",
            ),
        ),
        ("fallback", as_pipeline("KeywordIntentClassifier", "FallbackClassifier")),
    ]
def pipelines_for_non_windows_tests() -> List[Tuple[Text, List[Dict[Text, Any]]]]:
    """Return (language, pipeline) pairs for components unavailable on Windows."""
    # these templates really are just for testing
    # because some of the components are not available on Windows, we specify pipelines
    # containing them separately
    # first is language followed by list of components
    return [
        (
            "en",
            as_pipeline(
                {"name": "SpacyNLP", "model": "en_core_web_md"},
                "SpacyTokenizer",
                "SpacyFeaturizer",
                "DIETClassifier",
            ),
        ),
        (
            "en",
            as_pipeline(
                "MitieNLP",
                "MitieTokenizer",
                "MitieFeaturizer",
                "MitieIntentClassifier",
                "RegexEntityExtractor",
            ),
        ),
        (
            "zh",
            as_pipeline(
                "MitieNLP", "JiebaTokenizer", "MitieFeaturizer", "MitieEntityExtractor"
            ),
        ),
    ]
def test_all_components_are_in_at_least_one_test_pipeline():
    """There is a template that includes all components to
    test the train-persist-load-use cycle. Ensures that
    really all components are in there.
    """
    all_pipelines = pipelines_for_tests() + pipelines_for_non_windows_tests()
    all_components = [c["name"] for _, p in all_pipelines for c in p]
    all_registered_components = (
        rasa.engine.recipes.default_components.DEFAULT_COMPONENTS
    )
    # Policies are core (dialogue) components; only NLU components need to be
    # covered by these pipelines.
    all_registered_nlu_components = [
        c for c in all_registered_components if not issubclass(c, Policy)
    ]
    for cls in all_registered_nlu_components:
        if "convert" in cls.__name__.lower():
            # TODO
            # skip ConveRTFeaturizer as the ConveRT model is not
            # publicly available anymore
            # (see https://github.com/RasaHQ/rasa/issues/6806)
            continue
        assert (
            cls.__name__ in all_components
        ), "`all_components` template is missing component."
@pytest.mark.timeout(600, func_only=True)
@pytest.mark.parametrize("language, pipeline", pipelines_for_tests())
async def test_train_persist_load_parse(
    language: Optional[Text],
    pipeline: List[Dict],
    tmp_path: Path,
    nlu_as_json_path: Text,
):
    """Train an NLU model with *pipeline*, persist it, reload it and parse."""
    config_file = tmp_path / "config.yml"
    rasa.shared.utils.io.dump_obj_as_json_to_file(
        config_file, {"pipeline": pipeline, "language": language}
    )
    persisted_path = rasa.model_training.train_nlu(
        str(config_file), nlu_as_json_path, output=str(tmp_path),
    )
    assert Path(persisted_path).is_file()
    agent = Agent.load(persisted_path)
    assert agent.processor
    assert agent.is_ready()
    assert await agent.parse_message("Rasa is great!") is not None
@pytest.mark.timeout(600, func_only=True)
@pytest.mark.parametrize("language, pipeline", pipelines_for_non_windows_tests())
@pytest.mark.skip_on_windows
async def test_train_persist_load_parse_non_windows(
    language, pipeline, tmp_path, nlu_as_json_path: Text
):
    """Non-Windows variant of test_train_persist_load_parse."""
    # test_train_persist_load_parse is a coroutine function: calling it
    # without awaiting just creates a coroutine object that never runs, so
    # the previous synchronous wrapper silently tested nothing.
    await test_train_persist_load_parse(language, pipeline, tmp_path, nlu_as_json_path)
def test_train_model_empty_pipeline(nlu_as_json_path: Text, tmp_path: Path):
    """Training with an empty pipeline must raise a ValueError."""
    config_file = tmp_path / "config.yml"
    rasa.shared.utils.io.dump_obj_as_json_to_file(config_file, {"pipeline": [],})
    with pytest.raises(ValueError):
        rasa.model_training.train_nlu(
            str(config_file), nlu_as_json_path, output=str(tmp_path),
        )
def test_handles_pipeline_with_non_existing_component(
    tmp_path: Path, pretrained_embeddings_spacy_config: Dict, nlu_as_json_path: Text
):
    """An unknown component name in the pipeline must fail with a clear error."""
    pretrained_embeddings_spacy_config["pipeline"].append(
        {"name": "my_made_up_component"}
    )
    config_file = tmp_path / "config.yml"
    rasa.shared.utils.io.dump_obj_as_json_to_file(
        config_file, pretrained_embeddings_spacy_config
    )
    with pytest.raises(
        Exception, match="Can't load class for name 'my_made_up_component'",
    ):
        rasa.model_training.train_nlu(
            str(config_file), nlu_as_json_path, output=str(tmp_path),
        )
def test_train_model_training_data_persisted(
    tmp_path: Path, nlu_as_json_path: Text, tmp_path_factory: TempPathFactory
):
    """With persist_nlu_training_data=True the archive contains the data."""
    config_file = tmp_path / "config.yml"
    rasa.shared.utils.io.dump_obj_as_json_to_file(
        config_file,
        {"pipeline": [{"name": "KeywordIntentClassifier"}], "language": "en"},
    )
    persisted_path = rasa.model_training.train_nlu(
        str(config_file),
        nlu_as_json_path,
        output=str(tmp_path),
        persist_nlu_training_data=True,
    )
    assert Path(persisted_path).is_file()
    model_dir = tmp_path_factory.mktemp("loaded")
    # Unpack the archive and check the training data made it in.
    storage, _ = LocalModelStorage.from_model_archive(model_dir, Path(persisted_path))
    nlu_data_dir = model_dir / "nlu_training_data_provider"
    assert nlu_data_dir.is_dir()
    assert not RasaYAMLReader().read(nlu_data_dir / "training_data.yml").is_empty()
def test_train_model_no_training_data_persisted(
    tmp_path: Path, nlu_as_json_path: Text, tmp_path_factory: TempPathFactory
):
    """With persist_nlu_training_data=False the archive omits the data."""
    config_file = tmp_path / "config.yml"
    rasa.shared.utils.io.dump_obj_as_json_to_file(
        config_file,
        {"pipeline": [{"name": "KeywordIntentClassifier"}], "language": "en"},
    )
    persisted_path = rasa.model_training.train_nlu(
        str(config_file),
        nlu_as_json_path,
        output=str(tmp_path),
        persist_nlu_training_data=False,
    )
    assert Path(persisted_path).is_file()
    model_dir = tmp_path_factory.mktemp("loaded")
    storage, _ = LocalModelStorage.from_model_archive(model_dir, Path(persisted_path))
    nlu_data_dir = model_dir / "nlu_training_data_provider"
    assert not nlu_data_dir.is_dir()
| {
"content_hash": "2ce090ca0f60069e9f55826fb540e620",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 88,
"avg_line_length": 32.34090909090909,
"alnum_prop": 0.6223940032794566,
"repo_name": "RasaHQ/rasa_nlu",
"id": "ecc544a0354ed4e1ed8ce9456faf1020638c7a08",
"size": "8538",
"binary": false,
"copies": "1",
"ref": "refs/heads/emptystring_10504",
"path": "tests/nlu/test_train.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "705"
},
{
"name": "HTML",
"bytes": "3462"
},
{
"name": "Makefile",
"bytes": "1044"
},
{
"name": "Python",
"bytes": "1467067"
},
{
"name": "Shell",
"bytes": "941"
}
],
"symlink_target": ""
} |
from thriftpy.protocol import TCyBinaryProtocolFactory
from thriftpy.thrift import TClient
from thriftpy.transport import (
TCyBufferedTransportFactory,
TSocket)
# Factories for the (Cython-accelerated) binary protocol and buffered
# transport used by every client built in this module.
PROTO_FACTORY = TCyBinaryProtocolFactory
TRANS_FACTORY = TCyBufferedTransportFactory
def make_client(service, host, port, timeout=None):
    """
    :param service: thrift service type instance
    :param timeout: timeout after which the client would expire; defaults to
        120 * 1000 (NOTE(review): the magnitude suggests milliseconds even
        though the original doc said seconds — confirm against
        TSocket.set_timeout)
    :return: a client proxy instance that can call the remote api
             directly, without using a context directly
    """
    effective_timeout = 120 * 1000 if timeout is None else timeout
    return ClientProxy(service, host, port, effective_timeout)
def _wrapper_api(api, transport):
def wrapper(*args, **kwargs):
try:
transport.open()
return api(*args, **kwargs)
finally:
transport.close()
return wrapper
class ClientProxy(object):
    """Proxy that builds a fresh connection per attribute access.

    Every attribute lookup constructs a new socket/transport/protocol stack
    and returns the matching API callable, wrapped so the transport is opened
    before and closed after the call.
    """
    def __init__(self, service, host, port, timeout):
        self.service = service
        self.host = host
        self.port = port
        self.timeout = timeout
    def __getattr__(self, item):
        sock = TSocket(self.host, self.port)
        sock.set_timeout(self.timeout)
        trans = TRANS_FACTORY().get_transport(sock)
        proto = PROTO_FACTORY().get_protocol(trans)
        thrift_client = TClient(self.service, proto)
        return _wrapper_api(getattr(thrift_client, item), trans)
| {
"content_hash": "d015f685dec3bdda205ba548795de6fa",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 65,
"avg_line_length": 29.448979591836736,
"alnum_prop": 0.6645876645876646,
"repo_name": "eleme/archer",
"id": "083809e35edd044ae0cdcff6650f2bfb3833f572",
"size": "1469",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "archer/helper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38053"
},
{
"name": "Thrift",
"bytes": "299"
}
],
"symlink_target": ""
} |
from rest_framework import serializers
from stats.models import *
from collections import OrderedDict, Counter
import json
import stats
# Directory holding this app's bundled static data files.
static_path = stats.__path__[0]+'/static/'
# Loaded once at import time. NOTE(review): keys appear to be lowercase city
# names mapping to {'lat': ..., 'lng': ...} (see usage below) — confirm
# against the JSON file.
with open(static_path+'city_location.json', 'r') as f:
    map_locations = json.load(f)
def cast_value(s):
    """Best-effort cast of a unicode string to int, then float.

    Non-unicode values (already-typed numbers, etc.) pass through untouched;
    strings that parse as neither int nor float are returned unchanged.
    (`unicode` makes this module Python 2 only.)
    """
    if type(s) != unicode:
        return s
    for caster in (int, float):
        try:
            return caster(s)
        except ValueError:
            continue
    return s
class SurveyProjectSerializer(serializers.ModelSerializer):
    # Serializes per-project survey statistics; `project` is rendered via its
    # human-readable display label.
    project = serializers.CharField(source='get_project_display')
    class Meta:
        model = SurveyProject
        fields = (
            'project',
            'total_n_classifications',
            'project_duration_1_days',
            'project_duration_2_days',
            'total_n_sessions',
            'max_classifications_per_session',
            'mean_classifications_per_session',
            'project_duration_hours',
            'total_n_unique_days',
            'mean_duration_session_hours',
            'longest_active_session_hours',
            'longest_inactive_session_hours',
            'mean_duration_classification_hours'
        )
class QuestionCategoryField(serializers.RelatedField):
    # Renders a related category as its display label.
    def to_representation(self, value):
        return '{0}'.format(value.get_category_display())
class QuestionTypeField(serializers.RelatedField):
    # Renders a related question kind as its display label.
    def to_representation(self, value):
        return '{0}'.format(value.get_kind_display())
class QuestionContextField(serializers.RelatedField):
    # Renders a related context object via its `context` attribute.
    def to_representation(self, value):
        return '{0}'.format(value.context)
class QuestionSerializer(serializers.ModelSerializer):
    # Serializes a Question with its related category/kind/context rendered
    # as display strings.
    category = QuestionCategoryField(read_only=True)
    kind = QuestionTypeField(read_only=True)
    context = QuestionContextField(read_only=True)
    class Meta:
        model = Question
        fields = ('number', 'category', 'kind', 'context', 'question_text')
class AnswerEthnicitySerializer(serializers.ModelSerializer):
    # `answer` is the display label; `specify` holds free-text details.
    answer = serializers.CharField(source='get_answer_display')
    class Meta:
        model = AnswerEthnicity
        fields = ('answer', 'specify')
class AnswerQuizSerializer(serializers.ModelSerializer):
    # Quiz result: raw/percent scores plus a display label for confidence.
    confidence = serializers.CharField(source='get_confidence_display')
    class Meta:
        model = AnswerQuiz
        fields = ('score', 'percent', 'maxScore', 'confidence')
class AnswerField(serializers.RelatedField):
    # Renders an answer relation as its display label when the model defines
    # one, falling back to the raw `.answer` attribute otherwise.
    def to_representation(self, value):
        try:
            return '{0}'.format(value.get_answer_display())
        except AttributeError:
            return value.answer
class AnswerSerializer(serializers.ModelSerializer):
    # Serializes an Answer: whichever of the type-specific answer* relations
    # is populated gets collapsed into a single 'answer' output key.
    answerOpen = AnswerField(read_only=True)
    answerBool = AnswerField(read_only=True)
    answerAD = AnswerField(read_only=True)
    answerGender = AnswerField(read_only=True)
    answerEdu = AnswerField(read_only=True)
    answerEthnicity = AnswerEthnicitySerializer()
    answerQuiz = AnswerQuizSerializer()
    class Meta:
        model = Answer
        fields = ('answerOpen', 'answerBool', 'answerAD', 'answerGender', 'answerEdu', 'answerEthnicity', 'answerQuiz', 'question')
    def to_representation(self, instance):
        """Build the output dict, renaming any populated answer* field to 'answer'."""
        ret = OrderedDict()
        fields = self._readable_fields
        for field in fields:
            try:
                attribute = field.get_attribute(instance)
            except SkipField:
                # NOTE(review): SkipField is not among this module's visible
                # imports; if this except is ever hit it would raise
                # NameError — confirm (likely needs serializers.SkipField).
                continue
            if attribute is not None:
                represenation = field.to_representation(attribute)
                if 'answer' in field.field_name:
                    ret['answer'] = represenation
                else:
                    ret[field.field_name] = represenation
        return ret
# Maps a Question kind code to the name of the Answer attribute that holds
# the response for that question type (used via __getattribute__ below).
answer_lut = {
    'OP': 'answerOpen',
    'QU': 'answerQuiz',
    'YN': 'answerBool',
    'AD': 'answerAD',
    'GN': 'answerGender',
    'ET': 'answerEthnicity',
    'ED': 'answerEdu'
}
class QuestionCountSerializer(serializers.ModelSerializer):
    # Serializes a Question together with aggregated answer data shaped for
    # its plot type: 'P' pie (value counts), 'H' histogram (raw values),
    # 'Q' quiz (score/confidence pairs), otherwise a geographic map.
    category = QuestionCategoryField(read_only=True)
    kind = QuestionTypeField(read_only=True)
    context = QuestionContextField(read_only=True)
    class Meta:
        model = Question
    def to_representation(self, instance):
        """Serialize the question fields plus per-plot-type 'count'/'results'."""
        ret = OrderedDict()
        fields = self._readable_fields
        for field in fields:
            try:
                attribute = field.get_attribute(instance)
            except SkipField:
                # NOTE(review): SkipField is not among this module's visible
                # imports — confirm it resolves at runtime.
                continue
            if attribute is not None:
                if field.field_name != 'answer_set':
                    ret[field.field_name] = field.to_representation(attribute)
        answer_set = instance.answer_set.all()
        ret['count'] = len(answer_set)
        plot_type = instance.plot_type
        if plot_type == 'P':
            # Pie chart: count occurrences of each (display) answer value.
            try:
                ret['results'] = Counter([ans.__getattribute__(answer_lut[instance.kind.kind]).get_answer_display() for ans in answer_set])
            except AttributeError:
                ret['results'] = Counter([ans.__getattribute__(answer_lut[instance.kind.kind]).answer for ans in answer_set])
        elif plot_type == 'H':
            # Histogram: raw values cast to int/float where possible.
            ret['results'] = [cast_value(ans.__getattribute__(answer_lut[instance.kind.kind]).answer) for ans in answer_set]
        elif plot_type == 'Q':
            # Quiz: paired score/confidence lists plus counts of each pair.
            confidence = [ans.__getattribute__(answer_lut[instance.kind.kind]).confidence for ans in answer_set]
            score = [ans.__getattribute__(answer_lut[instance.kind.kind]).score for ans in answer_set]
            ret['results'] = {
                'confidence': confidence,
                'scores': score,
                'confidence_map': {
                    1: 'Very unconfident',
                    2: 'Unconfident',
                    3: 'Somewhat unconfident',
                    4: 'Neither confident or unconfident',
                    5: 'Somewhat confident',
                    6: 'Confident',
                    7: 'Very confident',
                },
                'count': Counter(['{0}, {1}'.format(ans[0], ans[1]) for ans in zip(score, confidence)]),
            }
        else:
            # Map: geocode city-name answers via the module-level lookup and
            # count answers per (lat, lon, city) triple.
            lat_all = []
            lon_all = []
            city_all = []
            for ans in answer_set:
                city = ans.__getattribute__(answer_lut[instance.kind.kind]).answer.lower()
                if city in map_locations:
                    geo = map_locations[city]
                    lat_all.append(geo['lat'])
                    lon_all.append(geo['lng'])
                    city_all.append(city)
            c = Counter(zip(lat_all, lon_all, city_all))
            lat = []
            lon = []
            city = []
            count = []
            # NOTE(review): Counter.iteritems() is Python 2 only; would need
            # .items() on Python 3.
            for key, value in c.iteritems():
                lat.append(key[0])
                lon.append(key[1])
                city.append(key[2])
                count.append(value)
            ret['results'] = {
                'lat': lat,
                'lon': lon,
                'city': city,
                'count': count,
            }
        return ret
class UserSerializer(serializers.ModelSerializer):
    # Serializes a survey User: project stats, per-project classification
    # counts, and the user's answers flattened into question_<n> keys.
    survey_project = SurveyProjectSerializer()
    answer_list = AnswerSerializer(many=True)
    country = serializers.CharField(source='get_country_display')
    class Meta:
        model = User
    def to_representation(self, instance):
        """Serialize user fields, project summaries, and flattened answers."""
        ret = OrderedDict()
        fields = self._readable_fields
        for field in fields:
            try:
                attribute = field.get_attribute(instance)
            except SkipField:
                # NOTE(review): SkipField not visibly imported — confirm.
                continue
            check_for_none = attribute
            if check_for_none is None:
                ret[field.field_name] = None
            else:
                ret[field.field_name] = field.to_representation(attribute)
        project_list = instance.project_list.all()
        ret['projects'] = [project.get_project_display() for project in project_list]
        ret['projects_classification_count'] = [project.classifications for project in project_list]
        ret['home_project'] = [project.get_project_display() for project in project_list if project.home_project]
        # Flatten nested answers into top-level question_<n> keys.
        answer_list = ret.pop('answer_list')
        answer_dict = OrderedDict(('question_{0}'.format(answer['question']), cast_value(answer['answer'])) for answer in answer_list)
        ret.update(answer_dict)
        return ret
| {
"content_hash": "034d5c5e73786dc6989e335b0e4f0e59",
"timestamp": "",
"source": "github",
"line_count": 240,
"max_line_length": 139,
"avg_line_length": 34.9,
"alnum_prop": 0.583810888252149,
"repo_name": "CKrawczyk/volcrowe",
"id": "82417906e3c8b95bc5a0b4edf75b9818af7486ae",
"size": "8376",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stats/serializers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2697"
},
{
"name": "HTML",
"bytes": "353"
},
{
"name": "JavaScript",
"bytes": "64080"
},
{
"name": "Python",
"bytes": "77995"
}
],
"symlink_target": ""
} |
import os
from numpy.random import randint
import json
try:
import cPickle as pickle
except:
import pickle
import warnings
class Vocabulary(object):
    """An implementation that manages the interface between a token dataset and the
    machine learning algorithm.

    Maps tokens (strings) to dense integer ids and back. Supports freezing
    (no new tokens admitted, optionally emitting <UNK> for unknowns) and an
    optional <MASK> symbol reserved as id 0 at construction time.

    [1] Tim Vieira; https://github.com/timvieira/arsenal
    """
    def __init__(self, random_int=None, use_mask=False, mask_symbol='<MASK>',
                 file_type="json", name=None, mode=None):
        self._mapping = {}   # str -> int
        self._flip = {}      # int -> str; timv: consider using array or list
        self._i = 0          # next id to assign
        self._frozen = False
        self._growing = True
        self._random_int = random_int  # if non-zero, will randomly assign
                                       # integers (between 0 and random_int) as
                                       # index (possibly with collisions)
        self.unk_symbol = "<UNK>"
        self.mask_symbol = mask_symbol
        self.start_symbol = "<START>"
        self.emit_unks = False
        self.use_mask = use_mask
        if self.use_mask:
            self.add(self.mask_symbol)
        self.file_type = file_type
        self.name = name or "anonymous"
    def __repr__(self):
        return 'Vocabulary(size=%s,frozen=%s)' % (len(self), self._frozen)
    def freeze(self, emit_unks=False):
        """Stop admitting new symbols; optionally map unknowns to <UNK>."""
        self.emit_unks = emit_unks
        if emit_unks and self.unk_symbol not in self:
            self.add(self.unk_symbol)
        self._frozen = True
    def unfreeze(self):
        self._frozen = False
    def stop_growth(self):
        """Unknown symbols will map to None instead of getting new ids."""
        self._growing = False
    @property
    def unk_id(self):
        return self[self.unk_symbol]
    @property
    def mask_id(self):
        return self[self.mask_symbol]
    @classmethod
    def from_iterable(cls, s, *args, **kwargs):
        """Build a vocabulary from an iterable of symbols."""
        inst = cls(*args, **kwargs)
        inst.add_many(s)
        return inst
    @classmethod
    def from_nlp_data(cls, iterable):
        ''' ugly api... builds a masked vocabulary with <MASK>=0, <UNK>=1 '''
        vocab = cls()
        vocab.use_mask = True
        vocab.add(vocab.mask_symbol)
        vocab.add(vocab.unk_symbol)
        vocab.add_many(iterable)
        return vocab
    def keyset(self):
        """Set of symbols, excluding the mask symbol."""
        keys = set(self._mapping.keys())
        if self.mask_symbol in keys:
            keys.remove(self.mask_symbol)
        return keys
    def iterkeys(self):
        """Iterate symbols, skipping the unk/mask bookkeeping symbols."""
        # Iterating the dict directly is portable; dict.iterkeys() was
        # removed in Python 3.
        for k in self._mapping:
            if (k==self.unk_symbol or k==self.mask_symbol):
                continue
            else:
                yield k
    def fullkeys(self):
        """All symbols, including unk/mask."""
        return list(self._mapping.keys())
    def keys(self):
        """Symbols, excluding unk/mask."""
        return [k for k in self._mapping
                if k != self.unk_symbol and k != self.mask_symbol]
    ### items
    #### iter items, fullitems and items
    def iteritems(self):
        """Iterate (symbol, id) pairs, skipping unk/mask."""
        for k,v in self._mapping.items():
            if k==self.unk_symbol or k==self.mask_symbol:
                continue
            yield k,v
    def fullitems(self):
        return list(self._mapping.items())
    def items(self):
        """(symbol, id) pairs, excluding unk/mask."""
        return [(k, v) for k, v in self._mapping.items()
                if k != self.unk_symbol and k != self.mask_symbol]
    def values(self):
        """Ids, excluding those of unk/mask."""
        return [v for k, v in self._mapping.items()
                if k != self.unk_symbol and k != self.mask_symbol]
    def fullvalues(self):
        return list(self._mapping.values())
    def filter_generator(self, seq, emit_none=False):
        """
        Apply Vocabulary to sequence while filtering. By default, `None` is not
        emitted, so please note that the output sequence may have fewer items.
        """
        if emit_none:
            for s in seq:
                yield self[s]
        else:
            for s in seq:
                x = self[s]
                if x is not None:
                    yield x
    def filter(self, seq, *args, **kwargs):
        return list(self.filter_generator(seq, *args, **kwargs))
    def add_many(self, x):
        return [self.add(k) for k in x]
    def lookup(self, i):
        """Reverse lookup: id -> symbol (None passes through)."""
        if i is None:
            return None
        return self._flip[i]
    def lookup_many(self, x):
        for k in x:
            yield self.lookup(k)
    def __contains__(self, k):
        return k in self._mapping
    def __getitem__(self, k):
        """Symbol -> id; behavior on a miss depends on frozen/growing state."""
        try:
            return self._mapping[k]
        except KeyError:
            if self._frozen and self.emit_unks:
                return self._mapping[self.unk_symbol]
            elif self._frozen:
                raise ValueError('Vocabulary is frozen. Key "%s" not found.' % (k,))
            elif not self._growing:
                return None
            else:
                if self._random_int:
                    # Hashing-trick mode: random id, collisions possible.
                    x = self._mapping[k] = randint(0, self._random_int)
                else:
                    x = self._mapping[k] = self._i
                    self._i += 1
                self._flip[x] = k
                return x
    add = __getitem__
    def __setitem__(self, k, v):
        assert k not in self._mapping
        if self._frozen: raise ValueError("Vocabulary is frozen. Key '%s' cannot be changed")
        assert isinstance(v, int)
        self._mapping[k] = v
        self._flip[v] = k
    def __iter__(self):
        """Yield symbols in id order. (`range`, not Python-2-only `xrange`.)"""
        for i in range(len(self)):
            yield self._flip[i]
    def enum(self):
        """Yield (id, symbol) pairs in id order."""
        for i in range(len(self)):
            yield (i, self._flip[i])
    def __len__(self):
        return len(self._mapping)
    @classmethod
    def from_config(cls, config):
        """Rebuild a vocabulary from a config dict produced by save()."""
        data = dict(recursive_tuple_fix(config.pop('data')))
        new_vocab = cls()
        new_vocab.__dict__.update(config)
        for k,v in data.items():
            new_vocab._mapping[k] = v
            new_vocab._flip[v] = k
        new_vocab._i = len(new_vocab) + 1
        return new_vocab
    @classmethod
    def load(cls, filename, file_type='json'):
        """ config types supported: json, pickle """
        if not os.path.exists(filename):
            warnings.warn("file not found", RuntimeWarning)
            return cls()
        if file_type == 'json':
            with open(filename, 'r') as fp:
                config = json.load(fp)
        elif file_type == 'pickle':
            with open(filename, 'rb') as fp:
                config = pickle.load(fp)
        else:
            warnings.warn("Configuration type not understood", RuntimeWarning)
            return cls()
        return cls.from_config(config)
    def _config(self):
        config = {"emit_unks": self.emit_unks,
                  "use_mask": self.use_mask,
                  "_frozen": self._frozen,
                  "_growing": self._growing,
                  "file_type": self.file_type,
                  "name": self.name}
        return config
    def save(self, filename):
        """Serialize the vocabulary to *filename* using self.file_type."""
        config = self._config()
        config['data'] = tuple(self._mapping.items())
        # Open with the mode the serializer needs: json requires a text-mode
        # file on Python 3 (the old unconditional 'wb' raised TypeError
        # there), pickle requires binary. Also avoids creating/truncating
        # the file when the file_type is unknown.
        if self.file_type == 'json':
            with open(filename, 'w') as fp:
                json.dump(config, fp)
        elif self.file_type == 'pickle':
            with open(filename, 'wb') as fp:
                pickle.dump(config, fp)
        else:
            warnings.warn("Vocabulary {} not saved; unknown save method".format(self.name),
                          RuntimeWarning)
def recursive_tuple_fix(item):
    """Recursively convert nested lists (e.g. from json.load) into tuples."""
    if isinstance(item, list):
        return tuple([recursive_tuple_fix(subitem) for subitem in item])
    else:
return item | {
"content_hash": "93f3044b21ad4896e379674b2c908253",
"timestamp": "",
"source": "github",
"line_count": 249,
"max_line_length": 96,
"avg_line_length": 30.670682730923694,
"alnum_prop": 0.5166950373183187,
"repo_name": "braingineer/ikelos",
"id": "b93eb5953b88f7d5ae1fd261d3bb123afe21f3cd",
"size": "7637",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ikelos/data/vocabulary.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "120333"
}
],
"symlink_target": ""
} |
"""A simple grep flow."""
import stat
from grr.lib import aff4
from grr.lib import flow
from grr.lib import rdfvalue
from grr.proto import flows_pb2
class SearchFileContentArgs(rdfvalue.RDFProtoStruct):
  """Arguments rdfvalue for SearchFileContent, backed by its protobuf."""
  protobuf = flows_pb2.SearchFileContentArgs
class SearchFileContent(flow.GRRFlow):
  """A flow that runs a glob first and then issues a grep on the results.
  DEPRECATED.
  This flow is now deprecated in favor of FileFinder. To use FileFinder instead
  of SearchFileContent:
  Specify list of glob expressions corresponding to the files you want to
  search in. Add conditions that will be applied to found files. You can
  use "literal match" and "regex match" conditions. Set "action" to
  "Stat" if you're just interested in matches, or "Download" if you want to
  also download the matching files.
  ------------------------------------------------------------------------------
  This flow can be used to search for files by specifying a filename glob.
  e.g. this glob will search recursively under the environment directory for
  files called notepad with any extension:
  %%KnowledgeBase.environ_windir%%/**notepad.*
  The default ** recursion depth is 3 levels, and can be modified using a number
  after the ** like this:
  %%KnowledgeBase.environ_windir%%/**10notepad.*
  Optionally you can also specify File Content Search parameters to search file
  contents.
  """
  category = "/Filesystem/"
  friendly_name = "Search In Files"
  args_type = rdfvalue.SearchFileContentArgs
  behaviours = flow.GRRFlow.behaviours + "ADVANCED"
  @classmethod
  def GetDefaultArgs(cls, token=None):
    # Default to searching shell history files in user home directories.
    _ = token
    return cls.args_type(paths=[r"%%Users.homedir%%/.bash_history"])
  @flow.StateHandler(next_state=["Grep"])
  def Start(self):
    """Run the glob first."""
    if self.runner.output is not None:
      # Re-create the output collection as a grep-specific collection type.
      self.runner.output = aff4.FACTORY.Create(
          self.runner.output.urn, "GrepResultsCollection", mode="rw",
          token=self.token)
      self.runner.output.Set(self.runner.output.Schema.DESCRIPTION(
          "SearchFiles {0}".format(self.__class__.__name__)))
    self.CallFlow("Glob", next_state="Grep", root_path=self.args.root_path,
                  paths=self.args.paths, pathtype=self.args.pathtype)
  @flow.StateHandler(next_state=["WriteHits"])
  def Grep(self, responses):
    """Grep each globbed file, or pass all matches through if no grep given."""
    if responses.success:
      # Grep not specified - just list all hits.
      if not self.args.grep:
        msgs = [rdfvalue.BufferReference(pathspec=r.pathspec)
                for r in responses]
        self.CallStateInline(messages=msgs, next_state="WriteHits")
      else:
        # Grep specification given, ask the client to grep the files.
        for response in responses:
          # Only fetch regular files here.
          if not stat.S_ISDIR(response.st_mode):
            # Cast the BareGrepSpec to a GrepSpec type.
            request = rdfvalue.GrepSpec(target=response.pathspec,
                                        **self.args.grep.AsDict())
            self.CallClient("Grep", request=request, next_state="WriteHits",
                            request_data=dict(pathspec=response.pathspec))
  @flow.StateHandler(next_state="End")
  def WriteHits(self, responses):
    """Sends replies about the hits."""
    hits = list(responses)
    for hit in hits:
      # Old clients do not send pathspecs in the Grep response so we add them.
      if not hit.pathspec:
        hit.pathspec = responses.request_data.GetItem("pathspec")
      self.SendReply(hit)
    if self.args.also_download:
      # Optionally fetch the matching files' contents as well.
      self.CallFlow("MultiGetFile", pathspecs=[x.pathspec for x in hits],
                    next_state="End")
| {
"content_hash": "3d816a5c6a2f2c8f88eba5f55b3efd77",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 80,
"avg_line_length": 36.54,
"alnum_prop": 0.6655719759168035,
"repo_name": "pchaigno/grreat",
"id": "018df9b4309c82664e56b2bb5b353a94b388927d",
"size": "3676",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "lib/flows/general/grep.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "C++",
"bytes": "55149"
},
{
"name": "CSS",
"bytes": "36573"
},
{
"name": "JavaScript",
"bytes": "831111"
},
{
"name": "Makefile",
"bytes": "5482"
},
{
"name": "Perl",
"bytes": "483"
},
{
"name": "Python",
"bytes": "4517593"
},
{
"name": "Shell",
"bytes": "31210"
}
],
"symlink_target": ""
} |
import os
import platform
import sys
def is_active():
    """Report whether this platform backend is enabled for building."""
    active = True
    return active
def get_name():
    """Return the display name of this platform target."""
    platform_name = "LinuxBSD"
    return platform_name
def can_build():
    """Check whether the Linux/BSD target can be built on this host.

    Requires a POSIX host that is not macOS, a working ``pkg-config``
    binary, and the X11 development libraries discoverable through
    pkg-config. Prints a diagnostic and returns False on the first
    missing dependency; returns True when everything is present.
    """
    if os.name != "posix" or sys.platform == "darwin":
        return False

    # pkg-config itself is needed for every library probe below.
    if os.system("pkg-config --version > /dev/null"):
        print("Error: pkg-config not found. Aborting.")
        return False

    # Check the minimal dependencies: (pkg-config module, human-readable name).
    # The messages reproduce the historical per-library error strings.
    x11_dependencies = [
        ("x11", "X11 libraries"),
        ("xcursor", "Xcursor library"),
        ("xinerama", "Xinerama library"),
        ("xext", "Xext library"),
        ("xrandr", "XrandR library"),
        ("xrender", "XRender library"),
        ("xi", "Xi library"),
    ]
    for module, display_name in x11_dependencies:
        if os.system("pkg-config {0} --modversion > /dev/null".format(module)):
            print("Error: {0} not found. Aborting.".format(display_name))
            return False

    return True
def get_opts():
    """Return the SCons build variables specific to the Linux/BSD platform."""
    from SCons.Variables import BoolVariable, EnumVariable

    # (name, help text, default) triples, all boolean toggles.
    option_specs = [
        ("use_llvm", "Use the LLVM compiler", False),
        ("use_lld", "Use the LLD linker", False),
        ("use_thinlto", "Use ThinLTO", False),
        ("use_static_cpp", "Link libgcc and libstdc++ statically for better portability", True),
        ("use_coverage", "Test Godot coverage", False),
        ("use_ubsan", "Use LLVM/GCC compiler undefined behavior sanitizer (UBSAN)", False),
        ("use_asan", "Use LLVM/GCC compiler address sanitizer (ASAN)", False),
        ("use_lsan", "Use LLVM/GCC compiler leak sanitizer (LSAN)", False),
        ("use_tsan", "Use LLVM/GCC compiler thread sanitizer (TSAN)", False),
        ("use_msan", "Use LLVM compiler memory sanitizer (MSAN)", False),
        ("pulseaudio", "Detect and use PulseAudio", True),
        ("dbus", "Detect and use D-Bus to handle screensaver", True),
        ("udev", "Use udev for gamepad connection callbacks", True),
        ("x11", "Enable X11 display", True),
        ("debug_symbols", "Add debugging symbols to release/release_debug builds", True),
        ("separate_debug_symbols", "Create a separate file containing debugging symbols", False),
        ("touch", "Enable touch events", True),
        ("execinfo", "Use libexecinfo on systems where glibc is not available", False),
    ]
    return [BoolVariable(name, help_text, default) for name, help_text, default in option_specs]
def get_flags():
    """No platform-forced build flags for Linux/BSD; return an empty list."""
    forced_flags = []
    return forced_flags
def configure(env):
    """Apply Linux/BSD compiler, linker and dependency configuration to *env*.

    *env* is the SCons construction environment prepared by the main
    SConstruct; this mutates it in place (flags, defines, libraries) and
    may abort the build with ``sys.exit(255)`` on unsupported setups or
    missing system dependencies.
    """

    ## Build type

    if env["target"] == "release":
        if env["optimize"] == "speed":  # optimize for speed (default)
            env.Prepend(CCFLAGS=["-O3"])
        elif env["optimize"] == "size":  # optimize for size
            env.Prepend(CCFLAGS=["-Os"])

        if env["debug_symbols"]:
            env.Prepend(CCFLAGS=["-g2"])

    elif env["target"] == "release_debug":
        if env["optimize"] == "speed":  # optimize for speed (default)
            env.Prepend(CCFLAGS=["-O2"])
        elif env["optimize"] == "size":  # optimize for size
            env.Prepend(CCFLAGS=["-Os"])
        env.Prepend(CPPDEFINES=["DEBUG_ENABLED"])

        if env["debug_symbols"]:
            env.Prepend(CCFLAGS=["-g2"])

    elif env["target"] == "debug":
        env.Prepend(CCFLAGS=["-g3"])
        env.Prepend(CPPDEFINES=["DEBUG_ENABLED"])
        env.Append(LINKFLAGS=["-rdynamic"])

    ## Architecture

    is64 = sys.maxsize > 2 ** 32
    if env["bits"] == "default":
        env["bits"] = "64" if is64 else "32"

    ## Compiler configuration

    if "CXX" in env and "clang" in os.path.basename(env["CXX"]):
        # Convenience check to enforce the use_llvm overrides when CXX is clang(++)
        env["use_llvm"] = True

    if env["use_llvm"]:
        if "clang++" not in os.path.basename(env["CXX"]):
            env["CC"] = "clang"
            env["CXX"] = "clang++"
        env.extra_suffix = ".llvm" + env.extra_suffix

    if env["use_lld"]:
        if env["use_llvm"]:
            env.Append(LINKFLAGS=["-fuse-ld=lld"])
            if env["use_thinlto"]:
                # A convenience so you don't need to write use_lto too when using SCons
                env["use_lto"] = True
        else:
            print("Using LLD with GCC is not supported yet. Try compiling with 'use_llvm=yes'.")
            sys.exit(255)

    if env["use_coverage"]:
        env.Append(CCFLAGS=["-ftest-coverage", "-fprofile-arcs"])
        env.Append(LINKFLAGS=["-ftest-coverage", "-fprofile-arcs"])

    if env["use_ubsan"] or env["use_asan"] or env["use_lsan"] or env["use_tsan"] or env["use_msan"]:
        env.extra_suffix += "s"

        if env["use_ubsan"]:
            env.Append(
                CCFLAGS=[
                    "-fsanitize=undefined,shift,shift-exponent,integer-divide-by-zero,unreachable,vla-bound,null,return,signed-integer-overflow,bounds,float-divide-by-zero,float-cast-overflow,nonnull-attribute,returns-nonnull-attribute,bool,enum,vptr,pointer-overflow,builtin"
                ]
            )
            env.Append(LINKFLAGS=["-fsanitize=undefined"])
            if env["use_llvm"]:
                env.Append(
                    CCFLAGS=[
                        "-fsanitize=nullability-return,nullability-arg,function,nullability-assign,implicit-integer-sign-change"
                    ]
                )
            else:
                env.Append(CCFLAGS=["-fsanitize=bounds-strict"])

        if env["use_asan"]:
            env.Append(CCFLAGS=["-fsanitize=address,pointer-subtract,pointer-compare"])
            env.Append(LINKFLAGS=["-fsanitize=address"])

        if env["use_lsan"]:
            env.Append(CCFLAGS=["-fsanitize=leak"])
            env.Append(LINKFLAGS=["-fsanitize=leak"])

        if env["use_tsan"]:
            env.Append(CCFLAGS=["-fsanitize=thread"])
            env.Append(LINKFLAGS=["-fsanitize=thread"])

        if env["use_msan"] and env["use_llvm"]:
            env.Append(CCFLAGS=["-fsanitize=memory"])
            env.Append(CCFLAGS=["-fsanitize-memory-track-origins"])
            env.Append(CCFLAGS=["-fsanitize-recover=memory"])
            env.Append(LINKFLAGS=["-fsanitize=memory"])

    if env["use_lto"]:
        if not env["use_llvm"] and env.GetOption("num_jobs") > 1:
            env.Append(CCFLAGS=["-flto"])
            env.Append(LINKFLAGS=["-flto=" + str(env.GetOption("num_jobs"))])
        else:
            if env["use_lld"] and env["use_thinlto"]:
                env.Append(CCFLAGS=["-flto=thin"])
                env.Append(LINKFLAGS=["-flto=thin"])
            else:
                env.Append(CCFLAGS=["-flto"])
                env.Append(LINKFLAGS=["-flto"])

        if not env["use_llvm"]:
            env["RANLIB"] = "gcc-ranlib"
            env["AR"] = "gcc-ar"

    env.Append(CCFLAGS=["-pipe"])
    env.Append(LINKFLAGS=["-pipe"])

    ## Dependencies

    env.ParseConfig("pkg-config x11 --cflags --libs")
    env.ParseConfig("pkg-config xcursor --cflags --libs")
    env.ParseConfig("pkg-config xinerama --cflags --libs")
    env.ParseConfig("pkg-config xext --cflags --libs")
    env.ParseConfig("pkg-config xrandr --cflags --libs")
    env.ParseConfig("pkg-config xrender --cflags --libs")
    env.ParseConfig("pkg-config xi --cflags --libs")

    if env["touch"]:
        env.Append(CPPDEFINES=["TOUCH_ENABLED"])

    # FIXME: Check for existence of the libs before parsing their flags with pkg-config

    # freetype depends on libpng and zlib, so bundling one of them while keeping others
    # as shared libraries leads to weird issues
    if (
        env["builtin_freetype"]
        or env["builtin_libpng"]
        or env["builtin_zlib"]
        or env["builtin_graphite"]
        or env["builtin_harfbuzz"]
    ):
        env["builtin_freetype"] = True
        env["builtin_libpng"] = True
        env["builtin_zlib"] = True
        env["builtin_graphite"] = True
        env["builtin_harfbuzz"] = True

    if not env["builtin_freetype"]:
        env.ParseConfig("pkg-config freetype2 --cflags --libs")

    if not env["builtin_graphite"]:
        env.ParseConfig("pkg-config graphite2 --cflags --libs")

    if not env["builtin_icu"]:
        env.ParseConfig("pkg-config icu-uc --cflags --libs")

    if not env["builtin_harfbuzz"]:
        env.ParseConfig("pkg-config harfbuzz harfbuzz-icu --cflags --libs")

    if not env["builtin_libpng"]:
        env.ParseConfig("pkg-config libpng16 --cflags --libs")

    if not env["builtin_bullet"]:
        # We need at least version 2.90
        min_bullet_version = "2.90"

        import subprocess

        # check_output returns bytes on Python 3: decode before comparing.
        # The previous code compared str(bytes) lexicographically against the
        # minimum, which never matched (and string comparison mis-orders
        # versions such as "2.100" vs "2.90" anyway). Compare numerically.
        bullet_version = (
            subprocess.check_output(["pkg-config", "bullet", "--modversion"]).decode("utf-8").strip()
        )

        def _version_key(version):
            # Numeric components only; non-numeric suffixes are ignored.
            return [int(part) for part in version.split(".") if part.isdigit()]

        if _version_key(bullet_version) < _version_key(min_bullet_version):
            # Abort as system bullet was requested but too old
            print(
                "Bullet: System version {0} does not match minimal requirements ({1}). Aborting.".format(
                    bullet_version, min_bullet_version
                )
            )
            sys.exit(255)
        env.ParseConfig("pkg-config bullet --cflags --libs")

    if False:  # not env['builtin_assimp']:
        # FIXME: Add min version check
        env.ParseConfig("pkg-config assimp --cflags --libs")

    if not env["builtin_enet"]:
        env.ParseConfig("pkg-config libenet --cflags --libs")

    if not env["builtin_squish"]:
        env.ParseConfig("pkg-config libsquish --cflags --libs")

    if not env["builtin_zstd"]:
        env.ParseConfig("pkg-config libzstd --cflags --libs")

    # Sound and video libraries
    # Keep the order as it triggers chained dependencies (ogg needed by others, etc.)

    if not env["builtin_libtheora"]:
        env["builtin_libogg"] = False  # Needed to link against system libtheora
        env["builtin_libvorbis"] = False  # Needed to link against system libtheora
        env.ParseConfig("pkg-config theora theoradec --cflags --libs")
    else:
        list_of_x86 = ["x86_64", "x86", "i386", "i586"]
        if any(platform.machine() in s for s in list_of_x86):
            env["x86_libtheora_opt_gcc"] = True

    if not env["builtin_libvpx"]:
        env.ParseConfig("pkg-config vpx --cflags --libs")

    if not env["builtin_libvorbis"]:
        env["builtin_libogg"] = False  # Needed to link against system libvorbis
        env.ParseConfig("pkg-config vorbis vorbisfile --cflags --libs")

    if not env["builtin_opus"]:
        env["builtin_libogg"] = False  # Needed to link against system opus
        env.ParseConfig("pkg-config opus opusfile --cflags --libs")

    if not env["builtin_libogg"]:
        env.ParseConfig("pkg-config ogg --cflags --libs")

    if not env["builtin_libwebp"]:
        env.ParseConfig("pkg-config libwebp --cflags --libs")

    if not env["builtin_mbedtls"]:
        # mbedTLS does not provide a pkgconfig config yet. See https://github.com/ARMmbed/mbedtls/issues/228
        env.Append(LIBS=["mbedtls", "mbedcrypto", "mbedx509"])

    if not env["builtin_wslay"]:
        env.ParseConfig("pkg-config libwslay --cflags --libs")

    if not env["builtin_miniupnpc"]:
        # No pkgconfig file so far, hardcode default paths.
        env.Prepend(CPPPATH=["/usr/include/miniupnpc"])
        env.Append(LIBS=["miniupnpc"])

    # On Linux wchar_t should be 32-bits
    # 16-bit library shouldn't be required due to compiler optimisations
    if not env["builtin_pcre2"]:
        env.ParseConfig("pkg-config libpcre2-32 --cflags --libs")

    if not env["builtin_embree"]:
        # No pkgconfig file so far, hardcode expected lib name.
        env.Append(LIBS=["embree3"])

    ## Flags

    if os.system("pkg-config --exists alsa") == 0:  # 0 means found
        env["alsa"] = True
        env.Append(CPPDEFINES=["ALSA_ENABLED", "ALSAMIDI_ENABLED"])
    else:
        print("Warning: ALSA libraries not found. Disabling the ALSA audio driver.")

    if env["pulseaudio"]:
        if os.system("pkg-config --exists libpulse") == 0:  # 0 means found
            env.Append(CPPDEFINES=["PULSEAUDIO_ENABLED"])
            env.ParseConfig("pkg-config --cflags libpulse")
        else:
            print("Warning: PulseAudio development libraries not found. Disabling the PulseAudio audio driver.")

    if env["dbus"]:
        if os.system("pkg-config --exists dbus-1") == 0:  # 0 means found
            env.Append(CPPDEFINES=["DBUS_ENABLED"])
            env.ParseConfig("pkg-config --cflags --libs dbus-1")
        else:
            print("Warning: D-Bus development libraries not found. Disabling screensaver prevention.")

    if platform.system() == "Linux":
        env.Append(CPPDEFINES=["JOYDEV_ENABLED"])

        if env["udev"]:
            if os.system("pkg-config --exists libudev") == 0:  # 0 means found
                env.Append(CPPDEFINES=["UDEV_ENABLED"])
            else:
                print("Warning: libudev development libraries not found. Disabling controller hotplugging support.")
    else:
        env["udev"] = False  # Linux specific

    # Linkflags below this line should typically stay the last ones
    if not env["builtin_zlib"]:
        env.ParseConfig("pkg-config zlib --cflags --libs")

    env.Prepend(CPPPATH=["#platform/linuxbsd"])

    if env["x11"]:
        if not env["vulkan"]:
            print("Error: X11 support requires vulkan=yes")
            env.Exit(255)
        env.Append(CPPDEFINES=["X11_ENABLED"])

    env.Append(CPPDEFINES=["UNIX_ENABLED"])
    env.Append(CPPDEFINES=[("_FILE_OFFSET_BITS", 64)])

    if env["vulkan"]:
        env.Append(CPPDEFINES=["VULKAN_ENABLED"])
        if not env["use_volk"]:
            env.ParseConfig("pkg-config vulkan --cflags --libs")
        if not env["builtin_glslang"]:
            # No pkgconfig file for glslang so far
            env.Append(LIBS=["glslang", "SPIRV"])

    # env.Append(CPPDEFINES=['OPENGL_ENABLED'])
    env.Append(LIBS=["GL"])

    env.Append(LIBS=["pthread"])

    if platform.system() == "Linux":
        env.Append(LIBS=["dl"])

    if platform.system().find("BSD") >= 0:
        env["execinfo"] = True

    if env["execinfo"]:
        env.Append(LIBS=["execinfo"])

    if not env["tools"]:
        import subprocess
        import re

        linker_version_str = subprocess.check_output([env.subst(env["LINK"]), "-Wl,--version"]).decode("utf-8")
        # Raw string: avoids the invalid-escape-sequence deprecation for \d.
        gnu_ld_version = re.search(r"^GNU ld [^$]*(\d+\.\d+)$", linker_version_str, re.MULTILINE)
        if not gnu_ld_version:
            print(
                "Warning: Creating template binaries enabled for PCK embedding is currently only supported with GNU ld, not gold or LLD."
            )
        else:
            if float(gnu_ld_version.group(1)) >= 2.30:
                env.Append(LINKFLAGS=["-T", "platform/linuxbsd/pck_embed.ld"])
            else:
                env.Append(LINKFLAGS=["-T", "platform/linuxbsd/pck_embed.legacy.ld"])

    ## Cross-compilation

    if is64 and env["bits"] == "32":
        env.Append(CCFLAGS=["-m32"])
        env.Append(LINKFLAGS=["-m32", "-L/usr/lib/i386-linux-gnu"])
    elif not is64 and env["bits"] == "64":
        env.Append(CCFLAGS=["-m64"])
        env.Append(LINKFLAGS=["-m64", "-L/usr/lib/i686-linux-gnu"])

    # Link those statically for portability
    if env["use_static_cpp"]:
        env.Append(LINKFLAGS=["-static-libgcc", "-static-libstdc++"])
        if env["use_llvm"]:
            env["LINKCOM"] = env["LINKCOM"] + " -l:libatomic.a"
    else:
        if env["use_llvm"]:
            env.Append(LIBS=["atomic"])
| {
"content_hash": "ce2de316068256bbf6e9ac7a0001fbab",
"timestamp": "",
"source": "github",
"line_count": 433,
"max_line_length": 276,
"avg_line_length": 37.46420323325635,
"alnum_prop": 0.5979533966218715,
"repo_name": "DmitriySalnikov/godot",
"id": "8eb22c1c72265ddaecf82a5126cac7d5ed593e46",
"size": "16222",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "platform/linuxbsd/detect.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "50004"
},
{
"name": "C#",
"bytes": "175747"
},
{
"name": "C++",
"bytes": "17474662"
},
{
"name": "GLSL",
"bytes": "1271"
},
{
"name": "Java",
"bytes": "499031"
},
{
"name": "JavaScript",
"bytes": "9580"
},
{
"name": "Makefile",
"bytes": "451"
},
{
"name": "Objective-C",
"bytes": "2644"
},
{
"name": "Objective-C++",
"bytes": "169329"
},
{
"name": "Python",
"bytes": "293239"
},
{
"name": "Shell",
"bytes": "11043"
}
],
"symlink_target": ""
} |
from lib import BaseTest
class ListMirror1Test(BaseTest):
    """
    list mirrors: regular list
    """
    # Create several mirror flavours (plain, with sources, arch-restricted,
    # flat repo) so the listing output exercises each of them.
    fixtureCmds = [
        "aptly mirror create --ignore-signatures mirror1 http://mirror.yandex.ru/debian/ wheezy",
        "aptly mirror create -with-sources --ignore-signatures mirror2 http://mirror.yandex.ru/debian/ squeeze contrib",
        "aptly -architectures=i386 mirror create --ignore-signatures mirror3 http://mirror.yandex.ru/debian/ squeeze non-free",
        "aptly mirror create -ignore-signatures mirror4 http://download.opensuse.org/repositories/home:/DeepDiver1975/xUbuntu_10.04/ ./",
    ]
    runCmd = "aptly mirror list"
class ListMirror2Test(BaseTest):
    """
    list mirrors: empty list
    """
    # No fixtures: verifies the output when no mirrors exist.
    runCmd = "aptly mirror list"
class ListMirror3Test(BaseTest):
    """
    list mirrors: raw list
    """
    # Use the pre-populated fixture database instead of creating mirrors.
    fixtureDB = True
    runCmd = "aptly -raw mirror list"
class ListMirror4Test(BaseTest):
    """
    list mirrors: raw empty list
    """
    # Raw output with no mirrors defined.
    runCmd = "aptly -raw mirror list"
| {
"content_hash": "f54cf34f72e7be66ee0da23f49970e58",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 137,
"avg_line_length": 28.416666666666668,
"alnum_prop": 0.6715542521994134,
"repo_name": "github/aptly",
"id": "f97759f3ce43f8442c06ae271e86d1bf794af273",
"size": "1023",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "system/t04_mirror/list.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from ckan.plugins import toolkit
import ckan.lib.helpers as h
class ModerationController(toolkit.BaseController):
    """Pages for moderating reported (hidden, unmoderated) issues."""

    def all_reported_issues(self, organization_id):
        '''show all issues over max_strikes and are not moderated'''
        try:
            # NOTE: this calls the module-level all_reported_issues() helper
            # defined below, not this method (no ``self.`` prefix).
            issues, organization = all_reported_issues(organization_id)
            extra_vars = {
                'issues': issues.get('results', []),
                'organization': organization,
            }
            return toolkit.render("issues/moderation.html",
                                  extra_vars=extra_vars)
        except toolkit.ObjectNotFound:
            toolkit.abort(404, toolkit._('Organization not found'))

    def moderate(self, organization_id):
        # Handle the moderation form POST: mark an issue as abuse (hide it
        # permanently) or clear its abuse reports, then redirect back.
        if toolkit.request.method == 'POST':
            if not toolkit.c.user:
                msg = toolkit._('You must be logged in to moderate issues')
                toolkit.abort(401, msg)
            data_dict = toolkit.request.POST.mixed()
            try:
                if data_dict.get('abuse_status') == 'abuse':
                    toolkit.get_action('issue_report')(data_dict=data_dict)
                    h.flash_success(toolkit._('Issue permanently hidden'))
                elif data_dict.get('abuse_status') == 'not_abuse':
                    toolkit.get_action('issue_report_clear')(
                        data_dict=data_dict)
                    h.flash_success(toolkit._('All issue reports cleared'))
            except toolkit.ValidationError:
                toolkit.abort(404)
        h.redirect_to('issues_moderate_reported_issues',
                      organization_id=organization_id)
def all_reported_issues(organization_id, include_sub_organizations=False):
    """Return (issues, organization) for the hidden, unmoderated issues
    reported against *organization_id*, including their abuse reports."""
    organization = toolkit.get_action('organization_show')(
        data_dict={'id': organization_id})
    search_params = {
        'organization_id': organization['id'],
        'abuse_status': 'unmoderated',
        'include_reports': True,
        'include_sub_organizations': include_sub_organizations,
        'visibility': 'hidden',
    }
    issues = toolkit.get_action('issue_search')(data_dict=search_params)
    return issues, organization
class CommentModerationController(toolkit.BaseController):
    """Pages for moderating reported issue comments."""

    def reported_comments(self, organization_id):
        # List the hidden comments for an organization so a moderator can
        # review them.
        try:
            organization = toolkit.get_action('organization_show')(data_dict={
                'id': organization_id,
            })
            comments = toolkit.get_action('issue_comment_search')(data_dict={
                'organization_id': organization['id'],
                'only_hidden': True,
            })
            return toolkit.render(
                'issues/comment_moderation.html',
                extra_vars={
                    'comments': comments,
                    'organization': organization,
                }
            )
        except toolkit.ObjectNotFound:
            toolkit.abort(404, toolkit._('Organization not found'))

    def moderate(self, organization_id):
        # Handle the moderation form POST: permanently hide a comment or
        # clear its abuse reports, then redirect back to the listing.
        if toolkit.request.method == 'POST':
            if not toolkit.c.user:
                msg = toolkit._('You must be logged in to moderate comment')
                toolkit.abort(401, msg)
            data_dict = toolkit.request.POST.mixed()
            try:
                if data_dict.get('abuse_status') == 'abuse':
                    toolkit.get_action('issue_comment_report')(data_dict=data_dict)
                    h.flash_success(toolkit._('Comment permanently hidden'))
                elif data_dict.get('abuse_status') == 'not_abuse':
                    toolkit.get_action('issue_comment_report_clear')(
                        data_dict=data_dict)
                    h.flash_success(toolkit._('All comment reports cleared'))
            except toolkit.ValidationError:
                toolkit.abort(404)
        h.redirect_to('issues_moderate_reported_comments',
                      organization_id=organization_id)
| {
"content_hash": "3f38bf14e98a3c11dc6630791ec7642c",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 83,
"avg_line_length": 40.4639175257732,
"alnum_prop": 0.5620382165605096,
"repo_name": "datagovuk/ckanext-issues",
"id": "8c2e3a887dbea8fe514d9dfbbe87a2512aafefa5",
"size": "3925",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ckanext/issues/controller/moderation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4500"
},
{
"name": "HTML",
"bytes": "38876"
},
{
"name": "JavaScript",
"bytes": "8945"
},
{
"name": "Python",
"bytes": "226935"
},
{
"name": "Shell",
"bytes": "1546"
}
],
"symlink_target": ""
} |
""" Puppet plugin operations. The code that handles puppet_config is split
into two parts. The part that does transformations such as extract
per-operation info resides in this file. Rest of the properties are handled
in manager.py """
import copy
from cloudify.decorators import operation as _operation
from puppet_plugin.manager import (PuppetParamsError,
PuppetManager,
PuppetAgentRunner,
PuppetStandaloneRunner,
PUPPET_TAG_RE)
EXPECTED_OP_PREFIXES = (
'cloudify.interfaces.lifecycle',
'cloudify.interfaces.relationship_lifecycle')
def _extract_op(ctx):
    """Return the short operation name (the part after the last dot) from
    ctx.operation.name, warning if the prefix is not a known lifecycle
    interface."""
    full_name = ctx.operation.name
    prefix, _, op = full_name.rpartition('.')
    if prefix not in EXPECTED_OP_PREFIXES:
        expected = ' or '.join(EXPECTED_OP_PREFIXES)
        ctx.logger.warn("Node operation is expected to start with '{0}' "
                        "but starts with '{1}'".format(expected, prefix))
    return op
def _op_specifc(ctx, props, op, prop):
if prop in props:
e = props[prop]
ctx.logger.info("Found {0} in properties".format(prop))
if isinstance(e, dict):
ctx.logger.info("Detected per-operation '{0}' in properties".
format(prop))
if op in e:
e = e[op]
ctx.logger.info("Found '{0}' for operation '{1}'".
format(prop, op))
else:
e = None
ctx.logger.info("No '{0}' for operation '{1}'".
format(prop, op))
else:
e = None
ctx.logger.info("Have not found {0} in properties".format(prop))
return e
def _prepare_tags(ctx, props, op):
    """Build the list of puppet tags to apply for operation *op*.

    Returns a list of tags, or None when operations_tags is configured but
    has no entry for this operation (the caller treats None as "skip").
    """
    # Deep-copy so validation/extension below never mutates the node props.
    tags = copy.deepcopy(props.get('tags', []))
    for tag in tags:
        if not PUPPET_TAG_RE.match(tag):
            raise PuppetParamsError(
                "puppet_config.tags[*] must match {0}, you gave "
                "'{1}'".format(PUPPET_TAG_RE, tag))
    if props.get('add_operation_tag', False):
        tags += ['cloudify_operation_' + op]
    ops_tags = props.get('operations_tags')
    if ops_tags:
        op_tags = ops_tags.get(op, [])
        # A single tag may be given as a bare string (Python 2: basestring).
        if isinstance(op_tags, basestring):
            op_tags = [op_tags]
        if not isinstance(op_tags, list):
            raise PuppetParamsError(
                "Operation tags must be a list, not {0}".format(op_tags))
        if op_tags:
            ctx.logger.info("Operation '{0}' -> tags {1}".format(
                op, op_tags))
            tags += op_tags
        else:
            # NOTE(review): when operations_tags exists but has nothing for
            # this operation, None is returned and any base/operation tags
            # built above are dropped — presumably deliberate ("skip this
            # op"); confirm against operation() below.
            return None
    return tags
@_operation
def operation(ctx, **kwargs):
    """Dispatch a lifecycle operation to the configured Puppet runner.

    Agent runner: runs with the prepared tags; non-'start' operations with
    no operation-specific tags are skipped. Standalone runner: runs the
    operation-specific 'execute' command or 'manifest' (exactly one).
    """
    op = _extract_op(ctx)
    props = ctx.node.properties['puppet_config']
    mgr = PuppetManager(ctx)
    # print(mgr, isinstance(mgr, PuppetStandaloneRunner))
    tags = _prepare_tags(ctx, props, op)

    if isinstance(mgr, PuppetAgentRunner):
        # _prepare_tags returns None to signal "nothing for this operation".
        if op != 'start' and tags is None:
            ctx.logger.info("No tags specific to operation '{0}', skipping".
                            format(op))
            return
        mgr.run(tags=(tags or []))
        return

    if isinstance(mgr, PuppetStandaloneRunner):
        e = _op_specifc(ctx, props, op, 'execute')
        m = _op_specifc(ctx, props, op, 'manifest')
        # 'execute' and 'manifest' are mutually exclusive per operation.
        if e and m:
            raise RuntimeError("Either 'execute' or 'manifest' " +
                               "must be specified for given operation. " +
                               "Both are specified for operation {0}".format(
                                   op))
        if e or m:
            mgr.run(tags=(tags or []), execute=e, manifest=m)
        return

    raise RuntimeError("Internal error: unknown Puppet Runner")
| {
"content_hash": "15a77d6e59fc2f798c72aa5dc09323ea",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 77,
"avg_line_length": 33.64601769911504,
"alnum_prop": 0.5376117832719621,
"repo_name": "cloudify-cosmo/cloudify-puppet-plugin",
"id": "1ab48d4d6e7b0e60614ebc01ee26148e245a5275",
"size": "3802",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "puppet_plugin/operations.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1033"
},
{
"name": "Puppet",
"bytes": "154"
},
{
"name": "Python",
"bytes": "47086"
},
{
"name": "Ruby",
"bytes": "970"
}
],
"symlink_target": ""
} |
import unittest
from StringIO import StringIO
import time
# third party
from mock import MagicMock
# this package
from theape.parts.wifi.iwconfig import IwconfigQuery, IwconfigExpressions
from theape.parts.wifi.iwconfig import IwconfigEnum
from theape import ApeError
try:
    # Fixture data captured from real `iwconfig` runs; `source2` is the same
    # output with the ESSID swapped so cache-expiry tests can distinguish
    # consecutive reads.
    source = open('iwconfig.txt').read()
    source2 = source.replace('simio_claustro', 'great_ape_ape')
    source_not_associated = open('iwconfig_not_associated.txt').read()
except IOError:
    # sphinx will crash on trying to import this module
    pass
class TestIwconfig(unittest.TestCase):
    """Tests for IwconfigQuery using a mocked connection and captured
    iwconfig output (connected and not-associated fixtures)."""

    def setUp(self):
        # Two mocked connections: one returning the connected fixture, one
        # returning the not-associated fixture.
        self.interface = 'wlan2'
        self.connection = MagicMock()
        self.connection.iwconfig.return_value = (None,
                                                 StringIO(source),
                                                 '')
        self.disconnected = (None,
                             StringIO(source_not_associated),
                             '')
        self.connection2 = MagicMock()
        self.connection2.iwconfig.return_value = self.disconnected
        self.iwconfig_disconnected = IwconfigQuery(interface=self.interface,
                                                   connection=self.connection2)
        self.iwconfig = IwconfigQuery(interface=self.interface,
                                      connection=self.connection)
        return

    def test_constructor(self):
        """
        Does it build correctly?
        """
        self.assertEqual(self.interface, self.iwconfig.interface)
        self.assertEqual(self.connection, self.iwconfig.connection)
        return

    def test_command(self):
        """
        Does it send the right command?
        """
        self.assertEqual('iwconfig wlan2', self.iwconfig.command)
        return

    def test_call(self):
        """
        Does it send the command and return the output?
        """
        output = self.iwconfig()
        self.assertEqual(output, StringIO(source).readlines())
        return

    def test_check_errors(self):
        """
        Does it raise the ApeError if appropriate?
        """
        # non-existent interface
        self.connection.iwconfig.return_value = (None, StringIO(source),
                                                 StringIO('wlan0 No such device'))
        self.assertRaises(ApeError, self.iwconfig)
        # non-wireless interface
        self.connection.iwconfig.return_value = (None, StringIO(source),
                                                 StringIO('wlan0 no wireless extensions.'))
        self.assertRaises(ApeError, self.iwconfig)
        # device doesn't have iwconfig
        # non-wireless interface
        self.connection.iwconfig.return_value = (None, StringIO(source),
                                                 StringIO('iwconfig: command not found'))
        self.assertRaises(ApeError, self.iwconfig)
        return

    def test_output(self):
        """
        Does the output get set if checked after a certain time interval?
        """
        # two calls in rapid succession
        # side_effect pops from the end, so `source` is served first.
        outputs = [(None, StringIO(source2), ''),
                   (None, StringIO(source), '')]
        def side_effect(*args, **kwargs):
            return outputs.pop()
        self.connection.iwconfig.side_effect = side_effect
        self.assertEqual(self.iwconfig.output,
                         StringIO(source).readlines())
        self.assertEqual(self.iwconfig.output,
                         StringIO(source).readlines())
        # two calls outside the time
        outputs = [(None, StringIO(source2), ''),
                   (None, StringIO(source), '')]
        def side_effect(*args, **kwargs):
            return outputs.pop()
        # reset the timer and shrink the interval so the cache expires.
        self.iwconfig._event_timer = None
        self.iwconfig.interval = 0
        self.connection.iwconfig.side_effect = side_effect
        self.assertEqual(self.iwconfig.output,
                         StringIO(source).readlines())
        time.sleep(0.1)
        self.assertEqual(self.iwconfig.output,
                         StringIO(source2).readlines())
        return

    def test_essid(self):
        """
        Does it get the name of the ap?
        """
        self.assert_equal('"simio_claustro"', self.iwconfig.essid)
        # disconnected case
        self.assert_equal("off/any", self.iwconfig_disconnected.essid)
        return

    def test_mac_protocol(self):
        """
        Does it get the MAC-protocol?
        """
        self.assert_equal('IEEE 802.11abgn', self.iwconfig.mac_protocol)
        return

    def test_mode(self):
        """
        Does it get the mode?
        """
        self.assert_equal('Managed', self.iwconfig.mode)
        return

    def test_frequency(self):
        """
        Does it get the frequency or NA if not connected?
        """
        self.assert_equal('2.462 GHz', self.iwconfig.frequency)
        # field disappears if not connected
        self.assert_equal(self.iwconfig.missing_data,
                          self.iwconfig_disconnected.frequency)
        return

    def test_access_point(self):
        """
        Does it get the MAC address (or 'Not-Associated')
        """
        self.assert_equal('00:30:44:07:B2:92', self.iwconfig.access_point)
        # check Not-associated
        self.assert_equal('Not-Associated', self.iwconfig_disconnected.access_point)
        return

    def test_bit_rate(self):
        """
        Does it get the bit-rate or NA?
        """
        self.assert_equal('36 Mb/s', self.iwconfig.bit_rate)
        return

    def test_tx_power(self):
        """
        Does it get the tx-power?
        """
        self.assert_equal('15 dBm', self.iwconfig.tx_power)
        return

    def test_link_quality(self):
        """
        Does it get the link-quality?
        """
        self.assertEqual('40/70', self.iwconfig.link_quality)
        return

    def test_signal_level(self):
        """
        Does it get the RSSI?
        """
        self.assert_equal('-70 dBm', self.iwconfig.signal_level)
        return

    def test_rx_invalid_nwid(self):
        """
        Does it get the count of invalid SSID's?
        """
        self.assert_equal('0', self.iwconfig.rx_invalid_nwid)
        return

    def test_rx_invalid_crypt(self):
        self.assertEqual('0', self.iwconfig.rx_invalid_crypt)
        return

    def test_rx_invalid_frag(self):
        self.assertEqual('0', self.iwconfig.rx_invalid_frag)
        return

    def test_tx_excessive_retries(self):
        self.assertEqual('65239', self.iwconfig.tx_excessive_retries)
        return

    def test_invalid_misc(self):
        self.assertEqual('855', self.iwconfig.invalid_misc)
        return

    def test_missed_beacons(self):
        self.assertEqual('0', self.iwconfig.missed_beacons)
        return

    def test_missing_data(self):
        """
        Does it use the 'missing_data' property if the field is missing?
        """
        self.assertEqual(self.iwconfig.missing_data,
                         self.iwconfig_disconnected.signal_level)
        return

    def test_str(self):
        """
        Does the __str__ just return all the lines?
        """
        self.assert_equal(source, str(self.iwconfig))

    def assert_equal(self, expected, actual):
        # Helper: assertEqual with a message showing both values.
        self.assertEqual(expected, actual,
                         msg="Expected: {0}, Actual: {1}".format(expected, actual))
class TestIwconfigExpressions(unittest.TestCase):
    """Tests for the IwconfigExpressions regular expressions against the
    captured iwconfig fixture output."""

    def setUp(self):
        self.expressions = IwconfigExpressions(interface='wlan2')
        return

    def test_constructor(self):
        """
        Will it build?
        """
        self.assertEqual('wlan2', self.expressions.interface)
        return

    def test_essid(self):
        """
        Does it match the essid?
        """
        match = self.expressions.essid.search(source)
        self.assertEqual('"simio_claustro"', match.group(IwconfigEnum.essid).strip())
        # disconnected
        match = self.expressions.essid.search(source_not_associated)
        self.assertEqual('off/any', match.group(IwconfigEnum.essid).strip())
        return

    def test_mac_protocol(self):
        """
        Does it get the protocol?
        """
        match = self.expressions.mac_protocol.search(source)
        self.assertEqual('IEEE 802.11abgn ', match.group(IwconfigEnum.mac_protocol))
        return

    def test_mode(self):
        """
        Does in get the mode?
        """
        match = self.expressions.mode.search(source)
        self.assertEqual('Managed', match.group(IwconfigEnum.mode))
        return

    def test_frequency(self):
        """
        Does it match the frequency?
        """
        match = self.expressions.frequency.search(source)
        self.assertEqual('2.462 GHz', match.group(IwconfigEnum.frequency))
        return

    def test_access_point(self):
        """
        Does it get the access points mac?
        """
        match = self.expressions.access_point.search(source)
        self.assertEqual("00:30:44:07:B2:92",
                         match.group(IwconfigEnum.access_point))
        return

    def test_bit_rate(self):
        """
        Does it get the bit rate?
        """
        match = self.expressions.bit_rate.search(source)
        self.assertEqual(match.group(IwconfigEnum.bit_rate),
                         "36 Mb/s")
        return

    def test_tx_power(self):
        """
        Does it get the tx-power?
        """
        match = self.expressions.tx_power.search(source)
        self.assertEqual('15 dBm', match.group(IwconfigEnum.tx_power))
        return

    def test_link_quality(self):
        """
        Does it get the link quality?
        """
        match = self.expressions.link_quality.search(source)
        self.assertEqual('40/70', match.group(IwconfigEnum.link_quality))
        return

    def test_signal_level(self):
        """
        Does it get the signal level?
        """
        match = self.expressions.signal_level.search(source)
        self.assertEqual('-70 dBm', match.group(IwconfigEnum.signal_level))
        return

    def test_rx_invalid_nwid(self):
        """
        Does it get the count of invalid network ids?
        """
        match = self.expressions.rx_invalid_nwid.search(source)
        self.assertEqual(match.group(IwconfigEnum.rx_invalid_nwid), '0')
        return

    def test_rx_invalid_crypt(self):
        """
        Does it get count of packets not decryptable?
        """
        match = self.expressions.rx_invalid_crypt.search(source)
        self.assertEqual(match.group(IwconfigEnum.rx_invalid_crypt),
                         '0')
        return

    def test_rx_invalid_frag(self):
        """
        Does it get count of packets that couldn't be reassembled?
        """
        match = self.expressions.rx_invalid_frag.search(source)
        # Previously this test asserted nothing; '0' matches the fixture
        # value checked by TestIwconfig.test_rx_invalid_frag.
        self.assertEqual(match.group(IwconfigEnum.rx_invalid_frag), '0')
        return

    def test_tx_excessive_retries(self):
        """
        Does it get number of packets which hardware failed to deliver?
        """
        match = self.expressions.tx_excessive_retries.search(source)
        self.assertEqual(match.group(IwconfigEnum.tx_excessive_retries),
                         '65239')
        return

    def test_invalid_misc(self):
        """
        Does it get count of other packets lost because of wireless stuff?
        """
        match = self.expressions.invalid_misc.search(source)
        self.assertEqual(match.group(IwconfigEnum.invalid_misc),
                         '855')
        return

    def test_missed_beacons(self):
        """
        Does it get the count of missed beacons?
        """
        match = self.expressions.missed_beacons.search(source)
        # Previously this test asserted nothing; '0' matches the fixture
        # value checked by TestIwconfig.test_missed_beacons.
        self.assertEqual(match.group(IwconfigEnum.missed_beacons), '0')
        return
"content_hash": "4395a539301b103a62ff834da45cd05f",
"timestamp": "",
"source": "github",
"line_count": 369,
"max_line_length": 96,
"avg_line_length": 31.77777777777778,
"alnum_prop": 0.5747057820228552,
"repo_name": "rsnakamura/theape",
"id": "99070a2ccd093d166ad24fdd05fb2435f0a0f64a",
"size": "11753",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "theape/parts/wifi/tests/testiwconfig.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "6002"
},
{
"name": "Python",
"bytes": "549815"
},
{
"name": "Shell",
"bytes": "183"
}
],
"symlink_target": ""
} |
import sys
if (sys.version_info >= (2,7)):
import unittest
else:
import unittest2 as unittest
import pydevtest_sessions as s
from pydevtest_common import assertiCmd, assertiCmdFail, interruptiCmd
from resource_suite import ResourceBase
import commands
import os, stat
import datetime
import time
import shutil
import random
class ChunkyDevTest(ResourceBase):
    def test_beginning_from_devtest(self):
        """
        Port of the opening section of the original devtest script.

        Runs the basic informational icommands, then exercises
        iput/ichksum/ils/ichmod/irepl/itrim/iphymv/imeta/icp/imv/iget
        against a fresh 'icmdtest' collection in the admin home.
        Side effects: writes sfile2 in the CWD and foo2 under /tmp.
        """
        # build expected variables with similar devtest names
        progname = __file__
        myssize = str(os.stat(progname).st_size)
        username = s.adminsession.getUserName()
        irodszone = s.adminsession.getZoneName()
        testuser1 = s.sessions[1].getUserName()
        irodshome = "/"+irodszone+"/home/rods/"+s.adminsession.sessionId
        irodsdefresource = s.adminsession.getDefResource()
        dir_w = "."
        sfile2 = dir_w+"/sfile2"
        # sfile2 is twice this script's size (created for later size checks)
        commands.getstatusoutput( "cat "+progname+" "+progname+" > "+sfile2 )
        mysdir = "/tmp/irodssdir"
        myldir = dir_w+"/ldir"
        if os.path.exists( myldir ):
            shutil.rmtree( myldir )
        assertiCmd(s.adminsession,"imkdir icmdtest")
        # test basic informational commands
        assertiCmd(s.adminsession,"iinit -l", "LIST", s.adminsession.getUserName() )
        assertiCmd(s.adminsession,"iinit -l", "LIST", s.adminsession.getZoneName() )
        assertiCmd(s.adminsession,"iinit -l", "LIST", s.adminsession.getDefResource() )
        res = s.adminsession.runCmd('ils', ['-V'])
        assert (res[0].count('NOTICE: irodsHost') == 1
                and res[0].count('NOTICE: irodsPort') == 1
                and res[0].count('NOTICE: irodsDefResource') == 1)
        # begin original devtest
        assertiCmd(s.adminsession,"ilsresc", "LIST", self.testresc)
        assertiCmd(s.adminsession,"ilsresc -l", "LIST", self.testresc)
        assertiCmd(s.adminsession,"imiscsvrinfo", "LIST", ["relVersion"] )
        assertiCmd(s.adminsession,"iuserinfo", "LIST", "name: "+username )
        assertiCmd(s.adminsession,"ienv", "LIST", "irodsZone" )
        assertiCmd(s.adminsession,"ipwd", "LIST", "home" )
        assertiCmd(s.adminsession,"ihelp ils", "LIST", "ils" )
        assertiCmd(s.adminsession,"ierror -14000", "LIST", "SYS_API_INPUT_ERR" )
        assertiCmd(s.adminsession,"iexecmd hello", "LIST", "Hello world" )
        assertiCmd(s.adminsession,"ips -v", "LIST", "ips" )
        assertiCmd(s.adminsession,"iqstat", "LIST", "No delayed rules pending for user rods" )
        # put and list basic file information
        assertiCmd(s.adminsession,"ils -AL","LIST","home") # debug
        assertiCmd(s.adminsession,"iput -K --wlock "+progname+" "+irodshome+"/icmdtest/foo1" )
        assertiCmd(s.adminsession,"ichksum -f "+irodshome+"/icmdtest/foo1", "LIST", "performed = 1" )
        assertiCmd(s.adminsession,"iput -kf "+progname+" "+irodshome+"/icmdtest/foo1" )
        assertiCmd(s.adminsession,"ils "+irodshome+"/icmdtest/foo1" , "LIST", "foo1" )
        assertiCmd(s.adminsession,"ils -l "+irodshome+"/icmdtest/foo1", "LIST", ["foo1",myssize] )
        assertiCmd(s.adminsession,"iadmin ls "+irodshome+"/icmdtest", "LIST", "foo1" )
        assertiCmd(s.adminsession,"ils -A "+irodshome+"/icmdtest/foo1", "LIST", username+"#"+irodszone+":own" )
        assertiCmd(s.adminsession,"ichmod read "+testuser1+" "+irodshome+"/icmdtest/foo1" )
        assertiCmd(s.adminsession,"ils -A "+irodshome+"/icmdtest/foo1", "LIST", testuser1+"#"+irodszone+":read" )
        # basic replica
        assertiCmd(s.adminsession,"irepl -B -R "+self.testresc+" --rlock "+irodshome+"/icmdtest/foo1" )
        assertiCmd(s.adminsession,"ils -l "+irodshome+"/icmdtest/foo1", "LIST", self.testresc )
        # overwrite a copy
        assertiCmd(s.adminsession,"itrim -S "+irodsdefresource+" -N1 "+irodshome+"/icmdtest/foo1" )
        assertiCmdFail(s.adminsession,"ils -L "+irodshome+"/icmdtest/foo1", "LIST", irodsdefresource )
        assertiCmd(s.adminsession,"iphymv -R "+irodsdefresource+" "+irodshome+"/icmdtest/foo1" )
        # only the first 19 chars of the resource name appear in ils output
        assertiCmd(s.adminsession,"ils -l "+irodshome+"/icmdtest/foo1", "LIST", irodsdefresource[0:19] )
        # basic metadata shuffle
        assertiCmd(s.adminsession,"imeta add -d "+irodshome+"/icmdtest/foo1 testmeta1 180 cm" )
        assertiCmd(s.adminsession,"imeta ls -d "+irodshome+"/icmdtest/foo1", "LIST", ["testmeta1"] )
        assertiCmd(s.adminsession,"imeta ls -d "+irodshome+"/icmdtest/foo1", "LIST", ["180"] )
        assertiCmd(s.adminsession,"imeta ls -d "+irodshome+"/icmdtest/foo1", "LIST", ["cm"] )
        assertiCmd(s.adminsession,"icp -K -R "+self.testresc+" "+irodshome+"/icmdtest/foo1 "+irodshome+"/icmdtest/foo2" )
        # new file mode check
        assertiCmd(s.adminsession,"iget -fK --rlock "+irodshome+"/icmdtest/foo2 /tmp/" )
        assert oct(stat.S_IMODE(os.stat("/tmp/foo2").st_mode)) == '0640'
        os.unlink( "/tmp/foo2" )
        assertiCmd(s.adminsession,"ils "+irodshome+"/icmdtest/foo2", "LIST", "foo2" )
        assertiCmd(s.adminsession,"imv "+irodshome+"/icmdtest/foo2 "+irodshome+"/icmdtest/foo4" )
        assertiCmd(s.adminsession,"ils -l "+irodshome+"/icmdtest/foo4", "LIST", "foo4" )
        assertiCmd(s.adminsession,"imv "+irodshome+"/icmdtest/foo4 "+irodshome+"/icmdtest/foo2" )
        assertiCmd(s.adminsession,"ils -l "+irodshome+"/icmdtest/foo2", "LIST", "foo2" )
        assertiCmd(s.adminsession,"ichksum "+irodshome+"/icmdtest/foo2", "LIST", "foo2" )
        assertiCmd(s.adminsession,"imeta add -d "+irodshome+"/icmdtest/foo2 testmeta1 180 cm" )
        assertiCmd(s.adminsession,"imeta add -d "+irodshome+"/icmdtest/foo1 testmeta2 hello" )
        assertiCmd(s.adminsession,"imeta ls -d "+irodshome+"/icmdtest/foo1", "LIST", ["testmeta1"] )
        assertiCmd(s.adminsession,"imeta ls -d "+irodshome+"/icmdtest/foo1", "LIST", ["hello"] )
        assertiCmd(s.adminsession,"imeta qu -d testmeta1 = 180", "LIST", "foo1" )
        assertiCmd(s.adminsession,"imeta qu -d testmeta2 = hello", "LIST", "dataObj: foo1" )
        assertiCmd(s.adminsession,"iget -f -K --rlock "+irodshome+"/icmdtest/foo2 "+dir_w )
        assert myssize == str(os.stat(dir_w+"/foo2").st_size)
        os.unlink( dir_w+"/foo2" )
        # we have foo1 in $irodsdefresource and foo2 in testresource
        # cleanup
        os.unlink( sfile2 )
    def test_iput_ibun_gzip_bzip2_from_devtest(self):
        """
        Devtest port covering bulk iput, restartable transfers, and ibun
        bundling with tar, gzip and bzip2, plus iphybun bundle handling.

        Side effects: creates sfile2, testx*, rsfile in the CWD and
        /tmp/irodssdir; removes them again on the success path only.
        """
        # build expected variables with similar devtest names
        progname = __file__
        myssize = str(os.stat(progname).st_size)
        username = s.adminsession.getUserName()
        irodszone = s.adminsession.getZoneName()
        testuser1 = s.sessions[1].getUserName()
        irodshome = "/"+irodszone+"/home/rods/"+s.adminsession.sessionId
        irodsdefresource = s.adminsession.getDefResource()
        dir_w = "."
        sfile2 = dir_w+"/sfile2"
        # sfile2 is twice this script's size (different from foo1's size)
        commands.getstatusoutput( "cat "+progname+" "+progname+" > "+sfile2 )
        mysdir = "/tmp/irodssdir"
        myldir = dir_w+"/ldir"
        if os.path.exists( myldir ):
            shutil.rmtree( myldir )
        assertiCmd(s.adminsession,"imkdir icmdtest")
        # make a directory containing 20 small files
        if not os.path.isdir(mysdir):
            os.mkdir(mysdir)
        for i in range(20):
            mysfile = mysdir+"/sfile"+str(i)
            shutil.copyfile( progname, mysfile )
        # we put foo1 in $irodsdefresource and foo2 in testresource
        assertiCmd(s.adminsession,"iput -K --wlock "+progname+" "+irodshome+"/icmdtest/foo1" )
        assertiCmd(s.adminsession,"icp -K -R "+self.testresc+" "+irodshome+"/icmdtest/foo1 "+irodshome+"/icmdtest/foo2" )
        assertiCmd(s.adminsession,"irepl -B -R "+self.testresc+" "+irodshome+"/icmdtest/foo1" )
        # NOTE(review): phypath is computed but never used below
        phypath = dir_w+"/"+"foo1."+str(random.randrange(10000000))
        assertiCmd(s.adminsession,"iput -kfR "+irodsdefresource+" "+sfile2+" "+irodshome+"/icmdtest/foo1" )
        # show have 2 different copies
        assertiCmd(s.adminsession,"ils -l "+irodshome+"/icmdtest/foo1", "LIST", ["foo1",myssize] )
        assertiCmd(s.adminsession,"ils -l "+irodshome+"/icmdtest/foo1", "LIST", ["foo1",str(os.stat(sfile2).st_size)] )
        # update all old copies
        assertiCmd(s.adminsession,"irepl -U "+irodshome+"/icmdtest/foo1" )
        # make sure the old size is not there
        assertiCmdFail(s.adminsession,"ils -l "+irodshome+"/icmdtest/foo1", "LIST", myssize )
        assertiCmd(s.adminsession,"itrim -S "+irodsdefresource+" "+irodshome+"/icmdtest/foo1" )
        # bulk test
        assertiCmd(s.adminsession,"iput -bIvPKr "+mysdir+" "+irodshome+"/icmdtest", "LIST", "Bulk upload" )
        # iput with a lot of options
        rsfile = dir_w+"/rsfile"
        if os.path.isfile( rsfile ):
            os.unlink( rsfile )
        assertiCmd(s.adminsession,"iput -PkITr -X "+rsfile+" --retries 10 "+mysdir+" "+irodshome+"/icmdtestw", "LIST", "Processing" )
        assertiCmd(s.adminsession,"imv "+irodshome+"/icmdtestw "+irodshome+"/icmdtestw1" )
        assertiCmd(s.adminsession,"ils -lr "+irodshome+"/icmdtestw1", "LIST", "sfile10" )
        assertiCmd(s.adminsession,"ils -Ar "+irodshome+"/icmdtestw1", "LIST", "sfile10" )
        assertiCmd(s.adminsession,"irm -rvf "+irodshome+"/icmdtestw1", "LIST", "num files done" )
        if os.path.isfile( rsfile ):
            os.unlink( rsfile )
        # NOTE(review): "-X rsfile" here is the literal name, not the
        # rsfile variable used above — confirm whether that is intended
        assertiCmd(s.adminsession,"iget -vIKPfr -X rsfile --retries 10 "+irodshome+"/icmdtest "+dir_w+"/testx", "LIST", "opened" )
        if os.path.isfile( rsfile ):
            os.unlink( rsfile )
        commands.getstatusoutput( "tar -chf "+dir_w+"/testx.tar -C "+dir_w+"/testx ." )
        assertiCmd(s.adminsession,"iput "+dir_w+"/testx.tar "+irodshome+"/icmdtestx.tar" )
        assertiCmd(s.adminsession,"ibun -x "+irodshome+"/icmdtestx.tar "+irodshome+"/icmdtestx" )
        assertiCmd(s.adminsession,"ils -lr "+irodshome+"/icmdtestx", "LIST", ["foo2"] )
        assertiCmd(s.adminsession,"ils -lr "+irodshome+"/icmdtestx", "LIST", ["sfile10"] )
        assertiCmd(s.adminsession,"ibun -cDtar "+irodshome+"/icmdtestx1.tar "+irodshome+"/icmdtestx" )
        assertiCmd(s.adminsession,"ils -l "+irodshome+"/icmdtestx1.tar", "LIST", "testx1.tar" )
        if os.path.exists(dir_w+"/testx1"):
            shutil.rmtree(dir_w+"/testx1")
        os.mkdir( dir_w+"/testx1" )
        if os.path.isfile( dir_w+"/testx1.tar" ):
            os.unlink( dir_w+"/testx1.tar" )
        assertiCmd(s.adminsession,"iget "+irodshome+"/icmdtestx1.tar "+dir_w+"/testx1.tar" )
        commands.getstatusoutput( "tar -xvf "+dir_w+"/testx1.tar -C "+dir_w+"/testx1" )
        # round-tripped tree must match the original byte for byte
        output = commands.getstatusoutput( "diff -r "+dir_w+"/testx "+dir_w+"/testx1/icmdtestx" )
        print "output is ["+str(output)+"]"
        assert output[0] == 0
        assert output[1] == "", "diff output was not empty..."
        # test ibun with gzip
        assertiCmd(s.adminsession,"ibun -cDgzip "+irodshome+"/icmdtestx1.tar.gz "+irodshome+"/icmdtestx" )
        assertiCmd(s.adminsession,"ibun -x "+irodshome+"/icmdtestx1.tar.gz "+irodshome+"/icmdtestgz")
        if os.path.isfile( "icmdtestgz" ):
            os.unlink( "icmdtestgz" )
        assertiCmd(s.adminsession,"iget -vr "+irodshome+"/icmdtestgz "+dir_w+"", "LIST", "icmdtestgz")
        output = commands.getstatusoutput( "diff -r "+dir_w+"/testx "+dir_w+"/icmdtestgz/icmdtestx" )
        print "output is ["+str(output)+"]"
        assert output[0] == 0
        assert output[1] == "", "diff output was not empty..."
        shutil.rmtree( dir_w+"/icmdtestgz")
        assertiCmd(s.adminsession,"ibun --add "+irodshome+"/icmdtestx1.tar.gz "+irodshome+"/icmdtestgz")
        assertiCmd(s.adminsession,"irm -rf "+irodshome+"/icmdtestx1.tar.gz "+irodshome+"/icmdtestgz")
        # test ibun with bzip2
        assertiCmd(s.adminsession,"ibun -cDbzip2 "+irodshome+"/icmdtestx1.tar.bz2 "+irodshome+"/icmdtestx")
        assertiCmd(s.adminsession,"ibun -xb "+irodshome+"/icmdtestx1.tar.bz2 "+irodshome+"/icmdtestbz2")
        if os.path.isfile( "icmdtestbz2" ):
            os.unlink( "icmdtestbz2" )
        assertiCmd(s.adminsession,"iget -vr "+irodshome+"/icmdtestbz2 "+dir_w+"", "LIST", "icmdtestbz2")
        output = commands.getstatusoutput( "diff -r "+dir_w+"/testx "+dir_w+"/icmdtestbz2/icmdtestx" )
        print "output is ["+str(output)+"]"
        assert output[0] == 0
        assert output[1] == "", "diff output was not empty..."
        shutil.rmtree( dir_w+"/icmdtestbz2" )
        assertiCmd(s.adminsession,"irm -rf "+irodshome+"/icmdtestx1.tar.bz2")
        assertiCmd(s.adminsession,"iphybun -R "+self.anotherresc+" -Dbzip2 "+irodshome+"/icmdtestbz2" )
        assertiCmd(s.adminsession,"itrim -N1 -S "+self.testresc+" -r "+irodshome+"/icmdtestbz2", "LIST", "Total size trimmed" )
        assertiCmd(s.adminsession,"itrim -N1 -S "+irodsdefresource+" -r "+irodshome+"/icmdtestbz2", "LIST", "Total size trimmed" )
        # get the name of bundle file
        output = commands.getstatusoutput( "ils -L "+irodshome+"/icmdtestbz2/icmdtestx/foo1 | tail -n1 | awk '{ print $NF }'")
        print output[1]
        bunfile = output[1]
        assertiCmd(s.adminsession,"ils --bundle "+bunfile, "LIST", "Subfiles" )
        assertiCmd(s.adminsession,"irm -rf "+irodshome+"/icmdtestbz2")
        assertiCmd(s.adminsession,"irm -f --empty "+bunfile )
        # cleanup
        os.unlink( dir_w+"/testx1.tar" )
        os.unlink( dir_w+"/testx.tar" )
        shutil.rmtree( dir_w+"/testx1" )
        shutil.rmtree( dir_w+"/testx" )
        os.unlink( sfile2 )
        if os.path.exists( myldir ):
            shutil.rmtree( myldir )
        if os.path.exists( mysdir ):
            shutil.rmtree( mysdir )
    def test_ireg_from_devtest(self):
        """
        Devtest port covering ireg: registering an existing file and a
        directory tree in place, registering a replica, and verifying a
        normal (non-admin) user gets PATH_REG_NOT_ALLOWED.

        Side effects: writes sfile2/foo5 in the CWD and several files
        under /tmp; cleans up on the success path only.
        """
        # build expected variables with similar devtest names
        progname = __file__
        myssize = str(os.stat(progname).st_size)
        username = s.adminsession.getUserName()
        irodszone = s.adminsession.getZoneName()
        testuser1 = s.sessions[1].getUserName()
        irodshome = "/"+irodszone+"/home/rods/"+s.adminsession.sessionId
        irodsdefresource = s.adminsession.getDefResource()
        dir_w = "."
        sfile2 = dir_w+"/sfile2"
        commands.getstatusoutput( "cat "+progname+" "+progname+" > "+sfile2 )
        mysdir = "/tmp/irodssdir"
        myldir = dir_w+"/ldir"
        if os.path.exists( myldir ):
            shutil.rmtree( myldir )
        assertiCmd(s.adminsession,"imkdir icmdtest")
        # make a directory containing 20 small files
        if not os.path.isdir(mysdir):
            os.mkdir(mysdir)
        for i in range(20):
            mysfile = mysdir+"/sfile"+str(i)
            shutil.copyfile( progname, mysfile )
        commands.getstatusoutput( "mv "+sfile2+" /tmp/sfile2" )
        commands.getstatusoutput( "cp /tmp/sfile2 /tmp/sfile2r" )
        assertiCmd(s.adminsession,"ireg -KR "+self.testresc+" /tmp/sfile2 "+irodshome+"/foo5" ) # <-- FAILING - REASON FOR SKIPPING
        commands.getstatusoutput( "cp /tmp/sfile2 /tmp/sfile2r" )
        assertiCmd(s.adminsession,"ireg -KR "+self.anotherresc+" --repl /tmp/sfile2r "+irodshome+"/foo5" )
        assertiCmd(s.adminsession,"iget -fK "+irodshome+"/foo5 "+dir_w+"/foo5" )
        # registered file must come back identical
        output = commands.getstatusoutput("diff /tmp/sfile2 "+dir_w+"/foo5")
        print "output is ["+str(output)+"]"
        assert output[0] == 0
        assert output[1] == "", "diff output was not empty..."
        # register a whole directory as a collection
        assertiCmd(s.adminsession,"ireg -KCR "+self.testresc+" "+mysdir+" "+irodshome+"/icmdtesta" )
        if os.path.exists(dir_w+"/testa"):
            shutil.rmtree( dir_w+"/testa" )
        assertiCmd(s.adminsession,"iget -fvrK "+irodshome+"/icmdtesta "+dir_w+"/testa", "LIST", "testa" )
        output = commands.getstatusoutput("diff -r "+mysdir+" "+dir_w+"/testa" )
        print "output is ["+str(output)+"]"
        assert output[0] == 0
        assert output[1] == "", "diff output was not empty..."
        shutil.rmtree( dir_w+"/testa" )
        # test ireg with normal user
        testuser2home = "/"+irodszone+"/home/"+s.sessions[2].getUserName()
        commands.getstatusoutput( "cp /tmp/sfile2 /tmp/sfile2c" )
        assertiCmd(s.sessions[2],"ireg -KR "+self.testresc+" /tmp/sfile2c "+testuser2home+"/foo5", "ERROR", "PATH_REG_NOT_ALLOWED" )
        assertiCmd(s.sessions[2],"iput -R "+self.testresc+" /tmp/sfile2c "+testuser2home+"/foo5" )
        assertiCmd(s.sessions[2],"irm -f "+testuser2home+"/foo5" )
        # cleanup
        os.unlink( "/tmp/sfile2c" )
        os.unlink( dir_w+"/foo5" )
        if os.path.exists( myldir ):
            shutil.rmtree( myldir )
        if os.path.exists( mysdir ):
            shutil.rmtree( mysdir )
    def test_mcoll_from_devtest(self):
        """
        Devtest port covering imcoll mounted collections: link mounts,
        filesystem mounts (with imv into and out of them), and tar-file
        mounts, each verified by iget + diff against the local source.

        Side effects: writes sfile2, testb/testm/testt/testx trees in
        the CWD and /tmp/irodssdir; cleans up on the success path only.
        """
        # build expected variables with similar devtest names
        progname = __file__
        myssize = str(os.stat(progname).st_size)
        username = s.adminsession.getUserName()
        irodszone = s.adminsession.getZoneName()
        testuser1 = s.sessions[1].getUserName()
        irodshome = "/"+irodszone+"/home/rods/"+s.adminsession.sessionId
        irodsdefresource = s.adminsession.getDefResource()
        dir_w = "."
        sfile2 = dir_w+"/sfile2"
        commands.getstatusoutput( "cat "+progname+" "+progname+" > "+sfile2 )
        mysdir = "/tmp/irodssdir"
        myldir = dir_w+"/ldir"
        if os.path.exists( myldir ):
            shutil.rmtree( myldir )
        # make a directory containing 20 small files
        if not os.path.isdir(mysdir):
            os.mkdir(mysdir)
        for i in range(20):
            mysfile = mysdir+"/sfile"+str(i)
            shutil.copyfile( progname, mysfile )
        assertiCmd(s.adminsession,"imkdir icmdtest")
        # we put foo1 in $irodsdefresource and foo2 in testresource
        assertiCmd(s.adminsession,"iput -K --wlock "+progname+" "+irodshome+"/icmdtest/foo1" )
        assertiCmd(s.adminsession,"icp -K -R "+self.testresc+" "+irodshome+"/icmdtest/foo1 "+irodshome+"/icmdtest/foo2" )
        # prepare icmdtesta
        assertiCmd(s.adminsession,"ireg -KCR "+self.testresc+" "+mysdir+" "+irodshome+"/icmdtesta" )
        # mcoll test
        assertiCmd(s.adminsession,"imcoll -m link "+irodshome+"/icmdtesta "+irodshome+"/icmdtestb" )
        assertiCmd(s.adminsession,"ils -lr "+irodshome+"/icmdtestb", "LIST", "icmdtestb" )
        if os.path.exists(dir_w+"/testb"):
            shutil.rmtree( dir_w+"/testb" )
        assertiCmd(s.adminsession,"iget -fvrK "+irodshome+"/icmdtestb "+dir_w+"/testb", "LIST", "testb" )
        output = commands.getstatusoutput("diff -r "+mysdir+" "+dir_w+"/testb" )
        print "output is ["+str(output)+"]"
        assert output[0] == 0
        assert output[1] == "", "diff output was not empty..."
        # -U unmounts before removing the mount-point collection
        assertiCmd(s.adminsession,"imcoll -U "+irodshome+"/icmdtestb" )
        assertiCmd(s.adminsession,"irm -rf "+irodshome+"/icmdtestb" )
        shutil.rmtree( dir_w+"/testb" )
        assertiCmd(s.adminsession,"imkdir "+irodshome+"/icmdtestm" )
        assertiCmd(s.adminsession,"imcoll -m filesystem -R "+self.testresc+" "+mysdir+" "+irodshome+"/icmdtestm" )
        assertiCmd(s.adminsession,"imkdir "+irodshome+"/icmdtestm/testmm" )
        assertiCmd(s.adminsession,"iput "+progname+" "+irodshome+"/icmdtestm/testmm/foo1" )
        assertiCmd(s.adminsession,"iput "+progname+" "+irodshome+"/icmdtestm/testmm/foo11" )
        assertiCmd(s.adminsession,"imv "+irodshome+"/icmdtestm/testmm/foo1 "+irodshome+"/icmdtestm/testmm/foo2" )
        assertiCmd(s.adminsession,"imv "+irodshome+"/icmdtestm/testmm "+irodshome+"/icmdtestm/testmm1" )
        # mv to normal collection
        assertiCmd(s.adminsession,"imv "+irodshome+"/icmdtestm/testmm1/foo2 "+irodshome+"/icmdtest/foo100" )
        assertiCmd(s.adminsession,"ils -l "+irodshome+"/icmdtest/foo100", "LIST", "foo100" )
        assertiCmd(s.adminsession,"imv "+irodshome+"/icmdtestm/testmm1 "+irodshome+"/icmdtest/testmm1" )
        assertiCmd(s.adminsession,"ils -lr "+irodshome+"/icmdtest/testmm1", "LIST", "foo11" )
        assertiCmd(s.adminsession,"irm -rf "+irodshome+"/icmdtest/testmm1 "+irodshome+"/icmdtest/foo100" )
        if os.path.exists(dir_w+"/testm"):
            shutil.rmtree( dir_w+"/testm" )
        assertiCmd(s.adminsession,"iget -fvrK "+irodshome+"/icmdtesta "+dir_w+"/testm", "LIST", "testm")
        output = commands.getstatusoutput("diff -r "+mysdir+" "+dir_w+"/testm" )
        print "output is ["+str(output)+"]"
        assert output[0] == 0
        assert output[1] == "", "diff output was not empty..."
        assertiCmd(s.adminsession,"imcoll -U "+irodshome+"/icmdtestm" )
        assertiCmd(s.adminsession,"irm -rf "+irodshome+"/icmdtestm" )
        shutil.rmtree( dir_w+"/testm" )
        assertiCmd(s.adminsession,"imkdir "+irodshome+"/icmdtestt_mcol" )
        assertiCmd(s.adminsession,"ibun -c "+irodshome+"/icmdtestx.tar "+irodshome+"/icmdtest" ) # added so icmdtestx.tar exists
        assertiCmd(s.adminsession,"imcoll -m tar "+irodshome+"/icmdtestx.tar "+irodshome+"/icmdtestt_mcol" )
        assertiCmd(s.adminsession,"ils -lr "+irodshome+"/icmdtestt_mcol", "LIST", ["foo2"] )
        assertiCmd(s.adminsession,"ils -lr "+irodshome+"/icmdtestt_mcol", "LIST", ["foo1"] )
        if os.path.exists(dir_w+"/testt"):
            shutil.rmtree( dir_w+"/testt" )
        if os.path.exists(dir_w+"/testx"):
            shutil.rmtree( dir_w+"/testx" )
        assertiCmd(s.adminsession,"iget -vr "+irodshome+"/icmdtest "+dir_w+"/testx", "LIST", "testx" )
        assertiCmd(s.adminsession,"iget -vr "+irodshome+"/icmdtestt_mcol/icmdtest "+dir_w+"/testt", "LIST", "testt" )
        output = commands.getstatusoutput("diff -r "+dir_w+"/testx "+dir_w+"/testt" )
        print "output is ["+str(output)+"]"
        assert output[0] == 0
        assert output[1] == "", "diff output was not empty..."
        assertiCmd(s.adminsession,"imkdir "+irodshome+"/icmdtestt_mcol/mydirtt" )
        assertiCmd(s.adminsession,"iput "+progname+" "+irodshome+"/icmdtestt_mcol/mydirtt/foo1mt" )
        assertiCmd(s.adminsession,"imv "+irodshome+"/icmdtestt_mcol/mydirtt/foo1mt "+irodshome+"/icmdtestt_mcol/mydirtt/foo1mtx" )
        # unlink
        assertiCmd(s.adminsession,"imcoll -U "+irodshome+"/icmdtestt_mcol" )
        # cleanup
        os.unlink( sfile2 )
        shutil.rmtree( dir_w+"/testt" )
        shutil.rmtree( dir_w+"/testx" )
        if os.path.exists( mysdir ):
            shutil.rmtree( mysdir )
    def test_large_dir_and_mcoll_from_devtest(self):
        """
        Devtest port: adds a large file to a tar-mounted collection,
        then syncs (-s), purges (-p) and unmounts (-U) the mount.

        Side effects: builds ldir/ with 2 large + 2 small files in the
        CWD; removes it and sfile2 on the success path.
        """
        # build expected variables with similar devtest names
        progname = __file__
        myssize = str(os.stat(progname).st_size)
        username = s.adminsession.getUserName()
        irodszone = s.adminsession.getZoneName()
        testuser1 = s.sessions[1].getUserName()
        irodshome = "/"+irodszone+"/home/rods/"+s.adminsession.sessionId
        irodsdefresource = s.adminsession.getDefResource()
        dir_w = "."
        sfile2 = dir_w+"/sfile2"
        commands.getstatusoutput( "cat "+progname+" "+progname+" > "+sfile2 )
        mysdir = "/tmp/irodssdir"
        myldir = dir_w+"/ldir"
        if os.path.exists( myldir ):
            shutil.rmtree( myldir )
        assertiCmd(s.adminsession,"imkdir icmdtest")
        # we put foo1 in $irodsdefresource and foo2 in testresource
        assertiCmd(s.adminsession,"iput -K --wlock "+progname+" "+irodshome+"/icmdtest/foo1" )
        assertiCmd(s.adminsession,"icp -K -R "+self.testresc+" "+irodshome+"/icmdtest/foo1 "+irodshome+"/icmdtest/foo2" )
        assertiCmd(s.adminsession,"ibun -c "+irodshome+"/icmdtestx.tar "+irodshome+"/icmdtest" ) # added so icmdtestx.tar exists
        assertiCmd(s.adminsession,"imkdir "+irodshome+"/icmdtestt_large" )
        assertiCmd(s.adminsession,"imcoll -m tar "+irodshome+"/icmdtestx.tar "+irodshome+"/icmdtestt_large" )
        assertiCmd(s.adminsession,"imkdir "+irodshome+"/icmdtestt_large/mydirtt" )
        # make a directory of 2 large files and 2 small files
        lfile = dir_w+"/lfile"
        lfile1 = dir_w+"/lfile1"
        commands.getstatusoutput( "echo 012345678901234567890123456789012345678901234567890123456789012 > "+lfile )
        # grow lfile 9x per pass, 6 passes (~34 MB final size)
        for i in range(6):
            commands.getstatusoutput( "cat "+lfile+" "+lfile+" "+lfile+" "+lfile+" "+lfile+" "+lfile+" "+lfile+" "+lfile+" "+lfile+" > "+lfile1 )
            os.rename ( lfile1, lfile )
        os.mkdir( myldir )
        for i in range(1,3):
            mylfile = myldir+"/lfile"+str(i)
            mysfile = myldir+"/sfile"+str(i)
            if i != 2:
                shutil.copyfile( lfile, mylfile )
            else:
                # last iteration: move instead of copy so lfile is consumed
                os.rename( lfile, mylfile )
            shutil.copyfile( progname, mysfile )
        # test adding a large file to a mounted collection
        assertiCmd(s.adminsession,"iput "+myldir+"/lfile1 "+irodshome+"/icmdtestt_large/mydirtt" )
        assertiCmd(s.adminsession,"iget "+irodshome+"/icmdtestt_large/mydirtt/lfile1 "+dir_w+"/testt" )
        assertiCmd(s.adminsession,"irm -rf "+irodshome+"/icmdtestt_large/mydirtt" )
        assertiCmd(s.adminsession,"imcoll -s "+irodshome+"/icmdtestt_large" )
        assertiCmd(s.adminsession,"imcoll -p "+irodshome+"/icmdtestt_large" )
        assertiCmd(s.adminsession,"imcoll -U "+irodshome+"/icmdtestt_large" )
        assertiCmd(s.adminsession,"irm -rf "+irodshome+"/icmdtestt_large" )
        os.unlink( dir_w+"/testt" )
        # cleanup
        os.unlink( sfile2 )
        if os.path.exists( myldir ):
            shutil.rmtree( myldir )
    def test_phybun_from_devtest(self):
        """
        Devtest port for iphybun: physically bundle a collection, trim
        the originals, replicate/purge the bundle, and verify the bundle
        cannot be removed with --empty while it still has subfiles.

        Side effects: writes sfile2 in the CWD and /tmp/irodssdir.
        """
        # build expected variables with similar devtest names
        progname = __file__
        myssize = str(os.stat(progname).st_size)
        username = s.adminsession.getUserName()
        irodszone = s.adminsession.getZoneName()
        testuser1 = s.sessions[1].getUserName()
        irodshome = "/"+irodszone+"/home/rods/"+s.adminsession.sessionId
        irodsdefresource = s.adminsession.getDefResource()
        dir_w = "."
        sfile2 = dir_w+"/sfile2"
        commands.getstatusoutput( "cat "+progname+" "+progname+" > "+sfile2 )
        mysdir = "/tmp/irodssdir"
        myldir = dir_w+"/ldir"
        if os.path.exists( myldir ):
            shutil.rmtree( myldir )
        assertiCmd(s.adminsession,"imkdir icmdtest")
        # make a directory containing 20 small files
        if not os.path.isdir(mysdir):
            os.mkdir(mysdir)
        for i in range(20):
            mysfile = mysdir+"/sfile"+str(i)
            shutil.copyfile( progname, mysfile )
        # iphybun test
        assertiCmd(s.adminsession,"iput -rR "+self.testresc+" "+mysdir+" "+irodshome+"/icmdtestp" )
        assertiCmd(s.adminsession,"iphybun -KR "+self.anotherresc+" "+irodshome+"/icmdtestp" )
        assertiCmd(s.adminsession,"itrim -rS "+self.testresc+" -N1 "+irodshome+"/icmdtestp", "LIST", "files trimmed" )
        # scrape the bundle's physical path from the last ils -L column
        output = commands.getstatusoutput( "ils -L "+irodshome+"/icmdtestp/sfile1 | tail -n1 | awk '{ print $NF }'")
        print output[1]
        bunfile = output[1]
        assertiCmd(s.adminsession,"irepl --purgec -R "+self.anotherresc+" "+bunfile )
        assertiCmd(s.adminsession,"itrim -rS "+self.testresc+" -N1 "+irodshome+"/icmdtestp", "LIST", "files trimmed" )
        # get the name of bundle file
        assertiCmd(s.adminsession,"irm -f --empty "+bunfile )
        # should not be able to remove it because it is not empty
        assertiCmd(s.adminsession,"ils "+bunfile, "LIST", bunfile )
        assertiCmd(s.adminsession,"irm -rvf "+irodshome+"/icmdtestp", "LIST", "num files done" )
        assertiCmd(s.adminsession,"irm -f --empty "+bunfile )
        if os.path.exists(dir_w+"/testp"):
            shutil.rmtree( dir_w+"/testp" )
        shutil.rmtree( mysdir )
        # cleanup
        os.unlink( sfile2 )
        if os.path.exists( myldir ):
            shutil.rmtree( myldir )
        if os.path.exists( mysdir ):
            shutil.rmtree( mysdir )
    def test_irsync_from_devtest(self):
        """
        Devtest port for irsync in all three directions: local-to-iRODS,
        iRODS-to-local, and iRODS-to-iRODS, including syncing over
        pre-existing targets on a different resource.

        Side effects: writes sfile2 and foo100 in the CWD; removes them
        on the success path.
        """
        # build expected variables with similar devtest names
        progname = __file__
        myssize = str(os.stat(progname).st_size)
        username = s.adminsession.getUserName()
        irodszone = s.adminsession.getZoneName()
        testuser1 = s.sessions[1].getUserName()
        irodshome = "/"+irodszone+"/home/rods/"+s.adminsession.sessionId
        irodsdefresource = s.adminsession.getDefResource()
        dir_w = "."
        sfile2 = dir_w+"/sfile2"
        commands.getstatusoutput( "cat "+progname+" "+progname+" > "+sfile2 )
        mysdir = "/tmp/irodssdir"
        myldir = dir_w+"/ldir"
        if os.path.exists( myldir ):
            shutil.rmtree( myldir )
        assertiCmd(s.adminsession,"imkdir icmdtest")
        # testing irsync
        assertiCmd(s.adminsession,"irsync "+progname+" i:"+irodshome+"/icmdtest/foo100" )
        assertiCmd(s.adminsession,"irsync i:"+irodshome+"/icmdtest/foo100 "+dir_w+"/foo100" )
        assertiCmd(s.adminsession,"irsync i:"+irodshome+"/icmdtest/foo100 i:"+irodshome+"/icmdtest/foo200" )
        assertiCmd(s.adminsession,"irm -f "+irodshome+"/icmdtest/foo100 "+irodshome+"/icmdtest/foo200")
        assertiCmd(s.adminsession,"iput -R "+self.testresc+" "+progname+" "+irodshome+"/icmdtest/foo100")
        assertiCmd(s.adminsession,"irsync "+progname+" i:"+irodshome+"/icmdtest/foo100" )
        assertiCmd(s.adminsession,"iput -R "+self.testresc+" "+progname+" "+irodshome+"/icmdtest/foo200")
        assertiCmd(s.adminsession,"irsync i:"+irodshome+"/icmdtest/foo100 i:"+irodshome+"/icmdtest/foo200" )
        os.unlink( dir_w+"/foo100" )
        # cleanup
        os.unlink( sfile2 )
    def test_xml_protocol_from_devtest(self):
        """
        Devtest port re-running a core subset of icommands with the XML
        client/server protocol selected via the irodsProt environment
        variable ("1" = XML, "0" = native).

        NOTE(review): irodsProt is reset to "0" only on the success
        path; an assertion failure mid-test leaves it set to "1" for
        any later tests in the same process.
        """
        # build expected variables with similar devtest names
        progname = __file__
        myssize = str(os.stat(progname).st_size)
        username = s.adminsession.getUserName()
        irodszone = s.adminsession.getZoneName()
        testuser1 = s.sessions[1].getUserName()
        irodshome = "/"+irodszone+"/home/rods/"+s.adminsession.sessionId
        irodsdefresource = s.adminsession.getDefResource()
        dir_w = "."
        sfile2 = dir_w+"/sfile2"
        commands.getstatusoutput( "cat "+progname+" "+progname+" > "+sfile2 )
        mysdir = "/tmp/irodssdir"
        myldir = dir_w+"/ldir"
        if os.path.exists( myldir ):
            shutil.rmtree( myldir )
        assertiCmd(s.adminsession,"imkdir icmdtest")
        lrsfile = dir_w+"/lrsfile"
        rsfile = dir_w+"/rsfile"
        # do test using xml protocol
        os.environ['irodsProt'] = "1"
        assertiCmd(s.adminsession,"ilsresc", "LIST", self.testresc )
        assertiCmd(s.adminsession,"imiscsvrinfo", "LIST", "relVersion" )
        assertiCmd(s.adminsession,"iuserinfo", "LIST", "name: "+username )
        assertiCmd(s.adminsession,"ienv", "LIST", "Release Version" )
        assertiCmd(s.adminsession,"icd "+irodshome )
        assertiCmd(s.adminsession,"ipwd", "LIST", "home" )
        assertiCmd(s.adminsession,"ihelp ils", "LIST", "ils" )
        assertiCmd(s.adminsession,"ierror -14000", "LIST", "SYS_API_INPUT_ERR" )
        assertiCmd(s.adminsession,"iexecmd hello", "LIST", "Hello world" )
        assertiCmd(s.adminsession,"ips -v", "LIST", "ips" )
        assertiCmd(s.adminsession,"iqstat", "LIST", "No delayed rules" )
        assertiCmd(s.adminsession,"imkdir "+irodshome+"/icmdtest1" )
        # make a directory of large files
        assertiCmd(s.adminsession,"iput -kf "+progname+" "+irodshome+"/icmdtest1/foo1" )
        assertiCmd(s.adminsession,"ils -l "+irodshome+"/icmdtest1/foo1", "LIST", ["foo1", myssize] )
        assertiCmd(s.adminsession,"iadmin ls "+irodshome+"/icmdtest1", "LIST", "foo1" )
        assertiCmd(s.adminsession,"ichmod read "+s.sessions[1].getUserName()+" "+irodshome+"/icmdtest1/foo1" )
        assertiCmd(s.adminsession,"ils -A "+irodshome+"/icmdtest1/foo1", "LIST", s.sessions[1].getUserName()+"#"+irodszone+":read" )
        assertiCmd(s.adminsession,"irepl -B -R "+self.testresc+" "+irodshome+"/icmdtest1/foo1" )
        # overwrite a copy
        assertiCmd(s.adminsession,"itrim -S "+irodsdefresource+" -N1 "+irodshome+"/icmdtest1/foo1" )
        assertiCmd(s.adminsession,"iphymv -R "+irodsdefresource+" "+irodshome+"/icmdtest1/foo1" )
        assertiCmd(s.adminsession,"imeta add -d "+irodshome+"/icmdtest1/foo1 testmeta1 180 cm" )
        assertiCmd(s.adminsession,"imeta ls -d "+irodshome+"/icmdtest1/foo1", "LIST", "testmeta1" )
        assertiCmd(s.adminsession,"imeta ls -d "+irodshome+"/icmdtest1/foo1", "LIST", "180" )
        assertiCmd(s.adminsession,"imeta ls -d "+irodshome+"/icmdtest1/foo1", "LIST", "cm" )
        assertiCmd(s.adminsession,"icp -K -R "+self.testresc+" "+irodshome+"/icmdtest1/foo1 "+irodshome+"/icmdtest1/foo2" )
        assertiCmd(s.adminsession,"imv "+irodshome+"/icmdtest1/foo2 "+irodshome+"/icmdtest1/foo4" )
        assertiCmd(s.adminsession,"imv "+irodshome+"/icmdtest1/foo4 "+irodshome+"/icmdtest1/foo2" )
        assertiCmd(s.adminsession,"ichksum -K "+irodshome+"/icmdtest1/foo2", "LIST", "foo2" )
        assertiCmd(s.adminsession,"iget -f -K "+irodshome+"/icmdtest1/foo2 "+dir_w )
        os.unlink ( dir_w+"/foo2" )
        assertiCmd(s.adminsession,"irsync "+progname+" i:"+irodshome+"/icmdtest1/foo1" )
        assertiCmd(s.adminsession,"irsync i:"+irodshome+"/icmdtest1/foo1 /tmp/foo1" )
        assertiCmd(s.adminsession,"irsync i:"+irodshome+"/icmdtest1/foo1 i:"+irodshome+"/icmdtest1/foo2" )
        os.unlink ( "/tmp/foo1" )
        # back to the native protocol
        os.environ['irodsProt'] = "0"
        # cleanup
        os.unlink( sfile2 )
def test_large_files_from_devtest(self):
    """Round-trip a directory of large files through iRODS with parallel transfer.

    Mirrors the original devtest script: builds a local directory with two
    large and two small files, puts it into iRODS with restart files enabled,
    exercises replicate/trim/copy/rename/physical-move, gets it back, and
    diffs the result against the local source tree.
    """
    # build expected variables with similar devtest names
    progname = __file__
    myssize = str(os.stat(progname).st_size)
    username = s.adminsession.getUserName()
    irodszone = s.adminsession.getZoneName()
    testuser1 = s.sessions[1].getUserName()
    irodshome = "/"+irodszone+"/home/rods/"+s.adminsession.sessionId
    irodsdefresource = s.adminsession.getDefResource()
    dir_w = "."
    sfile2 = dir_w+"/sfile2"
    # sfile2 = this test file concatenated with itself (small fixture file)
    commands.getstatusoutput( "cat "+progname+" "+progname+" > "+sfile2 )
    mysdir = "/tmp/irodssdir"
    myldir = dir_w+"/ldir"
    if os.path.exists( myldir ):
        shutil.rmtree( myldir )
    assertiCmd(s.adminsession,"imkdir icmdtest")
    # make a directory of 2 large files and 2 small files
    lfile = dir_w+"/lfile"
    lfile1 = dir_w+"/lfile1"
    commands.getstatusoutput( "echo 012345678901234567890123456789012345678901234567890123456789012 > "+lfile )
    # each pass concatenates nine copies of lfile, growing it 9x per iteration
    for i in range(6):
        commands.getstatusoutput( "cat "+lfile+" "+lfile+" "+lfile+" "+lfile+" "+lfile+" "+lfile+" "+lfile+" "+lfile+" "+lfile+" > "+lfile1 )
        os.rename ( lfile1, lfile )
    os.mkdir( myldir )
    for i in range(1,3):
        mylfile = myldir+"/lfile"+str(i)
        mysfile = myldir+"/sfile"+str(i)
        if i != 2:
            shutil.copyfile( lfile, mylfile )
        else:
            # final iteration: move instead of copy so lfile is consumed
            os.rename( lfile, mylfile )
        shutil.copyfile( progname, mysfile )
    # do the large files tests
    lrsfile = dir_w+"/lrsfile"
    rsfile = dir_w+"/rsfile"
    if os.path.isfile( lrsfile ):
        os.unlink( lrsfile )
    if os.path.isfile( rsfile ):
        os.unlink( rsfile )
    # recursive put with checksums (-K), bulk/verbose flags, restart files
    assertiCmd(s.adminsession,"iput -vbPKr --retries 10 --wlock -X "+rsfile+" --lfrestart "+lrsfile+" -N 2 "+myldir+" "+irodshome+"/icmdtest/testy", "LIST", "New restartFile" )
    assertiCmd(s.adminsession,"ichksum -rK "+irodshome+"/icmdtest/testy", "LIST", "Total checksum performed" )
    if os.path.isfile( lrsfile ):
        os.unlink( lrsfile )
    if os.path.isfile( rsfile ):
        os.unlink( rsfile )
    # replicate, trim (dry run first), copy, rsync within iRODS, then phymv
    assertiCmd(s.adminsession,"irepl -BvrPT -R "+self.testresc+" --rlock "+irodshome+"/icmdtest/testy", "LIST", "icmdtest/testy" )
    assertiCmd(s.adminsession,"itrim -vrS "+irodsdefresource+" --dryrun --age 1 -N 1 "+irodshome+"/icmdtest/testy", "LIST", "This is a DRYRUN" )
    assertiCmd(s.adminsession,"itrim -vrS "+irodsdefresource+" -N 1 "+irodshome+"/icmdtest/testy", "LIST", "a copy trimmed" )
    assertiCmd(s.adminsession,"icp -vKPTr -N 2 "+irodshome+"/icmdtest/testy "+irodshome+"/icmdtest/testz", "LIST", "Processing lfile1" )
    assertiCmd(s.adminsession,"irsync -r i:"+irodshome+"/icmdtest/testy i:"+irodshome+"/icmdtest/testz" )
    assertiCmd(s.adminsession,"irm -vrf "+irodshome+"/icmdtest/testy" )
    assertiCmd(s.adminsession,"iphymv -vrS "+irodsdefresource+" -R "+self.testresc+" "+irodshome+"/icmdtest/testz", "LIST", "icmdtest/testz" )
    if os.path.isfile( lrsfile ):
        os.unlink( lrsfile )
    if os.path.isfile( rsfile ):
        os.unlink( rsfile )
    if os.path.exists(dir_w+"/testz"):
        shutil.rmtree( dir_w+"/testz" )
    # get the collection back and cross-rsync both directions to verify sync
    assertiCmd(s.adminsession,"iget -vPKr --retries 10 -X "+rsfile+" --lfrestart "+lrsfile+" --rlock -N 2 "+irodshome+"/icmdtest/testz "+dir_w+"/testz", "LIST", "testz" )
    assertiCmd(s.adminsession,"irsync -r "+dir_w+"/testz i:"+irodshome+"/icmdtest/testz" )
    assertiCmd(s.adminsession,"irsync -r i:"+irodshome+"/icmdtest/testz "+dir_w+"/testz" )
    if os.path.isfile( lrsfile ):
        os.unlink( lrsfile )
    if os.path.isfile( rsfile ):
        os.unlink( rsfile )
    # retrieved tree must match the local source tree exactly
    output = commands.getstatusoutput( "diff -r "+dir_w+"/testz "+myldir )
    print "output is ["+str(output)+"]"
    assert output[0] == 0
    assert output[1] == "", "diff output was not empty..."
    # test -N0 transfer
    assertiCmd(s.adminsession,"iput -N0 -R "+self.testresc+" "+myldir+"/lfile1 "+irodshome+"/icmdtest/testz/lfoo100" )
    if os.path.isfile( dir_w+"/lfoo100" ):
        os.unlink( dir_w+"/lfoo100" )
    assertiCmd(s.adminsession,"iget -N0 "+irodshome+"/icmdtest/testz/lfoo100 "+dir_w+"/lfoo100" )
    output = commands.getstatusoutput( "diff "+myldir+"/lfile1 "+dir_w+"/lfoo100" )
    print "output is ["+str(output)+"]"
    assert output[0] == 0
    assert output[1] == "", "diff output was not empty..."
    shutil.rmtree( dir_w+"/testz" )
    os.unlink( dir_w+"/lfoo100" )
    assertiCmd(s.adminsession,"irm -vrf "+irodshome+"/icmdtest/testz" )
    # cleanup
    os.unlink( sfile2 )
    if os.path.exists( myldir ):
        shutil.rmtree( myldir )
def test_large_files_with_RBUDP_from_devtest(self):
    """Same large-file round-trip as test_large_files_from_devtest, but using
    RBUDP transfers (-Q flag on iput/irepl/icp/iget)."""
    # build expected variables with similar devtest names
    progname = __file__
    myssize = str(os.stat(progname).st_size)
    username = s.adminsession.getUserName()
    irodszone = s.adminsession.getZoneName()
    testuser1 = s.sessions[1].getUserName()
    irodshome = "/"+irodszone+"/home/rods/"+s.adminsession.sessionId
    irodsdefresource = s.adminsession.getDefResource()
    dir_w = "."
    sfile2 = dir_w+"/sfile2"
    # sfile2 = this test file concatenated with itself (small fixture file)
    commands.getstatusoutput( "cat "+progname+" "+progname+" > "+sfile2 )
    mysdir = "/tmp/irodssdir"
    myldir = dir_w+"/ldir"
    if os.path.exists( myldir ):
        shutil.rmtree( myldir )
    assertiCmd(s.adminsession,"imkdir icmdtest")
    # make a directory of 2 large files and 2 small files
    lfile = dir_w+"/lfile"
    lfile1 = dir_w+"/lfile1"
    commands.getstatusoutput( "echo 012345678901234567890123456789012345678901234567890123456789012 > "+lfile )
    # each pass concatenates nine copies of lfile, growing it 9x per iteration
    for i in range(6):
        commands.getstatusoutput( "cat "+lfile+" "+lfile+" "+lfile+" "+lfile+" "+lfile+" "+lfile+" "+lfile+" "+lfile+" "+lfile+" > "+lfile1 )
        os.rename ( lfile1, lfile )
    os.mkdir( myldir )
    for i in range(1,3):
        mylfile = myldir+"/lfile"+str(i)
        mysfile = myldir+"/sfile"+str(i)
        if i != 2:
            shutil.copyfile( lfile, mylfile )
        else:
            # final iteration: move instead of copy so lfile is consumed
            os.rename( lfile, mylfile )
        shutil.copyfile( progname, mysfile )
    # do the large files tests using RBUDP
    lrsfile = dir_w+"/lrsfile"
    rsfile = dir_w+"/rsfile"
    if os.path.isfile( lrsfile ):
        os.unlink( lrsfile )
    if os.path.isfile( rsfile ):
        os.unlink( rsfile )
    assertiCmd(s.adminsession,"iput -vQPKr --retries 10 -X "+rsfile+" --lfrestart "+lrsfile+" "+myldir+" "+irodshome+"/icmdtest/testy", "LIST", "icmdtest/testy" )
    assertiCmd(s.adminsession,"irepl -BQvrPT -R "+self.testresc+" "+irodshome+"/icmdtest/testy", "LIST", "icmdtest/testy" )
    assertiCmd(s.adminsession,"itrim -vrS "+irodsdefresource+" -N 1 "+irodshome+"/icmdtest/testy", "LIST", "a copy trimmed" )
    assertiCmd(s.adminsession,"icp -vQKPTr "+irodshome+"/icmdtest/testy "+irodshome+"/icmdtest/testz", "LIST", "Processing sfile1" )
    assertiCmd(s.adminsession,"irm -vrf "+irodshome+"/icmdtest/testy" )
    if os.path.isfile( lrsfile ):
        os.unlink( lrsfile )
    if os.path.isfile( rsfile ):
        os.unlink( rsfile )
    if os.path.exists(dir_w+"/testz"):
        shutil.rmtree( dir_w+"/testz" )
    assertiCmd(s.adminsession,"iget -vQPKr --retries 10 -X "+rsfile+" --lfrestart "+lrsfile+" "+irodshome+"/icmdtest/testz "+dir_w+"/testz", "LIST", "Processing sfile2" )
    if os.path.isfile( lrsfile ):
        os.unlink( lrsfile )
    if os.path.isfile( rsfile ):
        os.unlink( rsfile )
    # retrieved tree must match the local source tree exactly
    output = commands.getstatusoutput( "diff -r "+dir_w+"/testz "+myldir )
    print "output is ["+str(output)+"]"
    assert output[0] == 0
    assert output[1] == "", "diff output was not empty..."
    shutil.rmtree( dir_w+"/testz" )
    assertiCmd(s.adminsession,"irm -vrf "+irodshome+"/icmdtest/testz" )
    shutil.rmtree( myldir )
    # cleanup
    os.unlink( sfile2 )
    if os.path.exists( myldir ):
        shutil.rmtree( myldir )
| {
"content_hash": "a27852002eadbd94c9e01d8f5787fbc5",
"timestamp": "",
"source": "github",
"line_count": 865,
"max_line_length": 180,
"avg_line_length": 49.53179190751445,
"alnum_prop": 0.6073520830902089,
"repo_name": "leesab/irods",
"id": "c7055528d28431a2d229dc6240bb39f42df97a2f",
"size": "42845",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/pydevtest/test_chunkydevtest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "8928714"
},
{
"name": "FORTRAN",
"bytes": "6804"
},
{
"name": "Perl",
"bytes": "616072"
},
{
"name": "Prolog",
"bytes": "15035"
},
{
"name": "Puppet",
"bytes": "21402"
},
{
"name": "Python",
"bytes": "420547"
},
{
"name": "R",
"bytes": "8001"
},
{
"name": "Rebol",
"bytes": "176871"
},
{
"name": "Ruby",
"bytes": "5890"
},
{
"name": "SQL",
"bytes": "69248"
},
{
"name": "Shell",
"bytes": "186575"
}
],
"symlink_target": ""
} |
"""Univariate polynomials with galois field coefficients."""
import random
import modint
import sparse_poly
def GFPolyFactory(p):
    """Create custom class for specific coefficient type.

    Returns a SparsePolynomial subclass whose coefficients live in the
    modular-integer ring produced by modint.ModularIntegerFactory(p).
    """
    coefficient_type = modint.ModularIntegerFactory(p)

    class newClass(sparse_poly.SparsePolynomial):
        # Shared by all instances: the coefficient ring and its zero element.
        coeff_type = coefficient_type
        zero = coeff_type(0)

        @staticmethod
        def from_int_dict(int_dict):
            """Alternative construction, through integers."""
            result_dict = {}
            for e, c in int_dict.iteritems():
                cc = coefficient_type(c)
                if cc:  # drop coefficients that reduce to 0 mod p
                    result_dict[e] = cc
            return newClass(result_dict)

        def to_int_dict(self):
            """Returns the dictionaries of integer representators."""
            result_dict = {}
            for e, c in self.coeffs.iteritems():
                result_dict[e] = c.value
            return result_dict

        def to_sym_int_dict(self):
            """Returns the dictionaries of symmetric integer representators."""
            result_dict = {}
            for e, c in self.coeffs.iteritems():
                result_dict[e] = int(c)
            return result_dict

        @staticmethod
        def random(min_degree, max_degree, monic=True):
            """Generate random polynomial in given degree range."""
            degree = random.randrange(min_degree, max_degree + 1)
            p = coefficient_type.modulus
            result_dict = {}
            if monic:
                # Fix the leading coefficient to 1, randomize the rest.
                result_dict[degree] = coefficient_type(1)
                degree -= 1
            for e in xrange(0, degree + 1):
                c = coefficient_type(random.randrange(p))
                if c:  # keep the representation sparse: skip zero coefficients
                    result_dict[e] = c
            return newClass(result_dict)

        def monic(self):
            """Return (leading coefficient, self scaled to leading coefficient 1)."""
            # Zero polynomial: report leading coefficient 0 and keep self.
            if not self:
                return self.coeff_type(0), self
            leading_coeff = self[self.degree]
            return leading_coeff, self.scale(self.coeff_type(1)/leading_coeff)

    newClass.__name__ = "%sPoly" % coefficient_type.__name__
    return newClass
# Division algorithms:
def div(f, g):
    """Polynomial division with remainder.

    Returns (quotient, remainder) such that f == quotient*g + remainder.
    A zero divisor yields (0, f).
    """
    quotient = f.__class__()
    remainder = f
    if not g:
        return quotient, remainder
    divisor_lead = g[g.degree]
    # Peel off one leading term of the remainder per iteration.
    while remainder.degree >= g.degree:
        shift = remainder.degree - g.degree
        term = f.__class__({shift: remainder[remainder.degree]/divisor_lead})
        quotient += term
        remainder -= term*g
    return quotient, remainder
def gcd(f, g):
    """Greatest common divisor by Euclid's remainder loop; result is monic."""
    while g:
        _, remainder = div(f, g)
        f, g = g, remainder
    return f.monic()[1]
def lcm(f, g):
    """Least common multiple: f*g divided by gcd(f, g), normalized to monic."""
    quotient, remainder = div(f*g, gcd(f, g))
    assert not remainder
    return quotient.monic()[1]
def xgcd(f, g):
    """Extended euclidean algorithm.

    Outputs the gcd, s and t, such that:
    h == s*f + t*g
    """
    one = f.coeff_type(1)
    # Parallel histories of the monic remainder sequence:
    # p[i] -- leading coefficient stripped at step i, r[i] -- monic remainder,
    # q[i] -- quotient, s[i]/t[i] -- cofactors kept in step with r[i].
    p, q, r, s, t = [], [], [], [], []
    pp, rr = f.monic()
    p.append(pp)
    r.append(rr)
    pp, rr = g.monic()
    p.append(pp)
    r.append(rr)
    # Initial cofactors: r[0] == (1/p[0])*f and r[1] == (1/p[1])*g.
    s.append(f.__class__({0:(one/p[0])}))
    s.append(f.__class__())
    t.append(f.__class__())
    t.append(f.__class__({0:(one/p[1])}))
    while True:
        q.append(div(r[-2], r[-1])[0])
        pp, rr = (r[-2] - q[-1]*r[-1]).monic()
        if not rr:
            # Last nonzero (monic) remainder is the gcd; return it with its
            # cofactors so that r[-1] == s[-1]*f + t[-1]*g.
            return r[-1], s[-1], t[-1]
        p.append(pp)
        r.append(rr)
        pp = one/pp
        # Rescale the cofactor updates by 1/pp so they match the monic r[-1].
        s.append((s[-2] - q[-1]*s[-1]).scale(pp))
        t.append((t[-2] - q[-1]*t[-1]).scale(pp))
# Arithmetic modular a polynomial p:
def truncate(f, n):
    """Keep only the terms of degree below n (the remainder modulo x**n)."""
    kept = dict((e, c) for e, c in f.coeffs.iteritems() if e < n)
    return f.__class__(kept)
def pow_mod(f, n, p):
    """Repeated squaring.

    Computes f**n modulo the polynomial p, reducing with div after every
    multiplication so intermediate degrees stay below deg(p).
    """
    assert isinstance(n, (int, long)) and n >= 0
    if n == 0:
        # f**0 == 1: the constant-one polynomial.
        return f.__class__({0: f.__class__.coeff_type(1)})
    # Decompose n into its binary digits, most significant bit first.
    binary_n = []
    while n:
        if n % 2:
            binary_n.insert(0, 1)
            n = (n - 1) / 2
        else:
            binary_n.insert(0, 0)
            n /= 2
    # The leading bit is always 1, so start from f mod p.
    result = div(f, p)[1]
    for k in binary_n[1:]:
        # Square once per remaining bit; multiply by f when the bit is set.
        result *= result
        result = div(result, p)[1]
        if k:
            result *= f
            result = div(result, p)[1]
    return result
# Factorization:
def distinct_degree_factor(f):
    """Return a list of divisors.

    Each polynomial has only factors of a specific degree: entry i (0-based)
    collects the factors of degree i+1. Presumably f is monic and square-free
    (callers such as factor_sqf pass monic input) -- verify before reuse.
    """
    result = []
    coeff_type = f.__class__.coeff_type
    p = coeff_type.modulus
    x_poly = f.__class__({1: coeff_type(1)})
    one_poly = f.__class__({0: coeff_type(1)})
    h = x_poly
    while f != one_poly:
        # h tracks x**(p**i) mod f; gcd(h - x, f) splits off the factors at
        # the current degree.
        h = pow_mod(h, p, f) # h <- h**p mod f
        g = gcd(h - x_poly, f)
        f, r = div(f, g)
        assert not r
        result.append(g)
        # Early abort:
        # the remaining cofactor is too small to contain two factors of any
        # larger degree, so it is appended as-is.
        if f.degree < 2*(g.degree + 1):
            result.append(f)
            break
    return result
def equal_degree_split(f, degree):
    """Finds divisor of a result from distinct-degree factorization.

    Probabilistic (Cantor-Zassenhaus-style) splitting step: returns a proper
    divisor of f, or None on failure so the caller retries with a new random
    polynomial. The exponent (p**degree - 1)/2 uses Python 2 floor division,
    so this assumes an odd modulus.
    """
    coeff_type = f.__class__.coeff_type
    one_poly = f.__class__({0: coeff_type(1)})
    a = f.random(1, f.degree - 1)
    g = gcd(f, a)
    if g != one_poly:
        # Lucky case: the random polynomial already shares a factor with f.
        return g
    b = pow_mod(a, (coeff_type.modulus**degree - 1)/2, f)
    g = gcd(b - one_poly, f)
    if g != one_poly and g != f:
        return g
    return None # Failure, try again with another random a.
def equal_degree_factor(f, degree):
    """Recursively split f (all of whose factors have the given degree) into
    its individual factors, retrying the probabilistic split until it works."""
    if f.degree == degree:
        # f itself is a single factor of the target degree.
        return [f]
    divisor = equal_degree_split(f, degree)
    while divisor is None:  # split is probabilistic; retry until it succeeds
        divisor = equal_degree_split(f, degree)
    cofactor, remainder = div(f, divisor)
    assert not remainder
    return equal_degree_factor(divisor, degree) + equal_degree_factor(cofactor, degree)
def factor(f):
    """Factorization of a univariate polynomial over a Galois field.

    Returns a list of the leading coefficient of f and the monic
    factors with their multiplicities.
    """
    p = f.__class__.coeff_type.modulus
    leading_coeff, f = f.monic()
    one_poly = f.__class__({0: f.__class__.coeff_type(1)})
    x_poly = f.__class__({1: f.__class__.coeff_type(1)})
    h = x_poly
    i = 0  # degree of the factors extracted in the current round
    result = [leading_coeff]
    while f != one_poly:
        i += 1
        # One distinct-degree factorization step.
        h = pow_mod(h, p, f) # h <- h**p mod f
        g = gcd(h - x_poly, f)
        if g != one_poly:
            # Equal-degree factorization for degree i:
            g_factors = equal_degree_factor(g, i)
            # Now determine multiplicities of factors.
            for gg in g_factors:
                e = 0
                q, r = div(f, gg)
                # Divide gg out of f as long as it divides evenly.
                while not r: # gg**e divides f
                    e += 1
                    f = q
                    q, r = div(f, gg)
                result.append((gg, e))
    return result
def factor_sqf(f):
    """Factorization of a univariate square-free polynomial over a Galois field.

    Returns a list of the leading coefficient and the monic factors of f.
    """
    one_poly = f.__class__({0: f.__class__.coeff_type(1)})
    leading_coeff, f = f.monic()
    factors = [leading_coeff]
    # Position k of the distinct-degree split holds the degree-(k+1) part.
    for target_degree, divisor in enumerate(distinct_degree_factor(f), start=1):
        if divisor != one_poly:
            factors += equal_degree_factor(divisor, target_degree)
    return factors
| {
"content_hash": "8427515fe25d495ab95116ae7dd6a9aa",
"timestamp": "",
"source": "github",
"line_count": 254,
"max_line_length": 80,
"avg_line_length": 28.96062992125984,
"alnum_prop": 0.5216150081566069,
"repo_name": "certik/sympy-oldcore",
"id": "60555186f9aeeb12f5c6920cb8a17155dde50c1a",
"size": "7356",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sympy/polynomials/fast/gfpoly.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.apps import apps
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.db.models import Model
from django.db.models.base import ModelBase
from django.db.models.query import QuerySet
from django.shortcuts import get_object_or_404
from django.utils.functional import wraps
from guardian.compat import basestring
from guardian.exceptions import GuardianError
from guardian.utils import get_403_or_None
def permission_required(perm, lookup_variables=None, **kwargs):
    """
    Decorator for views that checks whether a user has a particular permission
    enabled.

    Optionally, instances for which check should be made may be passed as an
    second argument or as a tuple parameters same as those passed to
    ``get_object_or_404`` but must be provided as pairs of strings. This way
    decorator can fetch i.e. ``User`` instance based on performed request and
    check permissions on it (without this, one would need to fetch user instance
    at view's logic and check permission inside a view).

    :param login_url: if denied, user would be redirected to location set by
      this parameter. Defaults to ``django.conf.settings.LOGIN_URL``.
    :param redirect_field_name: name of the parameter passed if redirected.
      Defaults to ``django.contrib.auth.REDIRECT_FIELD_NAME``.
    :param return_403: if set to ``True`` then instead of redirecting to the
      login page, response with status code 403 is returned (
      ``django.http.HttpResponseForbidden`` instance or rendered template -
      see :setting:`GUARDIAN_RENDER_403`). Defaults to ``False``.
    :param accept_global_perms: if set to ``True``, then *object level
      permission* would be required **only if user does NOT have global
      permission** for target *model*. If turned on, makes this decorator
      like an extension over standard
      ``django.contrib.admin.decorators.permission_required`` as it would
      check for global permissions first. Defaults to ``False``.

    Examples::

        @permission_required('auth.change_user', return_403=True)
        def my_view(request):
            return HttpResponse('Hello')

        @permission_required('auth.change_user', (User, 'username', 'username'))
        def my_view(request, username):
            '''
            auth.change_user permission would be checked based on given
            'username'. If view's parameter would be named ``name``, we would
            rather use following decorator::

                @permission_required('auth.change_user', (User, 'username', 'name'))
            '''
            user = get_object_or_404(User, username=username)
            return user.get_absolute_url()

        @permission_required('auth.change_user',
            (User, 'username', 'username', 'groups__name', 'group_name'))
        def my_view(request, username, group_name):
            '''
            Similar to the above example, here however we also make sure that
            one of user's group is named same as request's ``group_name`` param.
            '''
            user = get_object_or_404(User, username=username,
                group__name=group_name)
            return user.get_absolute_url()
    """
    # Options are consumed out of **kwargs so they are not forwarded anywhere.
    login_url = kwargs.pop('login_url', settings.LOGIN_URL)
    redirect_field_name = kwargs.pop(
        'redirect_field_name', REDIRECT_FIELD_NAME)
    return_403 = kwargs.pop('return_403', False)
    accept_global_perms = kwargs.pop('accept_global_perms', False)

    # Check if perm is given as string in order not to decorate
    # view function itself which makes debugging harder
    if not isinstance(perm, basestring):
        raise GuardianError("First argument must be in format: "
                            "'app_label.codename or a callable which return similar string'")

    def decorator(view_func):
        def _wrapped_view(request, *args, **kwargs):
            # if more than one parameter is passed to the decorator we try to
            # fetch object for which check would be made
            obj = None
            if lookup_variables:
                model, lookups = lookup_variables[0], lookup_variables[1:]
                # Parse model
                if isinstance(model, basestring):
                    # 'app_label.ModelClass' string -> model class.
                    splitted = model.split('.')
                    if len(splitted) != 2:
                        raise GuardianError("If model should be looked up from "
                                            "string it needs format: 'app_label.ModelClass'")
                    model = apps.get_model(*splitted)
                elif issubclass(model.__class__, (Model, ModelBase, QuerySet)):
                    # Model instances, model classes and querysets pass as-is.
                    pass
                else:
                    raise GuardianError("First lookup argument must always be "
                                        "a model, string pointing at app/model or queryset. "
                                        "Given: %s (type: %s)" % (model, type(model)))
                # Parse lookups
                if len(lookups) % 2 != 0:
                    raise GuardianError("Lookup variables must be provided "
                                        "as pairs of lookup_string and view_arg")
                lookup_dict = {}
                # Pair consecutive entries: (queryset lookup, view kwarg name).
                for lookup, view_arg in zip(lookups[::2], lookups[1::2]):
                    if view_arg not in kwargs:
                        raise GuardianError("Argument %s was not passed "
                                            "into view function" % view_arg)
                    lookup_dict[lookup] = kwargs[view_arg]
                obj = get_object_or_404(model, **lookup_dict)

            # get_403_or_None returns a response on denial, None when allowed.
            response = get_403_or_None(request, perms=[perm], obj=obj,
                                       login_url=login_url, redirect_field_name=redirect_field_name,
                                       return_403=return_403, accept_global_perms=accept_global_perms)
            if response:
                return response
            return view_func(request, *args, **kwargs)
        return wraps(view_func)(_wrapped_view)
    return decorator
def permission_required_or_403(perm, *args, **kwargs):
    """Variant of ``permission_required`` that answers with HTTP 403.

    Standard Django's permission_required decorator redirects to the login
    page when the permission check fails; this wrapper instead forces the
    ``return_403`` option on, yielding an HttpResponseForbidden (status 403).
    """
    forced_kwargs = dict(kwargs, return_403=True)
    return permission_required(perm, *args, **forced_kwargs)
| {
"content_hash": "4e61eff580d0cff7b0ee967a7ffd252e",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 102,
"avg_line_length": 48.44927536231884,
"alnum_prop": 0.6172599461561472,
"repo_name": "benkonrath/django-guardian",
"id": "c9e3306e0fa11fbc90ece6cd0321c55cca8c1eb6",
"size": "6686",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "guardian/decorators.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "30857"
},
{
"name": "Python",
"bytes": "325903"
},
{
"name": "Shell",
"bytes": "269"
}
],
"symlink_target": ""
} |
import pytest
pytest.importorskip("ethosu.vela")
import tvm
from tvm import relay
from tvm.relay.testing import run_opt_pass
from tvm.relay.backend.contrib.ethosu.tir import spec
from tvm.relay.backend.contrib.ethosu.tir.compiler import lower_to_tir
from .infra import make_ethosu_identity, get_pooling_args
@pytest.mark.parametrize("ifm_shape", [[1, 5, 9, 3], [20, 14, 7], [31, 40], [101]])
def test_identity(ifm_shape):
    """Check that an ethosu_identity op lowers to the expected serial AVG pooling."""
    ifm = relay.var("ifm", shape=ifm_shape, dtype="int8")
    identity = make_ethosu_identity(ifm)
    func = relay.Function(relay.analysis.free_vars(identity), identity)
    func = run_opt_pass(func, relay.transform.InferType())
    mod, _ = lower_to_tir(func)
    data = []

    def _visit(stmt):
        # Collect the argument list of every intrinsic call in the lowered TIR.
        if isinstance(stmt, tvm.tir.Call):
            data.append(get_pooling_args(stmt))

    # Construct the ifm shape that the initial ifm shape gets legalized into:
    # prepend a batch dim of 1 for sub-4D shapes, then pad trailing dims to 1.
    ref_ifm_shape = ifm_shape
    if len(ref_ifm_shape) < 4:
        ref_ifm_shape = [1] + ref_ifm_shape
    while len(ref_ifm_shape) < 4:
        ref_ifm_shape.append(1)

    tvm.tir.stmt_functor.post_order_visit(mod["main"].body, _visit)

    # Dense NHWC strides derived from the legalized shape.
    ifm_stride_c = 1
    ifm_stride_w = ref_ifm_shape[3]
    ifm_stride_h = ref_ifm_shape[2] * ref_ifm_shape[3]
    ofm_height = ref_ifm_shape[1]
    ofm_width = ref_ifm_shape[2]
    ofm_channels = ref_ifm_shape[3]
    ofm_stride_c = 1
    ofm_stride_w = ofm_channels if ofm_width > 1 else 1
    ofm_stride_h = ofm_channels * ofm_width if ofm_height > 1 else 1

    # The identity operator TIR gets converted into serial pooling
    serial_pooling = spec.SerialPooling(
        ifm=spec.SerialFeatureMap(
            data_type="int8",
            height=ref_ifm_shape[1],
            width=ref_ifm_shape[2],
            channels=ofm_channels,
            tile_height_0=ref_ifm_shape[1],
            tile_height_1=0,
            tile_width_0=ref_ifm_shape[2],
            tile_address_0=0,
            tile_address_1=0,
            tile_address_2=0,
            tile_address_3=0,
            scale=1.0,
            zero_point=0,
            layout="NHWC",
            stride_h=ifm_stride_h,
            stride_w=ifm_stride_w,
            stride_c=ifm_stride_c,
        ),
        ofm=spec.SerialFeatureMap(
            data_type="int8",
            height=ofm_height,
            width=ofm_width,
            channels=ofm_channels,
            tile_height_0=ofm_height,
            tile_height_1=0,
            tile_width_0=ofm_width,
            tile_address_0=0,
            tile_address_1=0,
            tile_address_2=0,
            tile_address_3=0,
            scale=1.0,
            zero_point=0,
            layout="NHWC",
            stride_h=ofm_stride_h,
            stride_w=ofm_stride_w,
            stride_c=ofm_stride_c,
        ),
        # Identity is realized as a 1x1 AVG pool with no padding/activation.
        pooling_type="AVG",
        pool_shape=spec.SerialKernel(1, 1, 1, 1, 1, 1),
        padding=spec.SerialPadding(0, 0, 0, 0),
        activation=spec.SerialActivation(op="NONE", clip_min=0, clip_max=0),
        upscale="NONE",
        rounding_mode="TFL",
    )

    assert data[0] == ["ethosu_identity"] + list(serial_pooling)
# Allow invoking this test module directly as a script.
if __name__ == "__main__":
    pytest.main([__file__])
| {
"content_hash": "da7ae7fcae164eee9a1481549bdbb7fc",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 83,
"avg_line_length": 32.06060606060606,
"alnum_prop": 0.5775047258979206,
"repo_name": "Laurawly/tvm-1",
"id": "1ce55c49ea96ea8b607d2ae26711a24efca2a0e8",
"size": "3959",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/python/contrib/test_ethosu/test_replace_identity.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4093"
},
{
"name": "C",
"bytes": "351611"
},
{
"name": "C++",
"bytes": "11660999"
},
{
"name": "CMake",
"bytes": "228510"
},
{
"name": "Cuda",
"bytes": "16902"
},
{
"name": "Cython",
"bytes": "28979"
},
{
"name": "Go",
"bytes": "111527"
},
{
"name": "HTML",
"bytes": "2664"
},
{
"name": "Java",
"bytes": "199950"
},
{
"name": "JavaScript",
"bytes": "15305"
},
{
"name": "Makefile",
"bytes": "67149"
},
{
"name": "Objective-C",
"bytes": "24259"
},
{
"name": "Objective-C++",
"bytes": "87655"
},
{
"name": "Python",
"bytes": "16256580"
},
{
"name": "RenderScript",
"bytes": "1895"
},
{
"name": "Rust",
"bytes": "391076"
},
{
"name": "Shell",
"bytes": "228674"
},
{
"name": "TypeScript",
"bytes": "94385"
}
],
"symlink_target": ""
} |
from django.http import HttpResponse
def home(request):
    """Root view: greets the visitor and links to the admin site."""
    greeting = "Hello from django, try out <a href='/admin/'>/admin/</a>\n"
    return HttpResponse(greeting)
"content_hash": "d303b8a8f2f0f88f7b6424a9f4414a48",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 85,
"avg_line_length": 35.5,
"alnum_prop": 0.7183098591549296,
"repo_name": "Si-elegans/Web-based_GUI_Tools",
"id": "1a735e5f09e4022faf3dc26c1a9be44cb854c7be",
"size": "142",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mysite/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "183432"
},
{
"name": "HTML",
"bytes": "821815"
},
{
"name": "JavaScript",
"bytes": "5240621"
},
{
"name": "Python",
"bytes": "2130547"
}
],
"symlink_target": ""
} |
"""Tests for tensorflow.ops.math_ops."""
from absl.testing import parameterized
import numpy as np
from tensorflow.python import tf2
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class ReduceTest(test_util.TensorFlowTestCase):
  """Tests for the reduce_sum / reduce_variance / reduce_std math ops."""

  def testReduceAllDims(self):
    x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
    with test_util.device(use_gpu=True):
      y_tf = self.evaluate(math_ops.reduce_sum(x))
      self.assertEqual(y_tf, 21)

  def testReduceExtendType(self):
    # The float32 sum serves as the reference; the bfloat16 sum should
    # agree with it after casting to bfloat16.
    in_f32 = np.random.randn(1000, 1000).astype(np.float32)
    in_bf16 = math_ops.cast(in_f32, dtypes.bfloat16)

    out_f32 = self.evaluate(math_ops.reduce_sum(in_f32))
    out_bf16 = self.evaluate(math_ops.reduce_sum(in_bf16))
    expected = math_ops.cast(out_f32, dtypes.bfloat16)
    self.assertAllClose(out_bf16, expected, 1e-3)

  def testReduceExplicitAxes(self):
    x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
    with test_util.device(use_gpu=True):
      # Equivalent positive and negative axis values must agree.
      for axis in (0, -2):
        self.assertAllEqual(
            self.evaluate(math_ops.reduce_sum(x, axis=axis)), [5, 7, 9])
      for axis in (1, -1):
        self.assertAllEqual(
            self.evaluate(math_ops.reduce_sum(x, axis=axis)), [6, 15])
      # Reducing over both axes (in any order/sign) gives the full sum.
      for axis in (None, (0, 1), (1, 0), (-1, 0), (0, -1), (-2, 1), (1, -2),
                   (-1, -2), (-2, -1)):
        self.assertEqual(self.evaluate(math_ops.reduce_sum(x, axis=axis)), 21)

  def testReduceInvalidAxis(self):
    if context.executing_eagerly():
      # The shape check is in run a graph construction time. In eager mode,
      # it misses the check, magically return result given wrong shape.
      return
    x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
    axis = np.array([[0], [1]])  # rank-2 axis tensor is rejected
    with self.assertRaisesRegex(ValueError, "must be at most rank 1"):
      math_ops.reduce_sum(x, axis)

  def testReduceVar(self):
    x = np.array([[0, 0, 0], [0, 0, 0]], "float32")
    self.assertAllClose(self.evaluate(math_ops.reduce_variance(x)), 0)
    self.assertAllClose(
        self.evaluate(math_ops.reduce_variance(x, axis=0)), [0, 0, 0])

    # Integer input is rejected with a TypeError.
    x = [[1, 2, 1, 1], [1, 1, 0, 1]]
    with self.assertRaisesRegex(TypeError, "must be either real or complex"):
      math_ops.reduce_variance(x)

    x = [[1., 2., 1., 1.], [1., 1., 0., 1.]]
    self.assertEqual(self.evaluate(math_ops.reduce_variance(x)), 0.25)
    x_np = np.array(x)
    self.assertEqual(np.var(x_np), 0.25)  # sanity-check the numpy reference
    self.assertEqual(self.evaluate(math_ops.reduce_variance(x_np)), 0.25)

    # Ragged input: per-column variance over the rows that have that column.
    x = ragged_factory_ops.constant([[5., 1., 4., 1.], [], [5., 9., 2.], [5.],
                                     []])
    self.assertAllClose(math_ops.reduce_variance(x, axis=0), [0., 16., 1., 0.])

  def testReduceVarComplex(self):
    # Ensure that complex values are handled to be consistent with numpy
    complex_ys = [([0 - 1j, 0 + 1j], dtypes.float64),
                  (np.array([0 - 1j, 0 + 1j], "complex64"), dtypes.float32),
                  (np.array([0 - 1j, 0 + 1j], "complex128"), dtypes.float64)]
    for y, dtype in complex_ys:
      y_result = math_ops.reduce_variance(y)
      self.assertEqual(np.var(y), 1.0)
      self.assertEqual(self.evaluate(y_result), 1.0)
      # The variance of a complex tensor comes back as a real dtype.
      self.assertEqual(y_result.dtype, dtype)

  def testReduceStd(self):
    x = np.array([[0, 0, 0], [0, 0, 0]], "float32")
    self.assertAllClose(self.evaluate(math_ops.reduce_std(x)), 0)
    self.assertAllClose(
        self.evaluate(math_ops.reduce_std(x, axis=0)), [0, 0, 0])

    # Integer input is rejected with a TypeError.
    x = [[1, 2, 1, 1], [1, 1, 0, 1]]
    with self.assertRaisesRegex(TypeError, "must be either real or complex"):
      math_ops.reduce_std(x)

    x = [[1., 2., 1., 1.], [1., 1., 0., 1.]]
    self.assertEqual(self.evaluate(math_ops.reduce_std(x)), 0.5)
    x_np = np.array(x)
    self.assertEqual(np.std(x_np), 0.5)  # sanity-check the numpy reference
    self.assertEqual(self.evaluate(math_ops.reduce_std(x_np)), 0.5)

    # Ragged input: per-column std over the rows that have that column.
    x = ragged_factory_ops.constant([[5., 1., 4., 1.], [], [5., 9., 2.], [5.],
                                     []])
    self.assertAllClose(math_ops.reduce_std(x, axis=0), [0., 4., 1., 0.])

  def testReduceStdComplex(self):
    # Ensure that complex values are handled to be consistent with numpy
    complex_ys = [([0 - 1j, 0 + 1j], dtypes.float64),
                  (np.array([0 - 1j, 0 + 1j], "complex64"), dtypes.float32),
                  (np.array([0 - 1j, 0 + 1j], "complex128"), dtypes.float64)]
    for y, dtype in complex_ys:
      y_result = math_ops.reduce_std(y)
      self.assertEqual(np.std(y), 1.0)
      self.assertEqual(self.evaluate(y_result), 1.0)
      # The std of a complex tensor comes back as a real dtype.
      self.assertEqual(y_result.dtype, dtype)
@test_util.run_all_in_graph_and_eager_modes
class LogSumExpTest(test_util.TensorFlowTestCase):
def testReduceLogSumExp(self):
for dtype in [np.float16, np.float32, np.double]:
x_np = np.random.rand(5, 5).astype(dtype)
with test_util.use_gpu():
y_tf_np = math_ops.reduce_logsumexp(x_np)
y_np = np.log(np.sum(np.exp(x_np)))
self.assertAllClose(y_tf_np, y_np)
def testReductionIndices(self):
for dtype in [np.float16, np.float32, np.double]:
x_np = np.random.rand(5, 5).astype(dtype)
with test_util.use_gpu():
y_tf = math_ops.reduce_logsumexp(x_np, axis=[0])
y_np = np.log(np.sum(np.exp(x_np), axis=0))
self.assertShapeEqual(y_np, y_tf)
y_tf_np = self.evaluate(y_tf)
self.assertAllClose(y_tf_np, y_np)
def testReductionIndices2(self):
for dtype in [np.float16, np.float32, np.double]:
x_np = np.random.rand(5, 5).astype(dtype)
with test_util.use_gpu():
y_tf = math_ops.reduce_logsumexp(x_np, axis=0)
y_np = np.log(np.sum(np.exp(x_np), axis=0))
self.assertShapeEqual(y_np, y_tf)
y_tf_np = self.evaluate(y_tf)
self.assertAllClose(y_tf_np, y_np)
def testKeepDims(self):
for dtype in [np.float16, np.float32, np.double]:
x_np = np.random.rand(5, 5).astype(dtype)
with test_util.use_gpu():
y_tf_np = math_ops.reduce_logsumexp(x_np, keepdims=True)
self.assertEqual(y_tf_np.shape.rank, x_np.ndim)
y_np = np.log(np.sum(np.exp(x_np), keepdims=True))
self.assertAllClose(y_tf_np, y_np)
def testOverflow(self):
x = [1000, 1001, 1002, 1003]
for dtype in [np.float16, np.float32, np.double]:
x_np = np.array(x, dtype=dtype)
max_np = np.max(x_np)
with self.assertRaisesRegex(RuntimeWarning,
"overflow encountered in exp"):
out = np.log(np.sum(np.exp(x_np)))
if out == np.inf:
raise RuntimeWarning("overflow encountered in exp")
with test_util.use_gpu():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y_tf_np = math_ops.reduce_logsumexp(x_tf)
y_np = np.log(np.sum(np.exp(x_np - max_np))) + max_np
self.assertAllClose(y_tf_np, y_np)
def testUnderflow(self):
x = [-1000, -1001, -1002, -1003]
for dtype in [np.float16, np.float32, np.double]:
x_np = np.array(x, dtype=dtype)
max_np = np.max(x_np)
with self.assertRaisesRegex(RuntimeWarning,
"divide by zero encountered in log"):
out = np.log(np.sum(np.exp(x_np)))
if out == -np.inf:
raise RuntimeWarning("divide by zero encountered in log")
with test_util.use_gpu():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y_tf_np = math_ops.reduce_logsumexp(x_tf)
y_np = np.log(np.sum(np.exp(x_np - max_np))) + max_np
self.assertAllClose(y_tf_np, y_np)
def testInfinity(self):
  """reduce_logsumexp of a scalar -inf is -inf."""
  with test_util.use_gpu():
    result = math_ops.reduce_logsumexp(-np.inf)
    self.assertEqual(self.evaluate(result), -np.inf)
def testRaggedTensor(self):
  # reduce_logsumexp on a RaggedTensor reduces over all ragged values.
  for dtype in [dtypes.float16, dtypes.float32, dtypes.double]:
    x_rt = ragged_factory_ops.constant([[1, 2], [], [3, 4, 5]], dtype=dtype)
    # Flatten to a dense numpy array to compute the reference value.
    x_np = np.array(self.evaluate(x_rt.flat_values))
    with test_util.use_gpu():
      y_rt = math_ops.reduce_logsumexp(x_rt)
      # Max-subtraction form keeps the reference numerically stable.
      y_np = np.log(np.sum(np.exp(x_np - np.max(x_np)))) + np.max(x_np)
      self.assertAllClose(y_rt, y_np)
@test_util.run_all_in_graph_and_eager_modes
class RoundTest(test_util.TensorFlowTestCase):
  """Tests for math_ops.round against numpy's banker's rounding."""

  def testRounding(self):
    values = np.arange(-5.0, 5.0, .25)
    for dtype in (np.float32, np.double, np.int32):
      typed = np.array(values, dtype=dtype)
      expected = np.round(typed)
      with test_util.device(use_gpu=True):
        rounded = math_ops.round(constant_op.constant(typed, shape=typed.shape))
        self.assertAllClose(self.evaluate(rounded), expected, atol=1e-2)
@test_util.with_eager_op_as_function
@test_util.run_all_in_graph_and_eager_modes
class MatMulTest(test_util.TensorFlowTestCase, parameterized.TestCase):
  """Test for matmul."""

  # Dtypes math_ops.matmul accepts without an explicit output_type.
  SUPPORTED_DTYPES = [
      dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32,
      dtypes.int64, dtypes.bfloat16, dtypes.complex64, dtypes.complex128
  ]

  def testMatMul2D(self):
    # Plain rank-2 matmul across every supported dtype.
    for dtype in self.SUPPORTED_DTYPES:
      a = constant_op.constant([1, 2, 3, 4, 5, 6], shape=[2, 3], dtype=dtype)
      b = constant_op.constant([7, 8, 9, 10, 11, 12], shape=[3, 2], dtype=dtype)
      c = math_ops.matmul(a, b)
      c_np = constant_op.constant([[58, 64], [139, 154]],
                                  shape=(2, 2),
                                  dtype=dtype)
      # Loose atol accommodates low-precision dtypes (fp16 / bfloat16).
      self.assertAllClose(c, c_np, atol=1e-2)

  def testBatchMatMul(self):
    # Rank-3 operands exercise the batched matmul path.
    for dtype in self.SUPPORTED_DTYPES:
      a = constant_op.constant(np.arange(1, 13), shape=[2, 2, 3], dtype=dtype)
      b = constant_op.constant(np.arange(13, 25), shape=[2, 3, 2], dtype=dtype)
      c = math_ops.matmul(a, b)
      c_np = constant_op.constant(
          [[[94, 100], [229, 244]], [[508, 532], [697, 730]]],
          shape=[2, 2, 2],
          dtype=dtype)
      self.assertAllClose(c, c_np, atol=1e-2)

  def testUnsupportedtypeMatmul(self):
    # int8 x int8 without output_type is rejected by the op registry.
    a = constant_op.constant(
        np.arange(1, 13), shape=[2, 2, 3], dtype=dtypes.int8)
    b = constant_op.constant(
        np.arange(13, 25), shape=[2, 3, 2], dtype=dtypes.int8)
    with self.assertRaisesRegex((TypeError, errors.InvalidArgumentError),
                                "list of allowed values:"):
      math_ops.matmul(a, b)

  @parameterized.parameters((dtypes.int8, dtypes.int8),
                            (dtypes.int8, dtypes.uint8),
                            (dtypes.uint8, dtypes.int8))
  # TODO(shivaniagrawal): matmul (dtypes.uint8, dtypes.uint8) fails in xla_gpu.
  def testInt8MatMul2D(self, a_dtype, b_dtype):
    # 8-bit inputs require an explicit wider output_type.
    a = constant_op.constant([1, 2, 3, 4, 5, 6], shape=[2, 3], dtype=a_dtype)
    b = constant_op.constant([7, 8, 9, 10, 11, 12], shape=[3, 2], dtype=b_dtype)
    c = math_ops.matmul(a, b, output_type=dtypes.int32)
    c_np = constant_op.constant([[58, 64], [139, 154]],
                                shape=(2, 2),
                                dtype=dtypes.int32)
    self.assertAllClose(c, c_np)

  @parameterized.parameters((dtypes.int8), (dtypes.uint8))
  def testMixPrecMatMul2D(self, b_dtype):
    # Mixed precision: bfloat16 LHS with an 8-bit RHS.
    a = constant_op.constant([1, 2, 3, 4, 5, 6],
                             shape=[2, 3],
                             dtype=dtypes.bfloat16)
    b = constant_op.constant([7, 8, 9, 10, 11, 12], shape=[3, 2], dtype=b_dtype)
    c = math_ops.matmul(a, b, output_type=dtypes.bfloat16)
    c_np = constant_op.constant([[58, 64], [139, 154]],
                                shape=(2, 2),
                                dtype=dtypes.bfloat16)
    self.assertAllClose(c, c_np, atol=1e-2)

  @parameterized.parameters((dtypes.int8, dtypes.int8),
                            (dtypes.int8, dtypes.uint8),
                            (dtypes.uint8, dtypes.int8))
  # TODO(shivaniagrawal): matmul (dtypes.uint8, dtypes.uint8) fails in xla_gpu.
  def testInt8BatchMatmul(self, a_dtype, b_dtype):
    # Batched variant of the 8-bit matmul with int32 accumulation.
    a = constant_op.constant(np.arange(1, 13), shape=[2, 2, 3], dtype=a_dtype)
    b = constant_op.constant(np.arange(13, 25), shape=[2, 3, 2], dtype=b_dtype)
    c_np = constant_op.constant(
        [[[94, 100], [229, 244]], [[508, 532], [697, 730]]],
        shape=[2, 2, 2],
        dtype=dtypes.int32)
    c = math_ops.matmul(a, b, output_type=dtypes.int32)
    self.assertAllEqual(c, c_np)

  @parameterized.parameters((dtypes.int8), (dtypes.uint8))
  def testMixPrecBatchMatmul(self, b_dtype):
    # Batched mixed precision: bfloat16 LHS with an 8-bit RHS.
    a = constant_op.constant(
        np.arange(1, 13), shape=[2, 2, 3], dtype=dtypes.bfloat16)
    b = constant_op.constant(np.arange(13, 25), shape=[2, 3, 2], dtype=b_dtype)
    c_np = constant_op.constant(
        [[[94, 100], [229, 244]], [[508, 532], [697, 730]]],
        shape=[2, 2, 2],
        dtype=dtypes.bfloat16)
    c = math_ops.matmul(a, b, output_type=dtypes.bfloat16)
    self.assertAllClose(c, c_np, atol=1e-2)

  def testInvalidOutputTypeMatmul(self):
    # output_type=float32 has no registered kernel for these input dtypes;
    # the error surfaced differs by execution mode / runtime.
    for dtype in [dtypes.int8, dtypes.bfloat16]:
      a = constant_op.constant(np.arange(1, 13), shape=[2, 2, 3], dtype=dtype)
      b = constant_op.constant(
          np.arange(13, 25), shape=[2, 3, 2], dtype=dtypes.int8)
      if context.executing_eagerly():
        if context.is_tfrt_enabled():
          with self.assertRaisesRegex(errors.InvalidArgumentError,
                                      "NodeDef expected inputs"):
            math_ops.matmul(a, b, output_type=dtypes.float32)
        else:
          with self.assertRaisesRegex(errors.NotFoundError,
                                      "Could not find device for node:"):
            math_ops.matmul(a, b, output_type=dtypes.float32)
      else:
        with self.assertRaisesRegex(errors.InvalidArgumentError,
                                    "No OpKernel was registered to support Op"):
          self.evaluate(math_ops.matmul(a, b, output_type=dtypes.float32))
@test_util.run_all_in_graph_and_eager_modes
class ModTest(test_util.TensorFlowTestCase):
  """Tests for math_ops.mod against numpy references."""

  def testFloat(self):
    values = [0.5, 0.7, 0.3]
    for dtype in (np.float32, np.double):
      # Exercise both a scalar and a broadcast-vector denominator.
      for denom in (values[0], [values[0]] * 3):
        typed = np.array(values, dtype=dtype)
        with test_util.use_gpu():
          numerator = constant_op.constant(typed, shape=typed.shape)
          remainder = math_ops.mod(numerator, denom)
          self.assertAllClose(
              self.evaluate(remainder), np.fmod(typed, denom), atol=1e-2)

  def testFixed(self):
    values = [5, 10, 23]
    for dtype in (np.int32, np.int64):
      # Exercise both a scalar and a vector denominator.
      for denom in (values[0], values):
        typed = np.array(values, dtype=dtype)
        with test_util.use_gpu():
          numerator = constant_op.constant(typed, shape=typed.shape)
          remainder = math_ops.mod(numerator, denom)
          self.assertAllClose(self.evaluate(remainder), np.mod(typed, denom))
@test_util.run_all_in_graph_and_eager_modes
class SquaredDifferenceTest(test_util.TensorFlowTestCase):
  """Tests for math_ops.squared_difference."""

  def testSquaredDifference(self):
    for dtype in [
        np.float16, np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype,
        np.int32, np.int64
    ]:
      lhs = np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)
      rhs = np.array([-3, -2, -1], dtype=dtype)
      expected = (lhs - rhs) * (lhs - rhs)
      with test_util.device(use_gpu=True):
        actual = self.evaluate(math_ops.squared_difference(lhs, rhs))
        self.assertAllClose(expected, actual)

  def testComplexSquaredDifference(self):
    for dtype in (np.complex64, np.complex128):
      lhs = np.array([[1 + 3j, 2 + 2j, 3 + 1j], [4 - 1j, 5 - 2j, 6 - 3j]],
                     dtype=dtype)
      rhs = np.array([-3 + 1j, -2 + 2j, -1 + 3j], dtype=dtype)
      # For complex operands the op computes conj(x - y) * (x - y).
      expected = np.conj(lhs - rhs) * (lhs - rhs)
      with test_util.device(use_gpu=False):
        actual = self.evaluate(math_ops.squared_difference(lhs, rhs))
        self.assertAllClose(expected, actual)
@test_util.with_eager_op_as_function
@test_util.run_all_in_graph_and_eager_modes
class ApproximateEqualTest(test_util.TensorFlowTestCase):
  """Tests for math_ops.approximate_equal."""

  def testApproximateEqual(self):
    for dtype in [np.float32, np.double]:
      x = dtype(1)
      y = dtype(1.00009)
      # Difference (9e-5) exceeds the default tolerance, so not equal.
      z = False
      with test_util.device(use_gpu=True):
        # Default tolerance is 0.00001
        z_tf = self.evaluate(math_ops.approximate_equal(x, y))
        self.assertAllEqual(z, z_tf)
    for dtype in [np.float32, np.double]:
      x = dtype(1)
      y = dtype(1.000009)
      # Difference (9e-6) is within the default tolerance, so equal.
      z = True
      with test_util.device(use_gpu=True):
        # Default tolerance is 0.00001
        z_tf = self.evaluate(math_ops.approximate_equal(x, y))
        self.assertAllEqual(z, z_tf)
    for dtype in [np.float32, np.double]:
      # Element-wise comparison with an explicit, larger tolerance.
      x = np.array([[[[-1, 2.00009999], [-3, 4.01]]]], dtype=dtype)
      y = np.array([[[[-1.001, 2], [-3.00009, 4]]]], dtype=dtype)
      z = np.array([[[[False, True], [True, False]]]], dtype=np.bool_)
      with test_util.device(use_gpu=True):
        z_tf = self.evaluate(math_ops.approximate_equal(x, y, tolerance=0.0001))
        self.assertAllEqual(z, z_tf)

  def testApproximateEqualShape(self):
    for dtype in [np.float32, np.double]:
      x = np.array([1, 2], dtype=dtype)
      y = np.array([[1, 2]], dtype=dtype)
      # The inputs 'x' and 'y' must have the same shape.
      with self.assertRaisesRegex(
          (ValueError, errors.InvalidArgumentError),
          "Shapes must be equal rank|must be of the same shape"):
        math_ops.approximate_equal(x, y)

  def testApproximateEqualShapeXla(self):
    # The same shape check must also fire under XLA compilation.

    @def_function.function(jit_compile=True)
    def approximate_equal(x, y):
      return math_ops.approximate_equal(x, y)

    for dtype in [np.float32, np.double]:
      x = np.array([1, 2], dtype=dtype)
      y = np.array([[1, 2]], dtype=dtype)
      with self.assertRaisesRegex(
          (ValueError, errors.InvalidArgumentError),
          "Shapes must be equal rank|must be of the same shape"):
        approximate_equal(x, y)
@test_util.run_all_in_graph_and_eager_modes
class ScalarMulTest(test_util.TensorFlowTestCase):
  """math_ops.scalar_mul must accept variables, constants, tensors and
  IndexedSlices."""

  def testAcceptsRefs(self):
    if context.executing_eagerly():
      var = resource_variable_ops.ResourceVariable(10, name="var")
    else:
      var = variables.Variable(10)
    product = math_ops.scalar_mul(3, var)
    init = variables.global_variables_initializer()
    with test_util.device(use_gpu=True):
      self.evaluate(init)
      self.assertEqual(self.evaluate(product), 30)

  def testAcceptsConstant(self):
    product = math_ops.scalar_mul(3, constant_op.constant(10))
    with test_util.device(use_gpu=True):
      self.assertEqual(self.evaluate(product), 30)

  def testAcceptsTensor(self):
    product = math_ops.scalar_mul(3, array_ops.ones([10, 10]))
    expected = array_ops.ones([10, 10]) * 3
    with test_util.device(use_gpu=True):
      self.assertAllEqual(self.evaluate(expected), self.evaluate(product))

  def testAcceptsIndexedSlices(self):
    values = constant_op.constant([2, 3, 5, 7, 0, -1], shape=[3, 2])
    indices = constant_op.constant([0, 2, 5])
    scaled = math_ops.scalar_mul(-3,
                                 indexed_slices.IndexedSlices(values, indices))
    with test_util.device(use_gpu=True):
      # Values are scaled; indices pass through untouched.
      self.assertAllEqual(
          self.evaluate(scaled.values), [[-6, -9], [-15, -21], [0, 3]])
      self.assertAllEqual(self.evaluate(scaled.indices), [0, 2, 5])
@test_util.run_all_in_graph_and_eager_modes
class AddNTest(test_util.TensorFlowTestCase):
  """Tests for math_ops.add_n."""

  def testPartials(self):
    """Test that previously revealed a bug in buffer forwarding for AddN."""
    partials = []
    for _ in range(98):
      partials.append(math_ops.add_n([constant_op.constant(1)]))
    partials.append(
        math_ops.add_n([constant_op.constant(1),
                        constant_op.constant(1)]))
    res = math_ops.add_n(partials) + constant_op.constant(0)
    with test_util.use_gpu():
      # 98 single-input adds plus one two-input add: 98 + 2 = 100.
      self.assertAllEqual(res, 100)

  def testFloat(self):
    # Fixed seed keeps the random inputs deterministic.
    np.random.seed(12345)
    for num_inputs in range(1, 10):
      x = [np.random.random((1, 2, 3, 4, 5)) - 0.5 for _ in range(num_inputs)]
      tf_x = ops.convert_n_to_tensor(x)
      with test_util.use_gpu():
        self.assertAllClose(sum(x), math_ops.add_n(tf_x))
        # Adding the same tensor n times equals scaling it by n.
        self.assertAllClose(x[0] * num_inputs,
                            math_ops.add_n([tf_x[0]] * num_inputs))

  def testInt(self):
    np.random.seed(54321)
    for num_inputs in range(1, 10):
      x = [
          np.random.randint(-128, 128, (5, 4, 3, 2, 1))
          for _ in range(num_inputs)
      ]
      tf_x = ops.convert_n_to_tensor(x)
      with test_util.use_gpu():
        self.assertAllEqual(sum(x), math_ops.add_n(tf_x))
        self.assertAllEqual(x[0] * num_inputs,
                            math_ops.add_n([tf_x[0]] * num_inputs))

  def testGrad(self):
    # The gradient of add_n w.r.t. each input is 1.
    np.random.seed(42)
    for num_inputs in range(1, 10):
      with test_util.use_gpu():
        input_vars = [
            variables.Variable(10.0 * np.random.random())
            for _ in range(0, num_inputs)
        ]
        self.evaluate(variables.global_variables_initializer())
        if context.executing_eagerly():
          with backprop.GradientTape() as tape:
            tape.watch(input_vars)
            addn = math_ops.add_n(input_vars)
          add_n_grad = tape.gradient(addn, input_vars)
        else:
          addn = math_ops.add_n(input_vars)
          add_n_grad = gradients.gradients(addn, input_vars)
        self.assertAllEqual(
            np.repeat(1.0, num_inputs),  # d/dx (x + y + ...) = 1
            [self.evaluate(g) for g in add_n_grad])

  def testIndexedSlices(self):
    slc = indexed_slices.IndexedSlices(
        array_ops.constant([1, 2], shape=[1, 2]), array_ops.constant([1]),
        array_ops.constant([2, 2]))
    # Dense equivalent: row 1 holds [1, 2], everything else zero.
    slc_as_dense = np.array([[0, 0], [1, 2]])
    with test_util.use_gpu():
      # add_n currently always converts IndexedSlices to dense
      self.assertAllEqual(slc_as_dense, math_ops.add_n([slc]))
      self.assertAllEqual(2 * slc_as_dense, math_ops.add_n([slc, slc]))

  def test_iterable(self):
    """Test that add_n supports iterables (e.g. generators and dict values)."""

    def fn():
      yield 1
      yield 2

    values_dict = {"a": 1, "b": 2}
    with test_util.use_gpu():
      self.assertAllEqual(3, math_ops.add_n(fn()))
      self.assertAllEqual(3, math_ops.add_n(values_dict.values()))
@test_util.run_all_in_graph_and_eager_modes
class DivAndModTest(test_util.TensorFlowTestCase):
  """Division/modulo ops vs. numpy, including signed-overflow edge cases."""
  # TODO(aselle): Test more types before exposing new division operators.

  def intTestData(self):
    """Broadcastable integer numerators (20x1) and divisors (1x4)."""
    nums = np.arange(-10, 10, 1).reshape(20, 1)
    divs = np.arange(-3, 4, 2).reshape(1, 4)
    return nums, divs

  def floatTestData(self):
    """Broadcastable float numerators (80x1) and negative divisors (1x12)."""
    nums = np.arange(-10, 10, .25).reshape(80, 1)
    divs = np.arange(-3, 0, .25).reshape(1, 12)
    return nums, divs

  def numpySafeFloorDivInt(self, x, y):
    """Floor division matching TF's INT_MIN // -1 overflow behavior."""
    z = x // y
    # Numpy produces 0 for INT_MIN/-1, but we expect an overflow to INT_MIN
    # so that (INT_MIN/-1) + (INT_MIN % -1) = INT_MIN + 0 = INT_MIN.
    z[(x == np.iinfo(x.dtype).min) & (y == -1)] = np.iinfo(x.dtype).min
    return z

  def numpySafeFloorModInt(self, x, y):
    """Floor mod derived from the safe floor div."""
    # Numpy crashes with a FPE for INT_MIN % -1.
    z = self.numpySafeFloorDivInt(x, y)
    return x - z * y

  def numpySafeTruncateDivInt(self, x, y):
    """Truncating (round toward zero) integer division."""
    z = self.numpySafeFloorDivInt(x, y)
    # Round up if non-zero remainder and inputs have opposite signs.
    z[(x != z * y) & ((x < 0) != (y < 0))] += 1
    return z

  def numpySafeTruncateModInt(self, x, y):
    """Truncating mod derived from the safe truncating div."""
    # Numpy crashes with a FPE for INT_MIN % -1.
    z = self.numpySafeTruncateDivInt(x, y)
    return x - z * y

  def testFloorModInt(self):
    nums, divs = self.intTestData()
    for dtype in [np.int32, np.int64]:
      x = nums.astype(dtype)
      y = divs.astype(dtype)
      tf_result = math_ops.floormod(x, y)
      np_result = self.numpySafeFloorModInt(x, y)
      self.assertAllEqual(tf_result, np_result)
      # The % operator on tensors must agree with math_ops.floormod.
      tf2_result = (array_ops.constant(x) % array_ops.constant(y))
      self.assertAllEqual(tf2_result, tf_result)

  def testFloorModFloat(self):
    nums, divs = self.floatTestData()
    for dtype in [np.float16, np.float32, np.float64]:
      x = nums.astype(dtype)
      y = divs.astype(dtype)
      tf_result = math_ops.floormod(x, y)
      np_result = x % y
      self.assertAllEqual(tf_result, np_result)
      tf2_result = (array_ops.constant(x) % array_ops.constant(y))
      self.assertAllEqual(tf2_result, tf_result)

  def testFloorModBfloat16(self):
    nums, divs = self.floatTestData()
    tf_result = math_ops.floormod(
        math_ops.cast(nums, dtypes.bfloat16),
        math_ops.cast(divs, dtypes.bfloat16))
    np_result = nums % divs
    self.assertAllEqual(tf_result, np_result)

  def testTruncateModInt(self):
    # Truncating mod matches numpy's fmod semantics for integers.
    nums, divs = self.intTestData()
    tf_result = math_ops.truncatemod(nums, divs)
    np_result = np.fmod(nums, divs)
    self.assertAllEqual(tf_result, np_result)

  def testTruncateModFloat(self):
    nums, divs = self.floatTestData()
    tf_result = math_ops.truncatemod(nums, divs)
    np_result = np.fmod(nums, divs)
    self.assertAllEqual(tf_result, np_result)

  def testFloorDivideInt(self):
    nums, divs = self.intTestData()
    tf_result = math_ops.floor_div(nums, divs)
    np_result = self.numpySafeFloorDivInt(nums, divs)
    self.assertAllEqual(tf_result, np_result)
    # The // operator on tensors must agree with math_ops.floor_div.
    tf2_result = (array_ops.constant(nums) // array_ops.constant(divs))
    self.assertAllEqual(tf2_result, tf_result)

  def testTruncateDivideInt(self):
    nums, divs = self.intTestData()
    tf_result = math_ops.truncatediv(nums, divs)
    np_result = self.numpySafeTruncateDivInt(nums, divs)
    self.assertAllEqual(tf_result, np_result)

  @test_util.deprecated_graph_mode_only
  def testDivideName(self):
    # The user-supplied name must appear on the resulting graph op.
    op = math_ops.divide(
        array_ops.constant(3), array_ops.constant(4), name="my_cool_divide")
    self.assertEqual(op.name, "my_cool_divide:0")

  def testRealDiv(self):
    nums, divs = self.floatTestData()
    tf_result = math_ops.realdiv(nums, divs)
    np_result = np.divide(nums, divs)
    self.assertAllClose(tf_result, np_result)

  def testDivideType(self):
    a = array_ops.constant([2], dtype=dtypes.int32)
    # Since __future__.division is in effect, we should always upgrade to
    # float64.
    b = math_ops.divide(a, 1)
    self.assertEqual(b.dtype, dtypes.float64)
    self.assertEqual(2.0, self.evaluate(b))
    c = math_ops.divide(a, 4)
    self.assertEqual(c.dtype, dtypes.float64)
    self.assertEqual(0.5, self.evaluate(c))

  def testComplexDiv(self):
    # Both divide and the deprecated div must accept complex operands.
    foo = array_ops.constant([1. + 3.j])
    _ = math_ops.divide(foo, 1.)
    _ = math_ops.div(foo, 2.)

  def testFloorDivGrad(self):
    a = variables.Variable(2.)
    b = variables.Variable(4.)
    input_vars = [a, b]
    self.evaluate(variables.global_variables_initializer())
    if context.executing_eagerly():
      # TODO(rmlarsen): Is there a more compact way of
      # writing this for multiple expressions?
      with backprop.GradientTape() as tape:
        tape.watch(input_vars)
        c_grad0 = tape.gradient(math_ops.divide(a, b), input_vars)
      with backprop.GradientTape() as tape:
        tape.watch(input_vars)
        c_grad1 = tape.gradient(math_ops.div(a, b), input_vars)
      with backprop.GradientTape() as tape:
        tape.watch(input_vars)
        c_grad2 = tape.gradient(math_ops.floordiv(a, b), input_vars)
    else:
      c_grad0 = gradients.gradients(math_ops.divide(a, b), input_vars)
      c_grad1 = gradients.gradients(math_ops.div(a, b), input_vars)
      c_grad2 = gradients.gradients(math_ops.floordiv(a, b), input_vars)
    self.assertAllEqual([self.evaluate(x) for x in c_grad0], [.25, -.125])
    self.assertAllEqual([self.evaluate(x) for x in c_grad1], [.25, -.125])
    # floordiv is piecewise constant, so its gradients are defined as None.
    self.assertAllEqual(
        [None if x is None else self.evaluate(x) for x in c_grad2],
        [None, None])

  def testConsistent(self):
    # div * divisor + mod must reconstruct the numerator for both the
    # floor and truncation flavors.
    nums, divs = self.intTestData()
    tf_result = (
        math_ops.floor_div(nums, divs) * divs + math_ops.floormod(nums, divs))
    tf_nums = array_ops.constant(nums)
    tf_divs = array_ops.constant(divs)
    tf2_result = (tf_nums // tf_divs * tf_divs + tf_nums % tf_divs)
    np_result = (nums // divs) * divs + (nums % divs)
    # Consistent with numpy
    self.assertAllEqual(tf_result, np_result)
    # Consistent with two forms of divide
    self.assertAllEqual(tf_result, tf2_result)
    # consistency for truncation form
    tf3_result = (
        math_ops.truncatediv(nums, divs) * divs +
        math_ops.truncatemod(nums, divs))
    expanded_nums = np.reshape(
        np.tile(nums, divs.shape[1]), (nums.shape[0], divs.shape[1]))
    # Consistent with desire to get numerator
    self.assertAllEqual(tf3_result, expanded_nums)
    # Consistent with desire to get numerator
    self.assertAllEqual(tf_result, expanded_nums)

  def testWithPythonValue(self):
    # Test case for https://github.com/tensorflow/tensorflow/issues/39475
    x = math_ops.divide(5, 2)
    self.assertIsInstance(x, ops.Tensor)
    x = math_ops.divide(5, array_ops.constant(2.0))
    self.assertIsInstance(x, ops.Tensor)

  def intEdgeTestData(self, dtype):
    """Edge-case test data for integer types."""
    # INT_MIN/-1 expected to produce signed-integer overflow,
    # INT_MIN/INT_MAX expected to work.
    nums = np.array([np.iinfo(dtype).min, -1, 1,
                     np.iinfo(dtype).max],
                    dtype=dtype).reshape([4, 1])
    divs = nums.reshape([1, 4])
    return nums, divs

  @test_util.disable_asan("Expected signed integer overflow.")
  @test_util.disable_ubsan("Expected signed integer overflow.")
  def testFloorDivModIntEdges(self):
    for dtype in [np.int32, np.int64]:
      x, y = self.intEdgeTestData(dtype)
      tf_floor_div = math_ops.floor_div(x, y)
      np_floor_div = self.numpySafeFloorDivInt(x, y)
      self.assertAllEqual(tf_floor_div, np_floor_div)
      tf_floor_mod = math_ops.floormod(x, y)
      np_floor_mod = self.numpySafeFloorModInt(x, y)
      self.assertAllEqual(tf_floor_mod, np_floor_mod)
      z = math_ops.add(math_ops.multiply(tf_floor_div, y), tf_floor_mod)
      # x = floor_div(x, y) * y + floor_mod(x, y)
      self.assertAllEqual(z, np.broadcast_to(x, z.shape))

  @test_util.disable_asan("Expected signed integer overflow.")
  @test_util.disable_ubsan("Expected signed integer overflow.")
  def testTruncateDivModIntEdges(self):
    for dtype in [np.int32, np.int64]:
      x, y = self.intEdgeTestData(dtype)
      tf_truncate_div = math_ops.truncatediv(x, y)
      np_truncate_div = self.numpySafeTruncateDivInt(x, y)
      self.assertAllEqual(tf_truncate_div, np_truncate_div)
      tf_truncate_mod = math_ops.truncatemod(x, y)
      np_truncate_mod = self.numpySafeTruncateModInt(x, y)
      self.assertAllEqual(tf_truncate_mod, np_truncate_mod)
      z = math_ops.add(math_ops.multiply(tf_truncate_div, y), tf_truncate_mod)
      # x = truncatediv(x, y) * y + truncatemod(x, y)
      self.assertAllEqual(z, np.broadcast_to(x, z.shape))
@test_util.run_all_in_graph_and_eager_modes
class DivNoNanTest(test_util.TensorFlowTestCase, parameterized.TestCase):
  """Tests for math_ops.div_no_nan: x / y, but 0 wherever y == 0."""

  @parameterized.parameters((dtypes.bfloat16), (dtypes.float16),
                            (dtypes.float32), (dtypes.float64),
                            (dtypes.complex64), (dtypes.complex128))
  def testBasic(self, dtype):
    nums = np.arange(-10, 10, .25).reshape(80, 1)
    divs = np.arange(-3, 3, .25).reshape(1, 24)
    tf_nums = constant_op.constant(nums, dtype=dtype)
    tf_divs = constant_op.constant(divs, dtype=dtype)
    # Use tf versions for expected value to ensure inputs are identical
    # (e.g. in the case of bfloat16).
    np_nums = self.evaluate(tf_nums)
    np_divs = self.evaluate(tf_divs)
    np_result = np.true_divide(np_nums, np_divs)
    # Columns with a zero divisor must come out exactly zero.
    np_result[:, np_divs[0] == 0] = 0
    with test_util.use_gpu():
      tf_result = math_ops.div_no_nan(tf_nums, tf_divs)
      self.assertAllCloseAccordingToType(tf_result, np_result)

  @parameterized.parameters((dtypes.bfloat16), (dtypes.float16),
                            (dtypes.float32), (dtypes.float64),
                            (dtypes.complex64), (dtypes.complex128))
  def testSmall(self, dtype):
    # Choose values whose squared magnitude underflows to zero/subnormal.
    zero = constant_op.constant([0, 0, 0, 0], dtype=dtype)
    divs = constant_op.constant([1e-25, -1e-20, 1e-165, -1e-160], dtype=dtype)
    tf_result = math_ops.div_no_nan(zero, divs)
    # Results should always be exactly zero.
    self.assertAllEqual(tf_result, zero)

  @parameterized.parameters((dtypes.bfloat16), (dtypes.float16),
                            (dtypes.float32), (dtypes.float64),
                            (dtypes.complex64), (dtypes.complex128))
  def testNonFiniteInNumerator(self, dtype):
    # -np.inf replaces np.NINF, which was removed in NumPy 2.0.
    nums = constant_op.constant([np.nan, np.inf, -np.inf], dtype=dtype)
    zeros = constant_op.constant([0, 0, 0], dtype=dtype)
    ones = constant_op.constant([1, 1, 1], dtype=dtype)
    with test_util.use_gpu():
      # A zero divisor wins over any non-finite numerator.
      tf_result_zeros = math_ops.div_no_nan(nums, zeros)
      self.assertAllEqual([0, 0, 0], tf_result_zeros)
      # A non-zero divisor performs ordinary division.
      tf_result_ones = math_ops.div_no_nan(nums, ones)
      self.assertAllEqual(nums / ones, tf_result_ones)
@test_util.run_all_in_graph_and_eager_modes
class MultiplyNoNanTest(test_util.TensorFlowTestCase):
  """Tests for math_ops.multiply_no_nan: x * y, but 0 wherever y == 0."""

  def testBasic(self):
    for dtype in [np.float32, np.float64]:
      # -np.inf replaces np.NINF, which was removed in NumPy 2.0.
      values = [0, 1, np.nan, np.inf, -np.inf]
      x = constant_op.constant(values, dtype=dtype)
      zeros = constant_op.constant(np.zeros((5,)), dtype=dtype)
      ones = constant_op.constant(np.ones((5,)), dtype=dtype)
      with test_util.use_gpu():
        # A zero second argument forces the product to zero, even for
        # nan/inf in the first argument.
        tf_result_zeros = math_ops.multiply_no_nan(x, zeros)
        self.assertAllEqual(tf_result_zeros, zeros)
        tf_result_ones = math_ops.multiply_no_nan(x, ones)
        self.assertAllEqual(tf_result_ones, x)
        # Normal floating point arithmetic if nonfinite values are in the
        # second argument.
        tf_result_reverseargs = math_ops.multiply_no_nan(zeros, x)
        self.assertAllEqual(zeros * x, tf_result_reverseargs)
@test_util.run_all_in_graph_and_eager_modes
class XlogyTest(test_util.TensorFlowTestCase):
  """math_ops.xlogy: x * log(y), defined to be 0 when x == 0."""

  def testXlogyNoZero(self):
    for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
      x = constant_op.constant([[0.1, 0.2, 3.5], [-2., -5., 30.]], dtype=dtype)
      y = constant_op.constant([[0.1, 0.2, 3.5], [3.1, 4., 2.]], dtype=dtype)
      with test_util.use_gpu():
        # With no zeros in x, xlogy matches the plain product x * log(y).
        actual = self.evaluate(math_ops.xlogy(x, y))
        expected = self.evaluate(x * math_ops.log(y))
        self.assertAllClose(actual, expected)

  def testXlogyWithZero(self):
    for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
      x = constant_op.constant(np.zeros((2, 3)), dtype=dtype)
      y = constant_op.constant([[0.1, 0.2, 3.5], [0., 1., 2.]], dtype=dtype)
      with test_util.use_gpu():
        # x == 0 forces the result to 0 even where log(y) is undefined.
        actual = self.evaluate(math_ops.xlogy(x, y))
        expected = self.evaluate(array_ops.zeros_like(y))
        self.assertAllClose(actual, expected)

  def testXlogyWithZeroBroadcast(self):
    for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
      x = constant_op.constant([[0.], [1.]], dtype=dtype)
      y = constant_op.constant([[0.1, 0.2, 3.5], [0., 1., 2.]], dtype=dtype)
      with test_util.use_gpu():
        actual = self.evaluate(math_ops.xlogy(x, y))
        # Row 0 multiplies by zero; row 1 reduces to log(y).
        row_zero = self.evaluate(array_ops.zeros_like(y[0]))
        row_log = self.evaluate(math_ops.log(y[1]))
        self.assertAllClose(row_zero, actual[0])
        self.assertAllClose(row_log, actual[1])
@test_util.run_all_in_graph_and_eager_modes
class Xlog1pyTest(test_util.TensorFlowTestCase):
  """math_ops.xlog1py: x * log1p(y), defined to be 0 when x == 0."""

  def testXlog1pyNoNeg1(self):
    for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
      x = constant_op.constant([[0.1, 0.2, 3.5], [-2., -5., 30.]], dtype=dtype)
      y = constant_op.constant([[-0.1, -0.2, 3.5], [3.1, -0.9, 2.]],
                               dtype=dtype)
      with test_util.use_gpu():
        # With y never at -1, xlog1py matches the product x * log1p(y).
        actual = self.evaluate(math_ops.xlog1py(x, y))
        expected = self.evaluate(x * math_ops.log1p(y))
        self.assertAllClose(actual, expected)

  def testXlog1pyWithNegOne(self):
    for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
      x = constant_op.constant(np.zeros((2, 3)), dtype=dtype)
      y = constant_op.constant([[0.1, 0.2, 3.5], [-1., 1., 2.]], dtype=dtype)
      with test_util.use_gpu():
        # x == 0 forces the result to 0 even where log1p(y) diverges.
        actual = self.evaluate(math_ops.xlog1py(x, y))
        expected = self.evaluate(array_ops.zeros_like(y))
        self.assertAllClose(actual, expected)

  def testXlog1pyWithZeroBroadcast(self):
    for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
      x = constant_op.constant([[0.], [1.]], dtype=dtype)
      y = constant_op.constant([[-0.1, -0.2, -1.], [0., 1., 2.]], dtype=dtype)
      with test_util.use_gpu():
        actual = self.evaluate(math_ops.xlog1py(x, y))
        # Row 0 multiplies by zero; row 1 reduces to log1p(y).
        row_zero = self.evaluate(array_ops.zeros_like(y[0]))
        row_log1p = self.evaluate(math_ops.log1p(y[1]))
        self.assertAllClose(row_zero, actual[0])
        self.assertAllClose(row_log1p, actual[1])
@test_util.run_all_in_graph_and_eager_modes
class XdivyTest(test_util.TensorFlowTestCase):
  """math_ops.xdivy: x / y, defined to be 0 when x == 0."""

  def testXdivyNoZero(self):
    for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
      x = constant_op.constant([[0.1, 0.2, 3.5], [-2., -5., 30.]], dtype=dtype)
      y = constant_op.constant([[0.1, 0.2, 3.5], [3.1, 4., 2.]], dtype=dtype)
      with test_util.use_gpu():
        # With no zeros in x, xdivy matches plain division.
        actual = self.evaluate(math_ops.xdivy(x, y))
        expected = self.evaluate(x / y)
        self.assertAllClose(actual, expected)

  def testXdivyWithZero(self):
    for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
      x = constant_op.constant(np.zeros((2, 3)), dtype=dtype)
      y = constant_op.constant([[0.1, 0.2, 3.5], [0., 1., 2.]], dtype=dtype)
      with test_util.use_gpu():
        # x == 0 forces the result to 0 even where y is zero.
        actual = self.evaluate(math_ops.xdivy(x, y))
        expected = self.evaluate(array_ops.zeros_like(y))
        self.assertAllClose(actual, expected)

  def testXdivyWithZeroBroadcast(self):
    for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
      x = constant_op.constant([[0.], [1.]], dtype=dtype)
      y = constant_op.constant([[0.1, 0.2, 3.5], [0., 1., 2.]], dtype=dtype)
      with test_util.use_gpu():
        actual = self.evaluate(math_ops.xdivy(x, y))
        # Row 0 divides zero by y; row 1 reduces to 1 / y.
        row_zero = self.evaluate(array_ops.zeros_like(y[0]))
        row_inv = self.evaluate(1 / y[1])
        self.assertAllClose(row_zero, actual[0])
        self.assertAllClose(row_inv, actual[1])
@test_util.run_all_in_graph_and_eager_modes
class NextAfterTest(test_util.TensorFlowTestCase):
  # Basic NextAfter tests that replicate numpy nextafter tests.

  def testBasic(self):
    for dtype in (dtypes.float32, dtypes.float64):
      one = constant_op.constant([1], dtype=dtype)
      two = constant_op.constant([2], dtype=dtype)
      zero = constant_op.constant([0], dtype=dtype)
      nan = constant_op.constant([np.nan], dtype=dtype)
      eps = constant_op.constant([np.finfo(dtype.as_numpy_dtype).eps],
                                 dtype=dtype)
      # Stepping up from 1.0 moves by exactly one machine epsilon.
      self.assertAllEqual(math_ops.nextafter(one, two) - one, eps)
      # Stepping down from 1.0 yields a strictly smaller value.
      self.assertAllLess(math_ops.nextafter(one, zero) - one, 0)
      # NaN in either operand propagates.
      self.assertAllEqual(math_ops.is_nan(math_ops.nextafter(nan, one)), [True])
      self.assertAllEqual(math_ops.is_nan(math_ops.nextafter(one, nan)), [True])
      # Equal operands return the value unchanged.
      self.assertAllEqual(math_ops.nextafter(one, one), one)

  def testBroadcasting(self):
    for dtype in (dtypes.float32, dtypes.float64):
      ones = constant_op.constant([1, 1], dtype=dtype)
      two = constant_op.constant([2], dtype=dtype)
      eps = np.finfo(dtype.as_numpy_dtype).eps
      eps_vec = constant_op.constant([eps, eps], dtype=dtype)
      # The scalar-shaped second operand broadcasts over the first.
      self.assertAllEqual(math_ops.nextafter(ones, two) - ones, eps_vec)
@test_util.run_all_in_graph_and_eager_modes
class BinaryOpsTest(test_util.TensorFlowTestCase):
  """Error reporting and RHS dispatching for tensor binary operators."""

  def testErrorReceivedIfDtypeMismatchFromOp(self):
    # Eager and graph modes surface dtype mismatches with different
    # exception types and messages.
    if context.executing_eagerly():
      error = errors_impl.InvalidArgumentError
      error_message = (
          r"cannot compute Add(V2)? as input #1\(zero-based\) was expected to "
          r"be a int32 tensor but is a float tensor \[Op:Add(V2)?\]")
    else:
      error = TypeError
      error_message = (
          "Input 'y' of 'Add(V2)?' Op has type float32 that does not "
          "match type int32 of argument 'x'.")
    with self.assertRaisesRegex(error, error_message):
      a = array_ops.ones([1], dtype=dtypes.int32) + 1.0
      self.evaluate(a)

  def testRHSDispatchingAndErrorRaising(self):
    if context.executing_eagerly():
      error = ValueError
      error_message = (
          r"Attempt to convert a value .* with an unsupported type")
    else:
      error = TypeError
      error_message = (r"Failed to convert elements of .* to Tensor")

    # An RHS that implements __radd__ takes over the whole operation.
    class RHSReturnsTrue:

      def __radd__(self, other):
        return True

    a = array_ops.ones([1], dtype=dtypes.int32) + RHSReturnsTrue()
    self.assertEqual(a, True)

    # An exception raised by the RHS propagates to the caller.
    class RHSRaisesError:

      def __radd__(self, other):
        raise TypeError("RHS not implemented")

    with self.assertRaisesRegex(error, error_message):
      a = array_ops.ones([1], dtype=dtypes.int32) + RHSRaisesError()
      self.evaluate(a)

    # NotImplemented falls back to tensor conversion, which fails.
    class RHSReturnsNotImplemented:

      def __radd__(self, other):
        return NotImplemented

    with self.assertRaisesRegex(error, error_message):
      a = array_ops.ones([1], dtype=dtypes.int32) + RHSReturnsNotImplemented()
      self.evaluate(a)

    # No __radd__ at all also ends in a conversion error.
    class RHSNotImplemented:
      pass

    with self.assertRaisesRegex(error, error_message):
      a = array_ops.ones([1], dtype=dtypes.int32) + RHSNotImplemented()
      self.evaluate(a)
class SignTest(test_util.TensorFlowTestCase):
  """Gradient of math_ops.sign for complex inputs."""

  def test_complex_sign_gradient(self):
    with context.eager_mode():
      z = math_ops.complex(1., 1.)
      with backprop.GradientTape() as tape:
        tape.watch(z)
        sign = math_ops.sign(z)
      grad = tape.gradient(sign, z)
      self.assertAllClose(grad, math_ops.complex(0.353553, -0.353553))
@test_util.run_all_in_graph_and_eager_modes
class ReciprocalNoNanTest(test_util.TensorFlowTestCase):
  """math_ops.reciprocal_no_nan: 1 / x, defined to be 0 when x == 0."""

  allowed_dtypes = [
      dtypes.float16, dtypes.float32, dtypes.float64, dtypes.complex64,
      dtypes.complex128
  ]

  def testBasic(self):
    for dtype in self.allowed_dtypes:
      inputs = constant_op.constant([1.0, 2.0, 0.0, 4.0], dtype=dtype)
      result = math_ops.reciprocal_no_nan(inputs)
      # Zero maps to zero instead of inf/nan.
      expected = constant_op.constant([1.0, 0.5, 0.0, 0.25], dtype=dtype)
      self.assertAllEqual(result, expected)
      self.assertEqual(result.dtype.base_dtype, expected.dtype.base_dtype)

  def testInverse(self):
    for dtype in self.allowed_dtypes:
      raw = np.random.choice([0, 1, 2, 4, 5], size=(5, 5, 5))
      inputs = constant_op.constant(raw, dtype=dtype)
      # Applying the op twice is the identity (zeros stay zeros).
      round_trip = math_ops.reciprocal_no_nan(
          math_ops.reciprocal_no_nan(inputs))
      self.assertAllClose(round_trip, inputs)
      self.assertEqual(round_trip.dtype.base_dtype, inputs.dtype.base_dtype)
class EqualityTest(test_util.TensorFlowTestCase, parameterized.TestCase):
  """Semantics of tensor equality operators."""

  @test_util.run_all_in_graph_and_eager_modes
  def testEqualityNone(self):
    # Comparing a tensor with None must never report equality.
    x = constant_op.constant([1.0, 2.0, 0.0, 4.0], dtype=dtypes.float32)
    self.assertNotEqual(x, None)
    self.assertNotEqual(None, x)
    self.assertFalse(math_ops.tensor_equals(x, None))
    self.assertTrue(math_ops.tensor_not_equals(x, None))

  @parameterized.named_parameters(
      (f"-is_equals={is_equals}-float_literal_type={type(float_literal)}"  # pylint: disable=g-complex-comprehension
       f"-float_literal={float_literal}", is_equals, float_literal)
      for float_literal in [4.6, np.float32(4.6), 4.4, np.float32(4.4)]
      for is_equals in [True, False])
  def testEqualityNoDowncast(self, is_equals, float_literal):
    # int tensor 4 compared with 4.6 / 4.4 must not downcast the float to
    # int (which would make them spuriously equal).
    if (tf2.enabled() and isinstance(float_literal, np.float32) or
        not tf2.enabled() and isinstance(float_literal, float)):
      # TODO(b/199262800): Remove this skip
      self.skipTest("There is a bug in type promotion.")
    if is_equals:
      op = math_ops.tensor_equals
    else:
      op = math_ops.tensor_not_equals
    x = constant_op.constant(4)
    try:
      result = op(x, float_literal)
      if isinstance(result, ops.Tensor):
        result = self.evaluate(result)
    except TypeError:
      # Throwing a TypeError is OK
      return
    self.assertEqual(result, not is_equals)
@test_util.run_all_in_graph_and_eager_modes
class RangeTest(test_util.TensorFlowTestCase):
  """Conversion of Python `range` objects to tensors."""

  def testConvertToTensorRange(self):
    source = range(5)
    converted = ops.convert_to_tensor(source)
    # A range of length 5 becomes a rank-1 tensor holding the same values.
    self.assertAllEqual((5,), converted.get_shape().as_list())
    self.assertAllEqual(source, self.evaluate(converted))
@test_util.run_all_in_graph_and_eager_modes
class ErfcinvTest(test_util.TensorFlowTestCase):
  """erfcinv must invert erfc for inputs inside its domain (0, 2)."""

  def testErfcinv(self):
    samples = np.random.uniform(0.1, 1.9, size=int(1e4)).astype(np.float32)
    round_trip = math_ops.erfc(math_ops.erfcinv(samples))
    self.assertAllClose(samples, self.evaluate(round_trip))
@test_util.run_all_in_graph_and_eager_modes
class ArgMaxMinTest(test_util.TensorFlowTestCase):
  """argmax/argmin checked against the NumPy reference implementations."""

  def _generateRandomTensor(self, dtype, shape):
    # Integer dtypes draw from their full representable range (endpoint
    # inclusive); floating dtypes draw uniformly from [-1, 1).
    rng = np.random.default_rng()
    if dtype.is_integer:
      data = rng.integers(
          low=dtype.min, high=dtype.max, size=shape, endpoint=True)
    else:
      data = rng.uniform(low=-1.0, high=1.0, size=shape)
    return constant_op.constant(data, dtype=dtype)

  def _getValidDtypes(self):
    return (dtypes.bfloat16, dtypes.float16, dtypes.float32, dtypes.float64,
            dtypes.int32, dtypes.int64)

  def testArgMax(self):
    shape = (24, 8)
    for dtype in self._getValidDtypes():
      tensor = self._generateRandomTensor(dtype, shape)
      reference = self.evaluate(tensor)
      for axis in range(len(shape)):
        self.assertAllEqual(
            math_ops.argmax(tensor, axis=axis),
            np.argmax(reference, axis=axis))

  def testArgMaxReturnsFirstOccurence(self):
    for dtype in self._getValidDtypes():
      # Rows contain ties; the first occurrence must win, matching NumPy.
      tied = constant_op.constant(
          [[10, 11, 15, 15, 10], [12, 12, 10, 10, 12]], dtype=dtype)
      self.assertAllEqual(
          math_ops.argmax(tied, axis=1),
          np.argmax(self.evaluate(tied), axis=1))
      # Long tensor to ensure works with multithreading/GPU
      flat = array_ops.zeros(shape=(193681,), dtype=dtype)
      self.assertAllEqual(math_ops.argmax(flat), 0)

  def testArgMaxUint16(self):
    # Same as testArgMax, but requesting uint16 output indices.
    shape = (24, 8)
    for dtype in self._getValidDtypes():
      tensor = self._generateRandomTensor(dtype, shape)
      reference = self.evaluate(tensor)
      for axis in range(len(shape)):
        self.assertAllEqual(
            math_ops.argmax(tensor, axis=axis, output_type=dtypes.uint16),
            np.argmax(reference, axis=axis))

  def testArgMin(self):
    shape = (24, 8)
    for dtype in self._getValidDtypes():
      tensor = self._generateRandomTensor(dtype, shape)
      reference = self.evaluate(tensor)
      for axis in range(len(shape)):
        self.assertAllEqual(
            math_ops.argmin(tensor, axis=axis),
            np.argmin(reference, axis=axis))

  def testArgMinReturnsFirstOccurence(self):
    for dtype in self._getValidDtypes():
      tied = constant_op.constant(
          [[10, 11, 15, 15, 10], [12, 12, 10, 10, 12]], dtype=dtype)
      self.assertAllEqual(
          math_ops.argmin(tied, axis=1),
          np.argmin(self.evaluate(tied), axis=1))
      # Long tensor to ensure works with multithreading/GPU
      flat = array_ops.zeros(shape=(193681,), dtype=dtype)
      self.assertAllEqual(math_ops.argmin(flat), 0)
if __name__ == "__main__":
  # Run the test suite when this module is executed directly.
  googletest.main()
| {
"content_hash": "18aa9a13621d08e49d6fb4499de8a69d",
"timestamp": "",
"source": "github",
"line_count": 1231,
"max_line_length": 116,
"avg_line_length": 39.588952071486595,
"alnum_prop": 0.6287807280338162,
"repo_name": "Intel-Corporation/tensorflow",
"id": "5e871a6904ebef24d1bb3a7804b77ef8528c0433",
"size": "49423",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/math_ops_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7481"
},
{
"name": "C",
"bytes": "183416"
},
{
"name": "C++",
"bytes": "24549804"
},
{
"name": "CMake",
"bytes": "160888"
},
{
"name": "Go",
"bytes": "849081"
},
{
"name": "HTML",
"bytes": "681293"
},
{
"name": "Java",
"bytes": "307123"
},
{
"name": "Jupyter Notebook",
"bytes": "1833659"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37393"
},
{
"name": "Objective-C",
"bytes": "7037"
},
{
"name": "Objective-C++",
"bytes": "64142"
},
{
"name": "Protocol Buffer",
"bytes": "218430"
},
{
"name": "Python",
"bytes": "21875003"
},
{
"name": "Shell",
"bytes": "337846"
},
{
"name": "TypeScript",
"bytes": "849555"
}
],
"symlink_target": ""
} |
import sys
sys.path.append("..")
from hackathon import Component, RequiredFeature
from flask import g
from hackathon.database.models import UserHackathonRel, Experiment, UserProfile
from hackathon.hackathon_response import bad_request, precondition_failed, internal_server_error, not_found, ok
from hackathon.constants import EStatus, RGStatus, ReservedUser
import json
class RegisterManager(Component):
    """Manages hackathon registrations (UserHackathonRel) and user profiles.

    All public methods return plain dicts or hackathon_response helpers
    (bad_request / not_found / ok / ...) suitable for the HTTP layer.
    """

    hackathon_manager = RequiredFeature("hackathon_manager")
    user_manager = RequiredFeature("user_manager")

    def get_hackathon_registration(self, num=None):
        """Return registrations of the current hackathon, newest first.

        :param num: optional limit on the number of rows returned.
        """
        registers = self.db.find_all_objects_order_by(UserHackathonRel,
                                                      num,  # limit num
                                                      UserHackathonRel.create_time.desc(),
                                                      hackathon_id=g.hackathon.id)
        return map(lambda x: self.get_registration_with_profile(x), registers)

    def get_registration_with_profile(self, register):
        """Serialize a registration and attach the owner's display info."""
        register_dic = register.dic()
        register_dic['user'] = self.user_manager.user_display_info(register.user)
        return register_dic

    def get_registration_by_id(self, id):
        """Look up a registration row by primary key; None when absent."""
        return self.db.get_object(UserHackathonRel, id)

    def get_registration_by_user_and_hackathon(self, user_id, hackathon_id):
        """Return the (user, hackathon) registration, or None."""
        return self.db.find_first_object_by(UserHackathonRel, user_id=user_id, hackathon_id=hackathon_id)

    def check_register_enrollment(self, hackathon):
        """True while the hackathon still has open registration slots."""
        max = int(json.loads(hackathon.basic_info)['max_enrollment'])
        if max == 0:  # means no limit
            return True
        else:
            current_num = self.db.count(UserHackathonRel, UserHackathonRel.hackathon_id == hackathon.id)
            return max > current_num

    def validate_created_args(self, hackathon, args):
        """Check whether `args` may create a registration on `hackathon`.

        :returns: (True, "") when valid, otherwise (False, error-response)
                  where the response is an existing registration dict or a
                  precondition_failed payload.
        """
        self.log.debug("create_register: %r" % args)
        user_id = args['user_id']
        register = self.get_registration_by_user_and_hackathon(user_id, hackathon.id)
        if register is not None and register.deleted == 0:
            self.log.debug("user %d already registered on hackathon %d" % (user_id, hackathon.id))
            return False, register.dic()
        if hackathon.registration_start_time > self.util.get_now():
            return False, precondition_failed("hackathon registration not opened", friendly_message="报名尚未开始")
        if hackathon.registration_end_time < self.util.get_now():
            return False, precondition_failed("hackathon registration has ended", friendly_message="报名已经结束")
        if not self.check_register_enrollment(hackathon):
            return False, precondition_failed("hackathon registers reach the upper threshold",
                                              friendly_message="报名人数已满")
        return True, ""

    def create_registration(self, hackathon, args):
        """Create a registration after validation; auto-approve when enabled."""
        state, return_info = self.validate_created_args(hackathon, args)
        if not state:
            return return_info
        try:
            args["status"] = hackathon.is_auto_approve() and RGStatus.AUTO_PASSED or RGStatus.UNAUDIT
            args['create_time'] = self.util.get_now()
            return self.db.add_object_kwargs(UserHackathonRel, **args).dic()
        except Exception as e:
            self.log.error(e)
            return internal_server_error("fail to create register")

    def update_registration(self, args):
        """Apply the changed fields from `args` onto an existing registration."""
        self.log.debug("update_registration: %r" % args)
        try:
            id = args['id']
            register = self.get_registration_by_id(id)
            if register is None:
                # we can also create a new object here.
                return not_found("registration not found")
            self.log.debug("update a existed register")
            # Persist only fields that actually differ from the stored row.
            update_items = dict(dict(args).viewitems() - register.dic().viewitems())
            if "create_time" in update_items:
                update_items.pop("create_time")  # creation time is immutable
            update_items["update_time"] = self.util.get_now()
            self.db.update_object(register, **update_items)
            return register.dic()
        except Exception as e:
            self.log.error(e)
            return internal_server_error("fail to update register")

    def delete_registration(self, args):
        """Delete the registration identified by args['id'] (idempotent)."""
        if "id" not in args:
            return bad_request("id not invalid")
        try:
            # BUG FIX: the original passed the expression `id == args['id']`
            # (the builtin `id` function compared to a value -- always False,
            # supplied positionally), so the row could never be found or
            # deleted. It must be the keyword filter `id=args['id']`.
            register = self.db.find_first_object_by(UserHackathonRel, id=args['id'])
            if register is not None:
                self.db.delete_object(register)
            return ok()
        except Exception as ex:
            self.log.error(ex)
            return internal_server_error("failed in delete register: %s" % args["id"])

    def get_registration_detail(self, user_id, hackathon):
        """Return hackathon + user info, plus registration/experiment if any."""
        detail = {
            "hackathon": hackathon.dic(),
            "user": self.user_manager.user_display_info(g.user)
        }
        rel = self.get_registration_by_user_and_hackathon(user_id, hackathon.id)
        if rel is None:
            # Not registered: hackathon/user info only.
            return detail
        detail["registration"] = rel.dic()
        # Attach a starting/running experiment when the user has one;
        # lookup failures are logged but never break the response.
        try:
            experiment = self.db.find_first_object(Experiment,
                                                   Experiment.user_id == user_id,
                                                   Experiment.hackathon_id == hackathon.id,
                                                   Experiment.status.in_([EStatus.STARTING, EStatus.RUNNING]))
            if experiment is not None:
                detail["experiment"] = experiment.dic()
        except Exception as e:
            self.log.error(e)
        return detail

    def is_user_registered(self, user_id, hackathon):
        """True for the reserved user, hackathon admins, or approved registrants."""
        # reservedUser (-1)
        if user_id == ReservedUser.DefaultUserID:
            return True
        # admin
        if self.hackathon_manager.validate_admin_privilege(user_id, hackathon.id):
            return True
        # regular user: must hold an auto- or manually-approved registration
        reg = self.get_registration_by_user_and_hackathon(user_id, hackathon.id)
        if reg is not None:
            return reg.status == RGStatus.AUTO_PASSED or reg.status == RGStatus.AUDIT_PASSED
        return False

    def get_user_profile(self, user_id):
        """Return the UserProfile row for `user_id`, or None."""
        return self.db.find_first_object_by(UserProfile, user_id=user_id)

    def create_user_profile(self, args):
        """Create the current user's profile, or update it when one exists."""
        self.log.debug("create_user_profile: %r" % args)
        try:
            exist = self.get_user_profile(g.user.id)
            if not exist:
                return self.db.add_object_kwargs(UserProfile, **args).dic()
            else:
                return self.update_user_profile(args)
        except Exception as e:
            self.log.debug(e)
            return internal_server_error("failed to create user profile")

    def update_user_profile(self, args):
        """Update the profile row identified by args['user_id']."""
        self.log.debug("update_user_profile")
        try:
            u_id = args["user_id"]
            user_profile = self.db.find_first_object_by(UserProfile, user_id=u_id)
            if user_profile:
                self.db.update_object(user_profile, **args)
                return user_profile.dic()
            else:
                return not_found("fail to update user profile")
        except Exception as e:
            self.log.debug(e)
            return internal_server_error("failed to update user profile")
| {
"content_hash": "76a4861e9d89dc2e41c1adf6fd1eedad",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 111,
"avg_line_length": 42.50867052023121,
"alnum_prop": 0.5974979602937177,
"repo_name": "Fendoe/open-hackathon-o",
"id": "aaf0c143f91ee25929b8498324a4c63117eeea03",
"size": "8758",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "open-hackathon-server/src/hackathon/registration/register_mgr.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "109082"
},
{
"name": "HTML",
"bytes": "426116"
},
{
"name": "Java",
"bytes": "12108"
},
{
"name": "JavaScript",
"bytes": "414512"
},
{
"name": "Python",
"bytes": "2270532"
},
{
"name": "Ruby",
"bytes": "1518308"
},
{
"name": "Shell",
"bytes": "18652"
}
],
"symlink_target": ""
} |
from enum import Enum
class Language(Enum):
    """Supported languages, keyed by their ISO 639-1 two-letter code."""

    # Member value is the code used when (de)serializing a language choice.
    english = 'en'
    turkish = 'tr'
| {
"content_hash": "bae8f2e95929745a20c9154016c41fff",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 21,
"avg_line_length": 14,
"alnum_prop": 0.6309523809523809,
"repo_name": "metinsay/docluster",
"id": "971d9900ca909d7a1d444b0a90833c3bf80f97e5",
"size": "84",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docluster/utils/constants/language.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2275"
},
{
"name": "Python",
"bytes": "66306"
}
],
"symlink_target": ""
} |
import sys
import subprocess

# Launch the web server, scheduler and worker as sibling processes (using
# the same interpreter that runs this script), then block until all exit.
SCRIPTS = ("runserver.py", "runsheduler.py", "runworker.py")

procs = [subprocess.Popen([sys.executable, script]) for script in SCRIPTS]
for proc in procs:
    proc.wait()
| {
"content_hash": "deec605f18a18c18330f0c8d3c9b9a93",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 66,
"avg_line_length": 26.46153846153846,
"alnum_prop": 0.7616279069767442,
"repo_name": "bzyx/precious",
"id": "3759ebb4a6cdf0f1ecfd37c273a5dd7837c50f1e",
"size": "344",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "run_all.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "27930"
}
],
"symlink_target": ""
} |
class ApiPackage1Class1:
    """Fixture class exposing one member of each kind the API checker must see."""

    def __init__(self, arg1, arg2):
        # The arguments are intentionally unused; only the signature matters.
        self.value1 = "value1"

    def public1(self, arg1):
        # Public method taking one argument.
        pass

    def public2(self):
        # Public method taking no extra arguments.
        pass

    @property
    def public_property1(self):
        # Read-only property. NOTE(review): the backing attribute
        # `_public_property` is never assigned in this class -- presumably
        # deliberate for a signature-only fixture; confirm.
        return self._public_property

    @property
    def public_property2(self):
        # Read/write property pair (getter half).
        return self._public_property2

    @public_property2.setter
    def public_property2(self, value):
        # Read/write property pair (setter half).
        self._public_property2 = value

    def _fake_private(self):
        # Single leading underscore: private by convention only.
        pass

    def __real_private(self):
        # Double leading underscore: name-mangled, effectively private.
        pass
def ApiPackage1_function1():
    # Public module-level function fixture.
    pass


def _ApiPackage1_fake_private_function():
    # Single-underscore module function: private by convention.
    pass


def __ApiPackage1_real_private_function():
    # Double-underscore module function (no name mangling at module level).
    pass
| {
"content_hash": "7aa58e11beb859de9545bf30e53d03ba",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 42,
"avg_line_length": 17.512820512820515,
"alnum_prop": 0.6193265007320644,
"repo_name": "touilleMan/samarche",
"id": "30bcc297a7ee49633b66bae767491a23f3afcd31",
"size": "684",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/api_module/api_package1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13386"
}
],
"symlink_target": ""
} |
import account_voucher
| {
"content_hash": "e9e15df8c9a235a88fbf4c4e9b680996",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 22,
"avg_line_length": 23,
"alnum_prop": 0.8695652173913043,
"repo_name": "vileopratama/vitech",
"id": "b4d48ed534edfa4e021c80fc294cf2dd25bbe903",
"size": "123",
"binary": false,
"copies": "48",
"ref": "refs/heads/master",
"path": "src/addons/account_voucher/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "9611"
},
{
"name": "CSS",
"bytes": "2125999"
},
{
"name": "HTML",
"bytes": "252393"
},
{
"name": "Java",
"bytes": "1840167"
},
{
"name": "JavaScript",
"bytes": "6176224"
},
{
"name": "Makefile",
"bytes": "19072"
},
{
"name": "Mako",
"bytes": "7659"
},
{
"name": "NSIS",
"bytes": "16782"
},
{
"name": "Python",
"bytes": "9438805"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "22312"
},
{
"name": "Vim script",
"bytes": "406"
},
{
"name": "XSLT",
"bytes": "11489"
}
],
"symlink_target": ""
} |
import os
import math
import json
from string import punctuation
from pymongo import MongoClient
from nltk.corpus import stopwords
class Article:
'Article Class'
def __init__(self, filepath, domain):
self.filepath = filepath
self.wordDic = {}
self.wordAmount = 0
self.domain = domain
def getTF(self):
f = open(self.filepath,'r')
for line in f.readlines():
line = "".join([c for c in line if c not in punctuation]) # remove all punctuation in line
vector = line.split(' ')
for i in range(len(vector)):
if(self.wordDic.has_key(vector[i])):
self.wordDic[vector[i]] += 1.0
else:
self.wordDic[vector[i]] = 1.0
self.wordAmount += len(vector)
for key in self.wordDic.keys():
self.wordDic[key] /= self.wordAmount
def sortDic(self):
self.sortedWordList = sorted(self.wordDic.iteritems(),key=lambda d:d[1],reverse=True) # Dic sort
def getKeywords(self, count):
self.keywords = []
for word in self.sortedWordList:
if word[0].lower() not in set(stopwords.words('english')):
self.keywords.append(word[0])
if len(self.keywords) == count:
break
def display(self):
print 'wordAmount: %d' % self.wordAmount
print 'Different words: %d' % len(self.wordDic.keys())
for keyword in self.keywords:
print keyword
class Corpus:
    """A collection of Articles laid out as rootdir/<domain>/<article files>."""

    def __init__(self, rootdir):
        self.rootdir = rootdir
        self.domain = os.listdir(self.rootdir)
        self.corpus = []
        for entry in self.domain:
            # os.path.join generalizes the original Windows-only '\\' joining
            # while producing the same paths on Windows.
            domain_dir = os.path.join(self.rootdir, entry)
            for name in os.listdir(domain_dir):
                article = Article(os.path.join(domain_dir, name), entry)
                self.corpus.append(article)

    def calculateTF(self):
        """Compute term frequencies for every article."""
        for article in self.corpus:
            article.getTF()

    def calculateTFIDF(self):
        """Rescale each article's TF weights by log(N / document-frequency).

        Call calculateTF() first.
        """
        # PERF FIX: precompute document frequency once; the original
        # recounted the containing documents for every (article, word)
        # pair, which is quadratic in corpus size.
        df = {}
        for article in self.corpus:
            for word in article.wordDic:
                df[word] = df.get(word, 0) + 1
        # BUG FIX: float() avoids Python 2 integer division, which floored
        # the N/df ratio (zeroing the IDF whenever it floored to 1).
        n_docs = float(len(self.corpus))
        for article in self.corpus:
            for word in article.wordDic.keys():
                article.wordDic[word] *= math.log(n_docs / df[word])

    def getKeywords(self, count):
        """Select the top `count` keywords for every article."""
        for article in self.corpus:
            article.sortDic()
            article.getKeywords(count)

    def display(self):
        """Print statistics for every article."""
        for article in self.corpus:
            article.display()
class Twitter:
    """A single tweet document with per-word weights derived from its text."""

    def __init__(self, JSONObject):
        self.json = JSONObject          # full source document
        self.text = JSONObject['text']  # tweet body
        self.dic = {}                   # word -> frequency weight
        self.wordAmount = 0.0           # token count
        self.TFIDF = {}                 # domain -> relevance score

    def getTF(self):
        """Strip punctuation from the text and store relative word frequencies."""
        cleaned = "".join([ch for ch in self.text if ch not in punctuation])
        tokens = cleaned.split(' ')
        for token in tokens:
            if token in self.dic:
                self.dic[token] += 1.0
            else:
                self.dic[token] = 1.0
        self.wordAmount += len(tokens)
        for key in self.dic.keys():
            self.dic[key] /= self.wordAmount
class TwitterList:
def __init__(self):
self.twitterList = []
self.client = MongoClient()
self.db = self.client.test
self.collection = self.db.Twitter
cursor = self.collection.find()
for document in cursor:
JSONObject = document
twitter = Twitter(JSONObject)
self.twitterList.append(twitter)
def calculateTF(self):
for twitter in self.twitterList:
twitter.getTF()
def calculateTFIDF(self):
for twitter in self.twitterList:
for word in twitter.dic.keys():
count = 0
for i in range(len(self.twitterList)):
if(self.twitterList[i].dic.has_key(word)):
count += 1
twitter.dic[word] *= math.log(len(self.twitterList) / count)
def getCorrelation(self, domain, keywords):
for twitter in self.twitterList:
twitter.TFIDF[domain] = 0
for keyword in keywords:
if twitter.dic.has_key(keyword):
twitter.TFIDF[domain] += twitter.dic[keyword]
#self.twitterList.sort(key=lambda x:x.TFIDF,reverse=True) # List sorted by TFIDF
def updateToDatabase(self):
for twitter in self.twitterList:
result = self.collection.update_one({"_id":twitter.json['_id']},{"$set":{"importance":twitter.TFIDF}})
if result.matched_count != 1:
return False
return True
def display(self):
for twitter in self.twitterList:
print twitter.TFIDF
class DataTransformer:
    """Exports the Twitter collection as a GeoJSON FeatureCollection string."""

    def __init__(self):
        self.client = MongoClient()
        self.db = self.client.test
        self.collection = self.db.Twitter
        self.data = ""  # accumulated output of toJson()

    def toJson(self):
        """Serialize every document to GeoJSON and return the result string."""
        # PERF FIX: build each feature fragment separately and join once;
        # the original concatenated onto self.data with += inside the loop,
        # which is quadratic in the number of documents. Output bytes are
        # unchanged.
        features = []
        for document in self.collection.find():
            feature = {
                'type': 'Feature',
                'id': str(document['_id']),
                'geometry': {'coordinates': document['coordinates'], 'type': 'Point'},
                'properties': {'id': str(document['_id']), 'time': document['created_at'],
                               'name': document['name'], 'text': document['text'],
                               'location': document['location'], 'media_url': document['media_url'],
                               'marker-size': 'medium', 'marker-color': '#7ec9b1', 'marker-symbol': '3',
                               'importance': document['importance']}
            }
            features.append(str(json.dumps(feature)))
        # NOTE(review): self.data accumulates across calls, so calling
        # toJson() twice appends a second document (preserved from the
        # original) -- confirm callers only call it once.
        self.data += ('{"type":"FeatureCollection","id":"tweetsyoulike.c22ab257","features":['
                      + ','.join(features) + ']}')
        return self.data
"content_hash": "432dbfffc58685e8f099dd36d8f51973",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 114,
"avg_line_length": 35.73142857142857,
"alnum_prop": 0.5338237645929954,
"repo_name": "riteshkasat/GeoTweets",
"id": "b66a5a152bad9d5f0abe997b3f13055e76e1160d",
"size": "6253",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "data/tool/legacy/Tool/ToolClass.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "280"
},
{
"name": "HTML",
"bytes": "1202"
},
{
"name": "JavaScript",
"bytes": "15052"
},
{
"name": "PHP",
"bytes": "12396"
},
{
"name": "Python",
"bytes": "33458"
}
],
"symlink_target": ""
} |
"""Install Dallinger as a command line utility."""
import pathlib
from setuptools import setup
# The directory containing this file
HERE = pathlib.Path(__file__).parent
README = (HERE / "README.md").read_text(encoding="utf-8")
# Everything setuptools needs to build, describe and publish the package.
setup_args = dict(
    name="dallinger",
    packages=["dallinger", "dallinger_scripts"],
    version="9.2.1",
    description="Laboratory automation for the behavioral and social sciences",
    long_description=README,
    long_description_content_type="text/markdown",
    url="http://github.com/Dallinger/Dallinger",
    maintainer="Jordan Suchow",
    maintainer_email="suchow@berkeley.edu",
    license="MIT",
    keywords=["science", "cultural evolution", "experiments", "psychology"],
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Science/Research",
        "Topic :: Scientific/Engineering",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Framework :: Pytest",
    ],
    include_package_data=True,
    zip_safe=False,
    # Command-line tools, Heroku process entry points, and the Dallinger
    # pytest plugin registration.
    entry_points={
        "console_scripts": [
            "dallinger = dallinger.command_line:dallinger",
            "dallinger-housekeeper = dallinger.command_line:dallinger_housekeeper",
            "dallinger_heroku_web = dallinger_scripts.web:main",
            "dallinger_heroku_worker = dallinger_scripts.worker:main",
            "dallinger_heroku_clock = dallinger_scripts.clock:main",
        ],
        "dallinger.experiments": [],
        "pytest11": ["pytest_dallinger = dallinger.pytest_dallinger"],
    },
    # Core runtime dependencies.
    install_requires=[
        "APScheduler",
        "cached-property",
        "boto3",
        "build",
        "click",
        "faker",
        "Flask-Sockets",
        "Flask",
        "flask-crossdomain",
        "flask-login",
        "Flask-WTF",
        "future",
        "gevent",
        "greenlet",
        "gunicorn",
        "heroku3",
        "localconfig",
        "pexpect",
        "pip>=20",
        "pip-tools",
        "psycopg2",
        "psutil",
        "redis",
        "requests",
        "rq",
        "selenium",
        "six",
        "SQLAlchemy",
        "sqlalchemy-postgres-copy",
        "tabulate",
        "tenacity",
        "timeago",
        "tzlocal",
        "ua-parser",
        "user-agents",
    ],
    # Optional feature sets, installable as e.g. `pip install dallinger[data]`.
    extras_require={
        "jupyter": [
            "jupyter",
            "ipywidgets",
        ],
        "data": [
            "pandas",
            "tablib[all]",
        ],
        "dev": [
            "alabaster",
            "black",
            "bumpversion",
            "coverage",
            "coverage_pth",
            "codecov",
            "flake8",
            "mock",
            "pre-commit",
            "pycodestyle",
            "pypandoc",
            "pytest",
            "pytest-rerunfailures",
            "recommonmark",
            "sphinxcontrib-spelling",
            "Sphinx",
            "tox",
            "sphinx_rtd_theme",
        ],
        "docker": ["docker", "docker-compose", "paramiko", "sshtunnel"],
        # Backport of importlib.metadata for pre-3.8 interpreters.
        ':python_version <= "3.7"': ["importlib_metadata"],
    },
)

setup(**setup_args)
| {
"content_hash": "651dab5a2dd082ec6fb974e08b1cab6a",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 83,
"avg_line_length": 28.076271186440678,
"alnum_prop": 0.5203742831270751,
"repo_name": "Dallinger/Dallinger",
"id": "6799dda55be2c459addd5c588fe74b33eaab39e7",
"size": "3313",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2204"
},
{
"name": "Dockerfile",
"bytes": "4288"
},
{
"name": "HTML",
"bytes": "62909"
},
{
"name": "JavaScript",
"bytes": "49602"
},
{
"name": "Jinja",
"bytes": "4871"
},
{
"name": "Procfile",
"bytes": "88"
},
{
"name": "Python",
"bytes": "1131695"
},
{
"name": "Ruby",
"bytes": "1769"
},
{
"name": "Shell",
"bytes": "2905"
}
],
"symlink_target": ""
} |
import pika
import json
import base64
import time
import os
import redis
import logging
import construct_graph
global connection
class Remote_Interface_server():
    """RPC bridge: maps command names received over RabbitMQ to Redis calls.

    Each REDIS_* handler takes a list of command descriptors and returns a
    dict with a "results" list in matching order (several write handlers
    return an empty "results" list by design).
    """

    def __init__(self, redis_handle):
        self.redis = redis_handle
        # Dispatch table: wire-protocol command name -> handler method.
        # PING maps to the sentinel True and is echoed, not dispatched.
        self.cmds = {}
        self.cmds["PING"] = True
        self.cmds["REDIS_GET"] = self.redis_get
        self.cmds["REDIS_SET"] = self.redis_set
        self.cmds["REDIS_LLEN"] = self.redis_llen
        self.cmds["REDIS_LINDEX"] = self.redis_lindex
        self.cmds["REDIS_LSET"] = self.redis_lset
        self.cmds["REDIS_TRIM"] = self.redis_trim
        self.cmds["REDIS_PUSH"] = self.redis_lpush
        self.cmds["REDIS_POP"] = self.redis_rpop
        self.cmds["REDIS_DEL"] = self.redis_del
        self.cmds["REDIS_HGET"] = self.redis_hget
        self.cmds["REDIS_HSET"] = self.redis_hset
        self.cmds["REDIS_HGET_ALL"] = self.redis_hget_all
        self.cmds["REDIS_HDEL"] = self.redis_hdel
        self.cmds["REDIS_HKEYS"] = self.redis_hkeys
        self.cmds["REDIS_KEYS"] = self.redis_keys

    def redis_hkeys(self, command_data):
        # Field names of each requested hash.
        object_data = {}
        object_data["results"] = []
        for i in command_data:
            object_data["results"].append(self.redis.hkeys(i["hash"]))
        return object_data

    def redis_keys(self, command_data):
        # Key names matching each requested pattern.
        object_data = {}
        object_data["results"] = []
        for i in command_data:
            object_data["results"].append(self.redis.keys(i["key"]))
        return object_data

    def redis_get(self, command_data):
        # Plain GET for each key in the list.
        object_data = {}
        object_data["results"] = []
        for i in command_data:
            object_data["results"].append({"key": i, "data": self.redis.get(i)})
        return object_data

    def redis_set(self, command_data):
        # Plain SET for each {key, data} descriptor; returns empty results.
        object_data = {}
        object_data["results"] = []
        for i in command_data:
            key = i["key"]
            data = i["data"]
            self.redis.set(key, data)
        return object_data

    def redis_llen(self, command_data):
        # List length for each key in the list.
        object_data = {}
        object_data["results"] = []
        for i in command_data:
            key = i
            object_data["results"].append({"key": i, "data": self.redis.llen(i)})
        return object_data

    def redis_lindex(self, command_data):
        # Element at {key, index} for each descriptor.
        object_data = {}
        object_data["results"] = []
        for i in command_data:
            key = i["key"]
            index = int(i["index"])
            object_data["results"].append({"key": key, "index": index, "data": self.redis.lindex(key, index)})
        return object_data

    def redis_lset(self, command_data):
        # Overwrite element at {key, index} with data; returns empty results.
        object_data = {}
        object_data["results"] = []
        for i in command_data:
            key = i["key"]
            index = int(i["index"])
            value = i["data"]
            self.redis.lset(key, index, value)
        return object_data

    def redis_trim(self, command_data):
        # LTRIM each list to its [start, end] range; returns empty results.
        object_data = {}
        object_data["results"] = []
        for i in command_data:
            key = i["key"]
            start = i["start"]
            end = i["end"]
            self.redis.ltrim(key, start, end)
        return object_data

    def redis_lpush(self, command_data):
        # Push each item of i["data"] onto the left of list i["key"].
        object_data = {}
        object_data["results"] = []
        for i in command_data:
            key = i["key"]
            for j in i["data"]:
                self.redis.lpush(key, j)
        return object_data

    def redis_rpop(self, command_data):
        # Pop up to i["number"] items from the right of list i["key"];
        # None results (empty list) are skipped, so "data" may be shorter
        # than requested.
        object_data = {}
        object_data["results"] = []
        for i in command_data:
            key = i["key"]
            number = i["number"]
            temp1 = {"key": key, "number": number}
            temp = []
            for j in range(0, number):
                temp_1 = self.redis.rpop(key)
                if temp_1 != None:
                    temp.append(temp_1)
            temp1["data"] = temp
            object_data["results"].append(temp1)
        return object_data

    def redis_del(self, command_data):
        # DEL each key in the list; returns empty results.
        object_data = {}
        object_data["results"] = []
        for i in command_data:
            self.redis.delete(i)
        return object_data

    def redis_hdel(self, command_data):
        # Remove field i["key"] from hash i["hash"]; returns empty results.
        object_data = {}
        object_data["results"] = []
        for i in command_data:
            self.redis.hdel(i["hash"], i["key"])
        return object_data

    #
    # Input: array of dictionaries, each with:
    #   hash = i["hash"]
    #   key  = i["key"]
    #
    # Returns array of dictionaries, each with "hash", "key" and the
    # fetched "data" value.
    def redis_hget(self, command_data):
        object_data = {}
        object_data["results"] = []
        for i in command_data:
            object_data["results"].append({"hash": i["hash"], "key": i["key"], "data": self.redis.hget(i["hash"], i["key"])})
        return object_data

    #
    # Input: array of dictionaries, each with:
    #   hash  = i["hash"]
    #   key   = i["key"]
    #   value = i["data"]
    #
    # Returns an empty results list.
    def redis_hset(self, command_data):
        object_data = {}
        object_data["results"] = []
        for i in command_data:
            hash = i["hash"]
            key = i["key"]
            data = i["data"]
            self.redis.hset(hash, key, data)
        return object_data

    #
    # Input: array of dictionaries, each naming a hash via i["hash"].
    #
    # Returns an array of {"hash", "data"} dictionaries, where "data" is
    # the full field->value mapping of that hash.
    def redis_hget_all(self, command_data):
        object_data = {}
        object_data["results"] = []
        for i in command_data:
            object_data["results"].append({"hash": i["hash"], "data": self.redis.hgetall(i["hash"])})
        return object_data

    def process_commands(self, command_data):
        # Dispatch one decoded request. PING echoes the request back;
        # unknown commands and handler failures both reply BAD_COMMAND.
        try:
            if self.cmds.has_key(command_data["command"]) == True:
                if command_data["command"] == "PING":
                    object_data = command_data
                    object_data["reply"] = command_data["command"]
                else:
                    object_data = self.cmds[command_data["command"]](command_data["data"])
                    object_data["reply"] = command_data["command"]
                    object_data["command"] = command_data["command"]
            else:
                object_data = {}
                object_data["reply"] = "BAD_COMMAND"
        except:
            print "exception"
            object_data = {}
            object_data["reply"] = "BAD_COMMAND"
            object_data["results"] = None
        return object_data

    def on_request(self, ch, method, props, body):
        # AMQP consumer callback: decode base64+JSON request, dispatch it,
        # and publish the reply to the caller's reply_to queue.
        try:
            input_data = json.loads(base64.b64decode(body))
            output_data = self.process_commands(input_data)
        except:
            print "exception"
            output_data = {}
            output_data["reply"] = "BAD_COMMAND"
            output_data["results"] = None
        output_data = json.dumps(output_data)
        # NOTE(review): the reply is JSON-encoded twice (here and on the
        # next line), so clients must json.loads twice after base64
        # decoding -- confirm this is what existing clients expect before
        # changing it.
        response = base64.b64encode(json.dumps(output_data))
        ch.basic_publish(exchange='',
                         routing_key=props.reply_to,
                         properties=pika.BasicProperties(correlation_id = \
                                                         props.correlation_id),
                         body=str(response))
        ch.basic_ack(delivery_tag=method.delivery_tag)
if __name__ == "__main__":
    # Bootstrap: locate the Redis data store through the graph manager,
    # read the RabbitMQ connection settings from it, then serve RPC
    # requests forever.
    gm = construct_graph.Graph_Management("PI_1", "main_remote", "LaCima_DataStore")
    #
    # Now Find Data Stores
    #
    data_store_nodes = gm.find_data_stores()
    # find ip and port for redis data store (first node is used)
    data_server_ip = data_store_nodes[0]["ip"]
    data_server_port = data_store_nodes[0]["port"]
    # find ip and port for ip server
    print "data_server_ip", data_server_ip, data_server_port
    # db=2 holds the gateway configuration hash.
    redis_handle = redis.StrictRedis(host=data_server_ip, port=data_server_port, db=2)
    # RabbitMQ credentials and addressing come from the "redis_gateway" hash.
    user_name = redis_handle.hget("redis_gateway", "user_name")
    password = redis_handle.hget("redis_gateway", "password")
    vhost = redis_handle.hget("redis_gateway", "vhost")
    queue = redis_handle.hget("redis_gateway", "queue")
    port = int(redis_handle.hget("redis_gateway", "port"))
    server = redis_handle.hget("redis_gateway", "server")
    print "server", server
    logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.CRITICAL)
    command_handler = Remote_Interface_server(redis_handle)
    credentials = pika.PlainCredentials(user_name, password)
    parameters = pika.ConnectionParameters(server,
                                           port,  # ssl port
                                           vhost,
                                           credentials,
                                           ssl=True,
                                           )
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()
    #channel.queue_delete(queue=queue)
    channel.queue_declare(queue=queue)
    # Process one message at a time per consumer.
    channel.basic_qos(prefetch_count=1)
    channel.basic_consume(command_handler.on_request, queue=queue)
    print " [x] Awaiting RPC requests"
    channel.start_consuming()  # blocks forever
| {
"content_hash": "b3b5f3d97cce2b1c0784f94854eccf75",
"timestamp": "",
"source": "github",
"line_count": 318,
"max_line_length": 124,
"avg_line_length": 31.37735849056604,
"alnum_prop": 0.5139306474243336,
"repo_name": "glenn-edgar/local_controller_2",
"id": "b40bfc0d311ead311679335494ba0e6eddd0434d",
"size": "10000",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rabbit_redis_access.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "1392"
},
{
"name": "Batchfile",
"bytes": "2452"
},
{
"name": "CSS",
"bytes": "3169864"
},
{
"name": "HTML",
"bytes": "1762520"
},
{
"name": "JavaScript",
"bytes": "7044628"
},
{
"name": "Makefile",
"bytes": "5136"
},
{
"name": "PHP",
"bytes": "93357"
},
{
"name": "Python",
"bytes": "3189928"
},
{
"name": "Shell",
"bytes": "532"
},
{
"name": "Smalltalk",
"bytes": "189"
},
{
"name": "TeX",
"bytes": "3153"
}
],
"symlink_target": ""
} |
import numpy as np
from .base import OdeSolver, DenseOutput
from .common import (validate_max_step, validate_tol, select_initial_step,
norm, warn_extraneous, validate_first_step)
from . import dop853_coefficients
# Step-size controller tuning constants (see the Hairer et al. reference
# cited in rk_step below).
# Multiply steps computed from asymptotic behaviour of errors by this.
SAFETY = 0.9

MIN_FACTOR = 0.2  # Minimum allowed decrease in a step size.
MAX_FACTOR = 10  # Maximum allowed increase in a step size.
def rk_step(fun, t, y, f, h, A, B, C, K):
    """Take one explicit Runge-Kutta step.

    Advances the solution of ``y' = fun(t, y)`` from ``t`` to ``t + h``
    using the Butcher tableau ``(A, B, C)``. Notation follows Hairer,
    Norsett & Wanner, "Solving Ordinary Differential Equations I",
    Sec. II.4.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system.
    t : float
        Current time.
    y : ndarray, shape (n,)
        Current state.
    f : ndarray, shape (n,)
        Derivative at the current point, i.e. ``fun(t, y)``.
    h : float
        Step to use.
    A : ndarray, shape (n_stages, n_stages)
        Stage-combination coefficients (strictly lower triangular for an
        explicit method).
    B : ndarray, shape (n_stages,)
        Weights combining the stages into the final prediction.
    C : ndarray, shape (n_stages,)
        Time increments for consecutive stages; the first entry is 0.
    K : ndarray, shape (n_stages + 1, n)
        Output storage for the stages, one per row. The final row receives
        the derivative at ``t + h``.

    Returns
    -------
    y_new : ndarray, shape (n,)
        Solution at ``t + h`` from the higher-order formula.
    f_new : ndarray, shape (n,)
        Derivative ``fun(t + h, y_new)``.
    """
    n_stages = len(B)
    K[0] = f
    # Each stage combines the previously computed stages via row `stage` of A.
    for stage in range(1, n_stages):
        coeffs = A[stage, :stage]
        increment = h * np.dot(K[:stage].T, coeffs)
        K[stage] = fun(t + C[stage] * h, y + increment)
    y_new = y + h * np.dot(K[:n_stages].T, B)
    # The derivative at the new point is stored as the extra row of K; it is
    # reused by error estimation and dense output (FSAL-style).
    f_new = fun(t + h, y_new)
    K[n_stages] = f_new
    return y_new, f_new
class RungeKutta(OdeSolver):
    """Base class for explicit Runge-Kutta methods.

    Subclasses supply the Butcher tableau (``A``, ``B``, ``C``), the error
    weights ``E``, the dense-output matrix ``P``, and the orders; this class
    implements the adaptive step-size control loop itself.
    """
    # Butcher tableau and interpolation data -- filled in by subclasses.
    C = NotImplemented
    A = NotImplemented
    B = NotImplemented
    E = NotImplemented
    P = NotImplemented
    order = NotImplemented
    error_estimator_order = NotImplemented
    n_stages = NotImplemented

    def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,
                 rtol=1e-3, atol=1e-6, vectorized=False,
                 first_step=None, **extraneous):
        warn_extraneous(extraneous)
        super(RungeKutta, self).__init__(fun, t0, y0, t_bound, vectorized,
                                         support_complex=True)
        self.y_old = None
        self.max_step = validate_max_step(max_step)
        self.rtol, self.atol = validate_tol(rtol, atol, self.n)
        self.f = self.fun(self.t, self.y)
        if first_step is None:
            # Heuristic initial step based on the local derivative.
            self.h_abs = select_initial_step(
                self.fun, self.t, self.y, self.f, self.direction,
                self.error_estimator_order, self.rtol, self.atol)
        else:
            self.h_abs = validate_first_step(first_step, t0, t_bound)
        # K holds the RK stages; the extra row stores the derivative at t + h.
        self.K = np.empty((self.n_stages + 1, self.n), dtype=self.y.dtype)
        # Exponent of the standard controller: h *= err ** (-1 / (q + 1)).
        self.error_exponent = -1 / (self.error_estimator_order + 1)
        self.h_previous = None

    def _estimate_error(self, K, h):
        # Weighted combination of the stages scaled by the step size.
        return np.dot(K.T, self.E) * h

    def _estimate_error_norm(self, K, h, scale):
        return norm(self._estimate_error(K, h) / scale)

    def _step_impl(self):
        t = self.t
        y = self.y
        max_step = self.max_step
        rtol = self.rtol
        atol = self.atol
        # Smallest step still distinguishable from t in floating point.
        min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t)
        if self.h_abs > max_step:
            h_abs = max_step
        elif self.h_abs < min_step:
            h_abs = min_step
        else:
            h_abs = self.h_abs
        step_accepted = False
        step_rejected = False
        while not step_accepted:
            if h_abs < min_step:
                return False, self.TOO_SMALL_STEP
            h = h_abs * self.direction
            t_new = t + h
            # Clip the step so integration never overshoots t_bound.
            if self.direction * (t_new - self.t_bound) > 0:
                t_new = self.t_bound
            h = t_new - t
            h_abs = np.abs(h)
            y_new, f_new = rk_step(self.fun, t, y, self.f, h, self.A,
                                   self.B, self.C, self.K)
            # Tolerance scale uses the larger of |y| before and after the step.
            scale = atol + np.maximum(np.abs(y), np.abs(y_new)) * rtol
            error_norm = self._estimate_error_norm(self.K, h, scale)
            if error_norm < 1:
                if error_norm == 0:
                    factor = MAX_FACTOR
                else:
                    factor = min(MAX_FACTOR,
                                 SAFETY * error_norm ** self.error_exponent)
                # Never grow the step immediately after a rejection.
                if step_rejected:
                    factor = min(1, factor)
                h_abs *= factor
                step_accepted = True
            else:
                # Rejected: shrink the step and retry.
                h_abs *= max(MIN_FACTOR,
                             SAFETY * error_norm ** self.error_exponent)
                step_rejected = True
        self.h_previous = h
        self.y_old = y
        self.t = t_new
        self.y = y_new
        self.h_abs = h_abs
        self.f = f_new
        return True, None

    def _dense_output_impl(self):
        # Interpolation coefficients are a linear combination of the stages.
        Q = self.K.T.dot(self.P)
        return RkDenseOutput(self.t_old, self.t, self.y_old, Q)
class RK23(RungeKutta):
    """Explicit Runge-Kutta method of order 3(2) (Bogacki-Shampine pair).

    Steps are taken with the third-order formula while the embedded
    second-order result drives the error control (local extrapolation).
    Dense output uses a cubic Hermite polynomial. Can be applied in the
    complex domain.

    The constructor arguments (``fun``, ``t0``, ``y0``, ``t_bound``,
    ``first_step``, ``max_step``, ``rtol``, ``atol``, ``vectorized``) and
    the public attributes (``n``, ``status``, ``t_bound``, ``direction``,
    ``t``, ``y``, ``t_old``, ``step_size``, ``nfev``, ``njev``, ``nlu``)
    follow the common ``RungeKutta`` / ``OdeSolver`` interface; ``njev``
    and ``nlu`` are always 0 since no Jacobian or LU factorization is used.

    References
    ----------
    .. [1] P. Bogacki, L.F. Shampine, "A 3(2) Pair of Runge-Kutta Formulas",
           Appl. Math. Lett. Vol. 2, No. 4. pp. 321-325, 1989.
    """
    order = 3
    error_estimator_order = 2
    n_stages = 3
    # Butcher tableau: nodes C, stage coefficients A, third-order weights B.
    C = np.array([0, 1/2, 3/4])
    A = np.array([
        [0, 0, 0],
        [1/2, 0, 0],
        [0, 3/4, 0],
    ])
    B = np.array([2/9, 1/3, 4/9])
    # Error weights: difference between the 3rd- and 2nd-order solutions.
    E = np.array([5/72, -1/12, -1/9, 1/8])
    # Cubic-Hermite dense-output coefficients.
    P = np.array([
        [1, -4/3, 5/9],
        [0, 1, -2/3],
        [0, 4/3, -8/9],
        [0, -1, 1],
    ])
class RK45(RungeKutta):
    """Explicit Runge-Kutta method of order 5(4) (Dormand-Prince pair).

    Steps are taken with the fifth-order formula while the embedded
    fourth-order result drives the error control (local extrapolation).
    A quartic interpolation polynomial provides the dense output [2]_.
    Can be applied in the complex domain.

    The constructor arguments (``fun``, ``t0``, ``y0``, ``t_bound``,
    ``first_step``, ``max_step``, ``rtol``, ``atol``, ``vectorized``) and
    the public attributes (``n``, ``status``, ``t_bound``, ``direction``,
    ``t``, ``y``, ``t_old``, ``step_size``, ``nfev``, ``njev``, ``nlu``)
    follow the common ``RungeKutta`` / ``OdeSolver`` interface; ``njev``
    and ``nlu`` are always 0 since no Jacobian or LU factorization is used.

    References
    ----------
    .. [1] J. R. Dormand, P. J. Prince, "A family of embedded Runge-Kutta
           formulae", Journal of Computational and Applied Mathematics,
           Vol. 6, No. 1, pp. 19-26, 1980.
    .. [2] L. W. Shampine, "Some Practical Runge-Kutta Formulas", Mathematics
           of Computation, Vol. 46, No. 173, pp. 135-150, 1986.
    """
    order = 5
    error_estimator_order = 4
    n_stages = 6
    # Butcher tableau: nodes C, stage coefficients A, fifth-order weights B.
    C = np.array([0, 1/5, 3/10, 4/5, 8/9, 1])
    A = np.array([
        [0, 0, 0, 0, 0],
        [1/5, 0, 0, 0, 0],
        [3/40, 9/40, 0, 0, 0],
        [44/45, -56/15, 32/9, 0, 0],
        [19372/6561, -25360/2187, 64448/6561, -212/729, 0],
        [9017/3168, -355/33, 46732/5247, 49/176, -5103/18656],
    ])
    B = np.array([35/384, 0, 500/1113, 125/192, -2187/6784, 11/84])
    # Error weights: difference between the 5th- and 4th-order solutions.
    E = np.array([-71/57600, 0, 71/16695, -71/1920, 17253/339200, -22/525,
                  1/40])
    # Dense-output coefficients; corresponds to the optimum value of c_6
    # from [2]_.
    P = np.array([
        [1, -8048581381/2820520608, 8663915743/2820520608,
         -12715105075/11282082432],
        [0, 0, 0, 0],
        [0, 131558114200/32700410799, -68118460800/10900136933,
         87487479700/32700410799],
        [0, -1754552775/470086768, 14199869525/1410260304,
         -10690763975/1880347072],
        [0, 127303824393/49829197408, -318862633887/49829197408,
         701980252875/199316789632],
        [0, -282668133/205662961, 2019193451/616988883,
         -1453857185/822651844],
        [0, 40617522/29380423, -110615467/29380423, 69997945/29380423],
    ])
class DOP853(RungeKutta):
    """Explicit Runge-Kutta method of order 8 ("DOP853").

    This is a Python implementation of the "DOP853" algorithm originally
    written in Fortran [1]_, [2]_: an 8th-order method with a combined
    5th/3rd-order error estimate and a 7th-order dense-output interpolant.
    Note that this is not a literal translation, but the algorithmic core
    and coefficients are the same. Can be applied in the complex domain.

    The constructor arguments (``fun``, ``t0``, ``y0``, ``t_bound``,
    ``first_step``, ``max_step``, ``rtol``, ``atol``, ``vectorized``) and
    the public attributes (``n``, ``status``, ``t_bound``, ``direction``,
    ``t``, ``y``, ``t_old``, ``step_size``, ``nfev``, ``njev``, ``nlu``)
    follow the common ``RungeKutta`` / ``OdeSolver`` interface; ``njev``
    and ``nlu`` are always 0 since no Jacobian or LU factorization is used.

    References
    ----------
    .. [1] E. Hairer, S. P. Norsett G. Wanner, "Solving Ordinary Differential
        Equations I: Nonstiff Problems", Sec. II.
    .. [2] `Page with original Fortran code of DOP853
        <http://www.unige.ch/~hairer/software.html>`_.
    """
    n_stages = dop853_coefficients.N_STAGES
    order = 8
    error_estimator_order = 7
    # The first n_stages rows/columns drive the step itself; the rows of A
    # and C beyond that are extra stages used only for dense output.
    A = dop853_coefficients.A[:n_stages, :n_stages]
    B = dop853_coefficients.B
    C = dop853_coefficients.C[:n_stages]
    E3 = dop853_coefficients.E3  # 3rd-order error weights
    E5 = dop853_coefficients.E5  # 5th-order error weights
    D = dop853_coefficients.D    # dense-output coefficients
    A_EXTRA = dop853_coefficients.A[n_stages + 1:]
    C_EXTRA = dop853_coefficients.C[n_stages + 1:]

    def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,
                 rtol=1e-3, atol=1e-6, vectorized=False,
                 first_step=None, **extraneous):
        super(DOP853, self).__init__(fun, t0, y0, t_bound, max_step,
                                     rtol, atol, vectorized, first_step,
                                     **extraneous)
        # The extended array also holds the extra interpolation stages;
        # the base class's K is a view of its first n_stages + 1 rows.
        self.K_extended = np.empty((dop853_coefficients.N_STAGES_EXTENDED,
                                    self.n), dtype=self.y.dtype)
        self.K = self.K_extended[:self.n_stages + 1]

    def _estimate_error(self, K, h):  # Left for testing purposes.
        # Combine the 5th- and 3rd-order estimates; where both are zero the
        # correction factor stays 1, avoiding a 0/0 division.
        err5 = np.dot(K.T, self.E5)
        err3 = np.dot(K.T, self.E3)
        denom = np.hypot(np.abs(err5), 0.1 * np.abs(err3))
        correction_factor = np.ones_like(err5)
        mask = denom > 0
        correction_factor[mask] = np.abs(err5[mask]) / denom[mask]
        return h * err5 * correction_factor

    def _estimate_error_norm(self, K, h, scale):
        err5 = np.dot(K.T, self.E5) / scale
        err3 = np.dot(K.T, self.E3) / scale
        err5_norm_2 = np.sum(err5**2)
        err3_norm_2 = np.sum(err3**2)
        # Bug fix: when the step is reproduced exactly, both estimates are
        # zero and the expression below divided 0 by 0, yielding NaN and
        # breaking step acceptance. Report a zero error norm instead.
        if err5_norm_2 == 0 and err3_norm_2 == 0:
            return 0.0
        denom = err5_norm_2 + 0.01 * err3_norm_2
        return np.abs(h) * err5_norm_2 / np.sqrt(denom * len(scale))

    def _dense_output_impl(self):
        K = self.K_extended
        h = self.h_previous
        # Evaluate the extra stages that are needed only for interpolation.
        for s, (a, c) in enumerate(zip(self.A_EXTRA, self.C_EXTRA),
                                   start=self.n_stages + 1):
            dy = np.dot(K[:s].T, a[:s]) * h
            K[s] = self.fun(self.t_old + c * h, self.y_old + dy)
        F = np.empty((dop853_coefficients.INTERPOLATOR_POWER, self.n),
                     dtype=self.y_old.dtype)
        f_old = K[0]
        delta_y = self.y - self.y_old
        # The first three rows are fixed by matching y and y' at both ends
        # of the step; the remaining rows come from the D coefficients.
        F[0] = delta_y
        F[1] = h * f_old - delta_y
        F[2] = 2 * delta_y - h * (self.f + f_old)
        F[3:] = h * np.dot(self.D, K)
        return Dop853DenseOutput(self.t_old, self.t, self.y_old, F)
class RkDenseOutput(DenseOutput):
    """Polynomial interpolant over a single accepted Runge-Kutta step.

    Evaluates ``y(t) = y_old + h * Q @ [x, x**2, ..., x**(order + 1)]``
    with ``x = (t - t_old) / h``, where Q was assembled from the RK stages.
    """

    def __init__(self, t_old, t, y_old, Q):
        super(RkDenseOutput, self).__init__(t_old, t)
        self.h = t - t_old
        self.y_old = y_old
        self.Q = Q
        self.order = Q.shape[1] - 1

    def _call_impl(self, t):
        # Normalized position inside the step, 0 at t_old and 1 at t.
        x = (t - self.t_old) / self.h
        n_terms = self.order + 1
        # Powers x, x**2, ..., x**n_terms; column-wise when t is an array.
        if t.ndim == 0:
            powers = np.cumprod(np.tile(x, n_terms))
        else:
            powers = np.cumprod(np.tile(x, (n_terms, 1)), axis=0)
        y = self.h * np.dot(self.Q, powers)
        if y.ndim == 2:
            return y + self.y_old[:, None]
        return y + self.y_old
class Dop853DenseOutput(DenseOutput):
    """Dense-output interpolant for a single DOP853 step.

    Evaluates the interpolation polynomial built from the rows of ``F``
    using a Horner-like scheme that alternates factors of ``x`` and
    ``1 - x``, where ``x`` is the normalized position inside the step.
    """
    def __init__(self, t_old, t, y_old, F):
        super(Dop853DenseOutput, self).__init__(t_old, t)
        self.h = t - t_old
        self.F = F
        self.y_old = y_old

    def _call_impl(self, t):
        # Normalized position inside the step, 0 at t_old and 1 at t.
        x = (t - self.t_old) / self.h
        if t.ndim == 0:
            y = np.zeros_like(self.y_old)
        else:
            # Broadcast x over the state dimension for vectorized evaluation.
            x = x[:, None]
            y = np.zeros((len(x), len(self.y_old)), dtype=self.y_old.dtype)
        # Horner evaluation from the highest-order row of F downwards,
        # alternating multiplication by x and by (1 - x).
        for i, f in enumerate(reversed(self.F)):
            y += f
            if i % 2 == 0:
                y *= x
            else:
                y *= 1 - x
        y += self.y_old
        return y.T
| {
"content_hash": "e785426c982b3a36a625d4baee1265b8",
"timestamp": "",
"source": "github",
"line_count": 575,
"max_line_length": 107,
"avg_line_length": 37.02608695652174,
"alnum_prop": 0.5810709253170503,
"repo_name": "pizzathief/scipy",
"id": "fa3504e36d0228f7ac3876154682676d2b462330",
"size": "21290",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "scipy/integrate/_ivp/rk.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4395775"
},
{
"name": "C++",
"bytes": "649767"
},
{
"name": "Dockerfile",
"bytes": "1236"
},
{
"name": "Fortran",
"bytes": "5367672"
},
{
"name": "MATLAB",
"bytes": "4346"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Python",
"bytes": "12449825"
},
{
"name": "Shell",
"bytes": "538"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
} |
from parser import Parser
import re
import pdb
import csv
debug=False  # module-level flag; Scanner.scan takes its own `debug` argument -- presumably a leftover, verify before removing
def load_yaml(filename):
    """
    Reads any yaml file and returns its contents as a Python object.
    """
    import yaml
    # Bug fix: the original used the Python 2 ``file()`` builtin and never
    # closed the handle; a context manager closes it promptly.
    with open(filename) as stream:
        # NOTE(review): yaml.load without an explicit Loader can construct
        # arbitrary Python objects. Fine for trusted local settings files,
        # but switch to yaml.safe_load if these files can be untrusted.
        return yaml.load(stream)
class Scanner:
"""
Handles metrically scansion.
"""
def __init__(self, meter_file = 'settings/urdu-meter.yaml', #terrible name for this
short_file='settings/short.yaml',
long_file='settings/long.yaml',
meters_file = 'settings/gh-meters.yaml',
bad_combos_file = 'settings/bad_combos.csv', # csv for this one
meter_description_file='settings/gh-reference.yaml'):
self.pp = Parser(meter_file)
self.sp = Parser(short_file)
self.lp = Parser(long_file)
self.meters_with_feet = load_yaml(meters_file)
self.meters_without_feet = {}#load_yaml(gh_meters_file)
for i,v in self.meters_with_feet.iteritems():
new_i = i.replace('/','')
self.meters_without_feet[new_i] = i # save a list for later
self.ok_meters_re = '|'.join(self.meters_without_feet)
self.meter_descriptions = load_yaml(meter_description_file)
bad_combos_in = []
with open(bad_combos_file, 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar="'")
for row in reader:
assert len(row) == 2
bad_combos_in.append(tuple(row))
self.bad_combos = tuple(bad_combos_in)
def meter_ok(self, so_far):
return re.search('(?:^|\|)'+so_far, self.ok_meters_re)
def bad_combo(self,prev_matches, this_match):
'''
Makes sure illegal metrical combinations are removed.
'''
try:
prev_match = prev_matches[-1]
return (prev_match['rule']['production'], this_match['rule']['production']) in self.bad_combos
except IndexError:
return False
# return (p_m, t_m) in self.bad_combos
#
# try:
# prev_match = prev_matches[-1]
# for (p_m,t_m) in self.bad_combos:
# if prev_match['rule']['production']==p_m and this_match['rule']['production']==t_m:
# return True
# except IndexError:
# pass
# return False
def scan(self, s, known_only=True, debug=False, parser_debug = False):
"""
Scans an input string(s), ignoring unacceptable combinations if known_only set
Returns a dictionary with ['results', 'tkns', 'orig_parse']
results, a dictionary, hold the details
matches: list of dictionaries holding the match details
tokens: tokens matching long or short (e.g. c,s,c)
start: the index in the processed string
rule_id: index for the rule (* needs to account for source)
rule: copy of the rule (* don't need to copy with rule_id passed)
meter_string: production of match (e.g. = or -)
index: the last match (ignore, but used in scanning)
meter_string: the last meter string (ignore, but used in scanning)
scan: the cumulative meter string (e.g. =-===-===-===-=)
tkns the tokens of the original input string (s)
orig_parse hold the production of the tkns (used by the scanner)
"""
pp = self.pp # Parser("urdu-meter.yaml")
sp = self.sp # Parser('short.yaml')
lp = self.lp # Parser('long.yaml')
sss = pp.parse(s, debug = parser_debug)
if debug:
import pprint
ppr = pprint.PrettyPrinter(indent=4)
ppr.pprint( sss)
self.pd = pp.parse_details # save info about tokens here
if debug:
print self.pd
tkns= lp.tokenize(sss) # now tokenize that--problem here w/ kyaa
if debug:
print tkns
match_results = [{'matches':[], 'index':0}]
final_results = []
while (len(match_results)>0):
mr = match_results.pop()
for p in (sp, lp): # go through short and long parsers
newMatches = p.match_all_at(tkns, mr['index'])
if len(newMatches)==0: continue # move along if no matches
for m in newMatches:
if self.bad_combo(mr['matches'],m): # remove unacceptable combinations
continue
new_index = mr['index'] + len(m['tokens'])
new_matches = list(mr['matches']) # have to make a copy of the matches here
if re.match('l_', m['rule']['production']):
meter_string = '='
elif re.match('s_', m['rule']['production']):
meter_string = '-'
else:
meter_string = '?'
m['meter_string'] = meter_string
new_matches.append(m)
new_mr = { 'matches': new_matches, 'index': new_index, 'meter_string':meter_string}
scan_line = ''
for m in new_matches:
scan_line +=m['meter_string']
new_mr['scan'] = scan_line
if (known_only==True) and not (self.meter_ok(scan_line)):
#print "Bad meter: "+scan_line
continue
if new_index==len(tkns) or (new_index+1==len(tkns) and tkns[-1]=='b'):
if (known_only==True) and not (scan_line in self.meters_without_feet):
# in case meter is okay until now but not complete
continue
final_results.append(new_mr)
continue
else:
match_results.append(new_mr)
if debug:
pprint.pprint(final_results)
return ({'results':final_results, 'orig_parse':self.pd, 'tkns':tkns})
def quick_results(self, scan_results):
"""
print quick results
"""
final_results = scan_results['results']
scan_lines=[]
for r in final_results:
scan_line = "( "
for m in r['matches']:
scan_line += m['meter_string']+' '
scan_line += ")"
scan_lines.append(scan_line)
return ' '.join(scan_lines)
def id_meter(self,scan_string):
'''
takes a scan string without feet, returns id, e.g. G1
'''
meter_with_feet = self.meters_without_feet[scan_string]
meter_id = self.meters_with_feet[meter_with_feet]
#meter_description = self.meter_descriptions[meter_id]
return meter_id
def describe_meter(self,scan_string):
'''
takes a scan string without feet, returns meter string with feet and variations
'''
meter_with_feet = self.meters_without_feet[scan_string]
meter_id = self.meters_with_feet[meter_with_feet]
meter_description = self.meter_descriptions[meter_id]
return meter_description
# todo: copy this into print_scan
def print_scan_result(self,r, orig_parse, details=False, known_only = False, no_description=False,
no_tkns = False, no_numbers=False, no_orig_tkns=False, no_match_production=False):
'''
takes a single scan result (r) and the orginal parse (of the tokens) as input
'''
scan_line = ''
tkn_line = ''
orig_tkn_line = ''
match_production_line = '' # eg l_bcsc
if no_description == False:
if (r['scan'] in self.meters_without_feet):
meter_with_feet = self.meters_without_feet[r['scan']]
meter_id = self.meters_with_feet[meter_with_feet]
meter_description = self.meter_descriptions[meter_id]
print 'matches '+meter_description+' <'+meter_id+'> as '+meter_with_feet
for m in r['matches']:
scan_line += m['meter_string'].ljust(10)
tkn_line += ''.join(m['tokens']).ljust(10)
orig_tkns = ''
for t in orig_parse[m['start']:(m['start']+len(m['tokens']))]:
orig_tkns += ''.join(t['tokens'])
orig_tkn_line += orig_tkns.ljust(10)
match_production_line +=m['rule']['production'].ljust(10)
m
print scan_line
if no_tkns == False:
print tkn_line
if no_orig_tkns == False:
print orig_tkn_line
if no_match_production == False:
print match_production_line
def matched_meters(self, scan):
'''
Gives list of matched meters as meter id, e.g. ['G1','G2']
'''
print "** "
print scan.keys()
meters = self.meters_without_feet # acceptable meters
final_results = scan['results']
for i, r in enumerate(final_results):
if (not(r['scan'] in meters)): # skip if no match
continue
meter_id = self.meters_with_feet[meter_with_feet]
results.append(meter_id)
return results # return list of meters
def print_scan(self,scan_results, details=False, no_tkns = False, no_numbers=False, no_orig_tkns=False,known_only=False,
no_match_production = True):
meters = self.meters_without_feet#load_yaml('gh-meters.yaml')
final_results = scan_results['results']
final_results = sorted(final_results, key=lambda k: k['scan']) # sort by scan
_orig_parse = scan_results['orig_parse'] # parser detail of original scan (preserves original tokens)
tkns = scan_results['tkns'] # tokens of second-level parser
#pdb.set_trace()
for i, r in enumerate(final_results):
if known_only and (not (r['scan'] in meters)): # allows override
continue
if no_numbers==False: print 'result #'+str(i)
if (r['scan'] in meters):
meter_with_feet = self.meters_without_feet[r['scan']]
meter_id = self.meters_with_feet[meter_with_feet]
meter_description = self.meter_descriptions[meter_id]
print 'matches '+meter_description+' <'+meter_id+'> as '+meter_with_feet
scan_line = ''
tkn_line = ''
orig_tkn_line = ''
for m in r['matches']:
scan_line += m['meter_string'].ljust(10)
tkn_line += ''.join(m['tokens']).ljust(10)
orig_tkns = ''
for t in _orig_parse[m['start']:(m['start']+len(m['tokens']))]:
orig_tkns += ''.join(t['tokens'])
orig_tkn_line += orig_tkns.ljust(10)
print scan_line
if no_tkns == False:
print tkn_line
if no_orig_tkns == False:
print orig_tkn_line
if __name__ == '__main__':
s = Scanner()
_ = " ;xvush uuftaadagii kih bah .sa;hraa-e inti:zaar"
_ = " ;gara.z shast-e but-e naavuk-figan kii aazmaa))ish hai"
_ = "naqsh faryaadii hai kis kii sho;xii-e ta;hriir kaa"
pdb.set_trace()
scn = s.scan(_, known_only=True, debug=True)
print s.matched_meters(scn)
pd = s.pd
print s.print_scan(scn)
print s.print_scan_result(scn['results'][0], scn['orig_parse'])
print s.meter_descriptions
#print "****"
print s.print_scan(scn, known_only=True)
print scn.keys()
| {
"content_hash": "64f1f8616019efa60cb8cedc3f698e7c",
"timestamp": "",
"source": "github",
"line_count": 278,
"max_line_length": 124,
"avg_line_length": 41.83453237410072,
"alnum_prop": 0.5343938091143594,
"repo_name": "seanpue/hastac2015_pue",
"id": "91c2b16ad21dadf039bd53f089e7b1561834c03b",
"size": "11630",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scanner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26122"
}
],
"symlink_target": ""
} |
"""
instabot example
Workflow:
Like and follow likers of last medias from your timeline feed.
"""
import sys
import os
import time
import random
from tqdm import tqdm
import argparse
sys.path.append(os.path.join(sys.path[0], '../'))
from instabot import Bot
def like_and_follow(bot, user_id, nlikes=3):
    """Like up to `nlikes` recent medias of `user_id`, then follow them.

    Always returns True.
    """
    bot.like_user(user_id, amount=nlikes)
    bot.follow(user_id)
    return True
def like_and_follow_media_likers(bot, media, nlikes=3):
    """Like-and-follow every user who liked `media`.

    Bug fix: `nlikes` is now forwarded to `like_and_follow`; previously it
    was silently ignored and the default of 3 was always used.

    Always returns True.
    """
    for user in tqdm(bot.get_media_likers(media), desc="Media likers"):
        like_and_follow(bot, user, nlikes=nlikes)
        # Random 10-30 s pause between users.
        time.sleep(10 + 20 * random.random())
    return True
def like_and_follow_your_feed_likers(bot, nlikes=3):
    """Like and follow the likers of your most recent media.

    Bug fix: forward the caller's `nlikes` instead of the hard-coded
    literal 3, so the parameter actually takes effect.
    """
    last_media = bot.get_your_medias()[0]
    return like_and_follow_media_likers(bot, last_media, nlikes=nlikes)
# Command-line arguments: Instagram credentials and an optional proxy.
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument('-u', type=str, help="username")
parser.add_argument('-p', type=str, help="password")
parser.add_argument('-proxy', type=str, help="proxy")
args = parser.parse_args()

# Log in and process the likers of the most recent media in the feed.
bot = Bot()
bot.login(username=args.u, password=args.p,
          proxy=args.proxy)
like_and_follow_your_feed_likers(bot)
| {
"content_hash": "f00ee7e6c34f8196447c3983504dee9e",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 71,
"avg_line_length": 24.638297872340427,
"alnum_prop": 0.6899827288428325,
"repo_name": "rasperepodvipodvert/instabot",
"id": "0872e03ff5f0a9a2901f41108fb862367088ebee",
"size": "1158",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/like_and_follow_your_last_media_likers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "72963"
}
],
"symlink_target": ""
} |
from abc import ABC, abstractmethod
class Pipe(ABC):
    """Abstract interface for a processing stage.

    Implementations receive data through pipe() and manage their resources
    via the context-manager protocol (__enter__/__exit__).
    """
    @abstractmethod
    def pipe(self, data):
        # Process one unit of data; semantics are implementation-defined.
        pass
    @abstractmethod
    def __enter__(self):
        # Acquire whatever resources the implementation needs.
        pass
    @abstractmethod
    def __exit__(self, exit_type, value, traceback):
        # Release resources; receives the standard exception triple.
        pass
| {
"content_hash": "ed3ad3da6feda15a23e2aa82a07acdcb",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 52,
"avg_line_length": 17.333333333333332,
"alnum_prop": 0.6,
"repo_name": "jstriebel/webcam-effects",
"id": "52e2d873b403c55c42efc16d2c123dbf2087c472",
"size": "260",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pipes/pipe.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1187"
},
{
"name": "Python",
"bytes": "12888"
}
],
"symlink_target": ""
} |
import os
import cv2
import glob
import datetime
import logging
import numpy as np
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from data_io import load_imageset, split_cv
from metrics import precision, error_rate
# Paths derived from this module's location; the log file is named after the
# module plus the current date, e.g. logs/train_20160101.log.
module_dir = os.path.dirname(os.path.abspath(__file__))
module_name = os.path.basename(__file__).split('.')[0]
log_path = os.path.join(module_dir, os.path.pardir, 'logs', module_name + '_' + datetime.date.today().strftime('%Y%m%d') + '.log')

# Log DEBUG and above to both the log file and the console.
logger = logging.getLogger(module_name)
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler(log_path)
ch = logging.StreamHandler()
fh.setLevel(logging.DEBUG)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s][%(name)s][%(levelname)s]: %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)

# Input image size and training hyperparameters.
img_height = 96
img_width = 128
batch_size = 64
nb_classes = 10
nb_epoch = 2
nb_filters = 32  # convolution filters per conv layer
nb_pool = 2      # max-pooling window size
nb_conv = 3      # convolution kernel size
if __name__ == "__main__":
logger.info("start training")
train_set_folder = os.path.join(module_dir, os.path.pardir, os.path.pardir, 'data/test')
test_set_folder = os.path.join(module_dir, os.path.pardir, os.path.pardir, 'data/test')
# read training data
train_data, train_labels = load_imageset(train_set_folder, to_img_size = (img_height, img_width, 1), ext = 'png')
# split for cross validation
train, train_label, validation, validation_label = split_cv(train_data, train_labels)
logger.info("data split complete")
# build stacking layers
model = Sequential()
model.add(Convolution2D(nb_filters, nb_conv, nb_conv, border_mode = 'valid', input_shape = (1, img_height, img_width)))
model.add(Activation('relu'))
model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size = (nb_pool, nb_pool)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss = 'categorical_crossentropy', optimizer = 'adadelta')
model.fit(train, train_label, batch_size = batch_size, nb_epoch = nb_epoch, verbose = 1, validation_data = (validation, validation_label))
logger.info("model training complete")
score = model.evaluate(validation, validation_label, verbose = 0)
logger.info("validation score: %f" % (score))
save_model(model)
logger.info("model saved")
| {
"content_hash": "21bd6625fd64324def13edfbc381f2ca",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 140,
"avg_line_length": 30.229885057471265,
"alnum_prop": 0.7190114068441065,
"repo_name": "yao-matrix/mLearning",
"id": "2c5fdacd562b6e29ae8e9d74848369fff78f5534",
"size": "2668",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "use_case/image_cnn/apps/distracted_driver/train.py",
"mode": "33261",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from streams import BufferedStream, Stream
from multiprocessing import Process
from twython import TwythonStreamer
import json
import sys
class FileInputStream(Stream):
    """
    Loads the lines of a text file into a Stream for processing.

    One line of look-ahead is buffered in ``next_line`` so that
    ``has_next`` can be called any number of times without consuming input.
    """

    def __init__(self, filename):
        self.input = open(filename, 'r')
        self.next_line = None

    def has_next(self):
        """Return True when another line is available, buffering it."""
        if self.next_line is None:
            try:
                self.next_line = self.input.next()
            except StopIteration:
                self.next_line = None
        return self.next_line is not None

    def next(self):
        """Return the buffered line; raise when the file is exhausted."""
        if not self.has_next():
            raise Exception("No more elements in file!")
        line, self.next_line = self.next_line, None
        return line

    def __del__(self):
        # Best-effort close when the stream is garbage-collected.
        self.input.close()
class JSONInputStream(FileInputStream):
    """
    Loads a file containing a separate JSON object on each line into a Stream
    for processing.
    """

    # The previous __init__ and __del__ overrides only delegated to the
    # superclass and were removed; inheritance already provides that behavior.

    def next(self):
        """Return the next line of the underlying file parsed as JSON."""
        return json.loads(super(JSONInputStream, self).next())
class TweetStream(BufferedStream):
    """
    Publishes tweets fetched from the Twitter Sample stream to a
    BufferedStream for processing.
    """

    def __init__(self, buf, app_key, app_secret, access_token, access_token_secret):
        super(TweetStream, self).__init__(buf)
        self.source = self.TweetSource(self, app_key, app_secret, access_token,
                                       access_token_secret)
        # Twython's sample stream blocks forever, so it runs in a daemon
        # subprocess that dies with the parent.
        self.worker = Process(target=self.source.statuses.sample)
        self.worker.daemon = True

    def connect(self):
        """Open the buffered stream, then start consuming the sample stream."""
        super(TweetStream, self).connect()
        self.worker.start()

    def disconnect(self):
        """Stop the Twitter connection and kill the worker process."""
        super(TweetStream, self).disconnect()
        self.source.disconnect()
        self.worker.terminate()

    class TweetSource(TwythonStreamer):
        """
        Acts as the 'listener' to the official Twitter Sample stream.
        """

        def __init__(self, publish_stream, app_key, app_secret, access_token, access_token_secret):
            super(TweetStream.TweetSource, self).__init__(app_key, app_secret, access_token,
                                                          access_token_secret)
            # Stream that received tweets are pushed into.
            self.publish_stream = publish_stream

        def on_success(self, data):
            # Only real tweets carry 'text'; deletion notices etc. are dropped.
            if 'text' in data:
                self.publish_stream.register(data)

        def on_error(self, status_code, data):
            sys.stderr.write('{0} - {1}'.format(status_code, data))
| {
"content_hash": "38dc413fec5a8e39d3a03e81de2213e5",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 99,
"avg_line_length": 29.172043010752688,
"alnum_prop": 0.6008109104312569,
"repo_name": "daviesjamie/spout",
"id": "2cf15996e334726d60d3f62597a658cd27df12be",
"size": "2713",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spout/sources.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17531"
}
],
"symlink_target": ""
} |
from random import choice

# Seed lists for "<startup> for <demography>" pitch ideas.
startups = ["Google", "Facebook", "Dropbox", "SpaceX", "Uber", "Snapchat", "Twitter", "Stripe",
            "AirBnB", "Digital Ocean", "Github", "Tinder"]
demographies = ["Pets", "the elderly", "Robots", "noisy kids", "Cats", "Fruits",
                "Programming Languages", "Teachers", "Computer Scientists", "Janitors", "Film Producers",
                "Actors"]

# Python 2 script (print statement, xrange): emit 10000 random combinations.
for _ in xrange(10000):
    print choice(startups) + " for " + choice(demographies)
| {
"content_hash": "73ca524416477d4b04f88ce3ab01de0b",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 105,
"avg_line_length": 40.166666666666664,
"alnum_prop": 0.6058091286307054,
"repo_name": "MAPSuio/spring-challenge16",
"id": "21b3db3dc38e10b94e7e36099b0738a6ef2e9820",
"size": "482",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dejavu/generate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Clojure",
"bytes": "4921"
},
{
"name": "Haskell",
"bytes": "2557"
},
{
"name": "Java",
"bytes": "722"
},
{
"name": "Python",
"bytes": "19088"
},
{
"name": "Scheme",
"bytes": "1100"
},
{
"name": "Shell",
"bytes": "591"
}
],
"symlink_target": ""
} |
"""
AUTHOR : Lang
PURPOSE : Multi Self Deep Learning
"""
__author__ = 'Lang'
import os;
tracing = open("After_First_Purification.txt",'w')
tracing.close()
total_count = 0
for dirs in sorted(os.listdir(".")):
try:
count = 0
for files in sorted(os.listdir(dirs)):
if ".jpg" in files:
count = count + 1
total_count = total_count + 1
tracing = open("After_First_Purification.txt",'a')
tracing.write("No." + str(total_count) + " " + dirs + " Set contains " + str(count) + " files\n")
tracing.close()
except:
print("error")
| {
"content_hash": "e3d02882dc6bb926bda6164df5464ac7",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 105,
"avg_line_length": 24.64,
"alnum_prop": 0.5568181818181818,
"repo_name": "HeavenMin/PlantImageRecognition",
"id": "df4ea8a3a52e5a5fb4d65dad9cd65620b4e491e7",
"size": "617",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Dataset Process/Count_Each_Dri_Files_Number.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "119507"
},
{
"name": "Python",
"bytes": "168234"
}
],
"symlink_target": ""
} |
import time
import roslib; roslib.load_manifest('aubo_new_driver')
import rospy
import actionlib
from control_msgs.msg import *
from trajectory_msgs.msg import *
from sensor_msgs.msg import JointState
from math import pi
JOINT_NAMES = ['shoulder_joint', 'upperArm_joint', 'foreArm_joint',
'wrist1_joint', 'wrist2_joint', 'wrist3_joint']
Q1 = [2.2,1.57,1.57,0,-1.57,1.0]
Q2 = [1.0,1.50,1.57,0,-1.0,2.0]
Q3 = [1.0,1.40,1.57,0,-0,3.0]
client = None
def move1():
    """Send one trajectory goal moving from the current pose through Q1-Q3."""
    global joints_pos
    g = FollowJointTrajectoryGoal()
    g.trajectory = JointTrajectory()
    g.trajectory.joint_names = JOINT_NAMES
    try:
        # Start from the robot's current pose so the first segment is feasible.
        joint_states = rospy.wait_for_message("joint_states", JointState)
        joints_pos = joint_states.position
        g.trajectory.points = [
            JointTrajectoryPoint(positions=joints_pos, velocities=[0]*6, time_from_start=rospy.Duration(0.0)),
            JointTrajectoryPoint(positions=Q1, velocities=[0]*6, time_from_start=rospy.Duration(2.0)),
            JointTrajectoryPoint(positions=Q2, velocities=[0]*6, time_from_start=rospy.Duration(3.0)),
            JointTrajectoryPoint(positions=Q3, velocities=[0]*6, time_from_start=rospy.Duration(4.0))]
        client.send_goal(g)
        client.wait_for_result()
    except KeyboardInterrupt:
        # Cancel the in-flight goal before propagating Ctrl-C.
        client.cancel_goal()
        raise
    except:
        raise
def move_disordered():
    """Same motion as move1 but with joint names/values permuted, to check the
    driver matches joints by name rather than by position."""
    order = [4, 2, 3, 1, 5, 0]
    g = FollowJointTrajectoryGoal()
    g.trajectory = JointTrajectory()
    g.trajectory.joint_names = [JOINT_NAMES[i] for i in order]
    # Permute the target poses with the same order as the names.
    q1 = [Q1[i] for i in order]
    q2 = [Q2[i] for i in order]
    q3 = [Q3[i] for i in order]
    try:
        joint_states = rospy.wait_for_message("joint_states", JointState)
        joints_pos = joint_states.position
        g.trajectory.points = [
            JointTrajectoryPoint(positions=joints_pos, velocities=[0]*6, time_from_start=rospy.Duration(0.0)),
            JointTrajectoryPoint(positions=q1, velocities=[0]*6, time_from_start=rospy.Duration(2.0)),
            JointTrajectoryPoint(positions=q2, velocities=[0]*6, time_from_start=rospy.Duration(3.0)),
            JointTrajectoryPoint(positions=q3, velocities=[0]*6, time_from_start=rospy.Duration(4.0))]
        client.send_goal(g)
        client.wait_for_result()
    except KeyboardInterrupt:
        client.cancel_goal()
        raise
    except:
        raise
def move_repeated():
    """Cycle the arm through Q1 -> Q2 -> Q3 ten times in a single goal."""
    g = FollowJointTrajectoryGoal()
    g.trajectory = JointTrajectory()
    g.trajectory.joint_names = JOINT_NAMES
    try:
        joint_states = rospy.wait_for_message("joint_states", JointState)
        joints_pos = joint_states.position
        # d is the running time_from_start; each cycle takes 4 seconds
        # (1s to Q1->Q2, 1s to Q2->Q3, 2s back to Q1).
        d = 2.0
        g.trajectory.points = [JointTrajectoryPoint(positions=joints_pos, velocities=[0]*6, time_from_start=rospy.Duration(0.0))]
        for i in range(10):
            g.trajectory.points.append(
                JointTrajectoryPoint(positions=Q1, velocities=[0]*6, time_from_start=rospy.Duration(d)))
            d += 1
            g.trajectory.points.append(
                JointTrajectoryPoint(positions=Q2, velocities=[0]*6, time_from_start=rospy.Duration(d)))
            d += 1
            g.trajectory.points.append(
                JointTrajectoryPoint(positions=Q3, velocities=[0]*6, time_from_start=rospy.Duration(d)))
            d += 2
        client.send_goal(g)
        client.wait_for_result()
    except KeyboardInterrupt:
        client.cancel_goal()
        raise
    except:
        raise
def move_interrupt():
    """Send a goal, then preempt it after 3 seconds with a fresh goal that
    restarts from whatever pose the arm reached."""
    g = FollowJointTrajectoryGoal()
    g.trajectory = JointTrajectory()
    g.trajectory.joint_names = JOINT_NAMES
    try:
        joint_states = rospy.wait_for_message("joint_states", JointState)
        joints_pos = joint_states.position
        g.trajectory.points = [
            JointTrajectoryPoint(positions=joints_pos, velocities=[0]*6, time_from_start=rospy.Duration(0.0)),
            JointTrajectoryPoint(positions=Q1, velocities=[0]*6, time_from_start=rospy.Duration(2.0)),
            JointTrajectoryPoint(positions=Q2, velocities=[0]*6, time_from_start=rospy.Duration(3.0)),
            JointTrajectoryPoint(positions=Q3, velocities=[0]*6, time_from_start=rospy.Duration(4.0))]
        client.send_goal(g)
        # Let the first goal run partway, then replace it mid-motion.
        time.sleep(3.0)
        print "Interrupting"
        joint_states = rospy.wait_for_message("joint_states", JointState)
        joints_pos = joint_states.position
        g.trajectory.points = [
            JointTrajectoryPoint(positions=joints_pos, velocities=[0]*6, time_from_start=rospy.Duration(0.0)),
            JointTrajectoryPoint(positions=Q1, velocities=[0]*6, time_from_start=rospy.Duration(2.0)),
            JointTrajectoryPoint(positions=Q2, velocities=[0]*6, time_from_start=rospy.Duration(3.0)),
            JointTrajectoryPoint(positions=Q3, velocities=[0]*6, time_from_start=rospy.Duration(4.0))]
        client.send_goal(g)
        client.wait_for_result()
    except KeyboardInterrupt:
        client.cancel_goal()
        raise
    except:
        raise
def main():
    """Connect to the follow_joint_trajectory action server, apply an optional
    joint-name prefix from the parameter server, confirm with the operator,
    and run the selected demo motion (Python 2 script)."""
    global client
    try:
        rospy.init_node("test_move", anonymous=True, disable_signals=True)
        client = actionlib.SimpleActionClient('follow_joint_trajectory', FollowJointTrajectoryAction)
        print "Waiting for server..."
        client.wait_for_server()
        print "Connected to server"
        # NOTE(review): the prefix is scraped out of str(parameters) with raw
        # string offsets instead of a direct parameter lookup -- fragile, but
        # kept as-is.
        parameters = rospy.get_param(None)
        index = str(parameters).find('prefix')
        if (index > 0):
            prefix = str(parameters)[index+len("prefix': '"):(index+len("prefix': '")+str(parameters)[index+len("prefix': '"):-1].find("'"))]
            for i, name in enumerate(JOINT_NAMES):
                JOINT_NAMES[i] = prefix + name
        # Show the target poses (degrees) and ask for confirmation before moving.
        print "This program makes the robot move between the following three poses:"
        print str([Q1[i]*180./pi for i in xrange(0,6)])
        print str([Q2[i]*180./pi for i in xrange(0,6)])
        print str([Q3[i]*180./pi for i in xrange(0,6)])
        print "Please make sure that your robot can move freely between these poses before proceeding!"
        inp = raw_input("Continue? y/n: ")[0]
        if (inp == 'y'):
            #move1()
            move_repeated()
            #move_disordered()
            #move_interrupt()
        else:
            print "Halting program"
    except KeyboardInterrupt:
        rospy.signal_shutdown("KeyboardInterrupt")
        raise

if __name__ == '__main__': main()
| {
"content_hash": "5565caf4fb77eeaabcd083693d744f58",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 141,
"avg_line_length": 40.62658227848101,
"alnum_prop": 0.628602586072597,
"repo_name": "robotlinker/robotlinker_core",
"id": "a3dd87af538a4b07fcf59fac4f06f8ae4fdba94a",
"size": "6441",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/aubo_robot/aubo_new_driver/test/test_move.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "11643"
},
{
"name": "C++",
"bytes": "427040"
},
{
"name": "CMake",
"bytes": "71031"
},
{
"name": "CSS",
"bytes": "6420"
},
{
"name": "HTML",
"bytes": "266390"
},
{
"name": "JavaScript",
"bytes": "53686"
},
{
"name": "Python",
"bytes": "1585372"
},
{
"name": "Shell",
"bytes": "311"
}
],
"symlink_target": ""
} |
from collections import Counter
class Solution(object):
    """LeetCode 462: minimum +/-1 moves to make all array elements equal."""

    def minMoves2(self, _nums):
        """
        :type nums: List[int]
        :rtype: int

        The sum of absolute differences is minimized at the median, so the
        answer is sum(|x - median|). This replaces the previous Python-2-only
        counter sweep (dict.items().sort()) with the standard O(n log n)
        median solution; results are identical.
        """
        if not _nums:
            return 0
        ordered = sorted(_nums)
        median = ordered[len(ordered) // 2]
        return sum(abs(value - median) for value in ordered)
print Solution().minMoves2([1,2,3]) | {
"content_hash": "b338fd63bf44a9b9bf18e44a4b29a20f",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 96,
"avg_line_length": 28.59259259259259,
"alnum_prop": 0.6761658031088082,
"repo_name": "xiaonanln/myleetcode-python",
"id": "02c0efa4f8ce7278dcc0abe22d4c9efb1e53e755",
"size": "772",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/462. Minimum Moves to Equal Array Elements II.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1956173"
}
],
"symlink_target": ""
} |
# Frontend service configuration for the test environment.
config = {
    'CONTEXT': 'We are in TestFrontend context',
    # NOTE(review): key is spelled 'IntanceType' (missing 's') -- likely a typo
    # for 'InstanceType', but consumers may depend on the current spelling;
    # verify all readers before renaming.
    'IntanceType': 'medium',
    'InstanceMin': '2',
    'InstanceMax': '6',
    'ServiceVersion': 'v3.65'
}
| {
"content_hash": "6facda8da577fb4f5974dea6858562e4",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 48,
"avg_line_length": 25.142857142857142,
"alnum_prop": 0.5568181818181818,
"repo_name": "aljim/deploymentmanager-samples",
"id": "f5f0f954c2b8d2a48b7f16cbe1af17507a203d29",
"size": "176",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "community/hierarchical_configuration/Basic/configs/test/frontend.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "6428"
},
{
"name": "HTML",
"bytes": "106754"
},
{
"name": "JavaScript",
"bytes": "70015"
},
{
"name": "Makefile",
"bytes": "4430"
},
{
"name": "Python",
"bytes": "443622"
},
{
"name": "Shell",
"bytes": "251698"
}
],
"symlink_target": ""
} |
import unittest
import paddle.fluid.core as core
from paddle.fluid.executor import Executor
import paddle.fluid.layers as layers
from paddle.fluid.backward import append_backward
from paddle.fluid.framework import default_main_program, switch_main_program
from paddle.fluid.framework import Program
import numpy as np
class TestShrinkRNNMemoryBase(unittest.TestCase):
    """Builds a three-step shrink_memory chain over a LoD rank table and wires
    up the backward pass; subclasses check forward outputs and the x gradient."""

    def setUp(self):
        self.main_program = Program()
        switch_main_program(self.main_program)
        x = layers.data('x', shape=[100], dtype='float32')
        x.stop_gradient = False
        # Rank table derived from this tensor drives how memory shrinks.
        rank_table_tensor = layers.data(
            'rank_table_tensor', shape=[1], dtype='float32', lod_level=1)
        table = layers.lod_rank_table(x=rank_table_tensor)
        # Step counter i is incremented between the three shrink steps;
        # its gradient is stopped because it is an index, not data.
        i = layers.zeros(dtype='int64', shape=[1])
        self.mem1 = layers.shrink_memory(x=x, i=i, table=table)
        i = layers.increment(x=i)
        i.stop_gradient = True
        self.mem2 = layers.shrink_memory(x=self.mem1, i=i, table=table)
        i = layers.increment(x=i)
        i.stop_gradient = True
        self.mem3 = layers.shrink_memory(x=self.mem2, i=i, table=table)
        mem3_mean = layers.mean(self.mem3)
        append_backward(loss=mem3_mean)
        self.x_grad = self.main_program.global_block().var('x@GRAD')

    def sum_lodtensor(self, tensor):
        # Sum every element of a LoDTensor one float at a time.
        # NOTE(review): xrange is Python 2 only; this test predates a py3 port.
        sum_res = 0.0
        for i in xrange(np.product(tensor.get_dims())):
            sum_res += tensor.get_float_element(i)
        return sum_res
class TestShrinkRNNMemoryReferLoD(TestShrinkRNNMemoryBase):
    def test_refer_lod(self):
        """Input x carries its own LoD; each shrink step keeps a prefix of rows
        (6, then 5, then 2) and the mean-loss gradient sums to 1."""
        cpu = core.CPUPlace()
        x_tensor = core.LoDTensor()
        x_tensor.set_lod([[0, 2, 5, 6]])
        tensor_np = np.random.random(size=(6, 100)).astype('float32')
        x_tensor.set(tensor_np, cpu)
        rank_table_tensor = core.LoDTensor()
        rank_table_tensor.set_lod([[0, 1, 3, 6]])
        rank_table_tensor.set(np.random.random(size=(6, 1)).astype('float32'),
                              cpu)
        exe = Executor(cpu)
        outs = exe.run(
            feed={'x': x_tensor,
                  'rank_table_tensor': rank_table_tensor},
            fetch_list=[self.mem1, self.mem2, self.mem3, self.x_grad],
            return_numpy=False)
        self.assertTrue(np.allclose(tensor_np[0:6], outs[0]))
        self.assertTrue(np.allclose(tensor_np[0:5], outs[1]))
        self.assertTrue(np.allclose(tensor_np[0:2], outs[2]))
        # Gradient of a mean over the kept rows sums to ~1.
        self.assertAlmostEqual(1.0, self.sum_lodtensor(outs[3]), delta=0.01)
class TestShrinkRNNMemoryNoLoD(TestShrinkRNNMemoryBase):
    def test_no_lod(self):
        """Input x has no LoD of its own; shrinking follows the rank table,
        keeping 3, then 2, then 1 rows."""
        cpu = core.CPUPlace()
        x_tensor = core.LoDTensor()
        tensor_np = np.random.random(size=(3, 100)).astype('float32')
        x_tensor.set(tensor_np, cpu)
        rank_table_tensor = core.LoDTensor()
        rank_table_tensor.set_lod([[0, 1, 3, 6]])
        rank_table_tensor.set(np.random.random(size=(6, 1)).astype('float32'),
                              cpu)
        exe = Executor(cpu)
        outs = exe.run(
            feed={'x': x_tensor,
                  'rank_table_tensor': rank_table_tensor},
            fetch_list=[self.mem1, self.mem2, self.mem3, self.x_grad],
            return_numpy=False)
        self.assertTrue(np.allclose(tensor_np[0:3], outs[0]))
        self.assertTrue(np.allclose(tensor_np[0:2], outs[1]))
        self.assertTrue(np.allclose(tensor_np[0:1], outs[2]))
        self.assertAlmostEqual(1.0, self.sum_lodtensor(outs[3]), delta=0.01)

if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "b4e25013d29d54c3d5d5ef94ee3ad553",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 78,
"avg_line_length": 39.640449438202246,
"alnum_prop": 0.608843537414966,
"repo_name": "putcn/Paddle",
"id": "1d93230e7b74c5b6c00bbe125e3ae2d3a649b4b9",
"size": "4141",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/test_shrink_rnn_memory.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "272910"
},
{
"name": "C++",
"bytes": "7598375"
},
{
"name": "CMake",
"bytes": "269313"
},
{
"name": "Cuda",
"bytes": "1078779"
},
{
"name": "Go",
"bytes": "109501"
},
{
"name": "Perl",
"bytes": "11456"
},
{
"name": "Python",
"bytes": "3637137"
},
{
"name": "Shell",
"bytes": "157071"
}
],
"symlink_target": ""
} |
import unittest
from parameterized import parameterized
from conans.test.utils.tools import TestClient
conanfile = """from conans import ConanFile
class Pkg(ConanFile):
def requirements(self):
if self.develop:
self.output.info("Develop requirements!")
def source(self):
if self.develop:
self.output.info("Develop source!")
def build(self):
if self.develop:
self.output.info("Develop build!")
def package(self):
if self.develop:
self.output.info("Develop package!")
def package_info(self):
if self.develop:
self.output.info("Develop package_info!")
def package_id(self):
if self.develop:
self.output.info("Develop package_id!")
"""
class DevelopTest(unittest.TestCase):
    """Checks that ConanFile.develop is True only for the package being
    developed (create/install/source/build/package of the package itself),
    never when it is consumed as a requirement."""

    @parameterized.expand([(True, ), (False, )])
    def develop_test(self, with_test):
        # NOTE(review): the flag name reads inverted -- with_test=True saves
        # only the conanfile, while False adds a test_package; confirm intent.
        client = TestClient()
        if with_test:
            client.save({"conanfile.py": conanfile})
        else:
            test_conanfile = """from conans import ConanFile
class MyPkg(ConanFile):
    def test(self):
        pass
"""
            client.save({"conanfile.py": conanfile,
                         "test_package/conanfile.py": test_conanfile})
        # Creating the package itself: develop is True in every hook.
        client.run("create . Pkg/0.1@user/testing")
        self.assertIn("Develop requirements!", client.out)
        self.assertIn("Develop source!", client.out)
        self.assertIn("Develop build!", client.out)
        self.assertIn("Develop package!", client.out)
        self.assertIn("Develop package_info!", client.out)
        self.assertIn("Develop package_id!", client.out)
        # Rebuilding it as a dependency: develop must be False everywhere.
        client.run("install Pkg/0.1@user/testing --build")
        self.assertNotIn("Develop", client.out)
        consumer = """from conans import ConanFile
class Pkg(ConanFile):
    requires = "Pkg/0.1@user/testing"
"""
        client.save({"conanfile.py": consumer})
        client.run("create . Other/1.0@user/testing")
        self.assertNotIn("Develop", client.out)

    def local_commands_test(self):
        """Each local workflow command sets develop only for its own hooks."""
        client = TestClient()
        client.save({"conanfile.py": conanfile})
        # `install .` runs requirements() and package_id() only.
        client.run("install .")
        self.assertIn("Develop requirements!", client.out)
        self.assertNotIn("Develop source!", client.out)
        self.assertNotIn("Develop build!", client.out)
        self.assertNotIn("Develop package!", client.out)
        self.assertNotIn("Develop package_info!", client.out)
        self.assertIn("Develop package_id!", client.out)
        client.run("source .")
        self.assertNotIn("Develop requirements!", client.out)
        self.assertIn("Develop source!", client.out)
        self.assertNotIn("Develop build!", client.out)
        self.assertNotIn("Develop package!", client.out)
        self.assertNotIn("Develop package_info!", client.out)
        self.assertNotIn("Develop package_id!", client.out)
        client.run("build .")
        self.assertNotIn("Develop requirements!", client.out)
        self.assertNotIn("Develop source!", client.out)
        self.assertIn("Develop build!", client.out)
        self.assertNotIn("Develop package!", client.out)
        self.assertNotIn("Develop package_info!", client.out)
        self.assertNotIn("Develop package_id!", client.out)
        client.run("package .")
        self.assertNotIn("Develop requirements!", client.out)
        self.assertNotIn("Develop source!", client.out)
        self.assertNotIn("Develop build!", client.out)
        self.assertIn("Develop package!", client.out)
        self.assertNotIn("Develop package_info!", client.out)
        self.assertNotIn("Develop package_id!", client.out)
| {
"content_hash": "e5b31842fa78e30342a6f7ed9f55a330",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 70,
"avg_line_length": 37.8041237113402,
"alnum_prop": 0.6310335424052359,
"repo_name": "memsharded/conan",
"id": "a0c893a6eb78c228a3fa974197b72d877ae6307c",
"size": "3667",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "conans/test/functional/old/develop_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1100"
},
{
"name": "C",
"bytes": "264"
},
{
"name": "C++",
"bytes": "425"
},
{
"name": "CMake",
"bytes": "447"
},
{
"name": "Groovy",
"bytes": "12586"
},
{
"name": "Python",
"bytes": "4334185"
},
{
"name": "Shell",
"bytes": "1864"
}
],
"symlink_target": ""
} |
import bluebox, fec
import config, parser, tracker, ircreporter
import threading
import argparse
import time
import binascii
from datetime import datetime
class bb_frontend(threading.Thread):
    """Frontend tying a Bluebox radio to the beacon parser, with optional
    satellite tracking (doppler correction / pass planning) and optional
    IRC-based packet verification. Python 2 code (print statements, / int div)."""

    LOGFILE = "log.txt"

    def __init__(self, qth=None, config_file=None, enable_tracking=False, enable_auth=False):
        # qth: (lat, lon, alt) of the ground station; required for tracking.
        c = config.Config(config_file)
        self.qth = qth
        self.config = c.get_config()
        self.center_freq = self.config['radio_settings']['frequency']
        self.enable_tracking = enable_tracking
        # All Bluebox hardware access is serialized through bb_lock.
        self.bb_lock = threading.Lock()
        with self.bb_lock:
            self.bluebox = bluebox.Bluebox()
        self.parser = parser.Parser()
        if self.enable_tracking:
            self.tle = '\n'.join(self.config['tle'])
            self.tracker = tracker.Tracker(self.qth, self.tle, self.center_freq)
        self.enable_auth = enable_auth
        if self.enable_auth:
            self.irc_reporter = ircreporter.IRCReporter()
        self.update(self.config)
        # Without a fixed config file, re-apply settings when config changes.
        if not config_file:
            c.add_observer(self)

    def update(self, config):
        """Push radio settings from `config` to tracker and hardware."""
        settings = config['radio_settings']
        self.center_freq = settings['frequency']
        # NOTE(review): self.tracker only exists when tracking is enabled, so
        # this line looks like it would raise otherwise -- confirm.
        # ('set_center_frequncy' is the tracker API's spelling.)
        self.tracker.set_center_frequncy(self.center_freq)
        with self.bb_lock:
            # Short sleeps pace consecutive commands to the hardware.
            self.bluebox.set_frequency(self.center_freq)
            time.sleep(0.01)
            self.bluebox.set_modindex(settings['modindex'])
            time.sleep(0.01)
            self.bluebox.set_bitrate(settings['bitrate'])
            time.sleep(0.01)
            self.bluebox.set_power(settings['power'])
            time.sleep(0.01)
            self.bluebox.set_training(settings['training'])
            time.sleep(0.01)
            self.bluebox.set_syncword(int(settings['syncword'], 16))
            time.sleep(0.01)

    def run(self):
        # Pass-planned loop when tracking; otherwise listen continuously.
        if self.enable_tracking:
            self.__run_pass__()
        else:
            self.__receive_mode__()

    def receive(self):
        """Wait up to 5000 ms for a packet; return the parsed beacon string,
        or None on timeout/parse failure."""
        with self.bb_lock:
            data, rssi, freq = self.bluebox.receive(5000)
        if data:
            print("\n" + "#="*40 + "#\n")
            print("Received packet {}".format(datetime.now().isoformat(' ')))
            if self.enable_auth:
                # Report the raw packet in two halves over IRC for verification.
                hex_str = binascii.b2a_hex(data)
                self.irc_reporter.send("VERIFY,1,%s" % hex_str[0:len(hex_str)/2])
                self.irc_reporter.send("VERIFY,2,%s" % hex_str[len(hex_str)/2:])
            # Parse data
            try:
                beacon_str = self.parser.parse_data(data)
            except Exception as e:
                print e
            else:
                return beacon_str

    def __run_pass__(self):
        # Receive with doppler correction while the satellite is in range,
        # then sleep until the next predicted pass.
        while True:
            while self.tracker.in_range():
                freq = self.config['radio_settings']['frequency'] + self.tracker.get_doppler()
                print "Doppler freq:", freq
                self.bluebox.set_frequency(freq)
                beacon = self.receive()
                if beacon:
                    print beacon
            next_pass, duration, max_elvation = self.tracker.next_pass()
            print """Next pass: {0}
Duration: {1:.0f} minutes
Max elevation: {2:.2f} degrees""".format(datetime.fromtimestamp(next_pass), duration / 60, max_elvation)
            time.sleep(next_pass-time.time())

    def __receive_mode__(self):
        # Fixed-frequency endless receive loop.
        while True:
            beacon = self.receive()
            if beacon:
                print beacon
if __name__ == '__main__':
    # Command-line entry point: parse ground-station location and feature
    # flags, then run the frontend (blocking).
    args_parser = argparse.ArgumentParser(description='AAUSAT4 Bluebox based Beacon Parser')
    args_parser.add_argument('--lat', dest='lat', required=False, type=float, default=None,
                             help='Latitude of ground station (N), e.g. 55.6167')
    args_parser.add_argument('--lon', dest='lon', required=False, type=float, default=None,
                             help='Longitude of ground station (W), e.g. -12.6500')
    args_parser.add_argument('--alt', dest='alt', required=False, type=float, default=None,
                             help='Altitude of ground station (meters), e.g. 10')
    args_parser.add_argument('--disable-tracking', dest='disable_tracking',
                             action='store_true',
                             required=False, default=False,
                             help='Disables doppler correction and pass planning.')
    args_parser.add_argument('--enable-verification', dest='enable_verification',
                             action='store_true',
                             required=False,
                             help='Enables automatic reporting of received packets.')
    args_parser.add_argument('--config-file', dest='config_file', required=False, type=str, default=None,
                             help='Use a confguration file from the local disk instead of the one provided by AAUSAT (on github).')
    args = args_parser.parse_args()
    if args.disable_tracking:
        qth = None
    else:
        # Tracking needs a complete station position.
        qth = (args.lat, args.lon, args.alt)
        if None in qth:
            raise Exception("latitude longitude and altitude arguments are required for tracking")
    bb = bb_frontend(qth, args.config_file, not args.disable_tracking, args.enable_verification)
    bb.run()
| {
"content_hash": "d6154eb464ef85787451db702959adf7",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 131,
"avg_line_length": 37.50349650349651,
"alnum_prop": 0.5578966996084281,
"repo_name": "aausat/aausat4_beacon_parser",
"id": "d6686d8c78e9388271dd0319075c1907480cbcf4",
"size": "5363",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bb_frontend.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18390"
}
],
"symlink_target": ""
} |
from redlock import RedLock
import time
def test_default_connection_details_value():
    """
    Test that RedLock instance could be created with
    default value of `connection_details` argument.
    """
    # Construction itself is the assertion: it must not raise.
    lock = RedLock("test_simple_lock")
def test_simple_lock():
    """
    Test that a single RedLock can be acquired (and then released).
    """
    simple_lock = RedLock("test_simple_lock", [{"host": "localhost"}], ttl=1000)
    acquired = simple_lock.acquire()
    simple_lock.release()
    assert acquired == True
def test_context_manager():
    """
    Test that a RedLock is released by the context manager automatically.
    """
    with RedLock("test_context_manager", [{"host": "localhost"}], ttl=1000):
        # While inside the context the resource is held, so this must fail.
        inner = RedLock("test_context_manager", [{"host": "localhost"}], ttl=1000)
        assert inner.acquire() == False
    # After exit the lock was released automatically; acquisition succeeds.
    outer = RedLock("test_context_manager", [{"host": "localhost"}], ttl=1000)
    assert outer.acquire() == True
    outer.release()
def test_fail_to_lock_acquired():
    """A second lock on the same resource must fail while the first is held."""
    holder = RedLock("test_fail_to_lock_acquired", [{"host": "localhost"}], ttl=1000)
    contender = RedLock("test_fail_to_lock_acquired", [{"host": "localhost"}], ttl=1000)
    holder_locked = holder.acquire()
    contender_locked = contender.acquire()
    holder.release()
    assert holder_locked == True
    assert contender_locked == False
def test_lock_expire():
    """An expired lock no longer blocks new acquisitions, and releasing it
    must not free the resource held by its successor."""
    short_lock = RedLock("test_lock_expire", [{"host": "localhost"}], ttl=500)
    short_lock.acquire()
    time.sleep(1)
    # short_lock's TTL has elapsed, so the resource is free again.
    second_lock = RedLock("test_lock_expire", [{"host": "localhost"}], ttl=1000)
    assert second_lock.acquire() == True
    short_lock.release()
    third_lock = RedLock("test_lock_expire", [{"host": "localhost"}], ttl=1000)
    assert third_lock.acquire() == False
| {
"content_hash": "17f04fa49088d48625ca17af397ff4e9",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 84,
"avg_line_length": 27.70769230769231,
"alnum_prop": 0.6296501943364797,
"repo_name": "mjschultz/redlock",
"id": "5115498b4902f84ef73d8cbed060868774e88a78",
"size": "1801",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_lock.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1658"
},
{
"name": "Python",
"bytes": "9470"
}
],
"symlink_target": ""
} |
"""Rofication indicator
https://github.com/DaveDavenport/Rofication
simple module to show an icon + the number of notifications stored in rofication
module will have normal highlighting if there are zero notifications,
"warning" highlighting if there are nonzero notifications,
"critical" highlighting if there are any critical notifications
Parameters:
* rofication.regolith: Switch to regolith fork of rofication, see <https://github.com/regolith-linux/regolith-rofication>.
"""
import core.module
import core.widget
import core.decorators
import sys
import socket
class Module(core.module.Module):
    """Shows the rofication notification count, polling the daemon's Unix
    socket every 5 seconds; highlighting reflects urgency (see state())."""

    @core.decorators.every(seconds=5)
    def __init__(self, config, theme):
        super().__init__(config, theme, core.widget.Widget(self.full_text))
        # Cached from the last poll; drive state() highlighting.
        self.__critical = False
        self.__numnotifications = 0
        # "regolith" switches to the regolith fork's comma-separated protocol.
        self.__regolith = self.parameter("regolith", False)

    def full_text(self, widgets):
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as client:
            client.connect("/tmp/rofi_notification_daemon")
            # below code will fetch two numbers in a list, e.g. ['22', '1']
            # first is total number of notifications, second is number of critical notifications
            if self.__regolith:
                client.sendall(bytes("num\n", "utf-8"))
            else:
                client.sendall(bytes("num", "utf-8"))
            val = client.recv(512)
            val = val.decode("utf-8")
            # Regolith separates with commas, upstream with newlines.
            if self.__regolith:
                l = val.split(',',2)
            else:
                l = val.split('\n',2)
            self.__numnotifications = int(l[0])
            self.__critical = bool(int(l[1]))
            return self.__numnotifications

    def state(self, widget):
        # rofication doesn't really support the idea of seen vs unseen notifications
        # marking a message as "seen" actually just sets its urgency to normal
        # so, doing highlighting if any notifications are present
        if self.__critical:
            return ["critical"]
        elif self.__numnotifications:
            return ["warning"]
        return []
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| {
"content_hash": "3d352fed3fbacf0c380ad5925a6b6e3a",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 126,
"avg_line_length": 38.13559322033898,
"alnum_prop": 0.6204444444444445,
"repo_name": "tobi-wan-kenobi/bumblebee-status",
"id": "1fd53553beb1ba7ab0b62888223da6e979be50d1",
"size": "2250",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "bumblebee_status/modules/contrib/rofication.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "149"
},
{
"name": "Python",
"bytes": "629537"
},
{
"name": "Shell",
"bytes": "2431"
}
],
"symlink_target": ""
} |
import sys
def createReasonsForCheckFailure(clauses):
    """Flatten clauses into the wire format: each clause is emitted as a 0
    separator followed by its literals, and the whole sequence ends with a 0.
    An empty clause list yields [0, 0]."""
    output = [lit for clause in clauses for lit in [0] + list(clause)]
    if not output:
        output.append(0)
    output.append(0)
    return output
def createWeakConstraints(weak):
    """Serialize weak constraints: a 0 before each constraint's literals and a
    closing 0; [0, 0] when there are none."""
    serialized = []
    for constraint in weak:
        serialized.append(0)
        serialized.extend(constraint)
    if not serialized:
        serialized.append(0)
    serialized.append(0)
    return serialized
def createWeights(weights):
    """Return weights unchanged on Python 3; on Python 2 coerce each entry to
    `long` (arbitrary-precision integers)."""
    if sys.version_info < (3, 0):
        return [long(w) for w in weights]
    return weights
def fromNogood(conj):
    """Turn a nogood (a conjunction that must not hold) into the clause of
    its negated literals."""
    return [-lit for lit in conj]
def fromLitImplConj(lit, conj):
    """Clausify lit -> (l1 ^ ... ^ ln) for l1,...,ln in conj as the list of
    binary clauses ci = (-lit v li), i = 1,...,n."""
    return [[-lit, l] for l in conj]
def fromLitImplDisj(lit, disj):
    """Clausify lit -> (l1 v ... v ln) for l1,...,ln in disj as the single
    clause (-lit v l1 v ... v ln)."""
    return [-lit] + list(disj)
def fromConjImplLit(lit, conj):
    """Clausify (l1 ^ ... ^ ln) -> lit for l1,...,ln in conj as the single
    clause (-l1 v ... v -ln v lit)."""
    clause = [lit]
    clause.extend(-l for l in conj)
    return clause
def fromDisjImplLit(lit, disj):
    """Clausify (l1 v ... v ln) -> lit for l1,...,ln in disj as the list of
    binary clauses ci = (lit v -li), i = 1,...,n."""
    return [[lit, -l] for l in disj]
def incoherent():
    """Status code reported for an incoherent (inconsistent) result."""
    return 0

def coherent():
    """Status code reported for a coherent result."""
    return 1
def getTerms(predicateName, atomName):
    """Split the argument list of atom 'predicateName(t1,...,tn)' into its
    top-level terms, respecting nested parentheses (e.g. "f(g(x,y),z)" yields
    ["g(x,y)", "z"]). Returns [] for an atom with no arguments."""
    # Strip the trailing ")" and the leading "predicateName(".
    arguments = atomName[:-1][len(predicateName) + 1:]
    terms = []
    current = ""
    depth = 0  # parenthesis nesting level; commas split only at depth 0
    for char in arguments:
        if char == "(":
            depth += 1
        elif char == ")":
            depth -= 1
        elif char == "," and depth == 0:
            terms.append(current)
            current = ""
            continue
        current += char
    # The final term (possibly empty after a trailing comma) is flushed
    # whenever there was any argument text at all.
    if arguments:
        terms.append(current)
    return terms
def initFallback(elements):
    """Flatten (a, b) pairs into a list ordered b, a, b, a, ...

    This is the wire order expected by the fallback-heuristic handlers.
    """
    return [item for a, b in elements for item in (b, a)]
def factorFallback(elements):
    # Factor-based fallback uses the same pairwise flattening as init.
    return initFallback(elements)
def choice(l):
    # Request branching on literal l (trailing zeros terminate the message).
    return [l, 0, 0]
def restart():
    # Request a solver restart.
    return [1, 0]
def fallback(n):
    # NOTE(review): 2 appears to select the fallback heuristic with
    # parameter n -- confirm against the wasp C++ side.
    return [n, 2, 0]
def unroll(v):
    # NOTE(review): 3 appears to request unrolling of variable v -- confirm
    # against the wasp C++ side.
    return [v, 3, 0]
# Truth values used in the interpretations wasp passes to the is* helpers.
FALSE = 0
TRUE = 1
UNDEFINED = 2
ELIMINATED = 3
def isTrue(lit, waspInterpretation):
    """Return True iff literal *lit* holds in *waspInterpretation*.

    A positive literal holds when its atom is TRUE; a negative literal
    holds when its atom is FALSE. Keys are stringified atom ids.
    """
    truth = waspInterpretation["%s" % abs(lit)]
    return truth == TRUE if lit > 0 else truth == FALSE
def isFalse(lit, waspInterpretation):
    """Return True iff literal *lit* is falsified in *waspInterpretation*.

    A positive literal is falsified when its atom is FALSE; a negative
    literal when its atom is TRUE. Keys are stringified atom ids.
    """
    truth = waspInterpretation["%s" % abs(lit)]
    return truth == FALSE if lit > 0 else truth == TRUE
def isUndefined(lit, waspInterpretation):
    """Return True iff the atom underlying *lit* is still unassigned."""
    key = str(abs(lit))
    return waspInterpretation[key] == UNDEFINED
def isEliminated(lit, waspInterpretation):
    """Return True iff the atom underlying *lit* has status ELIMINATED."""
    key = str(abs(lit))
    return waspInterpretation[key] == ELIMINATED
"content_hash": "b2af9e49933b8be003c58ad116bf87d4",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 79,
"avg_line_length": 23.626666666666665,
"alnum_prop": 0.5725169300225733,
"repo_name": "alviano/wasp",
"id": "a692c11fc865285df4fac0908aed190dee88f484",
"size": "3544",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python_libraries/wasp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1069357"
},
{
"name": "Classic ASP",
"bytes": "2262412"
},
{
"name": "Makefile",
"bytes": "12472"
},
{
"name": "Python",
"bytes": "185935587"
},
{
"name": "Shell",
"bytes": "847"
}
],
"symlink_target": ""
} |
import time
import threading
import apigen
# automatically added version command will use module version if present
# rpc exceptions will also include module version if present
__version__ = "1.0.0"
class Calculator(apigen.Definition):  # Program name taken from class name.
    """Example Programm"""  # Program help text taken from class doc string.

    @apigen.command()
    def add(self, a, b):  # Command name and args taken from method.
        """adds two items"""  # Help text taken from method doc string.
        return a + b  # Returned rpc/cli output (must be JSON serializable).
if __name__ == "__main__":
    instance = None
    thread = None
    try:
        instance = Calculator()
        # Serve RPC in a worker thread; handle_sigint=False keeps SIGINT
        # delivery in the main thread so Ctrl-C interrupts the sleep loop.
        thread = threading.Thread(target=instance.startserver,
                                  kwargs={"handle_sigint": False})
        thread.start()
        while True:
            time.sleep(1)  # keep the main thread alive while serving
    finally:
        # Shut the server down and join the worker even on KeyboardInterrupt.
        if instance is not None and thread is not None:
            instance.stopserver()
            thread.join()
| {
"content_hash": "c029cb021385bd6075f56be39d894ef3",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 77,
"avg_line_length": 31.363636363636363,
"alnum_prop": 0.6270531400966184,
"repo_name": "F483/apigen",
"id": "9daefe66aba77f23c208cf49afd2b6f8c4db125b",
"size": "1172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/threaded.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2163"
},
{
"name": "Python",
"bytes": "10413"
}
],
"symlink_target": ""
} |
import numpy as np
import pandas as pd
def read_matrix(file_handle):
    """Parse a MATRIX.TEST-format stream into a dense count matrix.

    Returns ``(matrix, tokenlist, categories)`` where ``matrix`` is an
    m-by-n int ndarray of word counts (``matrix[i, j]`` = occurrences of
    word j in document i), ``tokenlist`` is the dictionary word list, and
    ``categories`` is an m-by-1 array of {0, 1} labels. Transform the
    labels to +/-1 before feeding them to SVM code.

    The stream layout is: a header line, a "rows cols" line, the token
    line, then one line per document of the form
    ``label offset1 count1 offset2 count2 ...`` where offsets are deltas
    from the previous column index.
    """
    lines = iter(file_handle)
    next(lines)  # header line, unused
    num_rows, num_cols = (int(tok) for tok in next(lines).rstrip().split())
    tokenlist = next(lines).rstrip().split()

    # The data is small enough that a dense matrix is faster than sparse.
    counts = np.zeros((num_rows, num_cols), dtype=int)
    categories = np.zeros((num_rows, 1))
    for row in range(num_rows):
        fields = next(lines).rstrip().split()
        categories[row] = int(fields[0])
        col = 0
        for j in range(1, len(fields) - 1, 2):
            col += int(fields[j])          # delta-encoded column index
            counts[row, col] = int(fields[j + 1])
    return (counts, tokenlist, categories)
def read_data(filename):
    """Load a MATRIX-format file into a DataFrame.

    The returned frame has one row per document; the first column 'SPAM'
    holds the {0, 1} label and the remaining columns are per-token counts.
    """
    with open(filename) as handle:
        counts, tokens, labels = read_matrix(handle)
    combined = np.concatenate((labels, counts), axis=1)
    columns = ['SPAM'] + tokens
    return pd.DataFrame(combined, columns=columns, dtype=int)
| {
"content_hash": "0531bb2b08284bd051940dd7541917a2",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 80,
"avg_line_length": 36.58620689655172,
"alnum_prop": 0.644674835061263,
"repo_name": "stallmanifold/cs229-machine-learning-stanford-fall-2016",
"id": "602029b86a7f3c55723e179760b6374756c047f2",
"size": "2122",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/homework2/q3/read_matrix.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Julia",
"bytes": "2054"
},
{
"name": "Jupyter Notebook",
"bytes": "794523"
},
{
"name": "M",
"bytes": "1320"
},
{
"name": "Matlab",
"bytes": "36436"
},
{
"name": "Python",
"bytes": "49104"
}
],
"symlink_target": ""
} |
import yaml
class Config(object):
    """Load the site-wide settings from ../config.yml into ``self.globals``."""

    def __init__(self):
        with open("../config.yml", 'r') as stream:
            try:
                # safe_load parses plain YAML only; the Loader-less
                # yaml.load allowed arbitrary Python object construction
                # and is deprecated since PyYAML 5.1.
                self.globals = yaml.safe_load(stream)
            except yaml.YAMLError as exc:
                # Best-effort: report the parse error and leave the
                # instance without `globals`, matching previous behavior.
                print(exc)
| {
"content_hash": "f1e7d4a06eac883891fd39630d9faf19",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 50,
"avg_line_length": 24.6,
"alnum_prop": 0.5040650406504065,
"repo_name": "innusource/siteg.py",
"id": "1e8a38f7528c3931c12c2f67fef9c0c263b94fe4",
"size": "246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "_app/definitions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "24176"
},
{
"name": "Python",
"bytes": "9069"
}
],
"symlink_target": ""
} |
import os.path
from django.core.urlresolvers import reverse
from django.template.defaultfilters import slugify
from django.test import TestCase
from django.test.client import Client
from radpress.compat import User
from radpress.models import Article, Page, Tag
from radpress.readers import get_reader
from radpress.settings import CONTEXT_DATA, MORE_TAG
class RadpressTestCase(TestCase):
    """Base test case: loads the shared fixture and prepares two users."""

    # Fixture ships the Article/Page/Tag rows and the two users used below.
    fixtures = [os.path.join(os.path.dirname(__file__), 'data.json')]

    def setUp(self):
        self.client = Client()
        # define article
        self.article1 = Article.objects.get(pk=1)
        # define user (passwords must be (re)set; fixtures store raw hashes)
        self.user1 = User.objects.get(username='gokmen')
        self.user1.set_password('secret')
        self.user1.save()
        # define second user password
        self.user2 = User.objects.get(username='defne')
        self.user2.set_password('secret')
        self.user2.save()
class RadpressReaderTestCase(RadpressTestCase):
    """Checks a markup reader against a sample document.

    Subclasses set ``markup`` (reader name) and ``file_path`` (sample
    document relative to this module).
    """
    markup = None
    file_path = None

    def setUp(self):
        # NOTE(review): intentionally does not call super().setUp(); the
        # reader tests below do not touch the fixture objects -- confirm.
        # default markup name is reStructuredText
        self.reader = get_reader(markup=self.markup)
        if self.file_path is not None:
            # default content_body, metadata
            file_path = os.path.join(os.path.dirname(__file__), self.file_path)
            # Use open() instead of the Python 2-only file() builtin and
            # close the handle deterministically instead of leaking it.
            with open(file_path) as handle:
                content = handle.read()
            self.content_body, self.metadata = self.reader(content).read()

    def test_check_metadata(self):
        # Metadata parsed from the sample document's header block.
        self.assertEqual(self.metadata['image'], '1')
        self.assertTrue(self.metadata['published'])
        self.assertEqual(self.metadata['slug'], 'samuel-l-ipsum')
        self.assertEqual(self.metadata['title'], 'Samuel L. Ipsum')
        for tag in ['ipsum', 'samuel', 'lorem']:
            self.assertIn(tag, self.metadata['tags'])

    def test_contents(self):
        # Re-rendering each stored article must reproduce its content_body.
        for article in Article.objects.filter(markup=self.markup):
            content_body, metadata = self.reader(article.content).read()
            self.assertEqual(article.content_body, content_body)

    def test_more_tag(self):
        self.assertIn(MORE_TAG, self.content_body)
class BaseTest(RadpressTestCase):
    """End-to-end checks for radpress models, views and settings."""

    def test_all_published_articles(self):
        # check published article count
        self.assertEqual(Article.objects.all_published().count(), 1)
        # check published page count
        self.assertEqual(Page.objects.all_published().count(), 2)

    def test_open_private_and_public_article_details(self):
        # Unpublished articles 404 for anonymous visitors; published are 200.
        for article in Article.objects.all():
            status_code = 200 if article.is_published else 404
            response = self.client.get(article.get_absolute_url())
            self.assertEqual(response.status_code, status_code)

    def test_preview_page(self):
        # try to get response with GET method
        response = self.client.get(reverse('radpress-preview'))
        expected_status_code = 302  # because, login required
        self.assertEqual(response.status_code, expected_status_code)
        self.client.login(username='gokmen', password='secret')
        response = self.client.get(reverse('radpress-preview'))
        expected_status_code = 405  # because, view only allows `post` method
        self.assertEqual(response.status_code, expected_status_code)

    def test_slugs(self):
        # Stored slugs must already be in slugified form.
        for article in Article.objects.all():
            slug = slugify(article.slug)
            self.assertEqual(article.slug, slug)

    def test_tags(self):
        # checks tag count from fixture
        self.assertEqual(Tag.objects.count(), 2)
        # create new tag and check slug
        tag_name = 'how I met your mother'
        tag = Tag.objects.create(name=tag_name)
        self.assertEqual(tag.slug, slugify(tag_name))
        # add tag to a published article and check count of tags
        self.article1.articletag_set.create(tag=tag)
        self.assertEqual(self.article1.tags.count(), 1)
        # try to filter articles for tags
        articles = Article.objects.filter(tags__name=tag_name)
        self.assertEqual(articles.count(), 1)

    def test_access_not_published_article(self):
        """
        If user is not authenticated, user can not access not published
        articles and pages.
        """
        article = Article.objects.get(slug='i-have-a-dream')
        page = Page.objects.get(slug='page-3-not-published')

        def get_responses():
            # Fetch both detail pages with the current session.
            response_article = self.client.get(
                reverse('radpress-article-detail', args=[article.slug]))
            response_page = self.client.get(
                reverse('radpress-page-detail', args=[page.slug]))
            return response_article, response_page

        # if user is not authenticated to site:
        response_article, response_page = get_responses()
        self.assertEqual(response_article.status_code, 404)
        self.assertEqual(response_page.status_code, 404)
        # if user is not superuser and not author of the entries:
        self.client.login(username=self.user2.username, password='secret')
        self.assertFalse(self.user2.is_superuser)
        response_article, response_page = get_responses()
        self.assertEqual(response_article.status_code, 404)
        self.assertEqual(response_page.status_code, 404)
        # if user is superuser but not the author of entries:
        self.user2.is_superuser = True
        self.user2.save()
        self.assertTrue(self.user2.is_superuser)
        response_article, response_page = get_responses()
        self.assertEqual(response_article.status_code, 200)
        self.assertEqual(response_page.status_code, 200)
        # if user is not superuser but the author of entries:
        article.author = self.user2
        article.save()
        self.user2.is_superuser = False
        self.user2.save()
        self.assertFalse(self.user2.is_superuser)
        response_article, response_page = get_responses()
        self.assertEqual(response_article.status_code, 200)
        self.assertEqual(response_page.status_code, 404)

    def test_context_data(self):
        """
        Important! All context data keys should be start with `RADPRESS_`
        prefix and uppercase.
        """
        for context in CONTEXT_DATA.keys():
            self.assertTrue(context.startswith('RADPRESS_'))
            self.assertEqual(context, context.upper())
class RestructuredtextTest(RadpressReaderTestCase):
    """Runs the reader checks against the reStructuredText sample."""
    markup = 'restructuredtext'
    file_path = 'test_content.rst'

    def test_pygmentize(self):
        # Code blocks in the sample must be rendered via pygments tables.
        self.assertIn('<table class="highlighttable">', self.content_body)
        self.assertIn('<td class="linenos">', self.content_body)
| {
"content_hash": "396e9a4daba5db69694c697d76645217",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 79,
"avg_line_length": 37.3728813559322,
"alnum_prop": 0.654119425547997,
"repo_name": "ifearcompilererrors/fle_redesign",
"id": "71613c7108f55f6772c439c02f95fb8ad88d2ab9",
"size": "6615",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fle_redesign/apps/radpress/tests/base.py",
"mode": "33261",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import fiona
import pytest
import struct
from collections import OrderedDict
from .conftest import requires_gpkg
@requires_gpkg
def test_binary_field(tmpdir):
    """Round-trip a bytes property through a GeoPackage BLOB field."""
    meta = {
        "driver": "GPKG",
        "schema": {
            "geometry": "Point",
            "properties": OrderedDict([
                ("name", "str"),
                ("data", "bytes"),
            ])
        }
    }

    # create some binary data: all 256 byte values, to catch encoding bugs
    input_data = struct.pack("256B", *range(256))

    # write the binary data to a BLOB field
    filename = str(tmpdir.join("binary_test.gpkg"))
    with fiona.open(filename, "w", **meta) as dst:
        feature = {
            "geometry": {"type": "Point", "coordinates": ((0, 0))},
            "properties": {
                "name": "test",
                u"data": input_data,
            }
        }
        dst.write(feature)

    # read the data back and check consistency
    with fiona.open(filename, "r") as src:
        feature = next(iter(src))
        assert feature["properties"]["name"] == "test"
        output_data = feature["properties"]["data"]
        assert output_data == input_data
| {
"content_hash": "f29b800e1f486a8f2a1451aaf0af0ca7",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 67,
"avg_line_length": 26.928571428571427,
"alnum_prop": 0.5305039787798409,
"repo_name": "rbuffat/Fiona",
"id": "d35868fba0454ef713dcab37beb5f6bca84b9315",
"size": "1131",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_binary_field.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "536189"
},
{
"name": "Shell",
"bytes": "4951"
}
],
"symlink_target": ""
} |
from django.core.urlresolvers import reverse # noqa
from django import http
from django.utils.html import escape # noqa
from horizon.workflows import views
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard.dashboards.project.networks import workflows
INDEX_URL = reverse('horizon:project:networks:index')
def form_data_subnet(subnet,
                     name=None, cidr=None, ip_version=None,
                     gateway_ip='', enable_dhcp=None,
                     allocation_pools=None,
                     dns_nameservers=None,
                     host_routes=None):
    """Build subnet-form POST data, defaulting each field from *subnet*.

    Pass ``gateway_ip=None`` to simulate the "no gateway" checkbox;
    the default ``''`` means "use the subnet's own gateway".
    """
    def pick(override, default):
        return default if override is None else override

    if gateway_ip == '':
        gateway_ip = subnet.gateway_ip

    data = {
        'subnet_name': pick(name, subnet.name),
        'cidr': pick(cidr, subnet.cidr),
        'ip_version': pick(ip_version, subnet.ip_version),
        'gateway_ip': gateway_ip or '',
        'no_gateway': gateway_ip is None,
        'enable_dhcp': pick(enable_dhcp, subnet.enable_dhcp),
        'allocation_pools': _str_allocation_pools(
            pick(allocation_pools, subnet.allocation_pools)),
        'dns_nameservers': _str_dns_nameservers(
            pick(dns_nameservers, subnet.dns_nameservers)),
        'host_routes': _str_host_routes(
            pick(host_routes, subnet.host_routes)),
    }
    return data
def form_data_no_subnet():
    """POST data for the network form when no subnet is being created."""
    text_fields = ('subnet_name', 'cidr', 'gateway_ip',
                   'allocation_pools', 'dns_nameservers', 'host_routes')
    data = {field: '' for field in text_fields}
    data['ip_version'] = 4
    data['no_gateway'] = False
    data['enable_dhcp'] = True
    return data
def _str_allocation_pools(allocation_pools):
if isinstance(allocation_pools, str):
return allocation_pools
return '\n'.join(['%s,%s' % (pool['start'], pool['end'])
for pool in allocation_pools])
def _str_dns_nameservers(dns_nameservers):
if isinstance(dns_nameservers, str):
return dns_nameservers
return '\n'.join(dns_nameservers)
def _str_host_routes(host_routes):
if isinstance(host_routes, str):
return host_routes
return '\n'.join(['%s,%s' % (route['destination'], route['nexthop'])
for route in host_routes])
class NetworkTests(test.TestCase):
    """View tests for the project Networks panel (mox record/replay stubs)."""

    @test.create_stubs({api.neutron: ('network_list',)})
    def test_index(self):
        # Index lists the tenant's own networks plus shared ones.
        api.neutron.network_list(
            IsA(http.HttpRequest),
            tenant_id=self.tenant.id,
            shared=False).AndReturn(self.networks.list())
        api.neutron.network_list(
            IsA(http.HttpRequest),
            shared=True).AndReturn([])
        self.mox.ReplayAll()

        res = self.client.get(INDEX_URL)

        self.assertTemplateUsed(res, 'project/networks/index.html')
        networks = res.context['networks_table'].data
        self.assertItemsEqual(networks, self.networks.list())

    @test.create_stubs({api.neutron: ('network_list',)})
    def test_index_network_list_exception(self):
        # API failure yields an empty table plus one error message.
        api.neutron.network_list(
            IsA(http.HttpRequest),
            tenant_id=self.tenant.id,
            shared=False).AndRaise(self.exceptions.neutron)
        self.mox.ReplayAll()

        res = self.client.get(INDEX_URL)

        self.assertTemplateUsed(res, 'project/networks/index.html')
        self.assertEqual(len(res.context['networks_table'].data), 0)
        self.assertMessageCount(res, error=1)

    @test.create_stubs({api.neutron: ('network_get',
                                      'subnet_list',
                                      'port_list',)})
    def test_network_detail(self):
        # Detail page shows the network's subnets and ports.
        network_id = self.networks.first().id
        api.neutron.network_get(IsA(http.HttpRequest), network_id)\
            .AndReturn(self.networks.first())
        api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id)\
            .AndReturn([self.subnets.first()])
        api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id)\
            .AndReturn([self.ports.first()])
        api.neutron.network_get(IsA(http.HttpRequest), network_id)\
            .AndReturn(self.networks.first())
        self.mox.ReplayAll()

        res = self.client.get(reverse('horizon:project:networks:detail',
                                      args=[network_id]))

        self.assertTemplateUsed(res, 'project/networks/detail.html')
        subnets = res.context['subnets_table'].data
        ports = res.context['ports_table'].data
        self.assertItemsEqual(subnets, [self.subnets.first()])
        self.assertItemsEqual(ports, [self.ports.first()])

    @test.create_stubs({api.neutron: ('network_get',
                                      'subnet_list',
                                      'port_list',)})
    def test_network_detail_network_exception(self):
        # Failure to fetch the network redirects back to the index.
        network_id = self.networks.first().id
        api.neutron.network_get(IsA(http.HttpRequest), network_id)\
            .AndRaise(self.exceptions.neutron)
        self.mox.ReplayAll()

        url = reverse('horizon:project:networks:detail', args=[network_id])
        res = self.client.get(url)

        redir_url = INDEX_URL
        self.assertRedirectsNoFollow(res, redir_url)
    @test.create_stubs({api.neutron: ('network_get',
                                      'subnet_list',
                                      'port_list',)})
    def test_network_detail_subnet_exception(self):
        # Subnet listing failure leaves the subnets table empty; ports intact.
        network_id = self.networks.first().id
        api.neutron.network_get(IsA(http.HttpRequest), network_id).\
            AndReturn(self.networks.first())
        api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id).\
            AndRaise(self.exceptions.neutron)
        api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id).\
            AndReturn([self.ports.first()])
        # Called from SubnetTable
        api.neutron.network_get(IsA(http.HttpRequest), network_id).\
            AndReturn(self.networks.first())
        self.mox.ReplayAll()

        res = self.client.get(reverse('horizon:project:networks:detail',
                                      args=[network_id]))

        self.assertTemplateUsed(res, 'project/networks/detail.html')
        subnets = res.context['subnets_table'].data
        ports = res.context['ports_table'].data
        self.assertEqual(len(subnets), 0)
        self.assertItemsEqual(ports, [self.ports.first()])

    @test.create_stubs({api.neutron: ('network_get',
                                      'subnet_list',
                                      'port_list',)})
    def test_network_detail_port_exception(self):
        # Port listing failure leaves the ports table empty; subnets intact.
        network_id = self.networks.first().id
        api.neutron.network_get(IsA(http.HttpRequest), network_id).\
            AndReturn(self.networks.first())
        api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id).\
            AndReturn([self.subnets.first()])
        api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id).\
            AndRaise(self.exceptions.neutron)
        # Called from SubnetTable
        api.neutron.network_get(IsA(http.HttpRequest), network_id).\
            AndReturn(self.networks.first())
        self.mox.ReplayAll()

        res = self.client.get(reverse('horizon:project:networks:detail',
                                      args=[network_id]))

        self.assertTemplateUsed(res, 'project/networks/detail.html')
        subnets = res.context['subnets_table'].data
        ports = res.context['ports_table'].data
        self.assertItemsEqual(subnets, [self.subnets.first()])
        self.assertEqual(len(ports), 0)
    @test.create_stubs({api.neutron: ('profile_list',)})
    def test_network_create_get(self):
        # The create workflow renders its three steps.
        # TODO(absubram): Remove if clause and create separate
        # test stubs for when profile_support is being used.
        # Additionally ensure those are always run even in default setting
        if api.neutron.is_port_profiles_supported():
            net_profiles = self.net_profiles.list()
            api.neutron.profile_list(IsA(http.HttpRequest),
                                     'network').AndReturn(net_profiles)
        self.mox.ReplayAll()

        url = reverse('horizon:project:networks:create')
        res = self.client.get(url)

        workflow = res.context['workflow']
        self.assertTemplateUsed(res, views.WorkflowView.template_name)
        self.assertEqual(workflow.name, workflows.CreateNetwork.name)
        expected_objs = ['<CreateNetworkInfo: createnetworkinfoaction>',
                         '<CreateSubnetInfo: createsubnetinfoaction>',
                         '<CreateSubnetDetail: createsubnetdetailaction>']
        self.assertQuerysetEqual(workflow.steps, expected_objs)

    @test.create_stubs({api.neutron: ('network_create',
                                      'profile_list',)})
    def test_network_create_post(self):
        # Create without a subnet calls network_create only.
        network = self.networks.first()
        params = {'name': network.name,
                  'admin_state_up': network.admin_state_up}
        # TODO(absubram): Remove if clause and create separate
        # test stubs for when profile_support is being used.
        # Additionally ensure those are always run even in default setting
        if api.neutron.is_port_profiles_supported():
            net_profiles = self.net_profiles.list()
            net_profile_id = self.net_profiles.first().id
            api.neutron.profile_list(IsA(http.HttpRequest),
                                     'network').AndReturn(net_profiles)
            params['net_profile_id'] = net_profile_id
        api.neutron.network_create(IsA(http.HttpRequest),
                                   **params).AndReturn(network)
        self.mox.ReplayAll()

        form_data = {'net_name': network.name,
                     'admin_state': network.admin_state_up,
                     # subnet
                     'with_subnet': False}
        if api.neutron.is_port_profiles_supported():
            form_data['net_profile_id'] = net_profile_id
        form_data.update(form_data_no_subnet())
        url = reverse('horizon:project:networks:create')
        res = self.client.post(url, form_data)

        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)

    @test.create_stubs({api.neutron: ('network_create',
                                      'subnet_create',
                                      'profile_list',)})
    def test_network_create_post_with_subnet(self):
        # Create with a subnet calls network_create then subnet_create.
        network = self.networks.first()
        subnet = self.subnets.first()
        params = {'name': network.name,
                  'admin_state_up': network.admin_state_up}
        # TODO(absubram): Remove if clause and create separate
        # test stubs for when profile_support is being used.
        # Additionally ensure those are always run even in default setting
        if api.neutron.is_port_profiles_supported():
            net_profiles = self.net_profiles.list()
            net_profile_id = self.net_profiles.first().id
            api.neutron.profile_list(IsA(http.HttpRequest),
                                     'network').AndReturn(net_profiles)
            params['net_profile_id'] = net_profile_id
        api.neutron.network_create(IsA(http.HttpRequest),
                                   **params).AndReturn(network)
        api.neutron.subnet_create(IsA(http.HttpRequest),
                                  network_id=network.id,
                                  name=subnet.name,
                                  cidr=subnet.cidr,
                                  ip_version=subnet.ip_version,
                                  gateway_ip=subnet.gateway_ip,
                                  enable_dhcp=subnet.enable_dhcp)\
            .AndReturn(subnet)
        self.mox.ReplayAll()

        form_data = {'net_name': network.name,
                     'admin_state': network.admin_state_up,
                     'with_subnet': True}
        if api.neutron.is_port_profiles_supported():
            form_data['net_profile_id'] = net_profile_id
        form_data.update(form_data_subnet(subnet, allocation_pools=[]))
        url = reverse('horizon:project:networks:create')
        res = self.client.post(url, form_data)

        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.neutron: ('network_create',
                                      'profile_list',)})
    def test_network_create_post_network_exception(self):
        # network_create failure still redirects to the index (error message).
        network = self.networks.first()
        params = {'name': network.name,
                  'admin_state_up': network.admin_state_up}
        # TODO(absubram): Remove if clause and create separate
        # test stubs for when profile_support is being used.
        # Additionally ensure those are always run even in default setting
        if api.neutron.is_port_profiles_supported():
            net_profiles = self.net_profiles.list()
            net_profile_id = self.net_profiles.first().id
            api.neutron.profile_list(IsA(http.HttpRequest),
                                     'network').AndReturn(net_profiles)
            params['net_profile_id'] = net_profile_id
        api.neutron.network_create(IsA(http.HttpRequest),
                                   **params).AndRaise(self.exceptions.neutron)
        self.mox.ReplayAll()

        form_data = {'net_name': network.name,
                     'admin_state': network.admin_state_up,
                     # subnet
                     'with_subnet': False}
        if api.neutron.is_port_profiles_supported():
            form_data['net_profile_id'] = net_profile_id
        form_data.update(form_data_no_subnet())
        url = reverse('horizon:project:networks:create')
        res = self.client.post(url, form_data)

        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)

    @test.create_stubs({api.neutron: ('network_create',
                                      'profile_list')})
    def test_network_create_post_with_subnet_network_exception(self):
        # network_create fails before subnet_create is ever attempted.
        network = self.networks.first()
        subnet = self.subnets.first()
        params = {'name': network.name,
                  'admin_state_up': network.admin_state_up}
        # TODO(absubram): Remove if clause and create separate
        # test stubs for when profile_support is being used.
        # Additionally ensure those are always run even in default setting
        if api.neutron.is_port_profiles_supported():
            net_profiles = self.net_profiles.list()
            net_profile_id = self.net_profiles.first().id
            api.neutron.profile_list(IsA(http.HttpRequest),
                                     'network').AndReturn(net_profiles)
            params['net_profile_id'] = net_profile_id
        api.neutron.network_create(IsA(http.HttpRequest),
                                   **params).AndRaise(self.exceptions.neutron)
        self.mox.ReplayAll()

        form_data = {'net_name': network.name,
                     'admin_state': network.admin_state_up,
                     'with_subnet': True}
        if api.neutron.is_port_profiles_supported():
            form_data['net_profile_id'] = net_profile_id
        form_data.update(form_data_subnet(subnet, allocation_pools=[]))
        url = reverse('horizon:project:networks:create')
        res = self.client.post(url, form_data)

        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)

    @test.create_stubs({api.neutron: ('network_create',
                                      'network_delete',
                                      'subnet_create',
                                      'profile_list')})
    def test_network_create_post_with_subnet_subnet_exception(self):
        # subnet_create failure rolls back by deleting the created network.
        network = self.networks.first()
        subnet = self.subnets.first()
        params = {'name': network.name,
                  'admin_state_up': network.admin_state_up}
        # TODO(absubram): Remove if clause and create separate
        # test stubs for when profile_support is being used.
        # Additionally ensure those are always run even in default setting
        if api.neutron.is_port_profiles_supported():
            net_profiles = self.net_profiles.list()
            net_profile_id = self.net_profiles.first().id
            api.neutron.profile_list(IsA(http.HttpRequest),
                                     'network').AndReturn(net_profiles)
            params['net_profile_id'] = net_profile_id
        api.neutron.network_create(IsA(http.HttpRequest),
                                   **params).AndReturn(network)
        api.neutron.subnet_create(IsA(http.HttpRequest),
                                  network_id=network.id,
                                  name=subnet.name,
                                  cidr=subnet.cidr,
                                  ip_version=subnet.ip_version,
                                  gateway_ip=subnet.gateway_ip,
                                  enable_dhcp=subnet.enable_dhcp)\
            .AndRaise(self.exceptions.neutron)
        api.neutron.network_delete(IsA(http.HttpRequest),
                                   network.id)
        self.mox.ReplayAll()

        form_data = {'net_name': network.name,
                     'admin_state': network.admin_state_up,
                     'with_subnet': True}
        if api.neutron.is_port_profiles_supported():
            form_data['net_profile_id'] = net_profile_id
        form_data.update(form_data_subnet(subnet, allocation_pools=[]))
        url = reverse('horizon:project:networks:create')
        res = self.client.post(url, form_data)

        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.neutron: ('profile_list',)})
    def test_network_create_post_with_subnet_nocidr(self):
        # Checking "Create Subnet" without a CIDR is a form error.
        network = self.networks.first()
        subnet = self.subnets.first()
        # TODO(absubram): Remove if clause and create separate
        # test stubs for when profile_support is being used.
        # Additionally ensure those are always run even in default setting
        if api.neutron.is_port_profiles_supported():
            net_profiles = self.net_profiles.list()
            net_profile_id = self.net_profiles.first().id
            api.neutron.profile_list(IsA(http.HttpRequest),
                                     'network').AndReturn(net_profiles)
        self.mox.ReplayAll()

        form_data = {'net_name': network.name,
                     'admin_state': network.admin_state_up,
                     'with_subnet': True}
        if api.neutron.is_port_profiles_supported():
            form_data['net_profile_id'] = net_profile_id
        form_data.update(form_data_subnet(subnet, cidr='',
                                          allocation_pools=[]))
        url = reverse('horizon:project:networks:create')
        res = self.client.post(url, form_data)

        self.assertContains(res, escape('Specify "Network Address" or '
                                        'clear "Create Subnet" checkbox.'))

    @test.create_stubs({api.neutron: ('profile_list',)})
    def test_network_create_post_with_subnet_cidr_without_mask(self):
        # A bare address without a prefix length is rejected.
        network = self.networks.first()
        subnet = self.subnets.first()
        # TODO(absubram): Remove if clause and create separate
        # test stubs for when profile_support is being used.
        # Additionally ensure those are always run even in default setting
        if api.neutron.is_port_profiles_supported():
            net_profiles = self.net_profiles.list()
            net_profile_id = self.net_profiles.first().id
            api.neutron.profile_list(IsA(http.HttpRequest),
                                     'network').AndReturn(net_profiles)
        self.mox.ReplayAll()

        form_data = {'net_name': network.name,
                     'admin_state': network.admin_state_up,
                     'with_subnet': True}
        if api.neutron.is_port_profiles_supported():
            form_data['net_profile_id'] = net_profile_id
        form_data.update(form_data_subnet(subnet, cidr='10.0.0.0',
                                          allocation_pools=[]))
        url = reverse('horizon:project:networks:create')
        res = self.client.post(url, form_data)

        expected_msg = "The subnet in the Network Address is too small (/32)."
        self.assertContains(res, expected_msg)

    @test.create_stubs({api.neutron: ('profile_list',)})
    def test_network_create_post_with_subnet_cidr_inconsistent(self):
        # An IPv6 CIDR with ip_version=4 is rejected.
        network = self.networks.first()
        subnet = self.subnets.first()
        # TODO(absubram): Remove if clause and create separate
        # test stubs for when profile_support is being used.
        # Additionally ensure those are always run even in default setting
        if api.neutron.is_port_profiles_supported():
            net_profiles = self.net_profiles.list()
            net_profile_id = self.net_profiles.first().id
            api.neutron.profile_list(IsA(http.HttpRequest),
                                     'network').AndReturn(net_profiles)
        self.mox.ReplayAll()

        # dummy IPv6 address
        cidr = '2001:0DB8:0:CD30:123:4567:89AB:CDEF/60'
        form_data = {'net_name': network.name,
                     'admin_state': network.admin_state_up,
                     'with_subnet': True}
        if api.neutron.is_port_profiles_supported():
            form_data['net_profile_id'] = net_profile_id
        form_data.update(form_data_subnet(subnet, cidr=cidr,
                                          allocation_pools=[]))
        url = reverse('horizon:project:networks:create')
        res = self.client.post(url, form_data)

        expected_msg = 'Network Address and IP version are inconsistent.'
        self.assertContains(res, expected_msg)

    @test.create_stubs({api.neutron: ('profile_list',)})
    def test_network_create_post_with_subnet_gw_inconsistent(self):
        # An IPv6 gateway with an IPv4 subnet is rejected.
        network = self.networks.first()
        subnet = self.subnets.first()
        # TODO(absubram): Remove if clause and create separate
        # test stubs for when profile_support is being used.
        # Additionally ensure those are always run even in default setting
        if api.neutron.is_port_profiles_supported():
            net_profiles = self.net_profiles.list()
            net_profile_id = self.net_profiles.first().id
            api.neutron.profile_list(IsA(http.HttpRequest),
                                     'network').AndReturn(net_profiles)
        self.mox.ReplayAll()

        # dummy IPv6 address
        gateway_ip = '2001:0DB8:0:CD30:123:4567:89AB:CDEF'
        form_data = {'net_name': network.name,
                     'admin_state': network.admin_state_up,
                     'with_subnet': True}
        if api.neutron.is_port_profiles_supported():
            form_data['net_profile_id'] = net_profile_id
        form_data.update(form_data_subnet(subnet, gateway_ip=gateway_ip,
                                          allocation_pools=[]))
        url = reverse('horizon:project:networks:create')
        res = self.client.post(url, form_data)

        self.assertContains(res, 'Gateway IP and IP version are inconsistent.')
@test.create_stubs({api.neutron: ('network_get',)})
def test_network_update_get(self):
    """GET on the network update view renders the update template."""
    network = self.networks.first()
    api.neutron.network_get(IsA(http.HttpRequest), network.id)\
        .AndReturn(network)
    self.mox.ReplayAll()
    url = reverse('horizon:project:networks:update', args=[network.id])
    res = self.client.get(url)
    self.assertTemplateUsed(res, 'project/networks/update.html')
@test.create_stubs({api.neutron: ('network_get',)})
def test_network_update_get_exception(self):
    """A neutron error while fetching the network redirects to index."""
    network = self.networks.first()
    api.neutron.network_get(IsA(http.HttpRequest), network.id)\
        .AndRaise(self.exceptions.neutron)
    self.mox.ReplayAll()
    url = reverse('horizon:project:networks:update', args=[network.id])
    res = self.client.get(url)
    redir_url = INDEX_URL
    self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('network_update',
                                  'network_get',)})
def test_network_update_post(self):
    """A successful network update POST redirects back to the index."""
    network = self.networks.first()
    api.neutron.network_update(IsA(http.HttpRequest), network.id,
                               name=network.name,
                               admin_state_up=network.admin_state_up)\
        .AndReturn(network)
    api.neutron.network_get(IsA(http.HttpRequest), network.id)\
        .AndReturn(network)
    self.mox.ReplayAll()
    form_data = {'network_id': network.id,
                 'name': network.name,
                 'admin_state': network.admin_state_up,
                 'tenant_id': network.tenant_id}
    url = reverse('horizon:project:networks:update', args=[network.id])
    res = self.client.post(url, form_data)
    self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.neutron: ('network_update',
                                  'network_get',)})
def test_network_update_post_exception(self):
    """A neutron error during update still redirects back to the index
    (the error is reported via the messages framework, not a crash).
    """
    network = self.networks.first()
    api.neutron.network_update(IsA(http.HttpRequest), network.id,
                               name=network.name,
                               admin_state_up=network.admin_state_up)\
        .AndRaise(self.exceptions.neutron)
    api.neutron.network_get(IsA(http.HttpRequest), network.id)\
        .AndReturn(network)
    self.mox.ReplayAll()
    form_data = {'network_id': network.id,
                 'name': network.name,
                 'admin_state': network.admin_state_up,
                 'tenant_id': network.tenant_id}
    url = reverse('horizon:project:networks:update', args=[network.id])
    res = self.client.post(url, form_data)
    self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.neutron: ('network_list',
                                  'subnet_list',
                                  'network_delete')})
def test_delete_network_no_subnet(self):
    """Deleting a network with no subnets calls network_delete only
    (no subnet_delete expectations are recorded).
    """
    network = self.networks.first()
    api.neutron.network_list(IsA(http.HttpRequest),
                             tenant_id=network.tenant_id,
                             shared=False)\
        .AndReturn([network])
    api.neutron.network_list(IsA(http.HttpRequest),
                             shared=True)\
        .AndReturn([])
    api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network.id)\
        .AndReturn([])
    api.neutron.network_delete(IsA(http.HttpRequest), network.id)
    self.mox.ReplayAll()
    form_data = {'action': 'networks__delete__%s' % network.id}
    res = self.client.post(INDEX_URL, form_data)
    self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.neutron: ('network_list',
                                  'subnet_list',
                                  'network_delete',
                                  'subnet_delete')})
def test_delete_network_with_subnet(self):
    """Deleting a network deletes its subnet first, then the network."""
    network = self.networks.first()
    subnet = self.subnets.first()
    api.neutron.network_list(IsA(http.HttpRequest),
                             tenant_id=network.tenant_id,
                             shared=False)\
        .AndReturn([network])
    api.neutron.network_list(IsA(http.HttpRequest), shared=True)\
        .AndReturn([])
    api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network.id)\
        .AndReturn([subnet])
    # Order matters: the subnet must be removed before the network.
    api.neutron.subnet_delete(IsA(http.HttpRequest), subnet.id)
    api.neutron.network_delete(IsA(http.HttpRequest), network.id)
    self.mox.ReplayAll()
    form_data = {'action': 'networks__delete__%s' % network.id}
    res = self.client.post(INDEX_URL, form_data)
    self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.neutron: ('network_list',
                                  'subnet_list',
                                  'network_delete',
                                  'subnet_delete')})
def test_delete_network_exception(self):
    """A neutron error while deleting the network still redirects to
    the index (error surfaced via messages, not an unhandled crash).
    """
    network = self.networks.first()
    subnet = self.subnets.first()
    api.neutron.network_list(IsA(http.HttpRequest),
                             tenant_id=network.tenant_id,
                             shared=False)\
        .AndReturn([network])
    api.neutron.network_list(IsA(http.HttpRequest),
                             shared=True)\
        .AndReturn([])
    api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network.id)\
        .AndReturn([subnet])
    api.neutron.subnet_delete(IsA(http.HttpRequest), subnet.id)
    api.neutron.network_delete(IsA(http.HttpRequest), network.id)\
        .AndRaise(self.exceptions.neutron)
    self.mox.ReplayAll()
    form_data = {'action': 'networks__delete__%s' % network.id}
    res = self.client.post(INDEX_URL, form_data)
    self.assertRedirectsNoFollow(res, INDEX_URL)
class NetworkSubnetTests(test.TestCase):
    """Tests for the project-dashboard subnet views (detail, create,
    update, delete) including form-validation error paths.

    All neutron API calls are stubbed with mox; expectation order
    matters and mirrors the order the views invoke the API.
    """

    @test.create_stubs({api.neutron: ('subnet_get',)})
    def test_subnet_detail(self):
        """The subnet detail view renders with the requested subnet."""
        subnet = self.subnets.first()
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndReturn(self.subnets.first())
        self.mox.ReplayAll()
        url = reverse('horizon:project:networks:subnets:detail',
                      args=[subnet.id])
        res = self.client.get(url)
        self.assertTemplateUsed(res, 'project/networks/subnets/detail.html')
        self.assertEqual(res.context['subnet'].id, subnet.id)

    @test.create_stubs({api.neutron: ('subnet_get',)})
    def test_subnet_detail_exception(self):
        """A neutron error on subnet_get redirects to the index."""
        subnet = self.subnets.first()
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndRaise(self.exceptions.neutron)
        self.mox.ReplayAll()
        url = reverse('horizon:project:networks:subnets:detail',
                      args=[subnet.id])
        res = self.client.get(url)
        self.assertRedirectsNoFollow(res, INDEX_URL)

    @test.create_stubs({api.neutron: ('network_get',)})
    def test_subnet_create_get(self):
        """GET on the add-subnet view renders the workflow template."""
        network = self.networks.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        self.mox.ReplayAll()
        url = reverse('horizon:project:networks:addsubnet',
                      args=[network.id])
        res = self.client.get(url)
        self.assertTemplateUsed(res, views.WorkflowView.template_name)

    @test.create_stubs({api.neutron: ('network_get',
                                      'subnet_create',)})
    def test_subnet_create_post(self):
        """A valid subnet create POST calls subnet_create and redirects
        to the network detail page.
        """
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        api.neutron.subnet_create(IsA(http.HttpRequest),
                                  network_id=network.id,
                                  name=subnet.name,
                                  cidr=subnet.cidr,
                                  ip_version=subnet.ip_version,
                                  gateway_ip=subnet.gateway_ip,
                                  enable_dhcp=subnet.enable_dhcp,
                                  allocation_pools=subnet.allocation_pools)\
            .AndReturn(subnet)
        self.mox.ReplayAll()
        form_data = form_data_subnet(subnet)
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        redir_url = reverse('horizon:project:networks:detail',
                            args=[subnet.network_id])
        self.assertRedirectsNoFollow(res, redir_url)

    @test.create_stubs({api.neutron: ('network_get',
                                      'subnet_create',)})
    def test_subnet_create_post_with_additional_attributes(self):
        """Subnet creation passes optional DNS nameservers and host
        routes through to subnet_create.
        """
        # The second fixture entries carry the additional attributes.
        network = self.networks.list()[1]
        subnet = self.subnets.list()[1]
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        api.neutron.subnet_create(IsA(http.HttpRequest),
                                  network_id=network.id,
                                  name=subnet.name,
                                  cidr=subnet.cidr,
                                  ip_version=subnet.ip_version,
                                  gateway_ip=subnet.gateway_ip,
                                  enable_dhcp=subnet.enable_dhcp,
                                  allocation_pools=subnet.allocation_pools,
                                  dns_nameservers=subnet.dns_nameservers,
                                  host_routes=subnet.host_routes)\
            .AndReturn(subnet)
        self.mox.ReplayAll()
        form_data = form_data_subnet(subnet)
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        redir_url = reverse('horizon:project:networks:detail',
                            args=[subnet.network_id])
        self.assertRedirectsNoFollow(res, redir_url)

    @test.create_stubs({api.neutron: ('network_get',
                                      'subnet_create',)})
    def test_subnet_create_post_with_additional_attributes_no_gateway(self):
        """Disabling the gateway sends gateway_ip=None to subnet_create."""
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        api.neutron.subnet_create(IsA(http.HttpRequest),
                                  network_id=network.id,
                                  name=subnet.name,
                                  cidr=subnet.cidr,
                                  ip_version=subnet.ip_version,
                                  gateway_ip=None,
                                  enable_dhcp=subnet.enable_dhcp,
                                  allocation_pools=subnet.allocation_pools)\
            .AndReturn(subnet)
        self.mox.ReplayAll()
        form_data = form_data_subnet(subnet, gateway_ip=None)
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        redir_url = reverse('horizon:project:networks:detail',
                            args=[subnet.network_id])
        self.assertRedirectsNoFollow(res, redir_url)

    @test.create_stubs({api.neutron: ('network_get',
                                      'subnet_create',)})
    def test_subnet_create_post_network_exception(self):
        """A neutron error fetching the network aborts the workflow and
        redirects to the index without form errors.
        """
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndRaise(self.exceptions.neutron)
        self.mox.ReplayAll()
        form_data = form_data_subnet(subnet,
                                     allocation_pools=[])
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)

    @test.create_stubs({api.neutron: ('network_get',
                                      'subnet_create',)})
    def test_subnet_create_post_subnet_exception(self):
        """A neutron error from subnet_create redirects back to the
        network detail page.
        """
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        api.neutron.subnet_create(IsA(http.HttpRequest),
                                  network_id=network.id,
                                  name=subnet.name,
                                  cidr=subnet.cidr,
                                  ip_version=subnet.ip_version,
                                  gateway_ip=subnet.gateway_ip,
                                  enable_dhcp=subnet.enable_dhcp)\
            .AndRaise(self.exceptions.neutron)
        self.mox.ReplayAll()
        form_data = form_data_subnet(subnet,
                                     allocation_pools=[])
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        redir_url = reverse('horizon:project:networks:detail',
                            args=[subnet.network_id])
        self.assertRedirectsNoFollow(res, redir_url)

    @test.create_stubs({api.neutron: ('network_get',)})
    def test_subnet_create_post_cidr_inconsistent(self):
        """An IPv6 CIDR with an IPv4 subnet version is a form error."""
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        self.mox.ReplayAll()
        # dummy IPv6 address
        cidr = '2001:0DB8:0:CD30:123:4567:89AB:CDEF/60'
        form_data = form_data_subnet(subnet, cidr=cidr,
                                     allocation_pools=[])
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        expected_msg = 'Network Address and IP version are inconsistent.'
        self.assertFormErrors(res, 1, expected_msg)
        self.assertTemplateUsed(res, views.WorkflowView.template_name)

    @test.create_stubs({api.neutron: ('network_get',)})
    def test_subnet_create_post_gw_inconsistent(self):
        """An IPv6 gateway with an IPv4 subnet version is rejected."""
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        self.mox.ReplayAll()
        # dummy IPv6 address
        gateway_ip = '2001:0DB8:0:CD30:123:4567:89AB:CDEF'
        form_data = form_data_subnet(subnet, gateway_ip=gateway_ip,
                                     allocation_pools=[])
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertContains(res, 'Gateway IP and IP version are inconsistent.')

    @test.create_stubs({api.neutron: ('network_get',)})
    def test_subnet_create_post_invalid_pools_start_only(self):
        """An allocation pool with only a start address is rejected."""
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id).AndReturn(network)
        self.mox.ReplayAll()
        # Start only allocation_pools
        allocation_pools = '10.0.0.2'
        form_data = form_data_subnet(subnet,
                                     allocation_pools=allocation_pools)
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertContains(res,
                            'Start and end addresses must be specified '
                            '(value=%s)' % allocation_pools)

    @test.create_stubs({api.neutron: ('network_get',)})
    def test_subnet_create_post_invalid_pools_three_entries(self):
        """An allocation pool with three addresses is rejected."""
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id).AndReturn(network)
        self.mox.ReplayAll()
        # pool with three entries
        allocation_pools = '10.0.0.2,10.0.0.3,10.0.0.4'
        form_data = form_data_subnet(subnet,
                                     allocation_pools=allocation_pools)
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertContains(res,
                            'Start and end addresses must be specified '
                            '(value=%s)' % allocation_pools)

    @test.create_stubs({api.neutron: ('network_get',)})
    def test_subnet_create_post_invalid_pools_invalid_address(self):
        """A non-IP end address in an allocation pool is rejected."""
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id).AndReturn(network)
        self.mox.ReplayAll()
        # end address is not a valid IP address
        allocation_pools = '10.0.0.2,invalid_address'
        form_data = form_data_subnet(subnet,
                                     allocation_pools=allocation_pools)
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertContains(res,
                            'allocation_pools: Invalid IP address '
                            '(value=%s)' % allocation_pools.split(',')[1])

    @test.create_stubs({api.neutron: ('network_get',)})
    def test_subnet_create_post_invalid_pools_ip_network(self):
        """A CIDR (not a plain address) as pool start is rejected."""
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id).AndReturn(network)
        self.mox.ReplayAll()
        # start address is CIDR
        allocation_pools = '10.0.0.2/24,10.0.0.5'
        form_data = form_data_subnet(subnet,
                                     allocation_pools=allocation_pools)
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertContains(res,
                            'allocation_pools: Invalid IP address '
                            '(value=%s)' % allocation_pools.split(',')[0])

    @test.create_stubs({api.neutron: ('network_get',)})
    def test_subnet_create_post_invalid_pools_start_larger_than_end(self):
        """A pool whose start address exceeds its end is rejected."""
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id).AndReturn(network)
        self.mox.ReplayAll()
        # start address is larger than end address
        allocation_pools = '10.0.0.254,10.0.0.2'
        form_data = form_data_subnet(subnet,
                                     allocation_pools=allocation_pools)
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertContains(res,
                            'Start address is larger than end address '
                            '(value=%s)' % allocation_pools)

    @test.create_stubs({api.neutron: ('network_get',)})
    def test_subnet_create_post_invalid_nameservers(self):
        """An invalid DNS nameserver address is a form error."""
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id).AndReturn(network)
        self.mox.ReplayAll()
        # invalid DNS server address
        dns_nameservers = ['192.168.0.2', 'invalid_address']
        form_data = form_data_subnet(subnet, dns_nameservers=dns_nameservers,
                                     allocation_pools=[])
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertContains(res,
                            'dns_nameservers: Invalid IP address '
                            '(value=%s)' % dns_nameservers[1])

    @test.create_stubs({api.neutron: ('network_get',)})
    def test_subnet_create_post_invalid_routes_destination_only(self):
        """A host route missing its nexthop is rejected."""
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id).AndReturn(network)
        self.mox.ReplayAll()
        # Start only host_route
        host_routes = '192.168.0.0/24'
        form_data = form_data_subnet(subnet,
                                     allocation_pools=[],
                                     host_routes=host_routes)
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertContains(res,
                            'Host Routes format error: '
                            'Destination CIDR and nexthop must be specified '
                            '(value=%s)' % host_routes)

    @test.create_stubs({api.neutron: ('network_get',)})
    def test_subnet_create_post_invalid_routes_three_entries(self):
        """A host route with three comma-separated fields is rejected."""
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id).AndReturn(network)
        self.mox.ReplayAll()
        # host_route with three entries
        host_routes = 'aaaa,bbbb,cccc'
        form_data = form_data_subnet(subnet,
                                     allocation_pools=[],
                                     host_routes=host_routes)
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertContains(res,
                            'Host Routes format error: '
                            'Destination CIDR and nexthop must be specified '
                            '(value=%s)' % host_routes)

    @test.create_stubs({api.neutron: ('network_get',)})
    def test_subnet_create_post_invalid_routes_invalid_destination(self):
        """An invalid destination CIDR in a host route is rejected."""
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id).AndReturn(network)
        self.mox.ReplayAll()
        # invalid destination network
        host_routes = '172.16.0.0/64,10.0.0.253'
        form_data = form_data_subnet(subnet,
                                     host_routes=host_routes,
                                     allocation_pools=[])
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertContains(res,
                            'host_routes: Invalid IP address '
                            '(value=%s)' % host_routes.split(',')[0])

    @test.create_stubs({api.neutron: ('network_get',)})
    def test_subnet_create_post_invalid_routes_nexthop_ip_network(self):
        """A CIDR (not a plain address) as a route nexthop is rejected."""
        network = self.networks.first()
        subnet = self.subnets.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id).AndReturn(network)
        self.mox.ReplayAll()
        # nexthop is not an IP address
        host_routes = '172.16.0.0/24,10.0.0.253/24'
        form_data = form_data_subnet(subnet,
                                     host_routes=host_routes,
                                     allocation_pools=[])
        url = reverse('horizon:project:networks:addsubnet',
                      args=[subnet.network_id])
        res = self.client.post(url, form_data)
        self.assertContains(res,
                            'host_routes: Invalid IP address '
                            '(value=%s)' % host_routes.split(',')[1])

    @test.create_stubs({api.neutron: ('subnet_update',
                                      'subnet_get',)})
    def test_subnet_update_post(self):
        """A valid subnet update POST redirects to the network detail."""
        subnet = self.subnets.first()
        # subnet_get is called twice: once by the view, once by the form.
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndReturn(subnet)
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndReturn(subnet)
        api.neutron.subnet_update(IsA(http.HttpRequest), subnet.id,
                                  name=subnet.name,
                                  enable_dhcp=subnet.enable_dhcp,
                                  dns_nameservers=[],
                                  host_routes=[])\
            .AndReturn(subnet)
        self.mox.ReplayAll()
        form_data = form_data_subnet(subnet,
                                     allocation_pools=[])
        url = reverse('horizon:project:networks:editsubnet',
                      args=[subnet.network_id, subnet.id])
        res = self.client.post(url, form_data)
        redir_url = reverse('horizon:project:networks:detail',
                            args=[subnet.network_id])
        self.assertRedirectsNoFollow(res, redir_url)

    @test.create_stubs({api.neutron: ('subnet_update',
                                      'subnet_get',)})
    def test_subnet_update_post_with_gateway_ip(self):
        """Updating the gateway IP passes the new address through."""
        subnet = self.subnets.first()
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndReturn(subnet)
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndReturn(subnet)
        gateway_ip = '10.0.0.100'
        api.neutron.subnet_update(IsA(http.HttpRequest), subnet.id,
                                  name=subnet.name,
                                  gateway_ip=gateway_ip,
                                  enable_dhcp=subnet.enable_dhcp,
                                  dns_nameservers=[],
                                  host_routes=[])\
            .AndReturn(subnet)
        self.mox.ReplayAll()
        form_data = form_data_subnet(subnet,
                                     gateway_ip=gateway_ip,
                                     allocation_pools=[])
        url = reverse('horizon:project:networks:editsubnet',
                      args=[subnet.network_id, subnet.id])
        res = self.client.post(url, form_data)
        redir_url = reverse('horizon:project:networks:detail',
                            args=[subnet.network_id])
        self.assertRedirectsNoFollow(res, redir_url)

    @test.create_stubs({api.neutron: ('subnet_update',
                                      'subnet_get',)})
    def test_subnet_update_post_no_gateway(self):
        """Clearing the gateway sends gateway_ip=None on update."""
        subnet = self.subnets.first()
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndReturn(subnet)
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndReturn(subnet)
        api.neutron.subnet_update(IsA(http.HttpRequest), subnet.id,
                                  name=subnet.name,
                                  gateway_ip=None,
                                  enable_dhcp=subnet.enable_dhcp,
                                  dns_nameservers=[],
                                  host_routes=[])\
            .AndReturn(subnet)
        self.mox.ReplayAll()
        form_data = form_data_subnet(subnet,
                                     gateway_ip=None,
                                     allocation_pools=[])
        url = reverse('horizon:project:networks:editsubnet',
                      args=[subnet.network_id, subnet.id])
        res = self.client.post(url, form_data)
        redir_url = reverse('horizon:project:networks:detail',
                            args=[subnet.network_id])
        self.assertRedirectsNoFollow(res, redir_url)

    @test.create_stubs({api.neutron: ('subnet_update',
                                      'subnet_get',)})
    def test_subnet_update_post_with_additional_attributes(self):
        """Updating DHCP/DNS/host-route attributes passes them through."""
        subnet = self.subnets.list()[1]
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndReturn(subnet)
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndReturn(subnet)
        api.neutron.subnet_update(IsA(http.HttpRequest), subnet.id,
                                  name=subnet.name,
                                  enable_dhcp=False,
                                  dns_nameservers=subnet.dns_nameservers,
                                  host_routes=subnet.host_routes)\
            .AndReturn(subnet)
        self.mox.ReplayAll()
        form_data = form_data_subnet(subnet,
                                     enable_dhcp=False)
        url = reverse('horizon:project:networks:editsubnet',
                      args=[subnet.network_id, subnet.id])
        res = self.client.post(url, form_data)
        redir_url = reverse('horizon:project:networks:detail',
                            args=[subnet.network_id])
        self.assertRedirectsNoFollow(res, redir_url)

    @test.create_stubs({api.neutron: ('subnet_update',
                                      'subnet_get',)})
    def test_subnet_update_post_gw_inconsistent(self):
        """An IPv6 gateway on an IPv4 subnet is rejected on update."""
        subnet = self.subnets.first()
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndReturn(subnet)
        self.mox.ReplayAll()
        # dummy IPv6 address
        gateway_ip = '2001:0DB8:0:CD30:123:4567:89AB:CDEF'
        form_data = form_data_subnet(subnet, gateway_ip=gateway_ip,
                                     allocation_pools=[])
        url = reverse('horizon:project:networks:editsubnet',
                      args=[subnet.network_id, subnet.id])
        res = self.client.post(url, form_data)
        self.assertContains(res, 'Gateway IP and IP version are inconsistent.')

    @test.create_stubs({api.neutron: ('subnet_update',
                                      'subnet_get',)})
    def test_subnet_update_post_invalid_nameservers(self):
        """An invalid DNS nameserver address is rejected on update."""
        subnet = self.subnets.first()
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndReturn(subnet)
        self.mox.ReplayAll()
        # invalid DNS server address
        dns_nameservers = ['192.168.0.2', 'invalid_address']
        form_data = form_data_subnet(subnet, dns_nameservers=dns_nameservers,
                                     allocation_pools=[])
        url = reverse('horizon:project:networks:editsubnet',
                      args=[subnet.network_id, subnet.id])
        res = self.client.post(url, form_data)
        self.assertContains(res,
                            'dns_nameservers: Invalid IP address '
                            '(value=%s)' % dns_nameservers[1])

    @test.create_stubs({api.neutron: ('subnet_update',
                                      'subnet_get',)})
    def test_subnet_update_post_invalid_routes_destination_only(self):
        """A host route missing its nexthop is rejected on update."""
        subnet = self.subnets.first()
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndReturn(subnet)
        self.mox.ReplayAll()
        # Start only host_route
        host_routes = '192.168.0.0/24'
        form_data = form_data_subnet(subnet,
                                     allocation_pools=[],
                                     host_routes=host_routes)
        url = reverse('horizon:project:networks:editsubnet',
                      args=[subnet.network_id, subnet.id])
        res = self.client.post(url, form_data)
        self.assertContains(res,
                            'Host Routes format error: '
                            'Destination CIDR and nexthop must be specified '
                            '(value=%s)' % host_routes)

    @test.create_stubs({api.neutron: ('subnet_update',
                                      'subnet_get',)})
    def test_subnet_update_post_invalid_routes_three_entries(self):
        """A host route with three fields is rejected on update."""
        subnet = self.subnets.first()
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndReturn(subnet)
        self.mox.ReplayAll()
        # host_route with three entries
        host_routes = 'aaaa,bbbb,cccc'
        form_data = form_data_subnet(subnet,
                                     allocation_pools=[],
                                     host_routes=host_routes)
        url = reverse('horizon:project:networks:editsubnet',
                      args=[subnet.network_id, subnet.id])
        res = self.client.post(url, form_data)
        self.assertContains(res,
                            'Host Routes format error: '
                            'Destination CIDR and nexthop must be specified '
                            '(value=%s)' % host_routes)

    @test.create_stubs({api.neutron: ('subnet_update',
                                      'subnet_get',)})
    def test_subnet_update_post_invalid_routes_invalid_destination(self):
        """An invalid destination CIDR is rejected on update."""
        subnet = self.subnets.first()
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndReturn(subnet)
        self.mox.ReplayAll()
        # invalid destination network
        host_routes = '172.16.0.0/64,10.0.0.253'
        form_data = form_data_subnet(subnet,
                                     host_routes=host_routes,
                                     allocation_pools=[])
        url = reverse('horizon:project:networks:editsubnet',
                      args=[subnet.network_id, subnet.id])
        res = self.client.post(url, form_data)
        self.assertContains(res,
                            'host_routes: Invalid IP address '
                            '(value=%s)' % host_routes.split(',')[0])

    @test.create_stubs({api.neutron: ('subnet_update',
                                      'subnet_get',)})
    def test_subnet_update_post_invalid_routes_nexthop_ip_network(self):
        """A CIDR as a route nexthop is rejected on update."""
        subnet = self.subnets.first()
        api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
            .AndReturn(subnet)
        self.mox.ReplayAll()
        # nexthop is not an IP address
        host_routes = '172.16.0.0/24,10.0.0.253/24'
        form_data = form_data_subnet(subnet,
                                     host_routes=host_routes,
                                     allocation_pools=[])
        url = reverse('horizon:project:networks:editsubnet',
                      args=[subnet.network_id, subnet.id])
        res = self.client.post(url, form_data)
        self.assertContains(res,
                            'host_routes: Invalid IP address '
                            '(value=%s)' % host_routes.split(',')[1])

    @test.create_stubs({api.neutron: ('subnet_delete',
                                      'subnet_list',
                                      'network_get',
                                      'port_list',)})
    def test_subnet_delete(self):
        """Deleting a subnet from the table redirects to network detail."""
        subnet = self.subnets.first()
        network_id = subnet.network_id
        api.neutron.subnet_delete(IsA(http.HttpRequest), subnet.id)
        api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id)\
            .AndReturn([self.subnets.first()])
        api.neutron.network_get(IsA(http.HttpRequest), network_id)\
            .AndReturn(self.networks.first())
        api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id)\
            .AndReturn([self.ports.first()])
        # Called from SubnetTable
        api.neutron.network_get(IsA(http.HttpRequest), network_id)\
            .AndReturn(self.networks.first())
        self.mox.ReplayAll()
        form_data = {'action': 'subnets__delete__%s' % subnet.id}
        url = reverse('horizon:project:networks:detail',
                      args=[network_id])
        res = self.client.post(url, form_data)
        self.assertRedirectsNoFollow(res, url)

    @test.create_stubs({api.neutron: ('subnet_delete',
                                      'subnet_list',
                                      'network_get',
                                      'port_list',)})
    def test_subnet_delete_exception(self):
        # NOTE: renamed from the misspelled "test_subnet_delete_excceeption".
        """A neutron error on subnet_delete still redirects to detail."""
        subnet = self.subnets.first()
        network_id = subnet.network_id
        api.neutron.subnet_delete(IsA(http.HttpRequest), subnet.id)\
            .AndRaise(self.exceptions.neutron)
        api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id)\
            .AndReturn([self.subnets.first()])
        api.neutron.network_get(IsA(http.HttpRequest), network_id)\
            .AndReturn(self.networks.first())
        api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id)\
            .AndReturn([self.ports.first()])
        # Called from SubnetTable
        api.neutron.network_get(IsA(http.HttpRequest), network_id)\
            .AndReturn(self.networks.first())
        self.mox.ReplayAll()
        form_data = {'action': 'subnets__delete__%s' % subnet.id}
        url = reverse('horizon:project:networks:detail',
                      args=[network_id])
        res = self.client.post(url, form_data)
        self.assertRedirectsNoFollow(res, url)
class NetworkPortTests(test.TestCase):
    """Tests for the project-dashboard port views (detail and update).

    Neutron API calls are stubbed with mox; expectation order matters.
    """

    @test.create_stubs({api.neutron: ('port_get',)})
    def test_port_detail(self):
        """The port detail view renders with the requested port."""
        port = self.ports.first()
        api.neutron.port_get(IsA(http.HttpRequest), port.id)\
            .AndReturn(self.ports.first())
        self.mox.ReplayAll()
        res = self.client.get(reverse('horizon:project:networks:ports:detail',
                                      args=[port.id]))
        self.assertTemplateUsed(res, 'project/networks/ports/detail.html')
        self.assertEqual(res.context['port'].id, port.id)

    @test.create_stubs({api.neutron: ('port_get',)})
    def test_port_detail_exception(self):
        """A neutron error on port_get redirects to the index."""
        port = self.ports.first()
        api.neutron.port_get(IsA(http.HttpRequest), port.id)\
            .AndRaise(self.exceptions.neutron)
        self.mox.ReplayAll()
        res = self.client.get(reverse('horizon:project:networks:ports:detail',
                                      args=[port.id]))
        self.assertRedirectsNoFollow(res, INDEX_URL)

    @test.create_stubs({api.neutron: ('port_get',)})
    def test_port_update_get(self):
        """GET on the port edit view renders the update template."""
        port = self.ports.first()
        api.neutron.port_get(IsA(http.HttpRequest),
                             port.id)\
            .AndReturn(port)
        self.mox.ReplayAll()
        url = reverse('horizon:project:networks:editport',
                      args=[port.network_id, port.id])
        res = self.client.get(url)
        self.assertTemplateUsed(res, 'project/networks/ports/update.html')

    @test.create_stubs({api.neutron: ('port_get',
                                      'port_update')})
    def test_port_update_post(self):
        """A valid port update POST redirects to the network detail."""
        port = self.ports.first()
        api.neutron.port_get(IsA(http.HttpRequest), port.id)\
            .AndReturn(port)
        api.neutron.port_update(IsA(http.HttpRequest), port.id,
                                name=port.name,
                                admin_state_up=port.admin_state_up)\
            .AndReturn(port)
        self.mox.ReplayAll()
        form_data = {'network_id': port.network_id,
                     'port_id': port.id,
                     'name': port.name,
                     'admin_state': port.admin_state_up}
        url = reverse('horizon:project:networks:editport',
                      args=[port.network_id, port.id])
        res = self.client.post(url, form_data)
        redir_url = reverse('horizon:project:networks:detail',
                            args=[port.network_id])
        self.assertRedirectsNoFollow(res, redir_url)

    @test.create_stubs({api.neutron: ('port_get',
                                      'port_update')})
    def test_port_update_post_exception(self):
        """A neutron error during port update still redirects to detail
        (the error is surfaced via the messages framework).
        """
        port = self.ports.first()
        api.neutron.port_get(IsA(http.HttpRequest), port.id)\
            .AndReturn(port)
        api.neutron.port_update(IsA(http.HttpRequest), port.id,
                                name=port.name,
                                admin_state_up=port.admin_state_up)\
            .AndRaise(self.exceptions.neutron)
        self.mox.ReplayAll()
        form_data = {'network_id': port.network_id,
                     'port_id': port.id,
                     'name': port.name,
                     'admin_state': port.admin_state_up}
        url = reverse('horizon:project:networks:editport',
                      args=[port.network_id, port.id])
        res = self.client.post(url, form_data)
        redir_url = reverse('horizon:project:networks:detail',
                            args=[port.network_id])
        self.assertRedirectsNoFollow(res, redir_url)
| {
"content_hash": "2915a2e13a0e40a0acdbdb25d2650c59",
"timestamp": "",
"source": "github",
"line_count": 1468,
"max_line_length": 79,
"avg_line_length": 44.237057220708444,
"alnum_prop": 0.55247921157992,
"repo_name": "ikargis/horizon_fod",
"id": "9d2919ef1ef5c16ff35659b5ec5d88fab12d0331",
"size": "65594",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/project/networks/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "167455"
},
{
"name": "JavaScript",
"bytes": "1099746"
},
{
"name": "Python",
"bytes": "3023860"
},
{
"name": "Shell",
"bytes": "13740"
}
],
"symlink_target": ""
} |
"""
This module can do slight modifications to tidy a wiki page's source code.
The changes are not supposed to change the look of the rendered wiki page.
If you wish to run this as a stand-alone script, use scripts/cosmetic_changes.py
For regular use, it is recommended to put this line into your user-config.py:
cosmetic_changes = True
You may enable cosmetic changes for additional languages by adding the
dictionary cosmetic_changes_enable to your user-config.py. It should contain
a tuple of languages for each site where you wish to enable cosmetic changes
in addition to your own language if cosmetic_changes_mylang_only is True
(see below).
Please set your dictionary by adding such lines to your user-config.py:
cosmetic_changes_enable['wikipedia'] = ('de', 'en', 'fr')
There is another config variable: You can set
cosmetic_changes_mylang_only = False
if you're running a bot on multiple sites and want to do cosmetic changes on
all of them, but be careful if you do.
You may disable cosmetic changes by adding all the unwanted languages to the
dictionary cosmetic_changes_disable in your user-config.py. It should contain
a tuple of languages for each site where you wish to disable cosmetic changes.
You may use it when cosmetic_changes_mylang_only is False, but you can also
disable your own language. This also overrides the settings in the dictionary
cosmetic_changes_enable. Please set this dictionary by adding such lines to
your user-config.py:
cosmetic_changes_disable['wikipedia'] = ('de', 'en', 'fr')
You may disable cosmetic changes for a given script by appending all the
unwanted scripts to the list cosmetic_changes_deny_script in your
user-config.py. By default it contains cosmetic_changes.py itself and touch.py.
This overrides all other enabling settings for cosmetic changes. Please modify
the given list by adding such lines to your user-config.py:
cosmetic_changes_deny_script.append('your_script_name_1')
or by adding a list to the given one:
cosmetic_changes_deny_script += ['your_script_name_1', 'your_script_name_2']
"""
#
# (C) xqt, 2009-2015
# (C) Pywikibot team, 2006-2016
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
#
import re
from warnings import warn
try:
import stdnum.isbn as stdnum_isbn
except ImportError:
stdnum_isbn = None
import pywikibot
from pywikibot import config, textlib
from pywikibot.textlib import _MultiTemplateMatchBuilder
from pywikibot.tools import deprecate_arg, first_lower, first_upper
from pywikibot.tools import MediaWikiVersion
# This is from interwiki.py;
# move it to family file and implement global instances
moved_links = {
'ca': (u'ús de la plantilla', u'/ús'),
'cs': (u'dokumentace', u'/doc'),
'de': (u'dokumentation', u'/Meta'),
'en': ([u'documentation',
u'template documentation',
u'template doc',
u'doc',
u'documentation, template'], u'/doc'),
'es': ([u'documentación', u'documentación de plantilla'], u'/doc'),
'fa': ([u'documentation', u'توضیحات', u'توضیحات الگو',
u'doc'], u'/توضیحات'),
'fr': (u'/documentation', u'/Documentation'),
'hu': (u'sablondokumentáció', u'/doc'),
'id': (u'template doc', u'/doc'),
'ja': (u'documentation', u'/doc'),
'ka': (u'თარგის ინფო', u'/ინფო'),
'ko': (u'documentation', u'/설명문서'),
'ms': (u'documentation', u'/doc'),
'pl': (u'dokumentacja', u'/opis'),
'pt': ([u'documentação', u'/doc'], u'/doc'),
'ro': (u'documentaţie', u'/doc'),
'ru': (u'doc', u'/doc'),
'sv': (u'dokumentation', u'/dok'),
'vi': (u'documentation', u'/doc'),
'zh': ([u'documentation', u'doc'], u'/doc'),
}
# Template which should be replaced or removed.
# Use a list with two entries. The first entry will be replaced by the second.
# Examples:
# For removing {{Foo}}, the list must be:
# (u'Foo', None),
#
# The following also works:
# (u'Foo', ''),
#
# For replacing {{Foo}} with {{Bar}} the list must be:
# (u'Foo', u'Bar'),
#
# This also removes all template parameters of {{Foo}}
# For replacing {{Foo}} with {{Bar}} but keep the template
# parameters in its original order, please use:
# (u'Foo', u'Bar\g<parameters>'),
deprecatedTemplates = {
'wikipedia': {
'de': [
(u'Belege', u'Belege fehlen\\g<parameters>'),
(u'Quelle', u'Belege fehlen\\g<parameters>'),
(u'Quellen', u'Belege fehlen\\g<parameters>'),
(u'Quellen fehlen', u'Belege fehlen\\g<parameters>'),
],
}
}
CANCEL_ALL = False
CANCEL_PAGE = 1
CANCEL_METHOD = 2
CANCEL_MATCH = 3
def _format_isbn_match(match, strict=True):
"""Helper function to validate and format a single matched ISBN."""
scripts_isbn = None
if not stdnum_isbn:
# For backwards compatibility, if stdnum.isbn is not available
# attempt loading scripts.isbn as an alternative implementation.
try:
import scripts.isbn as scripts_isbn
except ImportError:
raise NotImplementedError(
'ISBN functionality not available. Install stdnum package.')
warn('package stdnum.isbn not found; using scripts.isbn',
ImportWarning)
isbn = match.group('code')
if stdnum_isbn:
try:
stdnum_isbn.validate(isbn)
except stdnum_isbn.ValidationError as e:
if strict:
raise
pywikibot.log('ISBN "%s" validation error: %s' % (isbn, e))
return isbn
return stdnum_isbn.format(isbn)
else:
try:
scripts_isbn.is_valid(isbn)
except scripts_isbn.InvalidIsbnException as e:
if strict:
raise
pywikibot.log('ISBN "%s" validation error: %s' % (isbn, e))
return isbn
isbn = scripts_isbn.getIsbn(isbn)
try:
isbn.format()
except scripts_isbn.InvalidIsbnException as e:
if strict:
raise
pywikibot.log('ISBN "%s" validation error: %s' % (isbn, e))
return isbn.code
def _reformat_ISBNs(text, strict=True):
"""Helper function to normalise ISBNs in text.
@raises Exception: Invalid ISBN encountered when strict enabled
"""
return textlib.reformat_ISBNs(
text, lambda match: _format_isbn_match(match, strict=strict))
class CosmeticChangesToolkit(object):
"""Cosmetic changes toolkit."""
@deprecate_arg('debug', 'diff')
def __init__(self, site, diff=False, redirect=False, namespace=None,
pageTitle=None, ignore=CANCEL_ALL):
"""Constructor."""
self.site = site
self.diff = diff
self.redirect = redirect
self.namespace = namespace
self.template = (self.namespace == 10)
self.talkpage = self.namespace >= 0 and self.namespace % 2 == 1
self.title = pageTitle
self.ignore = ignore
self.common_methods = (
self.commonsfiledesc,
self.fixSelfInterwiki,
self.standardizePageFooter,
self.fixSyntaxSave,
self.cleanUpLinks,
self.cleanUpSectionHeaders,
self.putSpacesInLists,
self.translateAndCapitalizeNamespaces,
# FIXME: fix bugs and re-enable
# self.translateMagicWords,
self.replaceDeprecatedTemplates,
# FIXME: fix bugs and re-enable
# self.resolveHtmlEntities,
self.removeUselessSpaces,
self.removeNonBreakingSpaceBeforePercent,
self.fixHtml,
self.fixReferences,
self.fixStyle,
self.fixTypo,
self.fixArabicLetters,
self.fix_ISBN,
)
@classmethod
def from_page(cls, page, diff, ignore):
"""Create toolkit based on the page."""
return cls(page.site, diff=diff, namespace=page.namespace(),
pageTitle=page.title(), ignore=ignore)
def safe_execute(self, method, text):
"""Execute the method and catch exceptions if enabled."""
result = None
try:
result = method(text)
except Exception as e:
if self.ignore == CANCEL_METHOD:
pywikibot.warning(u'Unable to perform "{0}" on "{1}"!'.format(
method.__name__, self.title))
pywikibot.exception(e)
else:
raise
return text if result is None else result
def _change(self, text):
"""Execute all clean up methods."""
for method in self.common_methods:
text = self.safe_execute(method, text)
return text
def change(self, text):
"""Execute all clean up methods and catch errors if activated."""
try:
new_text = self._change(text)
except Exception as e:
if self.ignore == CANCEL_PAGE:
pywikibot.warning(u'Skipped "{0}", because an error occurred.'.format(self.title))
pywikibot.exception(e)
return False
else:
raise
else:
if self.diff:
pywikibot.showDiff(text, new_text)
return new_text
def fixSelfInterwiki(self, text):
"""
Interwiki links to the site itself are displayed like local links.
Remove their language code prefix.
"""
if not self.talkpage and pywikibot.calledModuleName() != 'interwiki':
interwikiR = re.compile(r'\[\[%s\s?:([^\[\]\n]*)\]\]'
% self.site.code)
text = interwikiR.sub(r'[[\1]]', text)
return text
def standardizePageFooter(self, text):
"""
Standardize page footer.
Makes sure that interwiki links, categories and star templates are
put to the correct position and into the right order. This combines the
old instances standardizeInterwiki and standardizeCategories
The page footer has the following section in that sequence:
1. categories
2. ## TODO: template beyond categories ##
3. additional information depending on local site policy
4. stars templates for featured and good articles
5. interwiki links
"""
# TODO: T123150
starsList = [
u'bueno',
u'bom interwiki',
u'cyswllt[ _]erthygl[ _]ddethol', u'dolen[ _]ed',
u'destacado', u'destaca[tu]',
u'enllaç[ _]ad',
u'enllaz[ _]ad',
u'leam[ _]vdc',
u'legătură[ _]a[bcf]',
u'liamm[ _]pub',
u'lien[ _]adq',
u'lien[ _]ba',
u'liên[ _]kết[ _]bài[ _]chất[ _]lượng[ _]tốt',
u'liên[ _]kết[ _]chọn[ _]lọc',
u'ligam[ _]adq',
u'ligazón[ _]a[bd]',
u'ligoelstara',
u'ligoleginda',
u'link[ _][afgu]a', u'link[ _]adq', u'link[ _]f[lm]', u'link[ _]km',
u'link[ _]sm', u'linkfa',
u'na[ _]lotura',
u'nasc[ _]ar',
u'tengill[ _][úg]g',
u'ua',
u'yüm yg',
u'רא',
u'وصلة مقالة جيدة',
u'وصلة مقالة مختارة',
]
categories = None
interwikiLinks = None
allstars = []
# Pywikibot is no longer allowed to touch categories on the
# German Wikipedia. See
# https://de.wikipedia.org/wiki/Hilfe_Diskussion:Personendaten/Archiv/1#Position_der_Personendaten_am_.22Artikelende.22
# ignoring nn-wiki of cause of the comment line above iw section
if not self.template and '{{Personendaten' not in text and \
'{{SORTIERUNG' not in text and '{{DEFAULTSORT' not in text and \
self.site.code not in ('et', 'it', 'bg', 'ru'):
categories = textlib.getCategoryLinks(text, site=self.site)
if not self.talkpage: # and pywikibot.calledModuleName() <> 'interwiki':
subpage = False
if self.template:
loc = None
try:
tmpl, loc = moved_links[self.site.code]
del tmpl
except KeyError:
pass
if loc is not None and loc in self.title:
subpage = True
interwikiLinks = textlib.getLanguageLinks(
text, insite=self.site, template_subpage=subpage)
# Removing the interwiki
text = textlib.removeLanguageLinks(text, site=self.site)
# Removing the stars' issue
starstext = textlib.removeDisabledParts(text)
for star in starsList:
regex = re.compile(r'(\{\{(?:template:|)%s\|.*?\}\}[\s]*)'
% star, re.I)
found = regex.findall(starstext)
if found != []:
text = regex.sub('', text)
allstars += found
# Adding categories
if categories:
# TODO: Sorting categories in alphabetic order.
# e.g. using categories.sort()
# TODO: Taking main cats to top
# for name in categories:
# if (re.search(u"(.+?)\|(.{,1}?)",name.title()) or
# name.title() == name.title().split(":")[0] + title):
# categories.remove(name)
# categories.insert(0, name)
text = textlib.replaceCategoryLinks(text, categories,
site=self.site)
# Adding stars templates
if allstars:
text = text.strip() + self.site.family.interwiki_text_separator
allstars.sort()
for element in allstars:
text += '%s%s' % (element.strip(), config.line_separator)
pywikibot.log(u'%s' % element.strip())
# Adding the interwiki
if interwikiLinks:
text = textlib.replaceLanguageLinks(text, interwikiLinks,
site=self.site,
template=self.template,
template_subpage=subpage)
return text
def translateAndCapitalizeNamespaces(self, text):
"""Use localized namespace names."""
# arz uses english stylish codes
if self.site.sitename == 'wikipedia:arz':
return text
family = self.site.family
# wiki links aren't parsed here.
exceptions = ['nowiki', 'comment', 'math', 'pre']
for namespace in self.site.namespaces.values():
if namespace.id in (0, 2, 3):
# skip main (article) namespace
# skip user namespace, maybe gender is used
continue
# a clone is needed. Won't change the namespace dict
namespaces = list(namespace)
thisNs = namespaces.pop(0)
if namespace.id == 6 and family.name == 'wikipedia':
if self.site.code in ('en', 'fr') and \
MediaWikiVersion(self.site.version()) >= MediaWikiVersion('1.14'):
# do not change "Image" on en-wiki and fr-wiki
assert u'Image' in namespaces
namespaces.remove(u'Image')
if self.site.code == 'hu':
# do not change "Kép" on hu-wiki
assert u'Kép' in namespaces
namespaces.remove(u'Kép')
elif self.site.code == 'pt':
# TODO: bug T57242
continue
# lowerspaced and underscored namespaces
for i in range(len(namespaces)):
item = namespaces[i].replace(' ', '[ _]')
item = u'[%s%s]' % (item[0], item[0].lower()) + item[1:]
namespaces[i] = item
namespaces.append(first_lower(thisNs))
if thisNs and namespaces:
text = textlib.replaceExcept(
text,
r'\[\[\s*(%s) *:(?P<nameAndLabel>.*?)\]\]'
% '|'.join(namespaces),
r'[[%s:\g<nameAndLabel>]]' % thisNs,
exceptions)
return text
def translateMagicWords(self, text):
"""Use localized magic words."""
# not wanted at ru
# arz uses english stylish codes
if self.site.code not in ['arz', 'ru']:
exceptions = ['nowiki', 'comment', 'math', 'pre']
for magicWord in ['img_thumbnail', 'img_left', 'img_center',
'img_right', 'img_none', 'img_framed',
'img_frameless', 'img_border', 'img_upright', ]:
aliases = self.site.getmagicwords(magicWord)
if not aliases:
continue
text = textlib.replaceExcept(
text,
r'\[\[(?P<left>.+?:.+?\..+?\|) *(' + '|'.join(aliases) +
r') *(?P<right>(\|.*?)?\]\])',
r'[[\g<left>' + aliases[0] + r'\g<right>', exceptions)
return text
def cleanUpLinks(self, text):
"""Tidy up wikilinks found in a string.
This function will:
* Replace underscores with spaces
* Move leading and trailing spaces out of the wikilink and into the
surrounding text
* Convert URL-encoded characters into Unicode-encoded characters
* Move trailing characters out of the link and make the link without
using a pipe, if possible
* Capitalize the article title of the link, if appropriate
@param text: string to perform the clean-up on
@type text: str
@return: text with tidied wikilinks
@rtype: str
"""
# helper function which works on one link and either returns it
# unmodified, or returns a replacement.
def handleOneLink(match):
titleWithSection = match.group('titleWithSection')
label = match.group('label')
trailingChars = match.group('linktrail')
newline = match.group('newline')
if not self.site.isInterwikiLink(titleWithSection):
# The link looks like this:
# [[page_title|link_text]]trailing_chars
# We only work on namespace 0 because pipes and linktrails work
# differently for images and categories.
page = pywikibot.Page(pywikibot.Link(titleWithSection,
self.site))
try:
namespace = page.namespace()
except pywikibot.InvalidTitle:
return match.group()
if namespace == 0:
# Replace underlines by spaces, also multiple underlines
titleWithSection = re.sub('_+', ' ', titleWithSection)
# Remove double spaces
titleWithSection = re.sub(' +', ' ', titleWithSection)
# Remove unnecessary leading spaces from title,
# but remember if we did this because we eventually want
# to re-add it outside of the link later.
titleLength = len(titleWithSection)
titleWithSection = titleWithSection.lstrip()
hadLeadingSpaces = (len(titleWithSection) != titleLength)
hadTrailingSpaces = False
# Remove unnecessary trailing spaces from title,
# but remember if we did this because it may affect
# the linktrail and because we eventually want to
# re-add it outside of the link later.
if not trailingChars:
titleLength = len(titleWithSection)
titleWithSection = titleWithSection.rstrip()
hadTrailingSpaces = (len(titleWithSection) !=
titleLength)
# Convert URL-encoded characters to unicode
from pywikibot.page import url2unicode
titleWithSection = url2unicode(titleWithSection,
encodings=self.site)
if titleWithSection == '':
# just skip empty links.
return match.group()
# Remove unnecessary initial and final spaces from label.
# Please note that some editors prefer spaces around pipes.
# (See [[en:Wikipedia:Semi-bots]]). We remove them anyway.
if label is not None:
# Remove unnecessary leading spaces from label,
# but remember if we did this because we want
# to re-add it outside of the link later.
labelLength = len(label)
label = label.lstrip()
hadLeadingSpaces = (len(label) != labelLength)
# Remove unnecessary trailing spaces from label,
# but remember if we did this because it affects
# the linktrail.
if not trailingChars:
labelLength = len(label)
label = label.rstrip()
hadTrailingSpaces = (len(label) != labelLength)
else:
label = titleWithSection
if trailingChars:
label += trailingChars
if titleWithSection == label or \
first_lower(titleWithSection) == label:
newLink = "[[%s]]" % label
# Check if we can create a link with trailing characters
# instead of a pipelink
elif (len(titleWithSection) <= len(label) and
label[:len(titleWithSection)] == titleWithSection and
re.sub(trailR, '',
label[len(titleWithSection):]) == ''):
newLink = "[[%s]]%s" % (label[:len(titleWithSection)],
label[len(titleWithSection):])
else:
# Try to capitalize the first letter of the title.
# Not useful for languages that don't capitalize nouns.
# TODO: Add a configuration variable for each site,
# which determines if the link target is written in
# uppercase
if self.site.sitename == 'wikipedia:de':
titleWithSection = first_upper(titleWithSection)
newLink = "[[%s|%s]]" % (titleWithSection, label)
# re-add spaces that were pulled out of the link.
# Examples:
# text[[ title ]]text -> text [[title]] text
# text[[ title | name ]]text -> text [[title|name]] text
# text[[ title |name]]text -> text[[title|name]]text
# text[[title| name]]text -> text [[title|name]]text
if hadLeadingSpaces and not newline:
newLink = ' ' + newLink
if hadTrailingSpaces:
newLink = newLink + ' '
if newline:
newLink = newline + newLink
return newLink
# don't change anything
return match.group()
trailR = re.compile(self.site.linktrail())
# The regular expression which finds links. Results consist of four groups:
# group <newline> depends whether the links starts with a new line.
# group <titleWithSection> is the page title and section, that is,
# everything before | or ]. It'll include the # to make life easier for us.
# group <label> is the alternative link title between | and ].
# group <linktrail> is the link trail after ]] which are part of the word.
# note that the definition of 'letter' varies from language to language.
linkR = re.compile(
r'(?P<newline>[\n]*)\[\[(?P<titleWithSection>[^\]\|]+)'
r'(\|(?P<label>[^\]\|]*))?\]\](?P<linktrail>' +
self.site.linktrail() + ')')
text = textlib.replaceExcept(text, linkR, handleOneLink,
['comment', 'math', 'nowiki', 'pre',
'startspace'])
return text
def resolveHtmlEntities(self, text):
""""Resolve html entities."""
ignore = [
38, # Ampersand (&)
39, # Single quotation mark (") - bug T26093
60, # Less than (<)
62, # Great than (>)
91, # Opening square bracket ([)
# - sometimes used intentionally inside links
93, # Closing square bracket (])
# - used intentionally inside links
124, # Vertical bar (|)
# - used intentionally in navigation bar templates on w:de
160, # Non-breaking space ( )
# - not supported by Firefox textareas
173, # Soft-hypen (­) - enable editing
8206, # Left-to-right mark (<r;)
8207, # Right-to-left mark (&rtl;)
]
if self.template:
ignore += [58]
text = pywikibot.html2unicode(text, ignore=ignore)
return text
def removeUselessSpaces(self, text):
"""Cleanup multiple or trailing spaces."""
exceptions = ['comment', 'math', 'nowiki', 'pre', 'startspace', 'table']
if self.site.sitename != 'wikipedia:cs':
exceptions.append('template')
text = textlib.replaceExcept(text, r'(?m) +( |$)', r'\1', exceptions,
site=self.site)
return text
def removeNonBreakingSpaceBeforePercent(self, text):
"""
Remove a non-breaking space between number and percent sign.
Newer MediaWiki versions automatically place a non-breaking space in
front of a percent sign, so it is no longer required to place it
manually.
FIXME: which version should this be run on?
"""
text = textlib.replaceExcept(text, r'(\d) %', r'\1 %',
['timeline'])
return text
def cleanUpSectionHeaders(self, text):
"""
Add a space between the equal signs and the section title.
Example: ==Section title== becomes == Section title ==
NOTE: This space is recommended in the syntax help on the English and
German Wikipedia. It might be that it is not wanted on other wikis.
If there are any complaints, please file a bug report.
"""
return textlib.replaceExcept(
text,
r'(?m)^(={1,7}) *(?P<title>[^=]+?) *\1 *\r?\n',
r'\1 \g<title> \1%s' % config.LS,
['comment', 'math', 'nowiki', 'pre'])
def putSpacesInLists(self, text):
"""
Add a space between the * or # and the text.
NOTE: This space is recommended in the syntax help on the English,
German, and French Wikipedia. It might be that it is not wanted on other
wikis. If there are any complaints, please file a bug report.
"""
if not self.template:
exceptions = ['comment', 'math', 'nowiki', 'pre', 'source', 'template',
'timeline', self.site.redirectRegex()]
text = textlib.replaceExcept(
text,
r'(?m)^(?P<bullet>[:;]*(\*+|#+)[:;\*#]*)(?P<char>[^\s\*#:;].+?)',
r'\g<bullet> \g<char>',
exceptions)
return text
def replaceDeprecatedTemplates(self, text):
"""Replace deprecated templates."""
exceptions = ['comment', 'math', 'nowiki', 'pre']
builder = _MultiTemplateMatchBuilder(self.site)
if self.site.family.name in deprecatedTemplates and \
self.site.code in deprecatedTemplates[self.site.family.name]:
for template in deprecatedTemplates[
self.site.family.name][self.site.code]:
old = template[0]
new = template[1]
if new is None:
new = ''
else:
new = '{{%s}}' % new
text = textlib.replaceExcept(
text,
builder.pattern(old),
new, exceptions)
return text
# from fixes.py
def fixSyntaxSave(self, text):
"""Convert weblinks to wikilink, fix link syntax."""
def replace_link(match):
"""Create a string to replace a single link."""
replacement = '[[' + match.group('link')
if match.group('title'):
replacement += '|' + match.group('title')
return replacement + ']]'
exceptions = ['nowiki', 'comment', 'math', 'pre', 'source',
'startspace']
# link to the wiki working on
# Only use suffixes for article paths
for suffix in self.site._interwiki_urls(True):
http_url = self.site.base_url(suffix, 'http')
if self.site.protocol() == 'http':
https_url = None
else:
https_url = self.site.base_url(suffix, 'https')
# compare strings without the protocol, if they are empty support
# also no prefix (//en.wikipedia.org/…)
if https_url is not None and http_url[4:] == https_url[5:]:
urls = ['(?:https?:)?' + re.escape(http_url[5:])]
else:
urls = [re.escape(url) for url in (http_url, https_url)
if url is not None]
for url in urls:
# Only include links which don't include the separator as
# the wikilink won't support additional parameters
separator = '?'
if '?' in suffix:
separator += '&'
# Match first a non space in the title to prevent that multiple
# spaces at the end without title will be matched by it
text = textlib.replaceExcept(
text,
r'\[\[?' + url + r'(?P<link>[^' + separator + r']+?)'
r'(\s+(?P<title>[^\s].*?))?\s*\]\]?',
replace_link, exceptions, site=self.site)
# external link in/starting with double brackets
text = textlib.replaceExcept(
text,
r'\[\[(?P<url>https?://[^\]]+?)\]\]?',
r'[\g<url>]', exceptions, site=self.site)
# external link and description separated by a pipe, with
# whitespace in front of the pipe, so that it is clear that
# the dash is not a legitimate part of the URL.
text = textlib.replaceExcept(
text,
r'\[(?P<url>https?://[^\|\] \r\n]+?) +\| *(?P<label>[^\|\]]+?)\]',
r'[\g<url> \g<label>]', exceptions)
# dash in external link, where the correct end of the URL can
# be detected from the file extension. It is very unlikely that
# this will cause mistakes.
extensions = [r'\.{0}'.format(ext)
for ext in ['pdf', 'html?', 'php', 'aspx?', 'jsp']]
text = textlib.replaceExcept(
text,
r'\[(?P<url>https?://[^\|\] ]+?(' + '|'.join(extensions) + r')) *'
r'\| *(?P<label>[^\|\]]+?)\]',
r'[\g<url> \g<label>]', exceptions)
return text
def fixHtml(self, text):
"""Relace html markups with wikitext markups."""
def replace_header(match):
"""Create a header string for replacing."""
depth = int(match.group(1))
return r'{0} {1} {0}'.format('=' * depth, match.group(2))
# Everything case-insensitive (?i)
# Keep in mind that MediaWiki automatically converts <br> to <br />
exceptions = ['nowiki', 'comment', 'math', 'pre', 'source',
'startspace']
text = textlib.replaceExcept(text, r'(?i)<(b|strong)>(.*?)</\1>',
r"'''\2'''", exceptions, site=self.site)
text = textlib.replaceExcept(text, r'(?i)<(i|em)>(.*?)</\1>',
r"''\2''", exceptions, site=self.site)
# horizontal line without attributes in a single line
text = textlib.replaceExcept(text, r'(?i)([\r\n])<hr[ /]*>([\r\n])',
r'\1----\2', exceptions)
# horizontal line with attributes; can't be done with wiki syntax
# so we only make it XHTML compliant
text = textlib.replaceExcept(text, r'(?i)<hr ([^>/]+?)>',
r'<hr \1 />',
exceptions)
# a header where only spaces are in the same line
text = textlib.replaceExcept(
text,
r'(?i)(?<=[\r\n]) *<h([1-7])> *([^<]+?) *</h\1> *(?=[\r\n])',
replace_header,
exceptions)
# TODO: maybe we can make the bot replace <p> tags with \r\n's.
return text
def fixReferences(self, text):
"""Fix references tags."""
# See also https://en.wikipedia.org/wiki/User:AnomieBOT/source/tasks/OrphanReferenceFixer.pm
exceptions = ['nowiki', 'comment', 'math', 'pre', 'source',
'startspace']
# it should be name = " or name=" NOT name ="
text = re.sub(r'(?i)<ref +name(= *| *=)"', r'<ref name="', text)
# remove empty <ref/>-tag
text = textlib.replaceExcept(text,
r'(?i)(<ref\s*/>|<ref *>\s*</ref>)',
r'', exceptions)
text = textlib.replaceExcept(text,
r'(?i)<ref\s+([^>]+?)\s*>\s*</ref>',
r'<ref \1/>', exceptions)
return text
def fixStyle(self, text):
"""Convert prettytable to wikitable class."""
exceptions = ['nowiki', 'comment', 'math', 'pre', 'source',
'startspace']
if self.site.code in ('de', 'en'):
text = textlib.replaceExcept(text,
r'(class="[^"]*)prettytable([^"]*")',
r'\1wikitable\2', exceptions)
return text
def fixTypo(self, text):
"""Fix units."""
exceptions = ['nowiki', 'comment', 'math', 'pre', 'source',
'startspace', 'gallery', 'hyperlink', 'interwiki', 'link']
# change <number> ccm -> <number> cm³
text = textlib.replaceExcept(text, r'(\d)\s*(?: )?ccm',
r'\1 cm³', exceptions,
site=self.site)
# Solve wrong Nº sign with °C or °F
# additional exception requested on fr-wiki for this stuff
pattern = re.compile(u'«.*?»', re.UNICODE)
exceptions.append(pattern)
text = textlib.replaceExcept(text, r'(\d)\s*(?: )?[º°]([CF])',
r'\1 °\2', exceptions, site=self.site)
text = textlib.replaceExcept(text, u'º([CF])', u'°' + r'\1',
exceptions,
site=self.site)
return text
def fixArabicLetters(self, text):
"""Fix arabic and persian letters."""
if self.site.code not in ['ckb', 'fa']:
return text
exceptions = [
'gallery',
'file',
'hyperlink',
'interwiki',
# FIXME: but changes letters inside wikilinks
# 'link',
'math',
'pre',
'template',
'timeline',
'ref',
'source',
'startspace',
'inputbox',
]
# FIXME: use textlib.NON_LATIN_DIGITS
# valid digits
digits = {
'ckb': u'٠١٢٣٤٥٦٧٨٩',
'fa': u'۰۱۲۳۴۵۶۷۸۹',
}
faChrs = u'ءاآأإئؤبپتثجچحخدذرزژسشصضطظعغفقکگلمنوهیةيك' + digits['fa']
new = digits.pop(self.site.code)
# This only works if there are only two items in digits dict
old = digits[list(digits.keys())[0]]
# not to let bot edits in latin content
exceptions.append(re.compile(u"[^%(fa)s] *?\"*? *?, *?[^%(fa)s]"
% {'fa': faChrs}))
text = textlib.replaceExcept(text, ',', '،', exceptions, site=self.site)
if self.site.code == 'ckb':
text = textlib.replaceExcept(text,
'\u0647([.\u060c_<\\]\\s])',
'\u06d5\\1', exceptions,
site=self.site)
text = textlib.replaceExcept(text, 'ه\u200c', 'ە', exceptions,
site=self.site)
text = textlib.replaceExcept(text, 'ه', 'ھ', exceptions,
site=self.site)
text = textlib.replaceExcept(text, 'ك', 'ک', exceptions,
site=self.site)
text = textlib.replaceExcept(text, '[ىي]', 'ی', exceptions,
site=self.site)
return text
# FIXME: split this function into two.
# replace persian/arabic digits
# deactivated due to bug 55185
for i in range(0, 10):
text = textlib.replaceExcept(text, old[i], new[i], exceptions)
# do not change digits in class, style and table params
pattern = re.compile(r'\w+=(".+?"|\d+)', re.UNICODE)
exceptions.append(pattern)
# do not change digits inside html-tags
pattern = re.compile(u'<[/]*?[^</]+?[/]*?>', re.UNICODE)
exceptions.append(pattern)
exceptions.append('table') # exclude tables for now
# replace digits
for i in range(0, 10):
text = textlib.replaceExcept(text, str(i), new[i], exceptions)
return text
def commonsfiledesc(self, text):
"""
Clean up file descriptions on the Wikimedia Commons.
It is working according to [1] and works only on pages in the file
namespace on the Wikimedia Commons.
[1]: https://commons.wikimedia.org/wiki/Commons:Tools/pywiki_file_description_cleanup
"""
if self.site.sitename != 'commons:commons' or self.namespace == 6:
return
# section headers to {{int:}} versions
exceptions = ['comment', 'includeonly', 'math', 'noinclude', 'nowiki',
'pre', 'source', 'ref', 'timeline']
text = textlib.replaceExcept(text,
r"([\r\n]|^)\=\= *Summary *\=\=",
r"\1== {{int:filedesc}} ==",
exceptions, True)
text = textlib.replaceExcept(
text,
r"([\r\n])\=\= *\[\[Commons:Copyright tags\|Licensing\]\]: *\=\=",
r"\1== {{int:license-header}} ==", exceptions, True)
text = textlib.replaceExcept(
text,
r"([\r\n])\=\= *(Licensing|License information|{{int:license}}) *\=\=",
r"\1== {{int:license-header}} ==", exceptions, True)
# frequent field values to {{int:}} versions
text = textlib.replaceExcept(
text,
r'([\r\n]\|[Ss]ource *\= *)'
r'(?:[Oo]wn work by uploader|[Oo]wn work|[Ee]igene [Aa]rbeit) *([\r\n])',
r'\1{{own}}\2', exceptions, True)
text = textlib.replaceExcept(
text,
r'(\| *Permission *\=) *(?:[Ss]ee below|[Ss]iehe unten) *([\r\n])',
r'\1\2', exceptions, True)
# added to transwikied pages
text = textlib.replaceExcept(text, r'__NOTOC__', '', exceptions, True)
# tracker element for js upload form
text = textlib.replaceExcept(
text,
r'<!-- *{{ImageUpload\|(?:full|basic)}} *-->',
'', exceptions[1:], True)
text = textlib.replaceExcept(text, r'{{ImageUpload\|(?:basic|full)}}',
'', exceptions, True)
# duplicated section headers
text = textlib.replaceExcept(
text,
r'([\r\n]|^)\=\= *{{int:filedesc}} *\=\=(?:[\r\n ]*)\=\= *{{int:filedesc}} *\=\=',
r'\1== {{int:filedesc}} ==', exceptions, True)
text = textlib.replaceExcept(
text,
r'([\r\n]|^)\=\= *{{int:license-header}} *\=\=(?:[\r\n ]*)'
r'\=\= *{{int:license-header}} *\=\=',
r'\1== {{int:license-header}} ==', exceptions, True)
return text
def fix_ISBN(self, text):
"""Hyphenate ISBN numbers."""
return _reformat_ISBNs(
text, strict=False if self.ignore == CANCEL_MATCH else True)
| {
"content_hash": "27472cbefc259dc6803ebd9816366e72",
"timestamp": "",
"source": "github",
"line_count": 994,
"max_line_length": 127,
"avg_line_length": 41.99496981891348,
"alnum_prop": 0.5199195074623291,
"repo_name": "darthbhyrava/pywikibot-local",
"id": "7ba4c8e4d74f91362c675a9dd9e7c98992629c82",
"size": "41998",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pywikibot/cosmetic_changes.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "97"
},
{
"name": "Python",
"bytes": "4195172"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from __future__ import division
from builtins import range
from past.utils import old_div
import argparse
import time
import matplotlib
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import matplotlib.dates as mdates
import numpy as np
elapsed = [time.time()]
def measure(text=""):
"""Measure execution time."""
global elapsed
elapsed.append(time.time())
if text:
text += " "
print("%s%f" % (text, elapsed[-1] - elapsed[-2]))
def visualize(date, bid, ask, voodoo, spread=30):
"""Plot bid price, asking price and "voodoo" values."""
# Downsample
dateDs, bidDs, askDs, voodooDs = (
date[::spread], bid[::spread], ask[::spread], voodoo[::spread]
)
fig = plt.figure(figsize=(10,7))
ax1 = plt.subplot2grid((40,40), (0,0), rowspan=40, colspan=40)
ax1.plot(date, bid)
ax1.plot(date, ask)
plt.gca().get_yaxis().get_major_formatter().set_useOffset(False)
ax1_2 = ax1.twinx()
ax1_2.fill_between(date, 0, voodoo, facecolor='g', alpha=.3)
plt.subplots_adjust(bottom=.23)
plt.grid(True)
plt.show()
plt.close()
def load_prices(filename):
"""Load stock-prices."""
date, bid, ask = np.loadtxt(
filename,
unpack=True,
delimiter=',',
)
voodoo = np.empty(date.shape)
return date, bid, ask, voodoo
def quant(date, bid, ask, voodoo):
"""Apply some financial voodoo."""
future = 200
voodoo[:] = ask-bid
for i in range(0, future):
voodoo += old_div((ask-bid + ask-bid + ask-bid + ask-bid
+ask-bid + ask-bid + ask-bid + ask-bid
), 8)
voodoo[:] = old_div(voodoo, future)
def main(filename, with_viz=True):
date, bid, ask, voodoo = load_prices(filename) # Load dataset
measure()
quant(date, bid, ask, voodoo) # Compute something
measure("Computation took")
if with_viz:
visualize(date, bid, ask, voodoo) # Visualize the results
return voodoo
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Example illustrating a synthetic finance code.'
)
parser.add_argument(
'--filename', help="Path to filename", type=str, required=True
)
parser.add_argument(
'--no-viz', help="Disable visualization", action='store_true'
)
args = parser.parse_args()
R = main(args.filename, not args.no_viz)
print(R.sum())
import testcase
# contains the general testing method, which allows us to gather output
import os
def test_example():
    """Run this file as a script on the bundled dataset and check its output."""
    here = os.path.realpath(__file__)
    data_path = os.sep.join([os.path.dirname(here), "aux", "GBPUSD1m.txt"])
    command = here + ' --filename %s --no-viz' % data_path
    out = testcase.runpy(command)
    assert out.endswith('171.26344695\n')
| {
"content_hash": "2cd7406d4172ee6165cf6cc53f80fa5b",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 79,
"avg_line_length": 27.00925925925926,
"alnum_prop": 0.6109016112444292,
"repo_name": "russel/pychapel",
"id": "9fbd83faa17ce32d02862865d081d3540cb2e463",
"size": "2939",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docs/source/examples/test_finance_python_numpy.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "876283"
},
{
"name": "Chapel",
"bytes": "13882"
},
{
"name": "Makefile",
"bytes": "3619"
},
{
"name": "Python",
"bytes": "62598"
},
{
"name": "Shell",
"bytes": "3351"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import pytest
@pytest.fixture(autouse=True)
def no_warnings(recwarn):
    """Fail any test that emits a warning, except cheetah's empty-source one."""
    yield
    unexpected = []
    for warning in recwarn:
        # cheetah raises this warning when compiling a trivial file
        is_empty_source = (
            isinstance(warning.message, UserWarning) and
            str(warning.message) == (
                'You supplied an empty string for the source!'
            )
        )
        if not is_empty_source:
            unexpected.append(
                f'{warning.filename}:{warning.lineno} {warning.message}'
            )
    assert not unexpected
| {
"content_hash": "37cf59cc08052eaaa6488a0e78c9c9f0",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 67,
"avg_line_length": 26.65,
"alnum_prop": 0.6097560975609756,
"repo_name": "asottile/cheetah_lint",
"id": "6e37d556a520655ec3b254fe2cc7f9320a04b1b9",
"size": "533",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/conftest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38991"
}
],
"symlink_target": ""
} |
# Python 2 CGI script: prints diagnostic information to the browser.
import cgi
import cgitb
cgitb.enable() # Show errors to browser.
import sys
import os
import time
import md5  # NOTE(review): md5 is a Python-2-only module; hashlib replaces it.
# Redirect tracebacks into the response body so they show up in the browser.
sys.stderr = sys.stdout
# Cap request bodies at 1 MiB.
cgi.maxlen = 1024*1024
debug = True
# CGI header must precede any body output.
print 'Content-type: text/plain\n\n'
if debug:
    print 'Debug\n\n'
    print 'Python was here 5'
print 'os.environ:', repr(os.environ).replace(',', ',\n') | {
"content_hash": "f1495f2caec998a9678a78ee180b06b1",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 57,
"avg_line_length": 15.857142857142858,
"alnum_prop": 0.6906906906906907,
"repo_name": "svn2github/SVGKit",
"id": "421cb7182b602274cff4be87d1a14ac5cfa95d42",
"size": "352",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cgi-bin/pytest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "177"
},
{
"name": "CSS",
"bytes": "5186"
},
{
"name": "HTML",
"bytes": "219266"
},
{
"name": "JavaScript",
"bytes": "1228476"
},
{
"name": "Mathematica",
"bytes": "95664"
},
{
"name": "Perl",
"bytes": "679"
},
{
"name": "Python",
"bytes": "11694"
}
],
"symlink_target": ""
} |
"""Chromium auto-bisect tool
This script bisects a range of commits using binary search. It starts by getting
reference values for the specified "good" and "bad" commits. Then, for revisions
in between, it will get builds, run tests and classify intermediate revisions as
"good" or "bad" until an adjacent "good" and "bad" revision is found; this is
the culprit.
If the culprit is a roll of a dependency repository (e.g. v8), it will then
expand the revision range and continue the bisect until a culprit revision in
the dependency repository is found.
Example usage using git commit hashes, bisecting a performance test based on
the mean value of a particular metric:
./tools/auto_bisect/bisect_perf_regression.py
--command "out/Release/performance_ui_tests \
--gtest_filter=ShutdownTest.SimpleUserQuit"\
--metric shutdown/simple-user-quit
--good_revision 1f6e67861535121c5c819c16a666f2436c207e7b\
--bad_revision b732f23b4f81c382db0b23b9035f3dadc7d925bb\
Example usage using git commit positions, bisecting a functional test based on
whether it passes or fails.
./tools/auto_bisect/bisect_perf_regression.py\
--command "out/Release/content_unittests -single-process-tests \
--gtest_filter=GpuMemoryBufferImplTests"\
--good_revision 408222\
--bad_revision 408232\
--bisect_mode return_code\
--builder_type full
In practice, the auto-bisect tool is usually run on tryserver.chromium.perf
try bots, and is started by tools/run-bisect-perf-regression.py using
config parameters from tools/auto_bisect/bisect.cfg.
"""
import copy
import errno
import hashlib
import logging
import argparse
import os
import re
import shlex
import shutil
import StringIO
import sys
import time
# Make the sibling telemetry tools importable from this script's location.
sys.path.append(os.path.join(
    os.path.dirname(__file__), os.path.pardir, 'telemetry'))
from bisect_printer import BisectPrinter
from bisect_results import BisectResults
from bisect_state import BisectState
import bisect_utils
import builder
import fetch_build
import math_utils
import query_crbug
import request_build
import source_control
# The script is in chromium/src/tools/auto_bisect. Throughout this script,
# we use paths to other things in the chromium/src repository.

# Possible return values from BisectPerformanceMetrics.RunTest.
BUILD_RESULT_SUCCEED = 0
BUILD_RESULT_FAIL = 1
BUILD_RESULT_SKIPPED = 2

# How many times to repeat the test on the last known good and first known bad
# revisions in order to assess a more accurate confidence score in the
# regression culprit.
BORDER_REVISIONS_EXTRA_RUNS = 2

# Patch template to add a new file, DEPS.sha under src folder.
# This file contains SHA1 value of the DEPS changes made while bisecting
# dependency repositories. This patch send along with DEPS patch to try server.
# When a build requested is posted with a patch, bisect builders on try server,
# once build is produced, it reads SHA value from this file and appends it
# to build archive filename.
DEPS_SHA_PATCH = """diff --git DEPS.sha DEPS.sha
new file mode 100644
--- /dev/null
+++ DEPS.sha
@@ -0,0 +1 @@
+%(deps_sha)s
"""

# Message shown when the initial good/bad samples are indistinguishable;
# filled in via str.format with the sample statistics.
REGRESSION_NOT_REPRODUCED_MESSAGE_TEMPLATE = """
Bisect did not clearly reproduce a regression between the given "good"
and "bad" revisions.
Results:
"Good" revision: {good_rev}
\tMean: {good_mean}
\tStandard error: {good_std_err}
\tSample size: {good_sample_size}
"Bad" revision: {bad_rev}
\tMean: {bad_mean}
\tStandard error: {bad_std_err}
\tSample size: {bad_sample_size}
You may want to try bisecting on a different platform or metric.
"""

# Git branch name used to run bisect try jobs.
BISECT_TRYJOB_BRANCH = 'bisect-tryjob'
# Git master branch name.
BISECT_MASTER_BRANCH = 'master'
# File to store 'git diff' content.
BISECT_PATCH_FILE = 'deps_patch.txt'
# SVN repo where the bisect try jobs are submitted.
PERF_SVN_REPO_URL = 'svn://svn.chromium.org/chrome-try/try-perf'
FULL_SVN_REPO_URL = 'svn://svn.chromium.org/chrome-try/try'
ANDROID_CHROME_SVN_REPO_URL = ('svn://svn.chromium.org/chrome-try-internal/'
                               'try-perf')
class RunGitError(Exception):
  """Raised when a git command run by this script fails."""

  def __str__(self):
    detail = self.args[0]
    return '%s\nError executing git command.' % detail
def GetSHA1HexDigest(contents):
  """Returns the SHA1 hex digest of the given string."""
  sha1 = hashlib.sha1(contents)
  return sha1.hexdigest()
def WriteStringToFile(text, file_name):
  """Writes text to a file, raising a RuntimeError on failure.

  Args:
    text: Contents to write; the file is opened in binary mode.
    file_name: Destination path.

  Raises:
    RuntimeError: If the file could not be opened or written.
  """
  try:
    out_file = open(file_name, 'wb')
    try:
      out_file.write(text)
    finally:
      out_file.close()
  except IOError:
    raise RuntimeError('Error writing to file [%s]' % file_name)
def ReadStringFromFile(file_name):
  """Reads a file's contents as a string, raising a RuntimeError on failure.

  (The previous docstring said "Writes text to a file" -- a copy-paste from
  WriteStringToFile; this function only reads.)

  Args:
    file_name: Path of the file to read.

  Returns:
    The full contents of the file.

  Raises:
    RuntimeError: If the file could not be opened or read.
  """
  try:
    with open(file_name) as f:
      return f.read()
  except IOError:
    raise RuntimeError('Error reading file [%s]' % file_name)
def ChangeBackslashToSlashInPatch(diff_text):
  """Formats file paths in the given patch text to Unix-style paths.

  Only the file-header lines ('--- ' / '+++ ') are rewritten; backslashes
  elsewhere in the diff body are left untouched.

  Args:
    diff_text: The patch text, or a falsy value.

  Returns:
    The rewritten patch text, or None for empty input.
  """
  if not diff_text:
    return None
  fixed_lines = []
  for line in diff_text.split('\n'):
    if line.startswith(('--- ', '+++ ')):
      line = line.replace('\\', '/')
    fixed_lines.append(line)
  return '\n'.join(fixed_lines)
def _ParseRevisionsFromDEPSFileManually(deps_file_contents):
"""Parses the vars section of the DEPS file using regular expressions.
Args:
deps_file_contents: The DEPS file contents as a string.
Returns:
A dictionary in the format {depot: revision} if successful, otherwise None.
"""
# We'll parse the "vars" section of the DEPS file.
rxp = re.compile('vars = {(?P<vars_body>[^}]+)', re.MULTILINE)
re_results = rxp.search(deps_file_contents)
if not re_results:
return None
# We should be left with a series of entries in the vars component of
# the DEPS file with the following format:
# 'depot_name': 'revision',
vars_body = re_results.group('vars_body')
rxp = re.compile(r"'(?P<depot_body>[\w_-]+)':[\s]+'(?P<rev_body>[\w@]+)'",
re.MULTILINE)
re_results = rxp.findall(vars_body)
return dict(re_results)
def _WaitUntilBuildIsReady(fetch_build_func, builder_name, build_request_id,
                           max_timeout, buildbot_server_url):
  """Waits until build is produced by bisect builder on try server.

  Args:
    fetch_build_func: Function to check and download build from cloud storage.
    builder_name: Builder bot name on try server.
    build_request_id: A unique ID of the build request posted to try server.
    max_timeout: Maximum time to wait for the build.
    buildbot_server_url: Buildbot url to check build status.

  Returns:
    A (file_path, message) tuple: the downloaded archive file path if it
    exists (otherwise None), and a human-readable status message.
  """
  # Build number on the try server.
  build_num = None
  # Interval to check build on cloud storage.
  poll_interval = 60
  # Interval to check build status on try server in seconds.
  status_check_interval = 600
  last_status_check = time.time()
  start_time = time.time()

  while True:
    # Checks for build on gs://chrome-perf and download if exists.
    res = fetch_build_func()
    if res:
      return (res, 'Build successfully found')
    elapsed_status_check = time.time() - last_status_check
    # To avoid overloading try server with status check requests, we check
    # build status for every 10 minutes.
    if elapsed_status_check > status_check_interval:
      last_status_check = time.time()
      if not build_num:
        # Get the build number on try server for the current build.
        build_num = request_build.GetBuildNumFromBuilder(
            build_request_id, builder_name, buildbot_server_url)
      # Check the status of build using the build number.
      # Note: Build is treated as PENDING if build number is not found
      # on the the try server.
      build_status, status_link = request_build.GetBuildStatus(
          build_num, builder_name, buildbot_server_url)
      if build_status == request_build.FAILED:
        return (None, 'Failed to produce build, log: %s' % status_link)
    elapsed_time = time.time() - start_time
    if elapsed_time > max_timeout:
      return (None, 'Timed out: %ss without build' % max_timeout)

    logging.info('Time elapsed: %ss without build.', elapsed_time)
    time.sleep(poll_interval)
    # For some reason, mac bisect bots were not flushing stdout periodically.
    # As a result buildbot command is timed-out. Flush stdout on all platforms
    # while waiting for build.
    sys.stdout.flush()
def _UpdateV8Branch(deps_content):
"""Updates V8 branch in DEPS file to process v8_bleeding_edge.
Check for "v8_branch" in DEPS file if exists update its value
with v8_bleeding_edge branch. Note: "v8_branch" is added to DEPS
variable from DEPS revision 254916, therefore check for "src/v8":
<v8 source path> in DEPS in order to support prior DEPS revisions
and update it.
Args:
deps_content: DEPS file contents to be modified.
Returns:
Modified DEPS file contents as a string.
"""
new_branch = r'branches/bleeding_edge'
v8_branch_pattern = re.compile(r'(?<="v8_branch": ")(.*)(?=")')
if re.search(v8_branch_pattern, deps_content):
deps_content = re.sub(v8_branch_pattern, new_branch, deps_content)
else:
# Replaces the branch assigned to "src/v8" key in DEPS file.
# Format of "src/v8" in DEPS:
# "src/v8":
# (Var("googlecode_url") % "v8") + "/trunk@" + Var("v8_revision"),
# So, "/trunk@" is replace with "/branches/bleeding_edge@"
v8_src_pattern = re.compile(
r'(?<="v8"\) \+ "/)(.*)(?=@" \+ Var\("v8_revision"\))', re.MULTILINE)
if re.search(v8_src_pattern, deps_content):
deps_content = re.sub(v8_src_pattern, new_branch, deps_content)
return deps_content
def _UpdateDEPSForAngle(revision, depot, deps_file):
  """Updates DEPS file with new revision for Angle repository.

  This is a hack for Angle depot case because, in DEPS file "vars" dictionary
  variable contains "angle_revision" key that holds git hash instead of
  SVN revision.

  And sometimes "angle_revision" key is not specified in "vars" variable,
  in such cases check "deps" dictionary variable that matches
  angle.git@[a-fA-F0-9]{40}$ and replace git hash.

  Args:
    revision: New git hash for the Angle repository.
    depot: Depot name, used to look up the DEPS variable name.
    deps_file: Path of the DEPS file to rewrite.

  Returns:
    True on success, False if the revision could not be found or the file
    could not be read or written.
  """
  deps_var = bisect_utils.DEPOT_DEPS_NAME[depot]['deps_var']
  try:
    deps_contents = ReadStringFromFile(deps_file)
    # Check whether the depot and revision pattern in DEPS file vars variable
    # e.g. "angle_revision": "fa63e947cb3eccf463648d21a05d5002c9b8adfa".
    angle_rev_pattern = re.compile(r'(?<="%s": ")([a-fA-F0-9]{40})(?=")' %
                                   deps_var, re.MULTILINE)
    match = re.search(angle_rev_pattern, deps_contents)
    if match:
      # Update the revision information for the given depot
      new_data = re.sub(angle_rev_pattern, revision, deps_contents)
    else:
      # Check whether the depot and revision pattern in DEPS file deps
      # variable. e.g.,
      # "src/third_party/angle": Var("chromium_git") +
      # "/angle/angle.git@fa63e947cb3eccf463648d21a05d5002c9b8adfa",.
      angle_rev_pattern = re.compile(
          r'(?<=angle\.git@)([a-fA-F0-9]{40})(?=")', re.MULTILINE)
      match = re.search(angle_rev_pattern, deps_contents)
      if not match:
        logging.info('Could not find angle revision information in DEPS file.')
        return False
      new_data = re.sub(angle_rev_pattern, revision, deps_contents)
    # Write changes to DEPS file
    WriteStringToFile(new_data, deps_file)
    return True
  # "except IOError, e" (Python-2-only syntax) replaced with "as e" for
  # consistency with the other handlers in this file; valid on Python 2.6+.
  except IOError as e:
    logging.warn('Something went wrong while updating DEPS file, %s', e)
    return False
def _TryParseHistogramValuesFromOutput(metric, text):
"""Attempts to parse a metric in the format HISTOGRAM <graph: <trace>.
Args:
metric: The metric as a list of [<trace>, <value>] strings.
text: The text to parse the metric values from.
Returns:
A list of floating point numbers found, [] if none were found.
"""
metric_formatted = 'HISTOGRAM %s: %s= ' % (metric[0], metric[1])
text_lines = text.split('\n')
values_list = []
for current_line in text_lines:
if metric_formatted in current_line:
current_line = current_line[len(metric_formatted):]
try:
histogram_values = eval(current_line)
for b in histogram_values['buckets']:
average_for_bucket = float(b['high'] + b['low']) * 0.5
# Extends the list with N-elements with the average for that bucket.
values_list.extend([average_for_bucket] * b['count'])
except Exception:
pass
return values_list
def _TryParseResultValuesFromOutput(metric, text):
  """Attempts to parse a metric in the format RESULT <graph>: <trace>= ...

  Args:
    metric: The metric as a list of [<trace>, <value>] string pairs.
    text: The text to parse the metric values from.

  Returns:
    A list of floating point numbers found.
  """
  # Lines have the form: RESULT <graph>: <trace>= <value> <units>
  metric_re = re.escape('RESULT %s: %s=' % (metric[0], metric[1]))
  # Single value:    <*>RESULT <graph_name>: <trace_name>= <value>
  single_result_re = re.compile(
      metric_re + r'\s*(?P<VALUE>[-]?\d*(\.\d*)?)')
  # Value list:      <*>RESULT <graph_name>: <trace_name>= [<value>,...]
  multi_results_re = re.compile(
      metric_re + r'\s*\[\s*(?P<VALUES>[-]?[\d\., ]+)\s*\]')
  # Mean and stddev: <*>RESULT <graph_name>: <trace_name>= {<mean>, <stddev>}
  mean_stddev_re = re.compile(
      metric_re +
      r'\s*\{\s*(?P<MEAN>[-]?\d*(\.\d*)?),\s*(?P<STDDEV>\d+(\.\d*)?)\s*\}')

  raw_values = []
  for line in text.split('\n'):
    # Parse the output from the performance test for the metric we're
    # interested in. Order matters: the single-value pattern can match an
    # empty VALUE on list/dict lines, in which case we fall through.
    single_match = single_result_re.search(line)
    multi_match = multi_results_re.search(line)
    mean_stddev_match = mean_stddev_re.search(line)
    if single_match is not None and single_match.group('VALUE'):
      raw_values.append(single_match.group('VALUE'))
    elif multi_match is not None and multi_match.group('VALUES'):
      raw_values.extend(multi_match.group('VALUES').split(','))
    elif mean_stddev_match is not None and mean_stddev_match.group('MEAN'):
      raw_values.append(mean_stddev_match.group('MEAN'))

  return [float(v) for v in raw_values if bisect_utils.IsStringFloat(v)]
def _ParseMetricValuesFromOutput(metric, text):
  """Parses performance test output and retrieves values for a given metric.

  Tries the RESULT format first and falls back to the HISTOGRAM format.

  Args:
    metric: The metric as a list of [<trace>, <value>] strings.
    text: The text to parse the metric values from.

  Returns:
    A list of floating point numbers found.
  """
  values = _TryParseResultValuesFromOutput(metric, text)
  return values or _TryParseHistogramValuesFromOutput(metric, text)
def _GenerateProfileIfNecessary(command_args):
  """Checks the performance test command line for profile dependencies.

  If the test requires a generated profile, runs tools/perf/generate_profile
  to create it.

  Args:
    command_args: Command line being passed to performance test, as a list.

  Returns:
    False if profile generation was necessary and failed, otherwise True.
  """
  if '--profile-dir' not in ' '.join(command_args):
    return True
  # If we were using python 2.7+, we could just use argparse's
  # parse_known_args to grab --profile-dir. Since some of the bots still
  # run 2.6, grab the arguments manually.
  parsed = {}
  for wanted in ('--profile-dir', '--browser'):
    for position, token in enumerate(command_args):
      if wanted not in token:
        continue
      pieces = token.split('=')
      # Handle both --arg=<val> and --arg <val>.
      if len(pieces) == 2:
        parsed[wanted] = pieces[1]
      elif position + 1 < len(command_args):
        parsed[wanted] = command_args[position + 1]
  generate_script = os.path.join('tools', 'perf', 'generate_profile')
  if '--profile-dir' in parsed and '--browser' in parsed:
    profile_path, profile_type = os.path.split(parsed['--profile-dir'])
    return not bisect_utils.RunProcess(
        [
            'python', generate_script,
            '--profile-type-to-generate', profile_type,
            '--browser', parsed['--browser'],
            '--output-dir', profile_path
        ])
  return False
def _IsRegressionReproduced(known_good_result, known_bad_result,
                            required_initial_confidence):
  """Checks whether the regression was reproduced based on the initial values.

  Args:
    known_good_result: A dict with the keys "values", "mean" and "std_err".
    known_bad_result: Same as above.
    required_initial_confidence: Minimum confidence score for the given
        good and bad revisions to avoid early aborting.

  Returns:
    True if there is a clear change between the result values for the given
    good and bad revisions, False otherwise.
  """
  def PossiblyFlatten(values):
    """Flattens if needed, by averaging the values in each nested list."""
    is_nested = isinstance(values, list) and all(
        isinstance(entry, list) for entry in values)
    if is_nested:
      return map(math_utils.Mean, values)
    return values

  confidence = BisectResults.ConfidenceScore(
      PossiblyFlatten(known_bad_result['values']),
      PossiblyFlatten(known_good_result['values']),
      accept_single_bad_or_good=True)
  return confidence >= required_initial_confidence
def _RegressionNotReproducedWarningMessage(
    good_revision, bad_revision, known_good_value, known_bad_value):
  """Formats the warning shown when the regression could not be reproduced."""
  template_args = {
      'good_rev': good_revision,
      'good_mean': known_good_value['mean'],
      'good_std_err': known_good_value['std_err'],
      'good_sample_size': len(known_good_value['values']),
      'bad_rev': bad_revision,
      'bad_mean': known_bad_value['mean'],
      'bad_std_err': known_bad_value['std_err'],
      'bad_sample_size': len(known_bad_value['values']),
  }
  return REGRESSION_NOT_REPRODUCED_MESSAGE_TEMPLATE.format(**template_args)
class DepotDirectoryRegistry(object):
  """Maps depot names to their working directories under src/."""

  def __init__(self, src_cwd):
    self.depot_cwd = {}
    for depot in bisect_utils.DEPOT_NAMES:
      # The working directory of each depot is just the path to the depot,
      # but since we're already in 'src', we can skip that part.
      relative_path = bisect_utils.DEPOT_DEPS_NAME[depot]['src'][4:]
      self.SetDepotDir(depot, os.path.join(src_cwd, relative_path))
    self.SetDepotDir('chromium', src_cwd)

  def SetDepotDir(self, depot_name, depot_dir):
    """Records the working directory for the named depot."""
    self.depot_cwd[depot_name] = depot_dir

  def GetDepotDir(self, depot_name):
    """Returns the recorded working directory for the named depot."""
    if depot_name not in self.depot_cwd:
      assert False, ('Unknown depot [ %s ] encountered. Possibly a new one '
                     'was added without proper support?' % depot_name)
    return self.depot_cwd[depot_name]

  def ChangeToDepotDir(self, depot_name):
    """Given a depot, changes to the appropriate working directory.

    Args:
      depot_name: The name of the depot (see DEPOT_NAMES).
    """
    os.chdir(self.GetDepotDir(depot_name))
def _PrepareBisectBranch(parent_branch, new_branch):
  """Creates a new branch to submit bisect try job.

  Args:
    parent_branch: Parent branch to be used to create new branch.
    new_branch: New branch name.

  Raises:
    RunGitError: If any git operation fails or the working tree is dirty.
  """
  current_branch, returncode = bisect_utils.RunGit(
      ['rev-parse', '--abbrev-ref', 'HEAD'])
  if returncode:
    raise RunGitError('Must be in a git repository to send changes to trybots.')

  current_branch = current_branch.strip()
  # Make sure current branch is master.
  if current_branch != parent_branch:
    output, returncode = bisect_utils.RunGit(['checkout', '-f', parent_branch])
    if returncode:
      raise RunGitError('Failed to checkout branch: %s.' % output)

  # Delete new branch if exists.
  output, returncode = bisect_utils.RunGit(['branch', '--list'])
  if new_branch in output:
    # NOTE(review): RunGitError.__str__ only uses args[0], so the '%s' here
    # is never interpolated with `output` -- presumably meant
    # ('Deleting branch failed, %s' % output); confirm before changing.
    output, returncode = bisect_utils.RunGit(['branch', '-D', new_branch])
    if returncode:
      raise RunGitError('Deleting branch failed, %s', output)

  # Check if the tree is dirty: make sure the index is up to date and then
  # run diff-index.
  bisect_utils.RunGit(['update-index', '--refresh', '-q'])
  output, returncode = bisect_utils.RunGit(['diff-index', 'HEAD'])
  if output:
    raise RunGitError('Cannot send a try job with a dirty tree.')

  # Create and check out the telemetry-tryjob branch, and edit the configs
  # for the try job there.
  output, returncode = bisect_utils.RunGit(['checkout', '-b', new_branch])
  if returncode:
    raise RunGitError('Failed to checkout branch: %s.' % output)

  output, returncode = bisect_utils.RunGit(
      ['branch', '--set-upstream-to', parent_branch])
  if returncode:
    raise RunGitError('Error in git branch --set-upstream-to')
def _StartBuilderTryJob(
    builder_type, git_revision, builder_name, job_name, patch=None):
  """Attempts to run a try job from the current directory.

  Args:
    builder_type: One of the builder types in fetch_build, e.g. "perf".
    git_revision: A git commit hash.
    builder_name: Name of the bisect bot to be used for try job.
    job_name: Try job name, used to identify which bisect
        job was responsible for requesting a build.
    patch: A DEPS patch (used while bisecting dependency repositories),
        or None if we're bisecting the top-level repository.

  Raises:
    RunGitError: If the branch preparation or the "git try" command fails.
  """
  # TODO(prasadv, qyearsley): Make this a method of BuildArchive
  # (which may be renamed to BuilderTryBot or Builder).
  try:
    # Temporary branch for running a try job.
    _PrepareBisectBranch(BISECT_MASTER_BRANCH, BISECT_TRYJOB_BRANCH)
    patch_content = '/dev/null'
    # Create a temporary patch file.
    if patch:
      WriteStringToFile(patch, BISECT_PATCH_FILE)
      patch_content = BISECT_PATCH_FILE

    try_command = [
        'try',
        '--bot=%s' % builder_name,
        '--revision=%s' % git_revision,
        '--name=%s' % job_name,
        '--svn_repo=%s' % _TryJobSvnRepo(builder_type),
        '--diff=%s' % patch_content,
    ]
    # Execute try job to build revision.
    print try_command
    output, return_code = bisect_utils.RunGit(try_command)

    command_string = ' '.join(['git'] + try_command)
    if return_code:
      raise RunGitError('Could not execute try job: %s.\n'
                        'Error: %s' % (command_string, output))
    logging.info('Try job successfully submitted.\n TryJob Details: %s\n%s',
                 command_string, output)
  finally:
    # Delete patch file if exists.
    try:
      os.remove(BISECT_PATCH_FILE)
    except OSError as e:
      if e.errno != errno.ENOENT:
        raise
    # Checkout master branch and delete bisect-tryjob branch.
    bisect_utils.RunGit(['checkout', '-f', BISECT_MASTER_BRANCH])
    bisect_utils.RunGit(['branch', '-D', BISECT_TRYJOB_BRANCH])
def _TryJobSvnRepo(builder_type):
  """Returns an SVN repo to use for try jobs based on the builder type."""
  repo_by_builder = {
      fetch_build.PERF_BUILDER: PERF_SVN_REPO_URL,
      fetch_build.FULL_BUILDER: FULL_SVN_REPO_URL,
      fetch_build.ANDROID_CHROME_PERF_BUILDER: ANDROID_CHROME_SVN_REPO_URL,
  }
  if builder_type in repo_by_builder:
    return repo_by_builder[builder_type]
  raise NotImplementedError('Unknown builder type "%s".' % builder_type)
class BisectPerformanceMetrics(object):
"""This class contains functionality to perform a bisection of a range of
revisions to narrow down where performance regressions may have occurred.
The main entry-point is the Run method.
"""
  def __init__(self, opts, src_cwd):
    """Constructs a BisectPerformancesMetrics object.

    Args:
      opts: BisectOptions object containing parsed options.
      src_cwd: Root src/ directory of the test repository (inside bisect/ dir).
    """
    super(BisectPerformanceMetrics, self).__init__()

    self.opts = opts
    self.src_cwd = src_cwd
    # Maps depot names to working directories, rooted at src_cwd.
    self.depot_registry = DepotDirectoryRegistry(self.src_cwd)
    self.printer = BisectPrinter(self.opts, self.depot_registry)
    # Deferred cleanup actions run by PerformCleanup(); each entry has the
    # form ['mv', <source>, <destination>].
    self.cleanup_commands = []
    # Human-readable warnings accumulated during the bisect run.
    self.warnings = []
    self.builder = builder.Builder.FromOpts(opts)
def PerformCleanup(self):
"""Performs cleanup when script is finished."""
os.chdir(self.src_cwd)
for c in self.cleanup_commands:
if c[0] == 'mv':
shutil.move(c[1], c[2])
else:
assert False, 'Invalid cleanup command.'
def GetRevisionList(self, depot, bad_revision, good_revision):
"""Retrieves a list of all the commits between the bad revision and
last known good revision."""
cwd = self.depot_registry.GetDepotDir(depot)
return source_control.GetRevisionList(bad_revision, good_revision, cwd=cwd)
  def _ParseRevisionsFromDEPSFile(self, depot):
    """Parses the local DEPS file to determine blink/skia/v8 revisions which may
    be needed if the bisect recurses into those depots later.

    Args:
      depot: Name of depot being bisected.

    Returns:
      A dict in the format {depot:revision} if successful, otherwise None.
    """
    try:
      # Minimal stand-ins for the helper functions DEPS files call.
      deps_data = {
          'Var': lambda _: deps_data["vars"][_],
          'From': lambda *args: None,
      }

      deps_file = bisect_utils.FILE_DEPS_GIT
      if not os.path.exists(deps_file):
        deps_file = bisect_utils.FILE_DEPS
      # execfile (Python 2 only) evaluates the DEPS file with deps_data as
      # its local namespace, populating 'vars' and 'deps'.
      execfile(deps_file, {}, deps_data)
      deps_data = deps_data['deps']

      # Extracts the git hash following ".git@" in each deps entry.
      rxp = re.compile(".git@(?P<revision>[a-fA-F0-9]+)")
      results = {}
      for depot_name, depot_data in bisect_utils.DEPOT_DEPS_NAME.iteritems():
        # Skip depots restricted to a different platform.
        if (depot_data.get('platform') and
            depot_data.get('platform') != os.name):
          continue

        if depot_data.get('recurse') and depot in depot_data.get('from'):
          depot_data_src = depot_data.get('src') or depot_data.get('src_old')
          src_dir = deps_data.get(depot_data_src)
          if src_dir:
            # Strip the leading 'src/' from the deps path (same convention
            # as DepotDirectoryRegistry).
            self.depot_registry.SetDepotDir(depot_name, os.path.join(
                self.src_cwd, depot_data_src[4:]))
            re_results = rxp.search(src_dir)
            if re_results:
              results[depot_name] = re_results.group('revision')
            else:
              warning_text = ('Could not parse revision for %s while bisecting '
                              '%s' % (depot_name, depot))
              if not warning_text in self.warnings:
                self.warnings.append(warning_text)
          else:
            results[depot_name] = None
      return results
    except ImportError:
      # Fall back to regex parsing when evaluating the DEPS file fails with
      # an ImportError (e.g. it references modules unavailable here).
      deps_file_contents = ReadStringFromFile(deps_file)
      parse_results = _ParseRevisionsFromDEPSFileManually(deps_file_contents)
      results = {}
      for depot_name, depot_revision in parse_results.iteritems():
        depot_revision = depot_revision.strip('@')
        # NOTE(review): passing depot_name as logging.warn's format string
        # is suspect; presumably meant
        # logging.warn('%s: %s', depot_name, depot_revision) -- confirm.
        logging.warn(depot_name, depot_revision)
        for cur_name, cur_data in bisect_utils.DEPOT_DEPS_NAME.iteritems():
          if cur_data.get('deps_var') == depot_name:
            src_name = cur_name
            results[src_name] = depot_revision
            break
      return results
def _Get3rdPartyRevisions(self, depot):
"""Parses the DEPS file to determine WebKit/v8/etc... versions.
Args:
depot: A depot name. Should be in the DEPOT_NAMES list.
Returns:
A dict in the format {depot: revision} if successful, otherwise None.
"""
cwd = os.getcwd()
self.depot_registry.ChangeToDepotDir(depot)
results = {}
if depot == 'chromium' or depot == 'android-chrome':
results = self._ParseRevisionsFromDEPSFile(depot)
os.chdir(cwd)
if depot == 'v8':
# We can't try to map the trunk revision to bleeding edge yet, because
# we don't know which direction to try to search in. Have to wait until
# the bisect has narrowed the results down to 2 v8 rolls.
results['v8_bleeding_edge'] = None
return results
  def BackupOrRestoreOutputDirectory(self, restore=False, build_type='Release'):
    """Backs up or restores build output directory based on restore argument.

    Args:
      restore: Indicates whether to restore or backup. Default is False(Backup)
      build_type: Target build type ('Release', 'Debug', 'Release_x64' etc.)

    Returns:
      Path to backup or restored location as string. otherwise None if it fails.
    """
    build_dir = os.path.abspath(
        builder.GetBuildOutputDirectory(self.opts, self.src_cwd))
    source_dir = os.path.join(build_dir, build_type)
    destination_dir = os.path.join(build_dir, '%s.bak' % build_type)
    if restore:
      # Restoring swaps the roles: move <build_type>.bak back to <build_type>.
      source_dir, destination_dir = destination_dir, source_dir
    if os.path.exists(source_dir):
      # RemoveDirectoryTree is defined elsewhere in this module; presumably
      # it clears any stale destination before the move -- confirm.
      RemoveDirectoryTree(destination_dir)
      shutil.move(source_dir, destination_dir)
      return destination_dir
    return None
  def _DownloadAndUnzipBuild(self, revision, depot, build_type='Release',
                             create_patch=False):
    """Downloads the build archive for the given revision.

    Args:
      revision: The git revision to download.
      depot: The name of a dependency repository. Should be in DEPOT_NAMES.
      build_type: Target build type, e.g. Release', 'Debug', 'Release_x64' etc.
      create_patch: Create a patch with any locally modified files.

    Returns:
      True if download succeeds, otherwise False.
    """
    patch = None
    patch_sha = None
    if depot not in ('chromium', 'android-chrome'):
      # Create a DEPS patch with new revision for dependency repository.
      self._CreateDEPSPatch(depot, revision)
      create_patch = True

    if create_patch:
      revision, patch = self._CreatePatch(revision)

    if patch:
      # Get the SHA of the DEPS changes patch.
      patch_sha = GetSHA1HexDigest(patch)

      # Update the DEPS changes patch with a patch to create a new file named
      # 'DEPS.sha' and add patch_sha evaluated above to it. The builder uses
      # this SHA to name the build archive (see DEPS_SHA_PATCH).
      patch = '%s\n%s' % (patch, DEPS_SHA_PATCH % {'deps_sha': patch_sha})

    build_dir = builder.GetBuildOutputDirectory(self.opts, self.src_cwd)
    downloaded_file = self._WaitForBuildDownload(
        revision, build_dir, deps_patch=patch, deps_patch_sha=patch_sha)
    if not downloaded_file:
      return False
    return self._UnzipAndMoveBuildProducts(downloaded_file, build_dir,
                                           build_type=build_type)
  def _WaitForBuildDownload(self, revision, build_dir, deps_patch=None,
                            deps_patch_sha=None):
    """Tries to download a zip archive for a build.

    This involves seeing whether the archive is already available, and if not,
    then requesting a build and waiting before downloading.

    Args:
      revision: A git commit hash.
      build_dir: The directory to download the build into.
      deps_patch: A patch which changes a dependency repository revision in
          the DEPS, if applicable.
      deps_patch_sha: The SHA1 hex digest of the above patch.

    Returns:
      File path of the downloaded file if successful, otherwise None.
    """
    bucket_name, remote_path = fetch_build.GetBucketAndRemotePath(
        revision, builder_type=self.opts.builder_type,
        target_arch=self.opts.target_arch,
        target_platform=self.opts.target_platform,
        deps_patch_sha=deps_patch_sha,
        extra_src=self.opts.extra_src)
    output_dir = os.path.abspath(build_dir)
    # Deferred download, reused both here and while polling the try server.
    fetch_build_func = lambda: fetch_build.FetchFromCloudStorage(
        bucket_name, remote_path, output_dir)

    is_available = fetch_build.BuildIsAvailable(bucket_name, remote_path)
    if is_available:
      return fetch_build_func()

    # When build archive doesn't exist, make a request and wait.
    return self._RequestBuildAndWait(
        revision, fetch_build_func, deps_patch=deps_patch)
def _RequestBuildAndWait(self, git_revision, fetch_build_func,
                         deps_patch=None):
  """Triggers a try job for a build job.

  This function prepares and starts a try job for a builder, and waits for
  the archive to be produced and archived. Once the build is ready it is
  downloaded.

  For performance tests, builders on the tryserver.chromium.perf are used.

  TODO(qyearsley): Make this function take "builder_type" as a parameter
  and make requests to different bot names based on that parameter.

  Args:
    git_revision: A git commit hash.
    fetch_build_func: Function to check and download build from cloud storage.
    deps_patch: DEPS patch string, used when bisecting dependency repos.

  Returns:
    Downloaded archive file path when requested build exists and download is
    successful, otherwise None.
  """
  if not fetch_build_func:
    return None
  # Create a unique ID for each build request posted to try server builders.
  # This ID is added to "Reason" property of the build, and is later used by
  # _WaitUntilBuildIsReady to identify our job among the builder's builds.
  build_request_id = GetSHA1HexDigest(
      '%s-%s-%s' % (git_revision, deps_patch, time.time()))
  # Revert any changes to DEPS file, so the try job patch applies cleanly.
  bisect_utils.CheckRunGit(['reset', '--hard', 'HEAD'], cwd=self.src_cwd)
  builder_name, build_timeout = fetch_build.GetBuilderNameAndBuildTime(
      builder_type=self.opts.builder_type,
      target_arch=self.opts.target_arch,
      target_platform=self.opts.target_platform,
      extra_src=self.opts.extra_src)
  try:
    _StartBuilderTryJob(self.opts.builder_type, git_revision, builder_name,
                        job_name=build_request_id, patch=deps_patch)
  except RunGitError as e:
    # Failing to post the job is non-fatal for the bisect as a whole; the
    # caller treats None as "no build available".
    logging.warn('Failed to post builder try job for revision: [%s].\n'
                 'Error: %s', git_revision, e)
    return None
  # Get the buildbot master URL to monitor build status.
  buildbot_server_url = fetch_build.GetBuildBotUrl(
      builder_type=self.opts.builder_type,
      target_arch=self.opts.target_arch,
      target_platform=self.opts.target_platform,
      extra_src=self.opts.extra_src)
  archive_filename, error_msg = _WaitUntilBuildIsReady(
      fetch_build_func, builder_name, build_request_id, build_timeout,
      buildbot_server_url)
  if not archive_filename:
    logging.warn('%s [revision: %s]', error_msg, git_revision)
  # archive_filename is None on failure; the error was logged above.
  return archive_filename
def _UnzipAndMoveBuildProducts(self, downloaded_file, build_dir,
                               build_type='Release'):
  """Unzips the build archive and moves it to the build output directory.

  The build output directory is wherever the binaries are expected to
  be in order to start Chrome and run tests.

  TODO: Simplify and clarify this method if possible.

  Args:
    downloaded_file: File path of the downloaded zip file.
    build_dir: Directory where the the zip file was downloaded to.
    build_type: "Release" or "Debug".

  Returns:
    True if successful, False otherwise. The downloaded archive is always
    deleted, and on failure the previous output directory is restored.
  """
  abs_build_dir = os.path.abspath(build_dir)
  # Where the archive is expected to extract to, e.g. full-build-linux/.
  output_dir = os.path.join(abs_build_dir, self.GetZipFileBuildDirName())
  logging.info('EXPERIMENTAL RUN, _UnzipAndMoveBuildProducts locals %s',
               str(locals()))
  try:
    RemoveDirectoryTree(output_dir)
    # Move the current output directory aside so it can be restored if
    # anything below fails.
    self.BackupOrRestoreOutputDirectory(restore=False)
    # Build output directory based on target(e.g. out/Release, out/Debug).
    target_build_output_dir = os.path.join(abs_build_dir, build_type)
    logging.info('Extracting "%s" to "%s"', downloaded_file, abs_build_dir)
    fetch_build.Unzip(downloaded_file, abs_build_dir)
    if not os.path.exists(output_dir):
      # Due to recipe changes, the builds extract folder contains
      # out/Release instead of full-build-<platform>/Release.
      if os.path.exists(os.path.join(abs_build_dir, 'out', build_type)):
        output_dir = os.path.join(abs_build_dir, 'out', build_type)
      else:
        raise IOError('Missing extracted folder %s ' % output_dir)
    logging.info('Moving build from %s to %s',
                 output_dir, target_build_output_dir)
    shutil.move(output_dir, target_build_output_dir)
    return True
  except Exception as e:
    logging.info('Something went wrong while extracting archive file: %s', e)
    # Put the backed-up output directory back in place.
    self.BackupOrRestoreOutputDirectory(restore=True)
    # Cleanup any leftovers from unzipping.
    if os.path.exists(output_dir):
      RemoveDirectoryTree(output_dir)
  finally:
    # Delete downloaded archive
    if os.path.exists(downloaded_file):
      os.remove(downloaded_file)
  # Only reached on failure; the success path returned True above.
  return False
@staticmethod
def GetZipFileBuildDirName():
  """Gets the base file name of the zip file.

  After extracting the zip file, this is the name of the directory where
  the build files are expected to be. Possibly.

  TODO: Make sure that this returns the actual directory name where the
  Release or Debug directory is inside of the zip files. This probably
  depends on the builder recipe, and may depend on whether the builder is
  a perf builder or full builder.

  Returns:
    The name of the directory inside a build archive which is expected to
    contain a Release or Debug directory.
  """
  # Checked in order; first matching host platform wins.
  host_checks = (
      (bisect_utils.IsWindowsHost, 'full-build-win32'),
      (bisect_utils.IsLinuxHost, 'full-build-linux'),
      (bisect_utils.IsMacHost, 'full-build-mac'),
  )
  for is_host, dir_name in host_checks:
    if is_host():
      return dir_name
  raise NotImplementedError('Unknown platform "%s".' % sys.platform)
def IsDownloadable(self, depot):
  """Checks if build can be downloaded based on target platform and depot."""
  platform = self.opts.target_platform
  # Downloads are only supported for these platforms, and only when a
  # builder type has been configured.
  if platform not in ('chromium', 'android', 'android-chrome'):
    return False
  if not self.opts.builder_type:
    return False
  if platform == 'android-chrome':
    # On android-chrome, archives are downloaded only for the
    # android-chrome depot; other depots (chromium, v8, skia, ...) are
    # built locally.
    return depot == 'android-chrome'
  if depot == 'chromium':
    return True
  depot_parents = bisect_utils.DEPOT_DEPS_NAME[depot]['from']
  return 'chromium' in depot_parents or 'v8' in depot_parents
def UpdateDepsContents(self, deps_contents, depot, git_revision, deps_key):
  """Returns modified version of DEPS file contents.

  Args:
    deps_contents: DEPS file content.
    depot: Current depot being bisected.
    git_revision: A git hash to be updated in DEPS.
    deps_key: Key in vars section of DEPS file to be searched.

  Returns:
    Updated DEPS content as string if deps key is found, otherwise None.
  """
  # Check whether the depot and revision pattern in DEPS file vars
  # e.g. for webkit the format is "webkit_revision": "12345".
  # Lookbehind/lookahead keep the quotes and key out of the replaced text.
  deps_revision = re.compile(r'(?<="%s": ")([0-9]+)(?=")' % deps_key,
                             re.MULTILINE)
  new_data = None
  if re.search(deps_revision, deps_contents):
    # Numeric (commit-position) style entry: the git hash must first be
    # resolved to a commit position before substitution.
    commit_position = source_control.GetCommitPosition(
        git_revision, self.depot_registry.GetDepotDir(depot))
    if not commit_position:
      logging.warn('Could not determine commit position for %s', git_revision)
      return None
    # Update the revision information for the given depot
    new_data = re.sub(deps_revision, str(commit_position), deps_contents)
  else:
    # Check whether the depot and revision pattern in DEPS file vars
    # e.g. for webkit the format is "webkit_revision": "559a6d4ab7a84c539..".
    deps_revision = re.compile(
        r'(?<=["\']%s["\']: ["\'])([a-fA-F0-9]{40})(?=["\'])' % deps_key,
        re.MULTILINE)
    if re.search(deps_revision, deps_contents):
      # Git-hash style entry: substitute the hash directly.
      new_data = re.sub(deps_revision, git_revision, deps_contents)
  if new_data:
    # For v8_bleeding_edge revisions change V8 branch in order
    # to fetch bleeding edge revision.
    if depot == 'v8_bleeding_edge':
      new_data = _UpdateV8Branch(new_data)
      if not new_data:
        return None
  return new_data
def UpdateDeps(self, revision, depot, deps_file):
"""Updates DEPS file with new revision of dependency repository.
This method search DEPS for a particular pattern in which depot revision
is specified (e.g "webkit_revision": "123456"). If a match is found then
it resolves the given git hash to SVN revision and replace it in DEPS file.
Args:
revision: A git hash revision of the dependency repository.
depot: Current depot being bisected.
deps_file: Path to DEPS file.
Returns:
True if DEPS file is modified successfully, otherwise False.
"""
if not os.path.exists(deps_file):
return False
deps_var = bisect_utils.DEPOT_DEPS_NAME[depot]['deps_var']
# Don't update DEPS file if deps_var is not set in DEPOT_DEPS_NAME.
if not deps_var:
logging.warn('DEPS update not supported for Depot: %s', depot)
return False
# Hack for Angle repository. In the DEPS file, "vars" dictionary variable
# contains "angle_revision" key that holds git hash instead of SVN revision.
# And sometime "angle_revision" key is not specified in "vars" variable.
# In such cases check, "deps" dictionary variable that matches
# angle.git@[a-fA-F0-9]{40}$ and replace git hash.
if depot == 'angle':
return _UpdateDEPSForAngle(revision, depot, deps_file)
try:
deps_contents = ReadStringFromFile(deps_file)
updated_deps_content = self.UpdateDepsContents(
deps_contents, depot, revision, deps_var)
# Write changes to DEPS file
if updated_deps_content:
WriteStringToFile(updated_deps_content, deps_file)
return True
except IOError, e:
logging.warn('Something went wrong while updating DEPS file. [%s]', e)
return False
def _CreateDEPSPatch(self, depot, revision):
  """Checks out the DEPS file at the current revision and modifies it.

  Args:
    depot: Current depot being bisected.
    revision: A git hash revision of the dependency repository.

  Raises:
    RuntimeError: The DEPS file is missing, the current Chromium revision
        could not be determined, or the DEPS checkout/update failed.
  """
  deps_file_path = os.path.join(self.src_cwd, bisect_utils.FILE_DEPS)
  if not os.path.exists(deps_file_path):
    raise RuntimeError('DEPS file does not exists.[%s]' % deps_file_path)
  # Resolve the git hash of the currently checked-out Chromium revision.
  chromium_sha = bisect_utils.CheckRunGit(['rev-parse', 'HEAD']).strip()
  if not chromium_sha:
    raise RuntimeError('Failed to determine Chromium revision for %s' %
                       revision)
  depot_parents = bisect_utils.DEPOT_DEPS_NAME[depot]['from']
  if 'chromium' in depot_parents or 'v8' in depot_parents:
    # Check out the DEPS file belonging to the current Chromium revision,
    # then rewrite the dependency's revision inside it.
    if not source_control.CheckoutFileAtRevision(
        bisect_utils.FILE_DEPS, chromium_sha, cwd=self.src_cwd):
      raise RuntimeError(
          'DEPS checkout Failed for chromium revision : [%s]' % chromium_sha)
    if not self.UpdateDeps(revision, depot, deps_file_path):
      raise RuntimeError(
          'Failed to update DEPS file for chromium: [%s]' % chromium_sha)
def _CreatePatch(self, revision):
  """Creates a patch from currently modified files.

  Args:
    revision: A git hash revision of the dependency repository; only used
        in the error message if the Chromium revision cannot be determined.

  Returns:
    A tuple with git hash of chromium revision and DEPS patch text.
  """
  # Resolve the git hash of the currently checked-out Chromium revision.
  chromium_sha = bisect_utils.CheckRunGit(['rev-parse', 'HEAD']).strip()
  if not chromium_sha:
    raise RuntimeError('Failed to determine Chromium revision for %s' %
                       revision)
  # Diff against HEAD with path prefixes stripped so the patch applies
  # without -p mangling; backslashes normalized for Windows paths.
  diff_text = bisect_utils.CheckRunGit([
      'diff',
      '--src-prefix=',
      '--dst-prefix=',
      '--no-ext-diff',
      'HEAD',
  ])
  return (chromium_sha, ChangeBackslashToSlashInPatch(diff_text))
def ObtainBuild(
self, depot, revision=None, create_patch=False):
"""Obtains a build by either downloading or building directly.
Args:
depot: Dependency repository name.
revision: A git commit hash. If None is given, the currently checked-out
revision is built.
create_patch: Create a patch with any locally modified files.
Returns:
True for success.
"""
if self.opts.debug_ignore_build:
return True
build_success = False
cwd = os.getcwd()
os.chdir(self.src_cwd)
# Fetch build archive for the given revision from the cloud storage when
# the storage bucket is passed.
if self.IsDownloadable(depot) and revision:
build_success = self._DownloadAndUnzipBuild(
revision, depot, build_type='Release', create_patch=create_patch)
else:
# Print the current environment set on the machine.
print 'Full Environment:'
for key, value in sorted(os.environ.items()):
print '%s: %s' % (key, value)
# Print the environment before proceeding with compile.
sys.stdout.flush()
build_success = self.builder.Build(depot, self.opts)
os.chdir(cwd)
return build_success
def RunGClientHooks(self):
  """Runs gclient with runhooks command.

  Returns:
    True if gclient reports no errors.
  """
  if self.opts.debug_ignore_build:
    return True
  # Some "runhooks" calls create symlinks that other (older?) versions do
  # not handle correctly, causing the build to fail. Rather than wiping the
  # entire out/ directory (which would slow down nearby-revision builds),
  # remove just the symlinks and let the next "runhooks" recreate them.
  # Failures are ignored (e.g. Windows has no "find").
  symlink_cleanup = [
      'find', 'out/', '-type', 'l', '-exec', 'rm', '-f', '{}', ';']
  try:
    bisect_utils.RunProcess(symlink_cleanup, cwd=self.src_cwd, shell=False)
  except OSError:
    pass
  return not bisect_utils.RunGClient(['runhooks'], cwd=self.src_cwd)
def _IsBisectModeUsingMetric(self):
  """Returns True when bisecting on the mean or the standard deviation."""
  return self.opts.bisect_mode in (bisect_utils.BISECT_MODE_MEAN,
                                   bisect_utils.BISECT_MODE_STD_DEV)
def _IsBisectModeReturnCode(self):
  """Returns True when bisecting on the test command's return code."""
  return self.opts.bisect_mode == bisect_utils.BISECT_MODE_RETURN_CODE
def _IsBisectModeStandardDeviation(self):
  """Returns True when bisecting on the standard deviation."""
  return self.opts.bisect_mode == bisect_utils.BISECT_MODE_STD_DEV
def RunPerformanceTestAndParseResults(
    self, command_to_run, metric, reset_on_first_run=False,
    upload_on_last_run=False, results_label=None, test_run_multiplier=1,
    allow_flakes=True):
  """Runs a performance test on the current revision and parses the results.

  Args:
    command_to_run: The command to be run to execute the performance test.
    metric: The metric to parse out from the results of the performance test.
        This is the result chart name and trace name, separated by slash.
        May be None for perf try jobs.
    reset_on_first_run: If True, pass the flag --reset-results on first run.
    upload_on_last_run: If True, pass the flag --upload-results on last run.
    results_label: A value for the option flag --results-label.
        The arguments reset_on_first_run, upload_on_last_run and results_label
        are all ignored if the test is not a Telemetry test.
    test_run_multiplier: Factor by which to multiply the number of test runs
        and the timeout period specified in self.opts.
    allow_flakes: Report success even if some tests fail to run.

  Returns:
    (values dict, 0) if --debug_ignore_perf_test was passed.
    (values dict, 0, test output) if the test was run successfully.
    (error message, -1) if the test couldn't be run.
    (error message, -1, test output) if the test ran but there was an error.
  """
  success_code, failure_code = 0, -1
  if self.opts.debug_ignore_perf_test:
    # Fabricate results so the rest of the bisect can be exercised without
    # actually running any test.
    fake_results = {
        'mean': 0.0,
        'std_err': 0.0,
        'std_dev': 0.0,
        'values': [0.0]
    }
    # When debug_fake_test_mean is set, its value is returned as the mean
    # and the flag is cleared so that further calls behave as if it wasn't
    # set (returning the fake_results dict as defined above).
    if self.opts.debug_fake_first_test_mean:
      fake_results['mean'] = float(self.opts.debug_fake_first_test_mean)
      self.opts.debug_fake_first_test_mean = 0
    return (fake_results, success_code)
  # For Windows platform set posix=False, to parse windows paths correctly.
  # On Windows, path separators '\' or '\\' are replace by '' when posix=True,
  # refer to http://bugs.python.org/issue1724822. By default posix=True.
  args = shlex.split(command_to_run, posix=not bisect_utils.IsWindowsHost())
  if not _GenerateProfileIfNecessary(args):
    err_text = 'Failed to generate profile for performance test.'
    return (err_text, failure_code)
  is_telemetry = bisect_utils.IsTelemetryCommand(command_to_run)
  start_time = time.time()
  metric_values = []
  output_of_all_runs = ''
  repeat_count = self.opts.repeat_test_count * test_run_multiplier
  return_codes = []
  for i in xrange(repeat_count):
    # Can ignore the return code since if the tests fail, it won't return 0.
    current_args = copy.copy(args)
    if is_telemetry:
      if i == 0 and reset_on_first_run:
        current_args.append('--reset-results')
      # NOTE(review): this compares against repeat_test_count - 1, not
      # repeat_count - 1, so with a multiplier > 1 the upload happens on an
      # earlier iteration — presumably intentional; confirm.
      if i == self.opts.repeat_test_count - 1 and upload_on_last_run:
        current_args.append('--upload-results')
      if results_label:
        current_args.append('--results-label=%s' % results_label)
    try:
      output, return_code = bisect_utils.RunProcessAndRetrieveOutput(
          current_args, cwd=self.src_cwd)
      return_codes.append(return_code)
    except OSError, e:
      if e.errno == errno.ENOENT:
        err_text = ('Something went wrong running the performance test. '
                    'Please review the command line:\n\n')
        if 'src/' in ' '.join(args):
          err_text += ('Check that you haven\'t accidentally specified a '
                       'path with src/ in the command.\n\n')
        err_text += ' '.join(args)
        err_text += '\n'
        return (err_text, failure_code)
      raise
    output_of_all_runs += output
    if self.opts.output_buildbot_annotations:
      print output
    if metric and self._IsBisectModeUsingMetric():
      parsed_metric = _ParseMetricValuesFromOutput(metric, output)
      if parsed_metric:
        metric_values += parsed_metric
      # If we're bisecting on a metric (ie, changes in the mean or
      # standard deviation) and no metric values are produced, bail out.
      if not metric_values:
        break
    elif self._IsBisectModeReturnCode():
      metric_values.append(return_code)
      # If there's a failed test, we can bail out early.
      if return_code:
        break
    # Stop repeating once the (multiplied) time budget is exhausted.
    elapsed_minutes = (time.time() - start_time) / 60.0
    time_limit = self.opts.max_time_minutes * test_run_multiplier
    if elapsed_minutes >= time_limit:
      break
  if metric and len(metric_values) == 0:
    err_text = 'Metric %s was not found in the test output.' % metric
    # TODO(qyearsley): Consider also getting and displaying a list of metrics
    # that were found in the output here.
    return (err_text, failure_code, output_of_all_runs)
  # If we're bisecting on return codes, we're really just looking for zero vs
  # non-zero.
  values = {}
  if self._IsBisectModeReturnCode():
    # If any of the return codes is non-zero, output 1.
    overall_return_code = 0 if (
        all(current_value == 0 for current_value in metric_values)) else 1
    values = {
        'mean': overall_return_code,
        'std_err': 0.0,
        'std_dev': 0.0,
        'values': metric_values,
    }
    print 'Results of performance test: Command returned with %d' % (
        overall_return_code)
    print
  elif metric:
    # Need to get the average value if there were multiple values.
    truncated_mean = math_utils.TruncatedMean(
        metric_values, self.opts.truncate_percent)
    standard_err = math_utils.StandardError(metric_values)
    standard_dev = math_utils.StandardDeviation(metric_values)
    if self._IsBisectModeStandardDeviation():
      metric_values = [standard_dev]
    values = {
        'mean': truncated_mean,
        'std_err': standard_err,
        'std_dev': standard_dev,
        'values': metric_values,
    }
    print 'Results of performance test: %12f %12f' % (
        truncated_mean, standard_err)
    print
  overall_success = success_code
  # When flakes are not allowed, any non-zero return code across the runs
  # flips the overall result to failure (except in return-code mode, where
  # the return codes ARE the metric).
  if not allow_flakes and not self._IsBisectModeReturnCode():
    overall_success = (
        success_code
        if (all(current_value == 0 for current_value in return_codes))
        else failure_code)
  return (values, overall_success, output_of_all_runs)
def PerformPreBuildCleanup(self):
"""Performs cleanup between runs."""
print 'Cleaning up between runs.'
print
# Leaving these .pyc files around between runs may disrupt some perf tests.
for (path, _, files) in os.walk(self.src_cwd):
for cur_file in files:
if cur_file.endswith('.pyc'):
path_to_file = os.path.join(path, cur_file)
os.remove(path_to_file)
def _RunPostSync(self, _depot):
  """Performs any work after syncing.

  Args:
    _depot: Depot name (unused).

  Returns:
    True if successful.
  """
  if 'android' in self.opts.target_platform:
    # Android targets need their build environment configured first.
    env_ready = builder.SetupAndroidBuildEnvironment(
        self.opts, path_to_src=self.src_cwd)
    if not env_ready:
      return False
  return self.RunGClientHooks()
@staticmethod
def ShouldSkipRevision(depot, revision):
  """Checks whether a particular revision can be safely skipped.

  Some commits can be safely skipped (such as a DEPS roll for the repos
  still using .DEPS.git), since the tool is git based those changes
  would have no effect.

  Args:
    depot: The depot being bisected.
    revision: Current revision we're synced to.

  Returns:
    True if we should skip building/testing this revision.
  """
  # Only DEPS-roll-only commits on android-chrome are skippable.
  if depot != 'android-chrome':
    return False
  changed_files = bisect_utils.CheckRunGit(
      ['diff-tree', '--no-commit-id', '--name-only', '-r',
       revision]).splitlines()
  return changed_files == ['DEPS']
def RunTest(self, revision, depot, command, metric, skippable=False,
            skip_sync=False, create_patch=False, force_build=False,
            test_run_multiplier=1):
  """Performs a full sync/build/run of the specified revision.

  Args:
    revision: The revision to sync to.
    depot: The depot that's being used at the moment (src, webkit, etc.)
    command: The command to execute the performance test.
    metric: The performance metric being tested.
    skippable: If True, the revision may be skipped when ShouldSkipRevision
        says it is safe to do so.
    skip_sync: Skip the sync step.
    create_patch: Create a patch with any locally modified files.
    force_build: Force a local build.
    test_run_multiplier: Factor by which to multiply the given number of runs
        and the set timeout period.

  Returns:
    On success, a tuple containing the results of the performance test.
    Otherwise, a tuple with the error message.
  """
  logging.info('Running RunTest with rev "%s", command "%s"',
               revision, command)
  # Decide which sync program to use.
  sync_client = None
  if depot == 'chromium' or depot == 'android-chrome':
    sync_client = 'gclient'
  # Do the syncing for all depots.
  if not (self.opts.debug_ignore_sync or skip_sync):
    if not self._SyncRevision(depot, revision, sync_client):
      return ('Failed to sync: [%s]' % str(revision), BUILD_RESULT_FAIL)
  # Try to do any post-sync steps. This may include "gclient runhooks".
  if not self._RunPostSync(depot):
    return ('Failed to run [gclient runhooks].', BUILD_RESULT_FAIL)
  # Skip this revision if it can be skipped.
  if skippable and self.ShouldSkipRevision(depot, revision):
    return ('Skipped revision: [%s]' % str(revision),
            BUILD_RESULT_SKIPPED)
  # Obtain a build for this revision. This may be done by requesting a build
  # from another builder, waiting for it and downloading it.
  start_build_time = time.time()
  # force_build means "build whatever is checked out locally" — passing
  # revision=None to ObtainBuild triggers the local-build path.
  revision_to_build = revision if not force_build else None
  build_success = self.ObtainBuild(
      depot, revision=revision_to_build, create_patch=create_patch)
  if not build_success:
    return ('Failed to build revision: [%s]' % str(revision),
            BUILD_RESULT_FAIL)
  after_build_time = time.time()
  # Run the command and get the results.
  results = self.RunPerformanceTestAndParseResults(
      command, metric, test_run_multiplier=test_run_multiplier)
  # Restore build output directory once the tests are done, to avoid
  # any discrepancies.
  if self.IsDownloadable(depot) and revision:
    self.BackupOrRestoreOutputDirectory(restore=True)
  # A value other than 0 indicates that the test couldn't be run, and results
  # should also include an error message.
  if results[1] != 0:
    return results
  external_revisions = self._Get3rdPartyRevisions(depot)
  if not external_revisions is None:
    # Success tuple: (values, status, external revisions, test duration,
    # build duration).
    return (results[0], results[1], external_revisions,
            time.time() - after_build_time, after_build_time -
            start_build_time)
  else:
    return ('Failed to parse DEPS file for external revisions.',
            BUILD_RESULT_FAIL)
def _SyncRevision(self, depot, revision, sync_client):
  """Syncs the given depot to a particular revision.

  Args:
    depot: The depot that's being used at the moment (src, webkit, etc.)
    revision: The revision to sync to.
    sync_client: Program used to sync, e.g. "gclient". Can be None.

  Returns:
    True if successful, False otherwise.
  """
  self.depot_registry.ChangeToDepotDir(depot)
  if sync_client:
    self.PerformPreBuildCleanup()
  if sync_client == 'gclient' and revision:
    # gclient needs the depot spelled out so all dependencies sync
    # properly as well, i.e. "gclient sync src@<SHA1>".
    revision = '%s@%s' % (
        bisect_utils.DEPOT_DEPS_NAME[depot]['src'], revision)
    if depot == 'chromium' and self.opts.target_platform == 'android-chrome':
      return self._SyncRevisionsForAndroidChrome(revision)
  return source_control.SyncToRevision(revision, sync_client)
def _SyncRevisionsForAndroidChrome(self, revision):
  """Syncs android-chrome and chromium repos to particular revisions.

  This is a special case for android-chrome, as the gclient sync for
  chromium overwrites the android-chrome revision to tip-of-tree.
  Therefore both repos are pinned to known revisions.

  Args:
    revision: Git hash of the Chromium revision to sync.

  Returns:
    True if successful, False otherwise.
  """
  # Pin android-chrome to whatever it is currently synced to.
  current_android_rev = source_control.GetCurrentRevision(
      self.depot_registry.GetDepotDir('android-chrome'))
  pinned_revisions = [
      revision,
      '%s@%s' % (bisect_utils.DEPOT_DEPS_NAME['android-chrome']['src'],
                 current_android_rev),
  ]
  return not bisect_utils.RunGClientAndSync(pinned_revisions)
def _CheckIfRunPassed(self, current_value, known_good_value, known_bad_value):
  """Given known good and bad values, decides whether current_value passed.

  Args:
    current_value: The value of the metric being checked.
    known_good_value: The reference value for a "passed" run.
    known_bad_value: The reference value for a "failed" run.

  Returns:
    True if the current_value is closer to the known_good_value than the
    known_bad_value.
  """
  # Compare on std_dev when bisecting on standard deviation, mean otherwise.
  if self.opts.bisect_mode == bisect_utils.BISECT_MODE_STD_DEV:
    key = 'std_dev'
  else:
    key = 'mean'
  dist_to_good_value = abs(current_value[key] - known_good_value[key])
  dist_to_bad_value = abs(current_value[key] - known_bad_value[key])
  return dist_to_good_value < dist_to_bad_value
def _GetV8BleedingEdgeFromV8TrunkIfMappable(
    self, revision, bleeding_edge_branch):
  """Gets v8 bleeding edge revision mapped to v8 revision in trunk.

  Args:
    revision: A trunk V8 revision mapped to bleeding edge revision.
    bleeding_edge_branch: Branch used to perform lookup of bleeding edge
        revision.

  Returns:
    A mapped bleeding edge revision if found, otherwise None.
  """
  commit_position = source_control.GetCommitPosition(revision)
  if bisect_utils.IsStringInt(commit_position):
    # V8 is tricky to bisect, in that there are only a few instances when
    # we can dive into bleeding_edge and get back a meaningful result.
    # Try to detect a V8 "business as usual" case, which is when:
    # 1. trunk revision N has description "Version X.Y.Z"
    # 2. bleeding_edge revision (N-1) has description "Prepare push to
    #    trunk. Now working on X.Y.(Z+1)."
    #
    # As of 01/24/2014, V8 trunk descriptions are formatted:
    # "Version 3.X.Y (based on bleeding_edge revision rZ)"
    # So we can just try parsing that out first and fall back to the old way.
    v8_dir = self.depot_registry.GetDepotDir('v8')
    v8_bleeding_edge_dir = self.depot_registry.GetDepotDir('v8_bleeding_edge')
    revision_info = source_control.QueryRevisionInfo(revision, cwd=v8_dir)
    version_re = re.compile("Version (?P<values>[0-9,.]+)")
    regex_results = version_re.search(revision_info['subject'])
    if regex_results:
      git_revision = None
      if 'based on bleeding_edge' in revision_info['subject']:
        # Old-style subject with an SVN bleeding_edge revision number:
        # extract rZ and look up the commit whose message references the
        # corresponding SVN URL on the bleeding edge branch.
        try:
          bleeding_edge_revision = revision_info['subject'].split(
              'bleeding_edge revision r')[1]
          bleeding_edge_revision = int(bleeding_edge_revision.split(')')[0])
          bleeding_edge_url = ('https://v8.googlecode.com/svn/branches/'
                               'bleeding_edge@%s' % bleeding_edge_revision)
          cmd = ['log',
                 '--format=%H',
                 '--grep',
                 bleeding_edge_url,
                 '-1',
                 bleeding_edge_branch]
          output = bisect_utils.CheckRunGit(cmd, cwd=v8_dir)
          if output:
            git_revision = output.strip()
          return git_revision
        except (IndexError, ValueError):
          # Subject didn't parse; fall through to the "Prepare push" search.
          pass
      else:
        # V8 rolls description changed after V8 git migration, new description
        # includes "Version 3.X.Y (based on <git hash>)"
        try:
          rxp = re.compile('based on (?P<git_revision>[a-fA-F0-9]+)')
          re_results = rxp.search(revision_info['subject'])
          if re_results:
            return re_results.group('git_revision')
        except (IndexError, ValueError):
          pass
      if not git_revision:
        # Wasn't successful, try the old way of looking for "Prepare push to"
        git_revision = source_control.ResolveToRevision(
            int(commit_position) - 1, 'v8_bleeding_edge',
            bisect_utils.DEPOT_DEPS_NAME, -1, cwd=v8_bleeding_edge_dir)
        if git_revision:
          revision_info = source_control.QueryRevisionInfo(
              git_revision, cwd=v8_bleeding_edge_dir)
          if 'Prepare push to trunk' in revision_info['subject']:
            return git_revision
  return None
def _GetNearestV8BleedingEdgeFromTrunk(
    self, revision, v8_branch, bleeding_edge_branch, search_forward=True):
  """Gets the nearest V8 roll and maps it to a bleeding edge revision.

  V8 is a bit tricky to bisect since it isn't just rolled out like blink.
  Each revision on trunk might just be whatever was in bleeding edge,
  rolled directly out. Or it could be some mixture of previous v8 trunk
  versions, with bits and pieces cherry picked out from bleeding edge. In
  order to bisect, we need both the before/after versions on trunk v8 to
  be just pushes from bleeding edge. With the V8 git migration, the
  branches got switched:
    a) master (external/v8) == candidates (v8/v8)
    b) bleeding_edge (external/v8) == master (v8/v8)

  Args:
    revision: A V8 revision to find the nearest bleeding edge revision for.
    v8_branch: Trunk branch to scan for candidate commits.
    bleeding_edge_branch: Branch used for the bleeding edge lookup.
    search_forward: Searches forward if True, otherwise searches backward.

  Returns:
    A mapped bleeding edge revision if found, otherwise None.
  """
  cwd = self.depot_registry.GetDepotDir('v8')
  commit_time = int(bisect_utils.CheckRunGit(
      ['log', '--format=%ct', '-1', revision], cwd=cwd))
  # Collect up to 10 trunk commits adjacent to the given one, in the
  # requested direction, and try to map each until one succeeds.
  if search_forward:
    log_cmd = ['log',
               '--format=%H',
               '--after=%d' % commit_time,
               v8_branch,
               '--reverse']
    candidates = bisect_utils.CheckRunGit(log_cmd, cwd=cwd).split()[:10]
  else:
    log_cmd = ['log',
               '--format=%H',
               '-10',
               '--before=%d' % commit_time,
               v8_branch]
    candidates = bisect_utils.CheckRunGit(log_cmd, cwd=cwd).split()
  for candidate in candidates:
    mapped = self._GetV8BleedingEdgeFromV8TrunkIfMappable(
        candidate, bleeding_edge_branch)
    if mapped:
      return mapped
  return None
def _FillInV8BleedingEdgeInfo(self, min_revision_state, max_revision_state):
  """Maps the bisect range's trunk V8 revisions to bleeding_edge revisions.

  The mapped revisions are stored in each state's external dict under the
  'v8_bleeding_edge' key. A warning is recorded when the range endpoints
  did not map directly to bleeding_edge.
  """
  cwd = self.depot_registry.GetDepotDir('v8')
  # Branch names differ between the two V8 remotes:
  # https://chromium.googlesource.com/v8/v8.git uses origin/candidates,
  # https://chromium.googlesource.com/external/v8.git uses origin/master.
  v8_repo_url = bisect_utils.CheckRunGit(
      ['config', '--get', 'remote.origin.url'], cwd=cwd)
  if 'external/v8.git' in v8_repo_url:
    v8_branch = 'origin/master'
    bleeding_edge_branch = 'origin/bleeding_edge'
  else:
    v8_branch = 'origin/candidates'
    bleeding_edge_branch = 'origin/master'
  min_mapped = self._GetNearestV8BleedingEdgeFromTrunk(
      min_revision_state.revision,
      v8_branch,
      bleeding_edge_branch,
      search_forward=True)
  max_mapped = self._GetNearestV8BleedingEdgeFromTrunk(
      max_revision_state.revision,
      v8_branch,
      bleeding_edge_branch,
      search_forward=False)
  min_revision_state.external['v8_bleeding_edge'] = min_mapped
  max_revision_state.external['v8_bleeding_edge'] = max_mapped
  endpoints_map_directly = (
      self._GetV8BleedingEdgeFromV8TrunkIfMappable(
          min_revision_state.revision, bleeding_edge_branch) and
      self._GetV8BleedingEdgeFromV8TrunkIfMappable(
          max_revision_state.revision, bleeding_edge_branch))
  if not endpoints_map_directly:
    self.warnings.append(
        'Trunk revisions in V8 did not map directly to bleeding_edge. '
        'Attempted to expand the range to find V8 rolls which did map '
        'directly to bleeding_edge revisions, but results might not be '
        'valid.')
def _FindNextDepotToBisect(
    self, current_depot, min_revision_state, max_revision_state):
  """Decides which depot the script should dive into next (if any).

  Args:
    current_depot: Current depot being bisected.
    min_revision_state: State of the earliest revision in the bisect range.
    max_revision_state: State of the latest revision in the bisect range.

  Returns:
    Name of the depot to bisect next, or None.
  """
  for candidate_depot in bisect_utils.DEPOT_NAMES:
    depot_info = bisect_utils.DEPOT_DEPS_NAME[candidate_depot]
    # Skip depots restricted to a different host platform.
    if 'platform' in depot_info and depot_info['platform'] != os.name:
      continue
    # Only recurse into depots that are rolled from the current one.
    if not (depot_info['recurse']
            and min_revision_state.depot in depot_info['from']):
      continue
    if current_depot == 'v8':
      # We grab the bleeding_edge info here rather than earlier because we
      # finally have the revision range. From that we can search forwards
      # and backwards to try to match trunk revisions to bleeding_edge.
      self._FillInV8BleedingEdgeInfo(min_revision_state, max_revision_state)
    min_external = min_revision_state.external.get(candidate_depot)
    max_external = max_revision_state.external.get(candidate_depot)
    # The dependency only changed within the range if its revisions at the
    # two endpoints differ (and both are known).
    if min_external == max_external:
      continue
    if min_external and max_external:
      return candidate_depot
  return None
  def PrepareToBisectOnDepot(
      self, current_depot, start_revision, end_revision, previous_revision):
    """Changes to the appropriate directory and gathers a list of revisions
    to bisect between |start_revision| and |end_revision|.
    Args:
      current_depot: The depot we want to bisect.
      start_revision: Start of the revision range.
      end_revision: End of the revision range.
      previous_revision: The last revision we synced to on |previous_depot|.
    Returns:
      A list containing the revisions between |start_revision| and
      |end_revision| inclusive, or [] if gclient setup/sync failed.
    """
    # Change into working directory of external library to run
    # subsequent commands.
    self.depot_registry.ChangeToDepotDir(current_depot)
    # V8 (and possibly others) is merged in periodically. Bisecting
    # this directory directly won't give much good info.
    if 'custom_deps' in bisect_utils.DEPOT_DEPS_NAME[current_depot]:
      config_path = os.path.join(self.src_cwd, '..')
      if bisect_utils.RunGClientAndCreateConfig(
          self.opts, bisect_utils.DEPOT_DEPS_NAME[current_depot]['custom_deps'],
          cwd=config_path):
        return []
      if bisect_utils.RunGClient(
          ['sync', '--revision', previous_revision], cwd=self.src_cwd):
        return []
    if current_depot == 'v8_bleeding_edge':
      self.depot_registry.ChangeToDepotDir('chromium')
      # Swap the checkouts so bleeding_edge sits where the build expects 'v8';
      # cleanup commands registered below undo the swap afterwards.
      shutil.move('v8', 'v8.bak')
      shutil.move('v8_bleeding_edge', 'v8')
      self.cleanup_commands.append(['mv', 'v8', 'v8_bleeding_edge'])
      self.cleanup_commands.append(['mv', 'v8.bak', 'v8'])
      self.depot_registry.SetDepotDir(
          'v8_bleeding_edge', os.path.join(self.src_cwd, 'v8'))
      self.depot_registry.SetDepotDir(
          'v8', os.path.join(self.src_cwd, 'v8.bak'))
      self.depot_registry.ChangeToDepotDir(current_depot)
    # Note the argument order: end_revision is passed before start_revision,
    # matching GetRevisionList(depot, latest, earliest) usage elsewhere.
    depot_revision_list = self.GetRevisionList(current_depot,
                                               end_revision,
                                               start_revision)
    self.depot_registry.ChangeToDepotDir('chromium')
    return depot_revision_list
  def GatherReferenceValues(self, good_rev, bad_rev, cmd, metric, target_depot):
    """Gathers reference values by running the performance tests on the
    known good and bad revisions.
    Args:
      good_rev: The last known good revision where the performance regression
        has not occurred yet.
      bad_rev: A revision where the performance regression has already occurred.
      cmd: The command to execute the performance test.
      metric: The metric being tested for regression.
      target_depot: The depot the two revisions belong to.
    Returns:
      A tuple (bad_run_results, good_run_results) with the results of building
      and running each revision; good_run_results is None if the bad revision
      failed to run.
    """
    bad_run_results = self.RunTest(bad_rev, target_depot, cmd, metric)
    good_run_results = None
    # run_results[1] is the error/status slot; only bother testing the good
    # revision when the bad revision built and ran successfully.
    if not bad_run_results[1]:
      good_run_results = self.RunTest(good_rev, target_depot, cmd, metric)
    return (bad_run_results, good_run_results)
  def PrintRevisionsToBisectMessage(self, revision_list, depot):
    """Prints the revisions that will be bisected, wrapped in buildbot
    annotations when enabled.
    Args:
      revision_list: Revisions to print, ordered latest-first (the step name
        shows revision_list[-1] .. revision_list[0]).
      depot: Name of the depot the revisions belong to.
    """
    if self.opts.output_buildbot_annotations:
      step_name = 'Bisection Range: [%s:%s - %s]' % (depot, revision_list[-1],
                                                     revision_list[0])
      bisect_utils.OutputAnnotationStepStart(step_name)
    print
    print 'Revisions to bisect on [%s]:' % depot
    for revision_id in revision_list:
      print '  -> %s' % (revision_id, )
    print
    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepClosed()
  def NudgeRevisionsIfDEPSChange(self, bad_revision, good_revision,
                                 good_svn_revision=None):
    """Checks to see if changes to DEPS file occurred, and that the revision
    range also includes the change to .DEPS.git. If it doesn't, attempts to
    expand the revision range to include it.
    Args:
      bad_revision: First known bad git revision.
      good_revision: Last known good git revision.
      good_svn_revision: Last known good svn revision.
    Returns:
      A tuple with the new bad and good revisions.
    """
    # Do NOT perform the nudge at/after r291563: .DEPS.git was removed there
    # and the source contains only a DEPS file for dependency changes.
    if good_svn_revision >= 291563:
      return (bad_revision, good_revision)
    if self.opts.target_platform == 'chromium':
      changes_to_deps = source_control.QueryFileRevisionHistory(
          bisect_utils.FILE_DEPS, good_revision, bad_revision)
      if changes_to_deps:
        # DEPS file was changed, search from the oldest change to DEPS file to
        # bad_revision to see if there are matching .DEPS.git changes.
        oldest_deps_change = changes_to_deps[-1]
        changes_to_gitdeps = source_control.QueryFileRevisionHistory(
            bisect_utils.FILE_DEPS_GIT, oldest_deps_change, bad_revision)
        # Mismatched counts mean some DEPS change has no paired .DEPS.git
        # change inside the range — try to widen the range to find it.
        if len(changes_to_deps) != len(changes_to_gitdeps):
          # Grab the timestamp of the last DEPS change
          cmd = ['log', '--format=%ct', '-1', changes_to_deps[0]]
          output = bisect_utils.CheckRunGit(cmd)
          commit_time = int(output)
          # Try looking for a commit that touches the .DEPS.git file in the
          # next 15 minutes after the DEPS file change.
          cmd = [
              'log', '--format=%H', '-1',
              '--before=%d' % (commit_time + 900),
              '--after=%d' % commit_time,
              'origin/master', '--', bisect_utils.FILE_DEPS_GIT
          ]
          output = bisect_utils.CheckRunGit(cmd)
          output = output.strip()
          if output:
            self.warnings.append(
                'Detected change to DEPS and modified '
                'revision range to include change to .DEPS.git')
            return (output, good_revision)
          else:
            self.warnings.append(
                'Detected change to DEPS but couldn\'t find '
                'matching change to .DEPS.git')
    return (bad_revision, good_revision)
def CheckIfRevisionsInProperOrder(
self, target_depot, good_revision, bad_revision):
"""Checks that |good_revision| is an earlier revision than |bad_revision|.
Args:
good_revision: Number/tag of the known good revision.
bad_revision: Number/tag of the known bad revision.
Returns:
True if the revisions are in the proper order (good earlier than bad).
"""
cwd = self.depot_registry.GetDepotDir(target_depot)
good_position = source_control.GetCommitPosition(good_revision, cwd)
bad_position = source_control.GetCommitPosition(bad_revision, cwd)
# Compare commit timestamp for repos that don't support commit position.
if not (bad_position and good_position):
logging.info('Could not get commit positions for revisions %s and %s in '
'depot %s', good_position, bad_position, target_depot)
good_position = source_control.GetCommitTime(good_revision, cwd=cwd)
bad_position = source_control.GetCommitTime(bad_revision, cwd=cwd)
return good_position <= bad_position
  def CanPerformBisect(self, good_revision, bad_revision):
    """Checks whether a given revision is bisectable.
    Checks for following:
    1. Non-bisectable revisions for android bots (refer to crbug.com/385324).
    2. Non-bisectable revisions for Windows bots (refer to crbug.com/405274).
    Args:
      good_revision: Known good revision.
      bad_revision: Known bad revision.
    Returns:
      A dictionary indicating the result. If revision is not bisectable,
      this will contain the field "error", otherwise None.
    """
    if self.opts.target_platform == 'android':
      # Android bisects rely on Telemetry apk installation, only available
      # from commit position 265549 onward.
      good_revision = source_control.GetCommitPosition(good_revision)
      if (bisect_utils.IsStringInt(good_revision)
          and good_revision < 265549):
        return {'error': (
            'Bisect cannot continue for the given revision range.\n'
            'It is impossible to bisect Android regressions '
            'prior to r265549, which allows the bisect bot to '
            'rely on Telemetry to do apk installation of the most recently '
            'built local ChromePublic (refer to crbug.com/385324).\n'
            'Please try bisecting revisions greater than or equal to r265549.')}
    if bisect_utils.IsWindowsHost():
      # r289987..r290715 is a known-broken ("dead zone") range on Windows.
      good_revision = source_control.GetCommitPosition(good_revision)
      bad_revision = source_control.GetCommitPosition(bad_revision)
      if (bisect_utils.IsStringInt(good_revision) and
          bisect_utils.IsStringInt(bad_revision)):
        if (289987 <= good_revision < 290716 or
            289987 <= bad_revision < 290716):
          return {'error': ('Oops! Revision between r289987 and r290716 are '
                            'marked as dead zone for Windows due to '
                            'crbug.com/405274. Please try another range.')}
    return None
  def _GatherResultsFromRevertedCulpritCL(
      self, results, target_depot, command_to_run, metric):
    """Gathers performance results with/without culprit CL.
    Attempts to revert the culprit CL against ToT and runs the
    performance tests again with and without the CL, adding the results to
    the over bisect results.
    Args:
      results: BisectResults from the bisect; updated in place via
        AddRetestResults.
      target_depot: The target depot we're bisecting.
      command_to_run: Specify the command to execute the performance test.
      metric: The performance metric to monitor.
    """
    run_results_tot, run_results_reverted = self._RevertCulpritCLAndRetest(
        results, target_depot, command_to_run, metric)
    results.AddRetestResults(run_results_tot, run_results_reverted)
    # Cleanup below only applies to the single-culprit case; with any other
    # number of culprits no revert was attempted.
    if len(results.culprit_revisions) != 1:
      return
    # Cleanup reverted files if anything is left.
    _, _, culprit_depot = results.culprit_revisions[0]
    bisect_utils.CheckRunGit(
        ['reset', '--hard', 'HEAD'],
        cwd=self.depot_registry.GetDepotDir(culprit_depot))
  def _RevertCL(self, culprit_revision, culprit_depot):
    """Reverts the specified revision in the specified depot.
    Args:
      culprit_revision: Revision to revert (applied with --no-commit).
      culprit_depot: Depot containing the revision.
    Returns:
      True if the revert applied cleanly, False otherwise.
    """
    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepStart(
          'Reverting culprit CL: %s' % culprit_revision)
    _, return_code = bisect_utils.RunGit(
        ['revert', '--no-commit', culprit_revision],
        cwd=self.depot_registry.GetDepotDir(culprit_depot))
    if return_code:
      bisect_utils.OutputAnnotationStepWarning()
      bisect_utils.OutputAnnotationStepText('Failed to revert CL cleanly.')
    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepClosed()
    # git returns non-zero on failure, so success is the inverse.
    return not return_code
  def _RevertCulpritCLAndRetest(
      self, results, target_depot, command_to_run, metric):
    """Reverts the culprit CL against ToT and runs the performance test.
    Attempts to revert the culprit CL against ToT and runs the
    performance tests again with and without the CL.
    Args:
      results: BisectResults from the bisect.
      target_depot: The target depot we're bisecting.
      command_to_run: Specify the command to execute the performance test.
      metric: The performance metric to monitor.
    Returns:
      A tuple with the results of running the CL at ToT/reverted, or
      (None, None) if the retest could not be performed.
    """
    # Might want to retest ToT with a revert of the CL to confirm that
    # performance returns.
    if results.confidence < bisect_utils.HIGH_CONFIDENCE:
      return (None, None)
    # If there were multiple culprit CLs, we won't try to revert.
    if len(results.culprit_revisions) != 1:
      return (None, None)
    culprit_revision, _, culprit_depot = results.culprit_revisions[0]
    if not self._SyncRevision(target_depot, None, 'gclient'):
      return (None, None)
    head_revision = bisect_utils.CheckRunGit(['log', '--format=%H', '-1'])
    head_revision = head_revision.strip()
    if not self._RevertCL(culprit_revision, culprit_depot):
      return (None, None)
    # If the culprit CL happened to be in a depot that gets pulled in, we
    # can't revert the change and issue a try job to build, since that would
    # require modifying both the DEPS file and files in another depot.
    # Instead, we build locally.
    force_build = (culprit_depot != target_depot)
    if force_build:
      results.warnings.append(
          'Culprit CL is in another depot, attempting to revert and build'
          ' locally to retest. This may not match the performance of official'
          ' builds.')
    run_results_reverted = self._RunTestWithAnnotations(
        'Re-Testing ToT with reverted culprit',
        'Failed to run reverted CL.',
        head_revision, target_depot, command_to_run, metric, force_build)
    # Clear the reverted file(s).
    bisect_utils.RunGit(
        ['reset', '--hard', 'HEAD'],
        cwd=self.depot_registry.GetDepotDir(culprit_depot))
    # Retesting with the reverted CL failed, so bail out of retesting against
    # ToT.
    if run_results_reverted[1]:
      return (None, None)
    run_results_tot = self._RunTestWithAnnotations(
        'Re-Testing ToT',
        'Failed to run ToT.',
        head_revision, target_depot, command_to_run, metric, force_build)
    return (run_results_tot, run_results_reverted)
  def _RunTestWithAnnotations(
      self, step_text, error_text, head_revision,
      target_depot, command_to_run, metric, force_build):
    """Runs the performance test and outputs start/stop annotations.
    Args:
      step_text: Text for the buildbot annotation step start.
      error_text: Text output as a step warning if the run fails.
      head_revision: Revision to build and test.
      target_depot: The target depot we're bisecting.
      command_to_run: Specify the command to execute the performance test.
      metric: The performance metric to monitor.
      force_build: Whether to force a build locally.
    Returns:
      Results of the test.
    """
    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepStart(step_text)
    # Build and run the test again with the reverted culprit CL against ToT.
    run_test_results = self.RunTest(
        head_revision, target_depot, command_to_run,
        metric, skippable=False, skip_sync=True, create_patch=True,
        force_build=force_build)
    if self.opts.output_buildbot_annotations:
      # run_test_results[1] is the error/status slot.
      if run_test_results[1]:
        bisect_utils.OutputAnnotationStepWarning()
        bisect_utils.OutputAnnotationStepText(error_text)
      bisect_utils.OutputAnnotationStepClosed()
    return run_test_results
  def Run(self, command_to_run, bad_revision_in, good_revision_in, metric):
    """Given known good and bad revisions, run a binary search on all
    intermediate revisions to determine the CL where the performance regression
    occurred.
    Args:
      command_to_run: Specify the command to execute the performance test.
      bad_revision_in: Number/tag of the known bad revision.
      good_revision_in: Number/tag of the known good revision.
      metric: The performance metric to monitor.
    Returns:
      A BisectResults object.
    """
    # Choose depot to bisect first
    target_depot = 'chromium'
    if self.opts.target_platform == 'android-chrome':
      target_depot = 'android-chrome'
    cwd = os.getcwd()
    self.depot_registry.ChangeToDepotDir(target_depot)
    # If they passed SVN revisions, we can try match them to git SHA1 hashes.
    bad_revision = source_control.ResolveToRevision(
        bad_revision_in, target_depot, bisect_utils.DEPOT_DEPS_NAME, 100)
    good_revision = source_control.ResolveToRevision(
        good_revision_in, target_depot, bisect_utils.DEPOT_DEPS_NAME, -100)
    os.chdir(cwd)
    if bad_revision is None:
      return BisectResults(
          error='Couldn\'t resolve [%s] to SHA1.' % bad_revision_in)
    if good_revision is None:
      return BisectResults(
          error='Couldn\'t resolve [%s] to SHA1.' % good_revision_in)
    # Check that they didn't accidentally swap good and bad revisions.
    if not self.CheckIfRevisionsInProperOrder(
        target_depot, good_revision, bad_revision):
      return BisectResults(error='Bad rev (%s) appears to be earlier than good '
                                 'rev (%s).' % (good_revision, bad_revision))
    bad_revision, good_revision = self.NudgeRevisionsIfDEPSChange(
        bad_revision, good_revision, good_revision_in)
    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepStart('Gathering Revisions')
    cannot_bisect = self.CanPerformBisect(good_revision, bad_revision)
    if cannot_bisect:
      return BisectResults(error=cannot_bisect.get('error'))
    print 'Gathering revision range for bisection.'
    # Retrieve a list of revisions to do bisection on.
    revision_list = self.GetRevisionList(target_depot, bad_revision,
                                         good_revision)
    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepClosed()
    if revision_list:
      self.PrintRevisionsToBisectMessage(revision_list, target_depot)
      if self.opts.output_buildbot_annotations:
        bisect_utils.OutputAnnotationStepStart('Gathering Reference Values')
      print 'Gathering reference values for bisection.'
      # Perform the performance tests on the good and bad revisions, to get
      # reference values.
      bad_results, good_results = self.GatherReferenceValues(good_revision,
                                                             bad_revision,
                                                             command_to_run,
                                                             metric,
                                                             target_depot)
      if self.opts.output_buildbot_annotations:
        bisect_utils.OutputAnnotationStepClosed()
      # Result tuples are indexed as: [0]=measured values, [1]=error/status,
      # [2]=external revisions, [3]=perf time, [4]=build time (see RunTest).
      if bad_results[1]:
        error = ('An error occurred while building and running the \'bad\' '
                 'reference value. The bisect cannot continue without '
                 'a working \'bad\' revision to start from.\n\nError: %s' %
                 bad_results[0])
        return BisectResults(error=error)
      if good_results[1]:
        error = ('An error occurred while building and running the \'good\' '
                 'reference value. The bisect cannot continue without '
                 'a working \'good\' revision to start from.\n\nError: %s' %
                 good_results[0])
        return BisectResults(error=error)
      # We need these reference values to determine if later runs should be
      # classified as pass or fail.
      known_bad_value = bad_results[0]
      known_good_value = good_results[0]
      # Abort bisect early when the return codes for known good
      # and known bad revisions are same.
      if (self._IsBisectModeReturnCode() and
          known_bad_value['mean'] == known_good_value['mean']):
        return BisectResults(abort_reason=('known good and known bad revisions '
            'returned same return code (return code=%s). '
            'Continuing bisect might not yield any results.' %
            known_bad_value['mean']))
      # Check the direction of improvement only if the improvement_direction
      # option is set to a specific direction (1 for higher is better or -1 for
      # lower is better).
      improvement_dir = self.opts.improvement_direction
      if improvement_dir:
        higher_is_better = improvement_dir > 0
        if higher_is_better:
          message = "Expecting higher values to be better for this metric, "
        else:
          message = "Expecting lower values to be better for this metric, "
        metric_increased = known_bad_value['mean'] > known_good_value['mean']
        if metric_increased:
          message += "and the metric appears to have increased. "
        else:
          message += "and the metric appears to have decreased. "
        # If the metric moved in the declared "better" direction, the given
        # range describes an improvement, not a regression — refuse to bisect.
        if ((higher_is_better and metric_increased) or
            (not higher_is_better and not metric_increased)):
          error = (message + 'Then, the test results for the ends of the given '
                   '\'good\' - \'bad\' range of revisions represent an '
                   'improvement (and not a regression).')
          return BisectResults(error=error)
        logging.info(message + "Therefore we continue to bisect.")
      bisect_state = BisectState(target_depot, revision_list)
      revision_states = bisect_state.GetRevisionStates()
      min_revision = 0
      max_revision = len(revision_states) - 1
      # Can just mark the good and bad revisions explicitly here since we
      # already know the results.
      bad_revision_state = revision_states[min_revision]
      bad_revision_state.external = bad_results[2]
      bad_revision_state.perf_time = bad_results[3]
      bad_revision_state.build_time = bad_results[4]
      bad_revision_state.passed = False
      bad_revision_state.value = known_bad_value
      good_revision_state = revision_states[max_revision]
      good_revision_state.external = good_results[2]
      good_revision_state.perf_time = good_results[3]
      good_revision_state.build_time = good_results[4]
      good_revision_state.passed = True
      good_revision_state.value = known_good_value
      # Check how likely it is that the good and bad results are different
      # beyond chance-induced variation.
      if not (self.opts.debug_ignore_regression_confidence or
              self._IsBisectModeReturnCode()):
        if not _IsRegressionReproduced(known_good_value, known_bad_value,
                                       self.opts.required_initial_confidence):
          # If there is no significant difference between "good" and "bad"
          # revision results, then the "bad revision" is considered "good".
          # TODO(qyearsley): Remove this if it is not necessary.
          bad_revision_state.passed = True
          self.warnings.append(_RegressionNotReproducedWarningMessage(
              good_revision, bad_revision, known_good_value, known_bad_value))
          return BisectResults(bisect_state, self.depot_registry, self.opts,
                               self.warnings)
      # Main bisection loop: repeatedly test the midpoint and narrow
      # [min_revision, max_revision] until the range collapses.
      while True:
        if not revision_states:
          break
        if max_revision - min_revision <= 1:
          min_revision_state = revision_states[min_revision]
          max_revision_state = revision_states[max_revision]
          current_depot = min_revision_state.depot
          # TODO(sergiyb): Under which conditions can first two branches be hit?
          if min_revision_state.passed == '?':
            next_revision_index = min_revision
          elif max_revision_state.passed == '?':
            next_revision_index = max_revision
          elif current_depot in ['android-chrome', 'chromium', 'v8']:
            previous_revision = revision_states[min_revision].revision
            # If there were changes to any of the external libraries we track,
            # should bisect the changes there as well.
            external_depot = self._FindNextDepotToBisect(
                current_depot, min_revision_state, max_revision_state)
            # If there was no change in any of the external depots, the search
            # is over.
            if not external_depot:
              if current_depot == 'v8':
                self.warnings.append(
                    'Unfortunately, V8 bisection couldn\'t '
                    'continue any further. The script can only bisect into '
                    'V8\'s bleeding_edge repository if both the current and '
                    'previous revisions in trunk map directly to revisions in '
                    'bleeding_edge.')
              break
            earliest_revision = max_revision_state.external[external_depot]
            latest_revision = min_revision_state.external[external_depot]
            new_revision_list = self.PrepareToBisectOnDepot(
                external_depot, earliest_revision, latest_revision,
                previous_revision)
            if not new_revision_list:
              error = ('An error occurred attempting to retrieve revision '
                       'range: [%s..%s]' % (earliest_revision, latest_revision))
              return BisectResults(error=error)
            revision_states = bisect_state.CreateRevisionStatesAfter(
                external_depot, new_revision_list, current_depot,
                previous_revision)
            # Reset the bisection and perform it on the newly inserted states.
            min_revision = 0
            max_revision = len(revision_states) - 1
            print ('Regression in metric %s appears to be the result of '
                   'changes in [%s].' % (metric, external_depot))
            revision_list = [state.revision for state in revision_states]
            self.PrintRevisionsToBisectMessage(revision_list, external_depot)
            continue
          else:
            break
        else:
          next_revision_index = (int((max_revision - min_revision) / 2) +
                                 min_revision)
        next_revision_state = revision_states[next_revision_index]
        next_revision = next_revision_state.revision
        next_depot = next_revision_state.depot
        self.depot_registry.ChangeToDepotDir(next_depot)
        message = 'Working on [%s:%s]' % (next_depot, next_revision)
        print message
        if self.opts.output_buildbot_annotations:
          bisect_utils.OutputAnnotationStepStart(message)
        run_results = self.RunTest(next_revision, next_depot, command_to_run,
                                   metric, skippable=True)
        # If the build is successful, check whether or not the metric
        # had regressed.
        if not run_results[1]:
          if len(run_results) > 2:
            next_revision_state.external = run_results[2]
            next_revision_state.perf_time = run_results[3]
            next_revision_state.build_time = run_results[4]
          passed_regression = self._CheckIfRunPassed(run_results[0],
                                                     known_good_value,
                                                     known_bad_value)
          next_revision_state.passed = passed_regression
          next_revision_state.value = run_results[0]
          # Narrow the search range toward the failing side.
          if passed_regression:
            max_revision = next_revision_index
          else:
            min_revision = next_revision_index
        else:
          if run_results[1] == BUILD_RESULT_SKIPPED:
            next_revision_state.passed = 'Skipped'
          elif run_results[1] == BUILD_RESULT_FAIL:
            next_revision_state.passed = 'Build Failed'
          print run_results[0]
          # If the build is broken, remove it and redo search.
          revision_states.pop(next_revision_index)
          max_revision -= 1
        if self.opts.output_buildbot_annotations:
          self.printer.PrintPartialResults(bisect_state)
          bisect_utils.OutputAnnotationStepClosed()
      self._ConfidenceExtraTestRuns(min_revision_state, max_revision_state,
                                    command_to_run, metric)
      results = BisectResults(bisect_state, self.depot_registry, self.opts,
                              self.warnings)
      self._GatherResultsFromRevertedCulpritCL(
          results, target_depot, command_to_run, metric)
      return results
    else:
      # Weren't able to sync and retrieve the revision range.
      error = ('An error occurred attempting to retrieve revision range: '
               '[%s..%s]' % (good_revision, bad_revision))
      return BisectResults(error=error)
  def _ConfidenceExtraTestRuns(self, good_state, bad_state, command_to_run,
                               metric):
    """Runs extra test iterations on the two border revisions for confidence.
    Only runs when the border revisions disagree (one passed, one failed) and
    neither was 'Skipped' or 'Build Failed'. Successful extra runs have their
    sample values appended to each state's existing result values.
    Args:
      good_state: Revision state of the passing border revision.
      bad_state: Revision state of the failing border revision.
      command_to_run: The command to execute the performance test.
      metric: The performance metric to monitor.
    """
    if (bool(good_state.passed) != bool(bad_state.passed)
        and good_state.passed not in ('Skipped', 'Build Failed')
        and bad_state.passed not in ('Skipped', 'Build Failed')):
      for state in (good_state, bad_state):
        run_results = self.RunTest(
            state.revision,
            state.depot,
            command_to_run,
            metric,
            test_run_multiplier=BORDER_REVISIONS_EXTRA_RUNS)
        # Is extend the right thing to do here?
        if run_results[1] != BUILD_RESULT_FAIL:
          state.value['values'].extend(run_results[0]['values'])
        else:
          warning_text = 'Re-test of revision %s failed with error message: %s'
          warning_text %= (state.revision, run_results[0])
          if warning_text not in self.warnings:
            self.warnings.append(warning_text)
def _IsPlatformSupported():
"""Checks that this platform and build system are supported.
Args:
opts: The options parsed from the command line.
Returns:
True if the platform and build system are supported.
"""
# Haven't tested the script out on any other platforms yet.
supported = ['posix', 'nt']
return os.name in supported
def RemoveBuildFiles(build_type):
  """Removes build files from previous runs.
  Wipes and recreates out/<build_type> and build/<build_type>; any failure is
  re-raised as a RuntimeError.
  """
  out_dir = os.path.join('out', build_type)
  build_dir = os.path.join('build', build_type)
  logging.info('Removing build files in "%s" and "%s".',
               os.path.abspath(out_dir), os.path.abspath(build_dir))
  try:
    for stale_dir in (out_dir, build_dir):
      RemakeDirectoryTree(stale_dir)
  except Exception as e:
    raise RuntimeError('Got error in RemoveBuildFiles: %s' % e)
def RemakeDirectoryTree(path_to_dir):
  """Removes a directory tree and replaces it with an empty one.
  Returns None; failures are reported by the underlying helpers raising
  (the previous claim of a True/False return did not match the code).
  """
  RemoveDirectoryTree(path_to_dir)
  MaybeMakeDirectory(path_to_dir)
def RemoveDirectoryTree(path_to_dir):
  """Removes a directory tree (or a plain file) at |path_to_dir|.
  A missing path is a no-op; other OS errors propagate. Returns None
  (the previous claim of a True/False return did not match the code).
  """
  if os.path.isfile(path_to_dir):
    # Lazy %-args instead of eager string formatting, per logging convention.
    logging.info('REMOVING FILE %s', path_to_dir)
    os.remove(path_to_dir)
  try:
    if os.path.exists(path_to_dir):
      shutil.rmtree(path_to_dir)
  # 'as e' for consistency with the rest of the file (cf. RemoveBuildFiles).
  except OSError as e:
    # ENOENT: path disappeared between the exists() check and rmtree — fine.
    if e.errno != errno.ENOENT:
      raise
# This is copied from build/scripts/common/chromium_utils.py.
def MaybeMakeDirectory(*path):
  """Creates an entire path, if it doesn't already exist."""
  target_dir = os.path.join(*path)
  try:
    os.makedirs(target_dir)
  except OSError as e:
    # The directory already existing is the expected, harmless case.
    if e.errno != errno.EEXIST:
      raise
class BisectOptions(object):
"""Options to be used when running bisection."""
  def __init__(self):
    """Initializes every bisect option to its default value."""
    super(BisectOptions, self).__init__()
    # Platform / revision-range inputs.
    self.target_platform = 'chromium'
    self.build_preference = None
    self.good_revision = None
    self.bad_revision = None
    # Goma build acceleration.
    self.use_goma = None
    self.goma_dir = None
    self.goma_threads = 64
    # Test-execution tuning.
    self.repeat_test_count = 20
    self.truncate_percent = 25
    self.max_time_minutes = 20
    self.metric = None
    self.command = None
    self.output_buildbot_annotations = None
    self.no_custom_deps = False
    self.working_directory = None
    self.extra_src = None
    # Debug switches (skip builds/syncs/tests, fake results).
    self.debug_ignore_build = None
    self.debug_ignore_sync = None
    self.debug_ignore_perf_test = None
    self.debug_ignore_regression_confidence = None
    self.debug_fake_first_test_mean = 0
    # Build configuration.
    self.target_arch = 'ia32'
    self.target_build_type = 'Release'
    self.builder_type = 'perf'
    # Bisect behavior.
    self.bisect_mode = bisect_utils.BISECT_MODE_MEAN
    self.improvement_direction = 0
    self.bug_id = ''
    self.required_initial_confidence = 80.0
  @staticmethod
  def _AddBisectOptionsGroup(parser):
    """Adds the bisect-range, metric and test-tuning arguments to |parser|."""
    group = parser.add_argument_group('Bisect options')
    group.add_argument('-c', '--command', required=True,
                       help='A command to execute your performance test at '
                            'each point in the bisection.')
    group.add_argument('-b', '--bad_revision', required=True,
                       help='A bad revision to start bisection. Must be later '
                            'than good revision. May be either a git or svn '
                            'revision.')
    group.add_argument('-g', '--good_revision', required=True,
                       help='A revision to start bisection where performance '
                            'test is known to pass. Must be earlier than the '
                            'bad revision. May be either a git or a svn '
                            'revision.')
    group.add_argument('-m', '--metric',
                       help='The desired metric to bisect on. For example '
                            '"vm_rss_final_b/vm_rss_f_b"')
    group.add_argument('-d', '--improvement_direction', type=int, default=0,
                       help='An integer number representing the direction of '
                            'improvement. 1 for higher is better, -1 for lower '
                            'is better, 0 for ignore (default).')
    group.add_argument('-r', '--repeat_test_count', type=int, default=20,
                       choices=range(1, 101),
                       help='The number of times to repeat the performance '
                            'test. Values will be clamped to range [1, 100]. '
                            'Default value is 20.')
    group.add_argument('--max_time_minutes', type=int, default=20,
                       choices=range(1, 61),
                       help='The maximum time (in minutes) to take running the '
                            'performance tests. The script will run the '
                            'performance tests according to '
                            '--repeat_test_count, so long as it doesn\'t exceed'
                            ' --max_time_minutes. Values will be clamped to '
                            'range [1, 60]. Default value is 20.')
    group.add_argument('-t', '--truncate_percent', type=int, default=25,
                       help='The highest/lowest percent are discarded to form '
                            'a truncated mean. Values will be clamped to range '
                            '[0, 25]. Default value is 25 percent.')
    group.add_argument('--bisect_mode', default=bisect_utils.BISECT_MODE_MEAN,
                       choices=[bisect_utils.BISECT_MODE_MEAN,
                                bisect_utils.BISECT_MODE_STD_DEV,
                                bisect_utils.BISECT_MODE_RETURN_CODE],
                       help='The bisect mode. Choices are to bisect on the '
                            'difference in mean, std_dev, or return_code.')
    group.add_argument('--bug_id', default='',
                       help='The id for the bug associated with this bisect. ' +
                            'If this number is given, bisect will attempt to ' +
                            'verify that the bug is not closed before '
                            'starting.')
    group.add_argument('--required_initial_confidence', type=float,
                       default=80.0,
                       help='The required confidence score for the initial '
                            'check to see whether there is a significant '
                            'difference between given good and bad revisions.')
@staticmethod
def _AddBuildOptionsGroup(parser):
group = parser.add_argument_group('Build options')
group.add_argument('-w', '--working_directory',
help='Path to the working directory where the script '
'will do an initial checkout of the chromium depot. The '
'files will be placed in a subdirectory "bisect" under '
'working_directory and that will be used to perform the '
'bisection. This parameter is optional, if it is not '
'supplied, the script will work from the current depot.')
group.add_argument('--build_preference',
choices=['msvs', 'ninja', 'make'],
help='The preferred build system to use. On linux/mac '
'the options are make/ninja. On Windows, the '
'options are msvs/ninja.')
group.add_argument('--target_platform', default='chromium',
choices=['chromium', 'android', 'android-chrome'],
help='The target platform. Choices are "chromium" '
'(current platform), or "android". If you specify '
'something other than "chromium", you must be '
'properly set up to build that platform.')
group.add_argument('--no_custom_deps', dest='no_custom_deps',
action='store_true', default=False,
help='Run the script with custom_deps or not.')
group.add_argument('--extra_src',
help='Path to a script which can be used to modify the '
'bisect script\'s behavior.')
group.add_argument('--use_goma', action='store_true',
help='Add a bunch of extra threads for goma, and enable '
'goma')
group.add_argument('--goma_dir',
help='Path to goma tools (or system default if not '
'specified).')
group.add_argument('--goma_threads', type=int, default='64',
help='Number of threads for goma, only if using goma.')
group.add_argument('--output_buildbot_annotations', action='store_true',
help='Add extra annotation output for buildbot.')
group.add_argument('--target_arch', default='ia32',
dest='target_arch',
choices=['ia32', 'x64', 'arm', 'arm64'],
help='The target build architecture. Choices are "ia32" '
'(default), "x64", "arm" or "arm64".')
group.add_argument('--target_build_type', default='Release',
choices=['Release', 'Debug', 'Release_x64'],
help='The target build type. Choices are "Release" '
'(default), Release_x64 or "Debug".')
group.add_argument('--builder_type', default=fetch_build.PERF_BUILDER,
choices=[fetch_build.PERF_BUILDER,
fetch_build.FULL_BUILDER,
fetch_build.ANDROID_CHROME_PERF_BUILDER, ''],
help='Type of builder to get build from. This '
'determines both the bot that builds and the '
'place where archived builds are downloaded from. '
'For local builds, an empty string can be passed.')
@staticmethod
def _AddDebugOptionsGroup(parser):
group = parser.add_argument_group('Debug options')
group.add_argument('--debug_ignore_build', action='store_true',
help='DEBUG: Don\'t perform builds.')
group.add_argument('--debug_ignore_sync', action='store_true',
help='DEBUG: Don\'t perform syncs.')
group.add_argument('--debug_ignore_perf_test', action='store_true',
help='DEBUG: Don\'t perform performance tests.')
group.add_argument('--debug_ignore_regression_confidence',
action='store_true',
help='DEBUG: Don\'t score the confidence of the initial '
'good and bad revisions\' test results.')
group.add_argument('--debug_fake_first_test_mean', type=int, default='0',
help='DEBUG: When faking performance tests, return this '
'value as the mean of the first performance test, '
'and return a mean of 0.0 for further tests.')
return group
@classmethod
def _CreateCommandLineParser(cls):
  """Builds the argparse parser holding every bisect option group.

  Returns:
    An argparse.ArgumentParser populated with the bisect, build and
    debug option groups.
  """
  usage_text = ('%(prog)s [options] [-- chromium-options]\n'
                'Perform binary search on revision history to find a minimal '
                'range of revisions where a performance metric regressed.\n')
  parser = argparse.ArgumentParser(usage=usage_text)
  for add_option_group in (cls._AddBisectOptionsGroup,
                           cls._AddBuildOptionsGroup,
                           cls._AddDebugOptionsGroup):
    add_option_group(parser)
  return parser
def ParseCommandLine(self):
  """Parses sys.argv for bisect options and copies them onto self.

  Raises:
    RuntimeError: If a required option is missing or malformed; the
        message includes the parser's full help text.
  """
  parser = self._CreateCommandLineParser()
  opts = parser.parse_args()
  try:
    # --metric is mandatory except in return-code mode, where there is no
    # chart/trace to read.
    if (not opts.metric and
        opts.bisect_mode != bisect_utils.BISECT_MODE_RETURN_CODE):
      raise RuntimeError('missing required parameter: --metric')
    if opts.bisect_mode != bisect_utils.BISECT_MODE_RETURN_CODE:
      metric_values = opts.metric.split('/')
      if len(metric_values) != 2:
        raise RuntimeError('Invalid metric specified: [%s]' % opts.metric)
      opts.metric = metric_values
    # Clamp to [0, 25] percent, then convert to a fraction.
    opts.truncate_percent = min(max(opts.truncate_percent, 0), 25) / 100.0
    # items() instead of Python-2-only iteritems(); works on both Pythons.
    for k, v in opts.__dict__.items():
      assert hasattr(self, k), 'Invalid %s attribute in BisectOptions.' % k
      setattr(self, k, v)
  except RuntimeError as e:  # 'as' form is valid on Python 2.6+ and 3.
    output_string = StringIO.StringIO()
    parser.print_help(file=output_string)
    # Format the exception itself (str(e)); e.message was removed in Python 3.
    error_message = '%s\n\n%s' % (e, output_string.getvalue())
    output_string.close()
    raise RuntimeError(error_message)
@staticmethod
def FromDict(values):
  """Creates an instance of BisectOptions from a dictionary.

  Args:
    values: A dict containing options to set.

  Returns:
    An instance of BisectOptions with the given values applied and
    clamped to their supported ranges.

  Raises:
    RuntimeError: If the metric is not of the form chart/trace.
  """
  opts = BisectOptions()
  # items() instead of Python-2-only iteritems(); works on both Pythons.
  for k, v in values.items():
    assert hasattr(opts, k), 'Invalid %s attribute in BisectOptions.' % k
    setattr(opts, k, v)
  if opts.metric and opts.bisect_mode != bisect_utils.BISECT_MODE_RETURN_CODE:
    metric_values = opts.metric.split('/')
    if len(metric_values) != 2:
      raise RuntimeError('Invalid metric specified: [%s]' % opts.metric)
    opts.metric = metric_values
  # An x64 Release build is archived under the Release_x64 builder type.
  if opts.target_arch == 'x64' and opts.target_build_type == 'Release':
    opts.target_build_type = 'Release_x64'
  # Clamp numeric options to their supported ranges.
  opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
  opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60)
  # Clamp to [0, 25] percent, then convert to a fraction.
  opts.truncate_percent = min(max(opts.truncate_percent, 0), 25) / 100.0
  return opts
def _ConfigureLogging():
  """Trivial logging config.

  Configures logging to output any messages at or above INFO to standard
  out, without any additional formatting.
  """
  logging_format = '%(message)s'
  # Use sys.stdout directly rather than logging.sys.stdout, which depended
  # on the logging module's internal (undocumented) import of sys.
  logging.basicConfig(
      stream=sys.stdout, level=logging.INFO, format=logging_format)
def main():
  """Entry point: runs a bisect using options parsed from the command line.

  Returns:
    0 on success (including an early abort because the tracked bug is
    closed), 1 on any RuntimeError.
  """
  _ConfigureLogging()
  try:
    opts = BisectOptions()
    opts.ParseCommandLine()

    # If a bug id was given and that bug is already closed, abort early
    # rather than spend time bisecting a fixed regression.
    if opts.bug_id:
      if opts.output_buildbot_annotations:
        bisect_utils.OutputAnnotationStepStart('Checking Issue Tracker')
      issue_closed = query_crbug.CheckIssueClosed(opts.bug_id)
      # print() with a single argument behaves identically on Python 2 and 3.
      if issue_closed:
        print('Aborting bisect because bug is closed')
      else:
        print('Could not confirm bug is closed, proceeding.')
      if opts.output_buildbot_annotations:
        bisect_utils.OutputAnnotationStepClosed()
      if issue_closed:
        results = BisectResults(abort_reason='the bug is closed.')
        bisect_printer = BisectPrinter(opts)
        bisect_printer.FormatAndPrintResults(results)
        return 0

    if opts.extra_src:
      extra_src = bisect_utils.LoadExtraSrc(opts.extra_src)
      if not extra_src:
        raise RuntimeError('Invalid or missing --extra_src.')
      bisect_utils.AddAdditionalDepotInfo(extra_src.GetAdditionalDepotInfo())

    if opts.working_directory:
      custom_deps = bisect_utils.DEFAULT_GCLIENT_CUSTOM_DEPS
      if opts.no_custom_deps:
        custom_deps = None
      bisect_utils.CreateBisectDirectoryAndSetupDepot(opts, custom_deps)
      os.chdir(os.path.join(os.getcwd(), 'src'))
      RemoveBuildFiles(opts.target_build_type)

    if not _IsPlatformSupported():
      raise RuntimeError('Sorry, this platform isn\'t supported yet.')

    if not source_control.IsInGitRepository():
      raise RuntimeError(
          'Sorry, only the git workflow is supported at the moment.')

    # gClient sync seems to fail if you're not in master branch.
    if (not source_control.IsInProperBranch() and
        not opts.debug_ignore_sync and
        not opts.working_directory):
      raise RuntimeError('You must switch to master branch to run bisection.')

    bisect_test = BisectPerformanceMetrics(opts, os.getcwd())
    try:
      results = bisect_test.Run(opts.command, opts.bad_revision,
                                opts.good_revision, opts.metric)
      if results.error:
        raise RuntimeError(results.error)
      bisect_test.printer.FormatAndPrintResults(results)
      return 0
    finally:
      # Always clean up working state, even when the bisect itself failed.
      bisect_test.PerformCleanup()
  except RuntimeError as e:
    if opts.output_buildbot_annotations:
      # The perf dashboard scrapes the "results" step in order to comment on
      # bugs. If you change this, please update the perf dashboard as well.
      bisect_utils.OutputAnnotationStepStart('Results')
    print('Runtime Error: %s' % e)
    if opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepClosed()
    return 1
# Script entry point: the process exit status mirrors main()'s return value.
if __name__ == '__main__':
  sys.exit(main())
| {
"content_hash": "95d6c5af5576525e2d3068ce196351f0",
"timestamp": "",
"source": "github",
"line_count": 2871,
"max_line_length": 80,
"avg_line_length": 39.95402298850575,
"alnum_prop": 0.6455696202531646,
"repo_name": "joone/chromium-crosswalk",
"id": "5167b1d3c1a1376fd295b22119de45bfb5a5d7f3",
"size": "114893",
"binary": false,
"copies": "7",
"ref": "refs/heads/2016.04.css-round-display-edtior-draft-1",
"path": "tools/auto_bisect/bisect_perf_regression.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.