code stringlengths 2 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2 1.05M |
|---|---|---|---|---|---|
#############################################################################
#
# Copyright (C) 2013 Navi-X
#
# This file is part of Navi-X.
#
# Navi-X is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Navi-X is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Navi-X. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
#############################################################################
#
# CDownloader:
# This class handles user login to the Navi-Xtreme website.
#############################################################################
from string import *
import sys, os.path
import urllib
import urllib2
import re, random, string
import xbmc, xbmcgui, xbmcaddon
import re, os, time, datetime, traceback
import shutil
import os
from libs2 import *
try: Emulating = xbmcgui.Emulating
except: Emulating = False
# Control ids of the login dialog widgets; these must match the ids
# declared in the skin XML file (CLoginskin2.xml).
LABEL_USRNAME = 141
LABEL_PASSWORD = 142
BUTTON_USRNAME = 143
BUTTON_PASSWORD = 1144  # NOTE(review): breaks the 141..146 sequence - possibly a typo for 144; confirm against the skin XML
BUTTON_LOGIN = 145
BUTTON_CANCEL = 146
class CDialogLogin(xbmcgui.WindowXMLDialog):
    """Login dialog for the Navi-Xtreme website.

    The logged-in state is carried by ``self.user_id``; an empty string
    means "not logged in".  Persistence hooks (read_user_id/save_user_id)
    are stubs to be filled in.
    """
    def __init__(self, strXMLname, strFallbackPath):
        # strXMLname / strFallbackPath are consumed by the
        # xbmcgui.WindowXMLDialog machinery, not used directly here.
        self.userloggedin = False
        # user id read from persistent storage; '' == not logged in
        self.user_id = ''

    def onAction(self, action):
        # Close the dialog on any "back" / "previous menu" style action.
        if (action == ACTION_PREVIOUS_MENU) or (action == ACTION_PARENT_DIR) or (action == ACTION_PREVIOUS_MENU2):
            self.close()

    def onFocus(self, controlId):
        pass

    def onClick(self, controlId):
        pass

    def onControl(self, control):
        pass

    def login(self):
        """Display the dialog modally, then persist the resulting user id."""
        self.doModal()
        # perform login to the Navi-Xtreme server
        # if success
        self.save_user_id()

    def logout(self):
        """Forget the stored user id and persist the cleared state."""
        self.user_id = ''
        # Bug fix: the original called self.write_user_id(), which does not
        # exist (the original comment even said so); persist the now-empty
        # id through save_user_id() instead.
        self.save_user_id()

    def is_user_logged_in(self):
        """Return True when a non-empty user id is stored."""
        return self.user_id != ''

    def rate_item(self, mediaitem):
        pass

    def read_user_id(self):
        pass

    def save_user_id(self):
        pass
#end of class
#use singleton
#login = CDialogLogin("CLoginskin.xml", os.getcwd())
login = CDialogLogin("CLoginskin2.xml", addon.getAddonInfo('path'))
| JamesLinEngineer/RKMC | addons/script.navi-x/src/CLogin.py | Python | gpl-2.0 | 3,203 |
# Copyright (c) 2013-2014 Will Thames <will@thames.id.au>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import glob
import imp
import ansible.utils
from ansible.playbook.task import Task
import ansible.constants as C
from ansible.module_utils.splitter import split_args
import yaml
from yaml.composer import Composer
from yaml.constructor import Constructor
LINE_NUMBER_KEY = '__line__'
def load_plugins(directory):
    """Import every ``[A-Za-z]*.py`` file in *directory* and instantiate the
    class named after the file; return the list of instances.

    NOTE(review): relies on the deprecated ``imp`` module (removed in
    Python 3.12); each plugin module is expected to define a class whose
    name equals the file name minus ``.py``.
    """
    result = []
    fh = None
    for pluginfile in glob.glob(os.path.join(directory, '[A-Za-z]*.py')):
        pluginname = os.path.basename(pluginfile.replace('.py', ''))
        try:
            fh, filename, desc = imp.find_module(pluginname, [directory])
            mod = imp.load_module(pluginname, fh, filename, desc)
            # the plugin class carries the same name as its module
            obj = getattr(mod, pluginname)()
            result.append(obj)
        finally:
            # imp.find_module returns an open file handle; close it even
            # when loading or instantiation fails
            if fh:
                fh.close()
    return result
def tokenize(line):
    """Split one task line into ``(command, args, kwargs)``.

    A leading ``-`` list marker and an ``action:``/``local_action:``
    prefix are discarded; ``k=v`` tokens become kwargs, everything else
    becomes a positional arg.  (Also removes the unused ``result`` local
    the original declared and never used.)
    """
    tokens = line.lstrip().split(" ")
    if tokens[0] == '-':
        tokens = tokens[1:]
    if tokens[0] in ('action:', 'local_action:'):
        tokens = tokens[1:]
    # strip a trailing colon from the module name ("shell:" -> "shell")
    command = tokens[0].replace(":", "")
    args = []
    kwargs = {}
    for token in tokens[1:]:
        if "=" in token:
            key, _, value = token.partition("=")
            kwargs[key] = value
        else:
            args.append(token)
    return (command, args, kwargs)
def _playbook_items(pb_data):
if isinstance(pb_data, dict):
return pb_data.items()
elif not pb_data:
return []
else:
return [item for play in pb_data for item in play.items()]
def find_children(playbook):
    """Return the child files referenced by *playbook*, a (path, type) pair.

    Each child is a dict with a resolved 'path' and its 'type'
    (e.g. 'tasks').  A missing playbook file yields an empty list.
    """
    if not os.path.exists(playbook[0]):
        return []
    results = []
    basedir = os.path.dirname(playbook[0])
    pb_data = ansible.utils.parse_yaml_from_file(playbook[0])
    items = _playbook_items(pb_data)
    for item in items:
        for child in play_children(basedir, item, playbook[1]):
            # skip paths that still contain unexpanded variables
            if "$" in child['path'] or "{{" in child['path']:
                continue
            # keep only the tokens before the first k=v parameter
            valid_tokens = list()
            for token in split_args(child['path']):
                if '=' in token:
                    break
                valid_tokens.append(token)
            path = ' '.join(valid_tokens)
            results.append({
                'path': ansible.utils.path_dwim(basedir, path),
                'type': child['type']
            })
    return results
def play_children(basedir, item, parent_type):
    """Return the child files referenced by one (key, value) play item.

    Dispatches on the play key; unknown keys and empty values yield [].
    """
    handlers = {
        'tasks': _taskshandlers_children,
        'pre_tasks': _taskshandlers_children,
        'post_tasks': _taskshandlers_children,
        'handlers': _taskshandlers_children,
        'include': _include_children,
        'roles': _roles_children,
        'dependencies': _roles_children,
    }
    (k, v) = item
    delegate = handlers.get(k)
    if delegate is not None and v:
        return delegate(basedir, k, v, parent_type)
    return []
def _include_children(basedir, k, v, parent_type):
    """An ``include:`` value names a single child file of the parent's type."""
    resolved = ansible.utils.path_dwim(basedir, v)
    return [{'path': resolved, 'type': parent_type}]
def _taskshandlers_children(basedir, k, v, parent_type):
    """Tasks/handlers children are the files named by ``include:`` entries."""
    children = []
    for entry in v:
        if 'include' in entry:
            children.append({
                'path': ansible.utils.path_dwim(basedir, entry['include']),
                'type': 'tasks',
            })
    return children
def _roles_children(basedir, k, v, parent_type):
    """Collect task/handler/meta files for every role in a ``roles:`` list."""
    results = []
    for role in v:
        # a role entry is either a plain name or a dict with a 'role' key
        name = role['role'] if isinstance(role, dict) else role
        results.extend(_look_for_role_files(basedir, name))
    return results
def _rolepath(basedir, role):
    """Resolve *role* to an existing role directory, or None.

    Candidate locations relative to the playbook/role plus every entry of
    Ansible's configured roles path are probed in order; the first
    existing directory wins.
    """
    role_path = None
    possible_paths = [
        # if included from a playbook
        ansible.utils.path_dwim(basedir, os.path.join('roles', role)),
        ansible.utils.path_dwim(basedir, role),
        # if included from roles/[role]/meta/main.yml
        ansible.utils.path_dwim(
            basedir, os.path.join('..', '..', '..', 'roles', role)
        ),
        ansible.utils.path_dwim(basedir,
                                os.path.join('..', '..', role))
    ]
    if C.DEFAULT_ROLES_PATH:
        # also honour the roles_path setting (os.pathsep-separated list)
        search_locations = C.DEFAULT_ROLES_PATH.split(os.pathsep)
        for loc in search_locations:
            loc = os.path.expanduser(loc)
            possible_paths.append(ansible.utils.path_dwim(loc, role))
    for path_option in possible_paths:
        if os.path.isdir(path_option):
            role_path = path_option
            break
    return role_path
def _look_for_role_files(basedir, role):
    """Return the main tasks/handlers/meta files of *role*, if it resolves."""
    role_path = _rolepath(basedir, role)
    if not role_path:
        return []
    results = []
    for kind in ('tasks', 'handlers', 'meta'):
        # prefer main.yml over main.yaml; take at most one per kind
        for ext in ('.yml', '.yaml'):
            candidate = os.path.join(role_path, kind, 'main' + ext)
            if os.path.exists(candidate):
                results.append({'path': candidate, 'type': kind})
                break
    return results
def rolename(filepath):
    """Extract the role name from a path that contains ``roles/``.

    Returns '' when the path does not reference a role.
    """
    idx = filepath.find('roles/')
    if idx < 0:
        return ''
    role = filepath[idx + 6:]
    # Bug fix: when nothing follows the role name, str.find returns -1 and
    # the original slice role[:-1] silently dropped the last character
    # ("roles/foo" -> "fo").  Only trim when a '/' is actually present.
    end = role.find('/')
    if end >= 0:
        role = role[:end]
    return role
def _kv_to_dict(v):
    """Parse a "module key=value ..." string into a normalized action dict."""
    (module_name, positional, keyword) = tokenize(v)
    return dict(module=module_name, module_arguments=positional, **keyword)
def normalize_task(task):
    ''' ensures that all tasks have an action key
    and that string values are converted to python objects '''
    # NOTE: Python 2 only code (print statements, basestring).
    result = dict()
    for (k, v) in task.items():
        if k in Task.VALID_KEYS or k.startswith('with_'):
            if k == 'local_action' or k == 'action':
                # string actions are parsed into a module/args dict
                if not isinstance(v, dict):
                    v = _kv_to_dict(v)
                v['module_arguments'] = v.get('module_arguments', list())
                result['action'] = v
            else:
                # recognized task keyword: copy through unchanged
                result[k] = v
        else:
            # any other key is treated as the module name itself
            if isinstance(v, basestring):
                # "module: k=v ..." shorthand
                v = _kv_to_dict(k + ' ' + v)
            elif not v:
                # bare module with no arguments
                v = dict(module=k)
            else:
                if isinstance(v, dict):
                    v.update(dict(module=k))
                else:
                    if k == '__line__':
                        # Keep the line number stored
                        result[k] = v
                        continue
                    else:
                        # Should not get here!
                        print "Was not expecting value %s of type %s for key %s" % (str(v), type(v), k)
                        print "Task: %s" % str(task)
                        exit(1)
            v['module_arguments'] = v.get('module_arguments', list())
            result['action'] = v
    return result
def task_to_str(task):
    """Render a normalized task as a one-line human-readable string.

    Prefers the task's name; otherwise formats the action module with its
    key=value parameters and positional arguments.
    """
    name = task.get("name")
    if name:
        return name
    action = task.get("action")
    # Bug fix: the original joined the literal string "k=v" for every
    # parameter instead of formatting the actual key/value pair.
    args = " ".join(["%s=%s" % (k, v) for (k, v) in action.items()
                     if k != "module_arguments"] +
                    action.get("module_arguments"))
    return "{0} {1}".format(action["module"], args)
def get_action_tasks(yaml, file):
    """Collect every non-include task from parsed playbook/tasks YAML.

    For tasks/handlers files the whole document is the task list; for
    playbooks the task sections of each play are gathered first.
    """
    if file['type'] in ['tasks', 'handlers']:
        tasks = yaml
    else:
        tasks = list()
        for block in yaml:
            for section in ('tasks', 'handlers', 'pre_tasks', 'post_tasks'):
                if section in block:
                    tasks.extend(block.get(section) or [])
    return [normalize_task(task) for task in tasks
            if 'include' not in task.keys()]
def parse_yaml_linenumbers(data):
    """Parses yaml as ansible.utils.parse_yaml but with linenumbers.
    The line numbers are stored in each node's LINE_NUMBER_KEY key"""
    # A plain yaml.Loader is monkey-patched so that every composed node
    # remembers its (1-based) source line, which construct_mapping then
    # copies into the resulting dict under LINE_NUMBER_KEY.
    loader = yaml.Loader(data)
    def compose_node(parent, index):
        # the line number where the previous token has ended (plus empty lines)
        line = loader.line
        node = Composer.compose_node(loader, parent, index)
        node.__line__ = line + 1
        return node
    def construct_mapping(node, deep=False):
        mapping = Constructor.construct_mapping(loader, node, deep=deep)
        mapping[LINE_NUMBER_KEY] = node.__line__
        return mapping
    loader.compose_node = compose_node
    loader.construct_mapping = construct_mapping
    data = loader.get_single_data()
    return data
| sumara/ansible-lint-deb | deb_dist/ansible-lint-2.1.3/lib/ansiblelint/utils.py | Python | gpl-2.0 | 9,195 |
#!/usr/bin/env python
import sys
from coverage import coverage
from coverage.results import Numbers
from coverage.summary import SummaryReporter
from twisted.python import usage
# this is an adaptation of the code behind "coverage report", modified to
# display+sortby "lines uncovered", which (IMHO) is more important of a
# metric than lines covered or percentage covered. Concentrating on the files
# with the most uncovered lines encourages getting the tree and test suite
# into a state that provides full line-coverage on all files.
# much of this code was adapted from coverage/summary.py in the 'coverage'
# distribution, and is used under their BSD license.
class Options(usage.Options):
    """Command line options: -s/--sortby selects the report sort order."""
    optParameters = [
        ("sortby", "s", "uncovered", "how to sort: uncovered, covered, name"),
    ]
class MyReporter(SummaryReporter):
    """Summary reporter sorted by (and showing) uncovered line counts."""
    def report(self, outfile=None, sortby="uncovered"):
        """Write the per-file coverage table to *outfile* (default stdout).

        *sortby* is one of 'uncovered', 'covered', 'name'.
        """
        # exclude system libraries, tests and vendored code from the report
        self.find_code_units(None, ["/System", "/Library", "/usr/lib",
                                    "buildbot/test", "simplejson"])
        # Prepare the formatting strings
        max_name = max([len(cu.name) for cu in self.code_units] + [5])
        fmt_name = "%%- %ds " % max_name
        fmt_err = "%s %s: %s\n"
        header1 = (fmt_name % "") + " Statements "
        header2 = (fmt_name % "Name") + " Uncovered Covered"
        fmt_coverage = fmt_name + "%9d %7d "
        if self.branches:
            header1 += " Branches "
            header2 += " Found Excutd"
            fmt_coverage += " %6d %6d"
        header1 += " Percent"
        header2 += " Covered"
        fmt_coverage += " %7d%%"
        if self.show_missing:
            header1 += " "
            header2 += " Missing"
            fmt_coverage += " %s"
        rule = "-" * len(header1) + "\n"
        header1 += "\n"
        header2 += "\n"
        fmt_coverage += "\n"
        if not outfile:
            outfile = sys.stdout
        # Write the header
        outfile.write(header1)
        outfile.write(header2)
        outfile.write(rule)
        total = Numbers()
        total_uncovered = 0
        # collect (sortkey, formatted_row) pairs, then sort before writing
        lines = []
        for cu in self.code_units:
            try:
                analysis = self.coverage._analyze(cu)
                nums = analysis.numbers
                uncovered = nums.n_statements - nums.n_executed
                total_uncovered += uncovered
                args = (cu.name, uncovered, nums.n_executed)
                if self.branches:
                    args += (nums.n_branches, nums.n_executed_branches)
                args += (nums.pc_covered,)
                if self.show_missing:
                    args += (analysis.missing_formatted(),)
                if sortby == "covered":
                    sortkey = nums.pc_covered
                elif sortby == "uncovered":
                    sortkey = uncovered
                else:
                    sortkey = cu.name
                lines.append((sortkey, fmt_coverage % args))
                total += nums
            except KeyboardInterrupt: # pragma: no cover
                raise
            except:
                # NOTE(review): bare except - any analysis error is
                # swallowed entirely when ignore_errors is set
                if not self.ignore_errors:
                    typ, msg = sys.exc_info()[:2]
                    outfile.write(fmt_err % (cu.name, typ.__name__, msg))
        lines.sort()
        # numeric sort orders descend (worst offenders first)
        if sortby in ("uncovered", "covered"):
            lines.reverse()
        for sortkey, line in lines:
            outfile.write(line)
        # append a TOTAL row when more than one file was reported
        if total.n_files > 1:
            outfile.write(rule)
            args = ("TOTAL", total_uncovered, total.n_executed)
            if self.branches:
                args += (total.n_branches, total.n_executed_branches)
            args += (total.pc_covered,)
            if self.show_missing:
                args += ("",)
            outfile.write(fmt_coverage % args)
def report(o):
    """Load the saved coverage data and print the custom summary report."""
    cov = coverage()
    cov.load()
    reporter = MyReporter(cov, show_missing=False, ignore_errors=False)
    reporter.report(sortby=o['sortby'])
if __name__ == '__main__':
    # parse -s/--sortby from argv and emit the report to stdout
    o = Options()
    o.parseOptions()
    report(o)
| zozo123/buildbot | master/contrib/coverage2text.py | Python | gpl-3.0 | 4,085 |
import easygui_qt as easy
import pandas as pd
import numpy as np
import geoplot
from matplotlib import pyplot as plt
import math
from matplotlib.colors import LinearSegmentedColormap
MTH = {'sum': np.sum, 'max': np.max, 'min': np.min, 'mean': np.mean}
class SpatialData:
    """Holds solver results plus region polygon / power-line geometries.

    ``self.results`` is a CSV-backed DataFrame indexed by
    (bus_label, direction, obj_label).
    """
    def __init__(self, result_file=None):
        # prompt interactively when no result file is given
        if result_file is None:
            result_file = easy.get_file_names(title="Select result file.")[0]
        print(result_file)
        self.results = pd.read_csv(result_file, index_col=[0, 1, 2])
        self.polygons = None
        self.lines = None
        self.plotter = None
    def add_polygon_column(self, obj=None, direction=None, bus=None,
                           method=None, kws=None, **kwargs):
        """Aggregate one result column per region into ``self.polygons``.

        Missing arguments are asked for interactively; *kws* lists label
        substrings to exclude.  Returns the chosen selection dict.
        """
        if method is None:
            method = easy.get_choice("Chose you method!",
                                     choices=['sum', 'max', 'min', 'mean'])
        if self.polygons is None:
            self.polygons = load_geometry(**kwargs)
        if kws is None:
            kws = ['line', 'GL', 'duals']
        # labels are prefixed with a 5-char region id, hence x[5:]
        objects = list(set([
            x[5:] for x in
            self.results.index.get_level_values('obj_label').unique()
            if not any(y in x for y in kws)]))
        reg_buses = list(set([
            x[5:] for x in
            self.results.index.get_level_values('bus_label').unique()
            if not any(y in x for y in kws)]))
        global_buses = list(set([
            x for x in
            self.results.index.get_level_values('bus_label').unique()
            if 'GL' in x]))
        buses = reg_buses + global_buses
        if obj is None:
            obj = easy.get_choice("What object do you want to plot?",
                                  choices=objects)
        if direction is None:
            direction = easy.get_choice("From bus or to bus?",
                                        choices=['from_bus', 'to_bus'])
        if bus is None:
            bus = easy.get_choice("Which bus?", choices=buses)
        # aggregate the selected flow per region; NaN when absent
        for r in self.polygons.index:
            try:
                tmp = pd.Series(self.results.loc[
                    '{0}_{1}'.format(r, bus), direction,
                    '{0}_{1}'.format(r, obj)]['val']).groupby(
                    level=0).agg(MTH[method])[0]
            except KeyError:
                tmp = float('nan')
            self.polygons.loc[r, obj] = tmp
        # scale the column to a convenient SI prefix
        uv = unit_round(self.polygons[obj])
        self.polygons[obj] = uv['series']
        self.polygons[obj].prefix = uv['prefix']
        self.polygons[obj].prefix_long = uv['prefix_long']
        selection = {'obj': obj,
                     'direction': direction,
                     'bus': bus,
                     'method': method}
        return selection
    def add_power_lines(self, method=None, **kwargs):
        """Aggregate per-line transmission into ``self.lines['trans']``.

        Returns the aggregation method used.
        """
        if self.lines is None:
            self.lines = load_geometry(region_column='name', **kwargs)
        if self.plotter is None:
            self.plotter = geoplot.GeoPlotter(
                geoplot.postgis2shapely(self.lines.geom), (3, 16, 47, 56))
        else:
            self.plotter.geometries = geoplot.postgis2shapely(self.lines.geom)
        if method is None:
            method = easy.get_choice("Chose you method!",
                                     choices=['sum', 'max', 'min', 'mean'])
        for l in self.lines.index:
            try:
                # line ids look like "<regionA>-<regionB>"; take the larger
                # of the two directed flows
                # NOTE(review): Series.set_value is deprecated in pandas
                r = l.split('-')
                tmp = pd.Series()
                tmp.set_value(1, self.results.loc[
                    '{0}_bus_el'.format(r[0]), 'from_bus',
                    '{0}_{1}_powerline'.format(*r)]['val'].groupby(
                    level=0).agg(MTH[method])[0])
                tmp.set_value(2, self.results.loc[
                    '{0}_bus_el'.format(r[1]), 'from_bus',
                    '{1}_{0}_powerline'.format(*r)]['val'].groupby(
                    level=0).agg(MTH[method])[0])
                self.lines.loc[l, 'trans'] = tmp.max()
            except KeyError:
                # presumably a placeholder for missing lines - TODO confirm
                self.lines.loc[l, 'trans'] = 3000000
        uv = unit_round(self.lines['trans'])
        self.lines['trans'] = uv['series']
        self.lines['trans'].prefix = uv['prefix']
        self.lines['trans'].prefix_long = uv['prefix_long']
        return method
def load_geometry(geometry_file=None, region_column='gid'):
    """Read a geometry table from CSV, indexed by *region_column*.

    When *geometry_file* is None the user is asked to pick one.
    """
    filename = geometry_file
    if filename is None:
        filename = easy.get_file_names()[0]
    return pd.read_csv(filename, index_col=region_column)
def show():
    """Finalize the layout, drop the frame box and display the figure."""
    plt.tight_layout()
    plt.box(on=None)
    plt.show()
def unit_round(values, min_value=False):
    """Scale *values* to a convenient SI prefix and round to two decimals.

    The scaling factor is derived from the maximum of *values* (or the
    minimum when ``min_value=True``, which also bumps the factor by one).

    Returns a dict with the scaled 'series' plus the short and long
    prefix strings.
    """
    # Bug fix: the original mapped 5 -> 'Exa'/'E' and 6 -> 'Peta'/'P';
    # in SI, Peta is 10**15 (factor 5) and Exa is 10**18 (factor 6).
    longprefix = {0: '', 1: 'kilo', 2: 'Mega', 3: 'Giga', 4: 'Tera',
                  5: 'Peta', 6: 'Exa'}
    shortprefix = {0: '', 1: 'k', 2: 'M', 3: 'G', 4: 'T',
                   5: 'P', 6: 'E'}
    if min_value:
        def_value = min(values)
        a = 1
    else:
        def_value = max(values)
        a = 0
    if def_value > 0:
        # one prefix step per three decades
        factor = int(int(math.log10(def_value)) / 3) + a
    else:
        factor = 0
    values = round(values / 10 ** (factor * 3), 2)
    return {'series': values, 'prefix': shortprefix[factor],
            'prefix_long': longprefix[factor]}
def add_labels(data, plotter, label=None,
               coord_file='data/geometries/coord_region.csv'):
    """Annotate each region polygon with its id (or its *label* value).

    Label positions come from *coord_file* where available, otherwise the
    polygon centroid is used.  Also draws a short white marker line near
    Hamburg.
    """
    p = pd.read_csv(coord_file, index_col='name')
    data.polygons['point'] = p.point
    for row in data.polygons.iterrows():
        # fall back to the polygon centroid when no point is defined
        if 'point' not in row[1]:
            point = geoplot.postgis2shapely([row[1].geom, ])[0].centroid
        else:
            point = geoplot.postgis2shapely([row[1].point, ])[0]
        (x, y) = plotter.basemap(point.x, point.y)
        if label is None:
            # region ids carry a 2-char prefix that is stripped for display
            text = row[0][2:]
        else:
            text = str(round(row[1][label], 1))
        # pick a text colour that contrasts with the fill colour
        if row[1].normalised < 0.3 or row[1].normalised > 0.95:
            textcolour = 'white'
        else:
            textcolour = 'black'
        plotter.ax.text(x, y, text, color=textcolour, fontsize=12)
    start_line = plotter.basemap(9.7, 53.4)
    end_line = plotter.basemap(10.0, 53.55)
    plt.plot([start_line[0], end_line[0]], [start_line[1], end_line[1]], '-',
             color='white')
def polygon_plot(l_min=None, l_max=None, setname=None, myset=None, method=None,
                 filename=None):
    """Plot one aggregated result value per region as a choropleth map.

    Missing choices (data set, method, result file) are asked for
    interactively; *l_min*/*l_max* override the legend range.
    """
    geometry = 'data/geometries/polygons_de21_simple.csv'
    # predefined selections of (object, direction, bus)
    sets = {
        'load': {
            'obj': 'load',
            'direction': 'from_bus',
            'bus': 'bus_el'},
        'pv': {
            'obj': 'solar',
            'direction': 'to_bus',
            'bus': 'bus_el'},
    }
    if setname is None and myset is None:
        setname = easy.get_choice("What object do you want to plot?",
                                  choices=tuple(sets.keys()))
    if setname is not None:
        myset = sets[setname]
    if method is None:
        myset['method'] = easy.get_choice(
            "Chose you method!", choices=['sum', 'max', 'min', 'mean'])
    else:
        myset['method'] = method
    s_data = SpatialData(filename)
    myset = s_data.add_polygon_column(geometry_file=geometry, **myset)
    # energy unit for 'sum', power unit otherwise
    if myset['method'] == 'sum':
        unit = 'Wh'
    else:
        unit = 'W'
    unit = "[{0}]".format(s_data.polygons[myset['obj']].prefix + unit)
    plotter = geoplot.GeoPlotter(geoplot.postgis2shapely(s_data.polygons.geom),
                                 (3, 16, 47, 56))
    # normalise the column to [0, 1] for the colour map
    v_min = s_data.polygons[myset['obj']].min()
    v_max = s_data.polygons[myset['obj']].max()
    s_data.polygons['normalised'] = ((s_data.polygons[myset['obj']] - v_min) /
                                     (v_max - v_min))
    plotter.data = s_data.polygons['normalised']
    plotter.plot(facecolor='data', edgecolor='white')
    add_labels(s_data, plotter, myset['obj'])
    if l_min is None:
        l_min = v_min
    if l_max is None:
        l_max = v_max
    plotter.draw_legend((l_min, l_max), number_ticks=3, legendlabel=unit,
                        location='bottom')
    show()
def powerline_plot(l_min=None, l_max=None):
    """Plot the transmission of every power line on a grey region map.

    *l_min*/*l_max* override the legend range.
    """
    s_data = SpatialData()
    reg = {
        'geometry_file': 'data/geometries/polygons_de21_simple.csv'}
    poly = geoplot.postgis2shapely(load_geometry(**reg).geom)
    plotter = geoplot.GeoPlotter(poly, (3, 16, 47, 56))
    method = s_data.add_power_lines(
        geometry_file='data/geometries/lines_de21.csv')
    # grey background polygons; lines are coloured below
    plotter.plot(facecolor='grey', edgecolor='white')
    if method == 'sum':
        unit = 'Wh'
    else:
        unit = 'W'
    unit = "[{0}]".format(s_data.lines['trans'].prefix + unit)
    # normalise transmission to [0, 1] for the colour map
    v_min = s_data.lines['trans'].min()
    v_max = s_data.lines['trans'].max()
    s_data.lines['normalised'] = ((s_data.lines['trans'] - v_min) /
                                  (v_max - v_min))
    plotter.geometries = geoplot.postgis2shapely(s_data.lines.geom)
    plotter.data = s_data.lines['normalised']
    my_cmap = LinearSegmentedColormap.from_list('mycmap', [(0, 'green'),
                                                           (0.5, 'yellow'),
                                                           (1, 'red')])
    plotter.plot(edgecolor='data', linewidth=2, cmap=my_cmap)
    if l_min is None:
        l_min = v_min
    if l_max is None:
        l_max = v_max
    plotter.draw_legend((l_min, l_max), number_ticks=3, cmap=my_cmap,
                        legendlabel=unit, location='right')
    show()
def combined_plot():
    """Plot regional load (choropleth) and line transmission in one figure."""
    s_data = SpatialData()
    # Bug fix: add_polygon_column returns a selection dict; the original
    # used that dict itself as a column key (a TypeError, dicts are
    # unhashable).  Extract the object name from the selection instead.
    selection = s_data.add_polygon_column(
        obj='load', direction='from_bus', bus='bus_el', method='sum',
        geometry_file='geometries/polygons_de21_simple.csv')
    obj = selection['obj']
    s_data.add_power_lines(
        geometry_file='geometries/lines_de21.csv')
    unit = s_data.polygons[obj].prefix_long
    plotter = geoplot.GeoPlotter(geoplot.postgis2shapely(s_data.polygons.geom),
                                 (3, 16, 47, 56))
    # normalise the load column to [0, 1] for the colour map
    v_min = s_data.polygons[obj].min()
    v_max = s_data.polygons[obj].max()
    s_data.polygons['normalised'] = ((s_data.polygons[obj] - v_min) /
                                     (v_max - v_min))
    plotter.data = s_data.polygons['normalised']
    plotter.plot(facecolor='data', edgecolor='white')
    plotter.draw_legend((v_min, v_max), number_ticks=3, legendlabel=unit,
                        location='bottom')
    # second pass: colour the power lines by normalised transmission
    unit = s_data.lines['trans'].prefix_long
    v_min = s_data.lines['trans'].min()
    v_max = s_data.lines['trans'].max()
    s_data.lines['normalised'] = ((s_data.lines['trans'] - v_min) /
                                  (v_max - v_min))
    plotter.geometries = geoplot.postgis2shapely(s_data.lines.geom)
    plotter.data = s_data.lines['normalised']
    my_cmap = LinearSegmentedColormap.from_list('mycmap', [(0, 'green'),
                                                           (0.5, 'yellow'),
                                                           (1, 'red')])
    plotter.plot(edgecolor='data', linewidth=2, cmap=my_cmap)
    plotter.draw_legend((v_min, v_max), number_ticks=3,
                        legendlabel=unit, location='right')
    show()
if __name__ == "__main__":
    # resf = ('/home/uwe/git_local/reegis-hp/reegis_hp/de21/results' +
    # '/scenario_reegis_de_21_test_2017-01-03 11:31:10.600830_' +
    # 'results_complete.csv')
    # choice = 'polygons'
    # ask the user which kind of map to draw
    choice = easy.get_choice(
        "What geometry do you want to plot?", choices=['lines', 'polygons'])
    if choice == 'polygons':
        polygon_plot(l_min=0)
    elif choice == 'lines':
        powerline_plot()
    else:
        print("End!")
| oemof/reegis-hp | reegis_hp/de21/results.py | Python | gpl-3.0 | 11,607 |
import unittest
from xcolor.xparser import Xparser
class TestXcolorRegex(unittest.TestCase):
    """Check Xparser against the X resource colour line formats."""

    # expected output for the tested patterns
    expected = dict(name='color5', value='aabbcc')

    def _check_rgb(self, line):
        # the line must validate and parse to the expected components
        self.assertTrue(Xparser.valid(line))
        self.assertEqual(Xparser.rgb(line), self.expected)

    def _check_hex(self, line):
        self.assertTrue(Xparser.valid(line))
        self.assertEqual(Xparser.hex(line), self.expected)

    def test_comments(self):
        # commented-out resource lines must be rejected
        self.assertFalse(Xparser.valid(";*color5: rgb:aa/bb/cc"))
        self.assertFalse(Xparser.valid("#*color5: rgb:aa/bb/cc"))

    def test_generic_rgb(self):
        self._check_rgb("*color5: rgb:aa/bb/cc")

    def test_generic_hex(self):
        self._check_hex("*color5: #aabbcc")

    def test_urxvt_rgb(self):
        self._check_rgb("URxvt*color5: rgb:aa/bb/cc")

    def test_urxvt_hex(self):
        self._check_hex("URxvt*color5: #aabbcc")

    def test_urxvt_dot_rgb(self):
        self._check_rgb("URxvt.color5: rgb:aa/bb/cc")

    def test_urxvt_dot_hex(self):
        self._check_hex("URxvt.color5: #aabbcc")
if __name__ == "__main__":
    # run the suite when executed directly
    unittest.main()
| mrhmouse/xcolors | tests/test_regex.py | Python | gpl-3.0 | 1,647 |
from SCons.Script import *
def exists(env):
    """This SCons tool is only available when building on Windows."""
    return env["PLATFORM"] == "win32"
def ConvertNewlines(target, source, env):
    """SCons action: copy each source to its target with LF -> CRLF newlines.

    Files are handled as raw bytes so the content is never decoded (also
    makes the function correct under Python 3, where the original mixed
    binary file handles with str literals).  Returns None per the SCons
    action protocol.
    """
    for t, s in zip(target, source):
        # context managers guarantee the handles are closed even on error
        with open(str(s), "rb") as f_in:
            data = f_in.read()
        with open(str(t), "wb") as f_out:
            f_out.write(data.replace(b"\n", b"\r\n"))
    return None
def ConvertNewlinesB(target, source, env):
    """SCons action: like ConvertNewlines, but prefixes a UTF-8 BOM.

    Byte-oriented and context-managed for the same reasons as
    ConvertNewlines; returns None per the SCons action protocol.
    """
    for t, s in zip(target, source):
        with open(str(s), "rb") as f_in:
            data = f_in.read()
        with open(str(t), "wb") as f_out:
            # UTF-8 byte order mark expected by some Windows tools
            f_out.write(b"\xef\xbb\xbf")
            f_out.write(data.replace(b"\n", b"\r\n"))
    return None
def generate(env):
    """Register both newline converters as SCons builders (.txt targets)."""
    env["BUILDERS"]["ConvertNewlines"]=Builder(action=ConvertNewlines,suffix=".txt")
    env["BUILDERS"]["ConvertNewlinesB"]=Builder(action=ConvertNewlinesB,suffix=".txt")
| iakov/RHVoice | site_scons/site_tools/newlines.py | Python | gpl-3.0 | 816 |
#
# Copyright 2008,2009 Free Software Foundation, Inc.
#
# This application is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# The presence of this file turns this directory into a Python package
'''
This is the GNU Radio gr-air-modes package. It provides a library and
application for receiving Mode S / ADS-B signals from aircraft. Use
uhd_modes.py as the main application for receiving signals. cpr.py
provides an implementation of Compact Position Reporting. altitude.py
implements Gray-coded altitude decoding. Various plugins exist for SQL,
KML, and PlanePlotter-compliant SBS-1 emulation output. mlat.py provides
an experimental implementation of a multilateration solver.
'''
# ----------------------------------------------------------------
# Temporary workaround for ticket:181 (swig+python problem)
import sys
_RTLD_GLOBAL = 0
try:
    # the legacy 'dl' module is only available on some Unix Pythons
    from dl import RTLD_GLOBAL as _RTLD_GLOBAL
except ImportError:
    try:
        # fall back to the same constant via DLFCN
        from DLFCN import RTLD_GLOBAL as _RTLD_GLOBAL
    except ImportError:
        pass
if _RTLD_GLOBAL != 0:
    # load extension modules with RTLD_GLOBAL so swig-generated libraries
    # can resolve each other's symbols (original flags restored below)
    _dlopenflags = sys.getdlopenflags()
    sys.setdlopenflags(_dlopenflags|_RTLD_GLOBAL)
# import swig generated symbols into the gr-air-modes namespace
from air_modes_swig import *
# import any pure python here
#
try:
import zmq
except ImportError:
raise RuntimeError("PyZMQ not found! Please install libzmq and PyZMQ to run gr-air-modes")
from rx_path import rx_path
from zmq_socket import zmq_pubsub_iface
from parse import *
from msprint import output_print
from sql import output_sql
from sbs1 import output_sbs1
from kml import output_kml, output_jsonp
from raw_server import raw_server
from radio import modes_radio
from exceptions import *
from az_map import *
from types import *
from altitude import *
from cpr import cpr_decoder
from html_template import html_template
#this is try/excepted in case the user doesn't have numpy installed
try:
from flightgear import output_flightgear
from Quaternion import *
except ImportError:
print "gr-air-modes warning: numpy+scipy not installed, FlightGear interface not supported"
pass
# ----------------------------------------------------------------
# Tail of workaround
if _RTLD_GLOBAL != 0:
sys.setdlopenflags(_dlopenflags) # Restore original flags
# ----------------------------------------------------------------
| koppa/gr-air-modes | python/__init__.py | Python | gpl-3.0 | 3,039 |
from bedrock.redirects.util import redirect
# Legacy newsletter URLs redirected to their current homes; each pattern
# tolerates an optional trailing "/" or "/index.html".
redirectpatterns = (
    # bug 926629
    redirect(r'^newsletter/about_mobile(?:/(?:index\.html)?)?$', 'newsletter.subscribe'),
    redirect(r'^newsletter/about_mozilla(?:/(?:index\.html)?)?$', 'mozorg.contribute.index'),
    redirect(r'^newsletter/new(?:/(?:index\.html)?)?$', 'newsletter.subscribe'),
    redirect(r'^newsletter/ios(?:/(?:index\.html)?)?$', 'firefox.mobile.index'),
)
| sgarrity/bedrock | bedrock/newsletter/redirects.py | Python | mpl-2.0 | 432 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from blinker import Namespace
namespace = Namespace()
#: Triggered when a dataset is published
on_dataset_published = namespace.signal('on-dataset-published')
| jphnoel/udata | udata/core/dataset/signals.py | Python | agpl-3.0 | 226 |
"""
This is the default template for our main set of AWS servers.
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=W0401, W0614
import json
from .common import *
from logsettings import get_logger_config
import os
# The service variant is specified as an environment variable. Typically this
# is set in the service's upstart script and corresponds exactly to the
# service name. Service variants apply config differences via env and auth
# JSON files, the names of which correspond to the variant.
SERVICE_VARIANT = os.environ.get('SERVICE_VARIANT', None)
# When no variant is specified we attempt to load an unvaried
# config set.
CONFIG_PREFIX = ""
if SERVICE_VARIANT:
CONFIG_PREFIX = SERVICE_VARIANT + "."
############### ALWAYS THE SAME ################################
DEBUG = False
TEMPLATE_DEBUG = False
EMAIL_BACKEND = 'django_ses.SESBackend'
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
###################################### CELERY ################################
# Don't use a connection pool, since connections are dropped by ELB.
BROKER_POOL_LIMIT = 0
BROKER_CONNECTION_TIMEOUT = 1
# For the Result Store, use the django cache named 'celery'
CELERY_RESULT_BACKEND = 'cache'
CELERY_CACHE_BACKEND = 'celery'
# When the broker is behind an ELB, use a heartbeat to refresh the
# connection and to detect if it has been dropped.
BROKER_HEARTBEAT = 10.0
BROKER_HEARTBEAT_CHECKRATE = 2
# Each worker should only fetch one message at a time
CELERYD_PREFETCH_MULTIPLIER = 1
# Skip djcelery migrations, since we don't use the database as the broker
SOUTH_MIGRATION_MODULES = {
'djcelery': 'ignore',
}
# Rename the exchange and queues for each variant
QUEUE_VARIANT = CONFIG_PREFIX.lower()
CELERY_DEFAULT_EXCHANGE = 'edx.{0}core'.format(QUEUE_VARIANT)
HIGH_PRIORITY_QUEUE = 'edx.{0}core.high'.format(QUEUE_VARIANT)
DEFAULT_PRIORITY_QUEUE = 'edx.{0}core.default'.format(QUEUE_VARIANT)
LOW_PRIORITY_QUEUE = 'edx.{0}core.low'.format(QUEUE_VARIANT)
CELERY_DEFAULT_QUEUE = DEFAULT_PRIORITY_QUEUE
CELERY_DEFAULT_ROUTING_KEY = DEFAULT_PRIORITY_QUEUE
CELERY_QUEUES = {
HIGH_PRIORITY_QUEUE: {},
LOW_PRIORITY_QUEUE: {},
DEFAULT_PRIORITY_QUEUE: {}
}
############# NON-SECURE ENV CONFIG ##############################
# Things like server locations, ports, etc.
# Reads <CONFIG_PREFIX>env.json from ENV_ROOT and overrides the defaults
# defined earlier in this settings module.

with open(ENV_ROOT / CONFIG_PREFIX + "env.json") as env_file:
    ENV_TOKENS = json.load(env_file)

EMAIL_BACKEND = ENV_TOKENS.get('EMAIL_BACKEND', EMAIL_BACKEND)
EMAIL_FILE_PATH = ENV_TOKENS.get('EMAIL_FILE_PATH', None)

LMS_BASE = ENV_TOKENS.get('LMS_BASE')
# Note that MITX_FEATURES['PREVIEW_LMS_BASE'] gets read in from the environment file.

SITE_NAME = ENV_TOKENS['SITE_NAME']

LOG_DIR = ENV_TOKENS['LOG_DIR']

CACHES = ENV_TOKENS['CACHES']

SESSION_COOKIE_DOMAIN = ENV_TOKENS.get('SESSION_COOKIE_DOMAIN')
SESSION_ENGINE = ENV_TOKENS.get('SESSION_ENGINE', SESSION_ENGINE)

# allow for environments to specify what cookie name our login subsystem should use
# this is to fix a bug regarding simultaneous logins between edx.org and edge.edx.org which can
# happen with some browsers (e.g. Firefox)
if ENV_TOKENS.get('SESSION_COOKIE_NAME', None):
    # NOTE, there's a bug in Django (http://bugs.python.org/issue18012) which necessitates this being a str()
    SESSION_COOKIE_NAME = str(ENV_TOKENS.get('SESSION_COOKIE_NAME'))

# Email overrides
DEFAULT_FROM_EMAIL = ENV_TOKENS.get('DEFAULT_FROM_EMAIL', DEFAULT_FROM_EMAIL)
DEFAULT_FEEDBACK_EMAIL = ENV_TOKENS.get('DEFAULT_FEEDBACK_EMAIL', DEFAULT_FEEDBACK_EMAIL)
ADMINS = ENV_TOKENS.get('ADMINS', ADMINS)
SERVER_EMAIL = ENV_TOKENS.get('SERVER_EMAIL', SERVER_EMAIL)
MKTG_URLS = ENV_TOKENS.get('MKTG_URLS', MKTG_URLS)
TECH_SUPPORT_EMAIL = ENV_TOKENS.get('TECH_SUPPORT_EMAIL', TECH_SUPPORT_EMAIL)

COURSES_WITH_UNSAFE_CODE = ENV_TOKENS.get("COURSES_WITH_UNSAFE_CODE", [])

# Timezone overrides
TIME_ZONE = ENV_TOKENS.get('TIME_ZONE', TIME_ZONE)

# Feature flags from the environment file override the in-code defaults.
for feature, value in ENV_TOKENS.get('MITX_FEATURES', {}).items():
    MITX_FEATURES[feature] = value

LOGGING = get_logger_config(LOG_DIR,
                            logging_env=ENV_TOKENS['LOGGING_ENV'],
                            syslog_addr=(ENV_TOKENS['SYSLOG_SERVER'], 514),
                            debug=False,
                            service_variant=SERVICE_VARIANT)

# theming start:
PLATFORM_NAME = ENV_TOKENS.get('PLATFORM_NAME', 'edX')

# Event Tracking
if "TRACKING_IGNORE_URL_PATTERNS" in ENV_TOKENS:
    TRACKING_IGNORE_URL_PATTERNS = ENV_TOKENS.get("TRACKING_IGNORE_URL_PATTERNS")
################ SECURE AUTH ITEMS ###############################
# Secret things: passwords, access keys, etc.
# Reads <CONFIG_PREFIX>auth.json from ENV_ROOT; these values must never be
# committed to source control.

with open(ENV_ROOT / CONFIG_PREFIX + "auth.json") as auth_file:
    AUTH_TOKENS = json.load(auth_file)

# If Segment.io key specified, load it and turn on Segment.io if the feature flag is set
# Note that this is the Studio key. There is a separate key for the LMS.
SEGMENT_IO_KEY = AUTH_TOKENS.get('SEGMENT_IO_KEY')
if SEGMENT_IO_KEY:
    MITX_FEATURES['SEGMENT_IO'] = ENV_TOKENS.get('SEGMENT_IO', False)

# Required: missing AWS keys are a deployment error, so use [] not .get().
AWS_ACCESS_KEY_ID = AUTH_TOKENS["AWS_ACCESS_KEY_ID"]
AWS_SECRET_ACCESS_KEY = AUTH_TOKENS["AWS_SECRET_ACCESS_KEY"]

DATABASES = AUTH_TOKENS['DATABASES']

MODULESTORE = AUTH_TOKENS['MODULESTORE']
CONTENTSTORE = AUTH_TOKENS['CONTENTSTORE']

# Datadog for events!
# Secret part (api key) comes from auth.json, non-secret part from env.json.
DATADOG = AUTH_TOKENS.get("DATADOG", {})
DATADOG.update(ENV_TOKENS.get("DATADOG", {}))

# TODO: deprecated (compatibility with previous settings)
if 'DATADOG_API' in AUTH_TOKENS:
    DATADOG['api_key'] = AUTH_TOKENS['DATADOG_API']

# Celery Broker
CELERY_BROKER_TRANSPORT = ENV_TOKENS.get("CELERY_BROKER_TRANSPORT", "")
CELERY_BROKER_HOSTNAME = ENV_TOKENS.get("CELERY_BROKER_HOSTNAME", "")
CELERY_BROKER_VHOST = ENV_TOKENS.get("CELERY_BROKER_VHOST", "")
CELERY_BROKER_USER = AUTH_TOKENS.get("CELERY_BROKER_USER", "")
CELERY_BROKER_PASSWORD = AUTH_TOKENS.get("CELERY_BROKER_PASSWORD", "")

# e.g. "amqp://user:password@host/vhost"
BROKER_URL = "{0}://{1}:{2}@{3}/{4}".format(CELERY_BROKER_TRANSPORT,
                                            CELERY_BROKER_USER,
                                            CELERY_BROKER_PASSWORD,
                                            CELERY_BROKER_HOSTNAME,
                                            CELERY_BROKER_VHOST)

# Event tracking
TRACKING_BACKENDS.update(AUTH_TOKENS.get("TRACKING_BACKENDS", {}))
| morpheby/levelup-by | cms/envs/aws.py | Python | agpl-3.0 | 6,436 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2016 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __openerp__.py
#
##############################################################################
from openerp import api, models, fields, _
from openerp.exceptions import ValidationError
import logging
logger = logging.getLogger(__name__)
class CommunicationConfig(models.Model):
    """ This class allows to configure if and how we will inform the
    sponsor when a given event occurs. """
    _name = 'partner.communication.config'
    _description = 'Communication Configuration'

    ##########################################################################
    #                                 FIELDS                                 #
    ##########################################################################
    # Human-readable rule name
    name = fields.Char(
        required=True, help='Rule name')
    # Either a fixed delivery mode, or 'partner_preference' to look the
    # mode up on the partner record (see send_mode_pref_field).
    send_mode = fields.Selection('_get_send_mode', required=True)
    send_mode_pref_field = fields.Char(
        'Partner preference field',
        help='Name of the field in res.partner in which to find the '
             'delivery preference'
    )
    print_if_not_email = fields.Boolean(
        help="Should we print the communication if the sponsor don't have "
             "an e-mail address"
    )
    email_template_id = fields.Many2one(
        'email.template', 'Email template')
    report_id = fields.Many2one(
        'ir.actions.report.xml', 'Letter template')
    from_employee_id = fields.Many2one(
        'hr.employee', 'Communication From',
        help='The sponsor will receive the communication from this employee'
    )

    ##########################################################################
    #                             FIELDS METHODS                             #
    ##########################################################################
    @api.one
    @api.constrains('send_mode_pref_field')
    def _validate_config(self):
        """ Test if the config is valid.

        :raises ValidationError: if send_mode_pref_field names a field that
            does not exist on res.partner.
        """
        valid = True
        if self.send_mode_pref_field:
            valid = hasattr(self.env['res.partner'], self.send_mode_pref_field)
        if not valid:
            raise ValidationError(
                "Following field does not exist in res.partner: %s." %
                self.send_mode_pref_field
            )

    def _get_send_mode(self):
        """ Selection values: the common delivery preferences plus the
        'look it up on the partner' option. """
        send_modes = self.get_delivery_preferences()
        send_modes.append(
            ('partner_preference', _('Partner specific'))
        )
        return send_modes

    ##########################################################################
    #                             PUBLIC METHODS                             #
    ##########################################################################
    @api.model
    def get_delivery_preferences(self):
        """ Common delivery preference selection values. """
        return [
            ('none', _("Don't inform sponsor")),
            ('auto_digital', _('Send e-mail automatically')),
            ('digital', _('Prepare e-mail (sent manually)')),
            ('auto_physical', _('Print letter automatically')),
            ('physical', _('Prepare report (print manually)')),
        ]

    def get_inform_mode(self, partner):
        """ Returns how the partner should be informed for the given
        communication (digital, physical or False).
        :param partner: res.partner record
        :returns: send_mode (auto/digital/False), auto_mode (True/False)
        """
        self.ensure_one()
        if self.send_mode != 'partner_preference':
            send_mode = self.send_mode
        else:
            send_mode = getattr(
                partner, self.send_mode_pref_field, False)
        # BUG FIX: when the partner preference field is missing or empty,
        # getattr returns False and the original code crashed on
        # "'auto' in False" (TypeError). Treat it like 'none'.
        if not send_mode or send_mode == 'none':
            return False, False
        auto_mode = 'auto' in send_mode
        send_mode = send_mode.replace('auto_', '')
        if send_mode == 'digital' and not partner.email:
            # Fall back to a printed letter if configured to do so.
            if self.print_if_not_email:
                send_mode = 'physical'
            else:
                send_mode = False
        return send_mode, auto_mode
| MickSandoz/compassion-modules | partner_communication/models/communication_config.py | Python | agpl-3.0 | 4,257 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyOnnx(PythonPackage):
    """Open Neural Network Exchange (ONNX) is an open ecosystem that
    empowers AI developers to choose the right tools as their
    project evolves. ONNX provides an open source format for AI
    models, both deep learning and traditional ML. It defines an
    extensible computation graph model, as well as definitions of
    built-in operators and standard data types. Currently we focus
    on the capabilities needed for inferencing (scoring)."""

    homepage = "https://github.com/onnx/onnx"
    pypi = "Onnx/onnx-1.6.0.tar.gz"

    version('1.6.0', sha256='3b88c3fe521151651a0403c4d131cb2e0311bd28b753ef692020a432a81ce345')
    version('1.5.0', sha256='1a584a4ef62a6db178c257fffb06a9d8e61b41c0a80bfd8bcd8a253d72c4b0b4')

    depends_on('py-setuptools', type='build')
    # Protobuf version limit is due to https://github.com/protocolbuffers/protobuf/pull/8794
    depends_on('protobuf@:3.17')
    depends_on('py-protobuf+cpp@:3.17', type=('build', 'run'))
    depends_on('py-numpy', type=('build', 'run'))
    depends_on('py-six', type=('build', 'run'))
    # 'typing' backport is only needed on Pythons without a stdlib typing module.
    depends_on('py-typing@3.6.4:', when='^python@:3.4', type=('build', 'run'))
    depends_on('py-typing-extensions@3.6.2.1:', type=('build', 'run'))
    depends_on('cmake@3.1:', type='build')
    depends_on('py-pytest-runner', type='build')

    # 'python_out' does not recognize dllexport_decl.
    patch('remove_dllexport_decl.patch', when='@:1.6.0')
| LLNL/spack | var/spack/repos/builtin/packages/py-onnx/package.py | Python | lgpl-2.1 | 1,686 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class XapianCore(AutotoolsPackage):
    """Xapian is a highly adaptable toolkit which allows developers to easily
    add advanced indexing and search facilities to their own applications.
    It supports the Probabilistic Information Retrieval model and also
    supports a rich set of boolean query operators."""

    homepage = "https://xapian.org"
    url = "http://oligarchy.co.uk/xapian/1.4.3/xapian-core-1.4.3.tar.xz"

    # Checksum is an md5 digest (legacy Spack checksum style).
    version('1.4.3', '143f72693219f7fc5913815ed858f295')

    depends_on('zlib')
| TheTimmy/spack | var/spack/repos/builtin/packages/xapian-core/package.py | Python | lgpl-2.1 | 1,768 |
from __future__ import print_function, division
import warnings
from itertools import permutations
import hmmlearn.hmm
import numpy as np
import pickle
import tempfile
from sklearn.pipeline import Pipeline
from msmbuilder.example_datasets import AlanineDipeptide
from msmbuilder.featurizer import SuperposeFeaturizer
from msmbuilder.hmm import GaussianHMM
rs = np.random.RandomState(42)
def test_ala2():
    # creates a 4-state HMM on the ALA2 data. Nothing fancy, just makes
    # sure the code runs without erroring out
    trajectories = AlanineDipeptide().get_cached().trajectories
    topology = trajectories[0].topology
    indices = topology.select('symbol C or symbol O or symbol N')
    featurizer = SuperposeFeaturizer(indices, trajectories[0][0])
    sequences = featurizer.transform(trajectories)
    hmm = GaussianHMM(n_states=4, n_init=3, random_state=rs)
    hmm.fit(sequences)

    # BUG FIX: the original line was `len(hmm.timescales_ == 3)`, which
    # takes the length of a boolean comparison array and is always truthy.
    # The intent is that a 4-state model has 3 implied timescales.
    assert len(hmm.timescales_) == 3
    assert np.any(hmm.timescales_ > 50)
def create_timeseries(means, vars, transmat):
    """Construct a random timeseries based on a specified Markov model."""
    n_components = len(means)
    with warnings.catch_warnings():
        # hmmlearn emits DeprecationWarnings we don't care about here.
        warnings.filterwarnings("ignore", category=DeprecationWarning)
        sampler = hmmlearn.hmm.GaussianHMM(n_components=n_components,
                                           random_state=rs)
        sampler.startprob_ = np.ones_like(means) / n_components
        sampler.means_ = means
        sampler.covars_ = vars
        sampler.transmat_ = transmat
        observations, _states = sampler.sample(1000)
    return observations
def validate_timeseries(means, vars, transmat, model,
                        valuetol=1e-3, transmattol=1e-3):
    """Whether our model matches the one used to create the timeseries."""
    n_states = len(means)
    assert len(model.means_) == n_states
    # Transition matrix must contain probabilities whose rows sum to 1.
    assert (model.transmat_ >= 0.0).all()
    assert (model.transmat_ <= 1.0).all()
    row_sums = sum(model.transmat_.T)
    assert (abs(row_sums - 1.0) < 1e-5).all()

    # The fitted states may be a relabeling of the reference states, so
    # accept any permutation that lines all parameters up within tolerance.
    for order in permutations(range(n_states)):
        values_match = all(
            abs(means[i] - model.means_[order[i]]) <= valuetol and
            abs(vars[i] - model.vars_[order[i]]) <= valuetol
            for i in range(n_states))
        transitions_match = all(
            abs(transmat[i, j] - model.transmat_[order[i], order[j]])
            <= transmattol
            for i in range(n_states) for j in range(n_states))
        if values_match and transitions_match:
            # Found a matching relabeling.
            return
    # No permutation matched.
    assert False
def test_2_state():
    transmat = np.array([[0.7, 0.3], [0.4, 0.6]])
    means = np.array([[0.0], [5.0]])
    vars = np.array([[1.0], [1.0]])
    X = [create_timeseries(means, vars, transmat) for i in range(10)]

    # For each value of various options,
    # create a 2 state HMM and see if it is correct.

    class two_state_tester(object):
        def __init__(self, init_algo, reversible_type):
            self.init_algo = init_algo
            self.reversible_type = reversible_type
            # BUG FIX: the description previously said "test_3_state",
            # mislabeling these generated cases in the test output.
            self.description = ("{}.test_2_state_{}_{}"
                                .format(__name__, init_algo, reversible_type))

        def __call__(self, *args, **kwargs):
            model = GaussianHMM(n_states=2, init_algo=self.init_algo,
                                reversible_type=self.reversible_type,
                                thresh=1e-4, n_iter=30, random_state=rs)
            model.fit(X)
            validate_timeseries(means, vars, transmat, model, 0.1, 0.05)
            assert abs(model.fit_logprob_[-1] - model.score(X)) < 0.5

    for init_algo in ('kmeans', 'GMM'):
        for reversible_type in ('mle', 'transpose'):
            yield two_state_tester(init_algo, reversible_type)
def test_3_state():
    transmat = np.array([[0.2, 0.3, 0.5], [0.4, 0.4, 0.2], [0.8, 0.2, 0.0]])
    means = np.array([[0.0], [10.0], [5.0]])
    vars = np.array([[1.0], [2.0], [0.3]])
    X = [create_timeseries(means, vars, transmat) for i in range(20)]

    # For each value of various options,
    # create a 3 state HMM and see if it is correct.

    class three_state_tester(object):
        def __init__(self, init_algo, reversible_type):
            self.init_algo = init_algo
            self.reversible_type = reversible_type
            # BUG FIX: the description previously said "test_2_state",
            # mislabeling these generated cases in the test output.
            self.description = ("{}.test_3_state_{}_{}"
                                .format(__name__, init_algo, reversible_type))

        def __call__(self, *args, **kwargs):
            model = GaussianHMM(n_states=3, init_algo=self.init_algo,
                                reversible_type=self.reversible_type,
                                thresh=1e-4, n_iter=30, random_state=rs)
            model.fit(X)
            validate_timeseries(means, vars, transmat, model, 0.1, 0.1)
            assert abs(model.fit_logprob_[-1] - model.score(X)) < 0.5

    for init_algo in ('kmeans', 'GMM'):
        for reversible_type in ('mle', 'transpose'):
            yield three_state_tester(init_algo, reversible_type)
def test_pipeline():
    """Smoke-test GaussianHMM as the final step of a sklearn Pipeline."""
    trajectories = AlanineDipeptide().get_cached().trajectories
    backbone = trajectories[0].topology.select('backbone')
    pipeline = Pipeline([
        ('diheds', SuperposeFeaturizer(backbone, trajectories[0][0])),
        ('hmm', GaussianHMM(n_states=4)),
    ])
    pipeline.fit_predict(trajectories)
    pipeline.named_steps['hmm'].summarize()
def test_pickle():
    """Test pickling an HMM"""
    trajectories = AlanineDipeptide().get_cached().trajectories
    heavy = trajectories[0].topology.select('symbol C or symbol O or symbol N')
    featurizer = SuperposeFeaturizer(heavy, trajectories[0][0])
    sequences = featurizer.transform(trajectories)
    model = GaussianHMM(n_states=4, n_init=3, random_state=rs)
    model.fit(sequences)
    logprob, hidden = model.predict(sequences)

    # Round-trip the fitted model through pickle and check that the
    # restored copy predicts the same log-probability.
    with tempfile.TemporaryFile() as savefile:
        pickle.dump(model, savefile)
        savefile.seek(0, 0)
        restored = pickle.load(savefile)
        logprob2, hidden2 = restored.predict(sequences)
    assert logprob == logprob2
| msmbuilder/msmbuilder | msmbuilder/tests/test_ghmm.py | Python | lgpl-2.1 | 6,280 |
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class REvd(RPackage):
    """evd: Functions for Extreme Value Distributions"""

    homepage = "https://cloud.r-project.org/package=evd"
    url = "https://cloud.r-project.org/src/contrib/evd_2.3-3.tar.gz"
    # Older releases move to the Archive directory on CRAN.
    list_url = "https://cloud.r-project.org/src/contrib/Archive/evd"

    version('2.3-3', sha256='2fc5ef2e0c3a2a9392425ddd45914445497433d90fb80b8c363877baee4559b4')
| iulian787/spack | var/spack/repos/builtin/packages/r-evd/package.py | Python | lgpl-2.1 | 597 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.amazon.aws.transfers.imap_attachment_to_s3`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.amazon.aws.transfers.imap_attachment_to_s3 import ImapAttachmentToS3Operator # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.amazon.aws.transfers.imap_attachment_to_s3`.",
DeprecationWarning,
stacklevel=2,
)
| airbnb/airflow | airflow/contrib/operators/imap_attachment_to_s3_operator.py | Python | apache-2.0 | 1,222 |
import unittest
from collections import deque
import datetime
import sys
import os
import StringIO
from south import exceptions
from south.migration import migrate_app
from south.migration.base import all_migrations, Migration, Migrations
from south.migration.utils import depends, dfs, flatten, get_app_label
from south.models import MigrationHistory
from south.tests import Monkeypatcher
from south.db import db
class TestBrokenMigration(Monkeypatcher):
    """Dependency calculation over deliberately broken migrations must fail."""

    # "brokenapp" contains migrations with bad dependency declarations.
    installed_apps = ["fakeapp", "otherfakeapp", "brokenapp"]

    def test_broken_dependencies(self):
        # Depending on an application that has no migrations is an error.
        self.assertRaises(
            exceptions.DependsOnUnmigratedApplication,
            Migrations.calculate_dependencies,
            force=True,
        )
        #depends_on_unknown = self.brokenapp['0002_depends_on_unknown']
        #self.assertRaises(exceptions.DependsOnUnknownMigration,
        #                  depends_on_unknown.dependencies)
        #depends_on_higher = self.brokenapp['0003_depends_on_higher']
        #self.assertRaises(exceptions.DependsOnHigherMigration,
        #                  depends_on_higher.dependencies)
class TestMigration(Monkeypatcher):
    """Unit tests for single Migration objects: naming, ordering,
    dependency detection and forwards planning."""

    installed_apps = ["fakeapp", "otherfakeapp"]

    def setUp(self):
        super(TestMigration, self).setUp()
        self.fakeapp = Migrations('fakeapp')
        self.otherfakeapp = Migrations('otherfakeapp')
        Migrations.calculate_dependencies(force=True)

    def test_str(self):
        # str() is "<app_label>:<migration name>"
        migrations = [str(m) for m in self.fakeapp]
        self.assertEqual(['fakeapp:0001_spam',
                          'fakeapp:0002_eggs',
                          'fakeapp:0003_alter_spam'],
                         migrations)

    def test_repr(self):
        migrations = [repr(m) for m in self.fakeapp]
        self.assertEqual(['<Migration: fakeapp:0001_spam>',
                          '<Migration: fakeapp:0002_eggs>',
                          '<Migration: fakeapp:0003_alter_spam>'],
                         migrations)

    def test_app_label(self):
        self.assertEqual(['fakeapp', 'fakeapp', 'fakeapp'],
                         [m.app_label() for m in self.fakeapp])

    def test_name(self):
        self.assertEqual(['0001_spam', '0002_eggs', '0003_alter_spam'],
                         [m.name() for m in self.fakeapp])

    def test_full_name(self):
        # Dotted module path of each migration file.
        self.assertEqual(['fakeapp.migrations.0001_spam',
                          'fakeapp.migrations.0002_eggs',
                          'fakeapp.migrations.0003_alter_spam'],
                         [m.full_name() for m in self.fakeapp])

    def test_migration(self):
        # Can't use vanilla import, modules beginning with numbers aren't in grammar
        M1 = __import__("fakeapp.migrations.0001_spam", {}, {}, ['Migration']).Migration
        M2 = __import__("fakeapp.migrations.0002_eggs", {}, {}, ['Migration']).Migration
        M3 = __import__("fakeapp.migrations.0003_alter_spam", {}, {}, ['Migration']).Migration
        self.assertEqual([M1, M2, M3],
                         [m.migration().Migration for m in self.fakeapp])
        # Asking for a migration file that doesn't exist must raise.
        self.assertRaises(exceptions.UnknownMigration,
                          self.fakeapp['9999_unknown'].migration)

    def test_previous(self):
        # previous() is purely positional within the app (None for the first).
        self.assertEqual([None,
                          self.fakeapp['0001_spam'],
                          self.fakeapp['0002_eggs']],
                         [m.previous() for m in self.fakeapp])

    def test_dependencies(self):
        "Test that the dependency detection works."
        self.assertEqual([
                set([]),
                set([self.fakeapp['0001_spam']]),
                set([self.fakeapp['0002_eggs']])
            ],
            [m.dependencies for m in self.fakeapp],
        )
        self.assertEqual([
                set([self.fakeapp['0001_spam']]),
                set([self.otherfakeapp['0001_first']]),
                set([
                    self.otherfakeapp['0002_second'],
                    self.fakeapp['0003_alter_spam'],
                ])
            ],
            [m.dependencies for m in self.otherfakeapp],
        )

    def test_forwards_plan(self):
        # The forwards plan is the ordered list of migrations (including
        # cross-app dependencies) needed to reach each target.
        self.assertEqual([
                [self.fakeapp['0001_spam']],
                [
                    self.fakeapp['0001_spam'],
                    self.fakeapp['0002_eggs']
                ],
                [
                    self.fakeapp['0001_spam'],
                    self.fakeapp['0002_eggs'],
                    self.fakeapp['0003_alter_spam'],
                ]
            ],
            [m.forwards_plan() for m in self.fakeapp],
        )
        self.assertEqual([
                [
                    self.fakeapp['0001_spam'],
                    self.otherfakeapp['0001_first']
                ],
                [
                    self.fakeapp['0001_spam'],
                    self.otherfakeapp['0001_first'],
                    self.otherfakeapp['0002_second']
                ],
                [
                    self.fakeapp['0001_spam'],
                    self.otherfakeapp['0001_first'],
                    self.otherfakeapp['0002_second'],
                    self.fakeapp['0002_eggs'],
                    self.fakeapp['0003_alter_spam'],
                    self.otherfakeapp['0003_third'],
                ]
            ],
            [m.forwards_plan() for m in self.otherfakeapp],
        )

    def test_is_before(self):
        F1 = self.fakeapp['0001_spam']
        F2 = self.fakeapp['0002_eggs']
        F3 = self.fakeapp['0003_alter_spam']
        O1 = self.otherfakeapp['0001_first']
        O2 = self.otherfakeapp['0002_second']
        O3 = self.otherfakeapp['0003_third']
        self.assertTrue(F1.is_before(F2))
        self.assertTrue(F1.is_before(F3))
        self.assertTrue(F2.is_before(F3))
        self.assertEqual(O3.is_before(O1), False)
        self.assertEqual(O3.is_before(O2), False)
        self.assertEqual(O2.is_before(O2), False)
        self.assertEqual(O2.is_before(O1), False)
        # Ordering across different apps is undefined -> None.
        self.assertEqual(F2.is_before(O1), None)
        self.assertEqual(F2.is_before(O2), None)
        self.assertEqual(F2.is_before(O3), None)
class TestMigrationDependencies(Monkeypatcher):
    """Exercises dependency/dependent resolution and forwards/backwards
    planning across three interdependent apps (deps_a, deps_b, deps_c)."""

    installed_apps = ['deps_a', 'deps_b', 'deps_c']

    def setUp(self):
        super(TestMigrationDependencies, self).setUp()
        self.deps_a = Migrations('deps_a')
        self.deps_b = Migrations('deps_b')
        self.deps_c = Migrations('deps_c')
        Migrations.calculate_dependencies(force=True)

    def test_dependencies(self):
        # Expected direct dependencies of each migration, per app.
        self.assertEqual(
            [
                set([]),
                set([self.deps_a['0001_a']]),
                set([self.deps_a['0002_a']]),
                set([
                    self.deps_a['0003_a'],
                    self.deps_b['0003_b'],
                ]),
                set([self.deps_a['0004_a']]),
            ],
            [m.dependencies for m in self.deps_a],
        )
        self.assertEqual(
            [
                set([]),
                set([
                    self.deps_b['0001_b'],
                    self.deps_a['0002_a']
                ]),
                set([
                    self.deps_b['0002_b'],
                    self.deps_a['0003_a']
                ]),
                set([self.deps_b['0003_b']]),
                set([self.deps_b['0004_b']]),
            ],
            [m.dependencies for m in self.deps_b],
        )
        self.assertEqual(
            [
                set([]),
                set([self.deps_c['0001_c']]),
                set([self.deps_c['0002_c']]),
                set([self.deps_c['0003_c']]),
                set([
                    self.deps_c['0004_c'],
                    self.deps_a['0002_a']
                ]),
            ],
            [m.dependencies for m in self.deps_c],
        )

    def test_dependents(self):
        # Reverse edges of the dependency graph.
        self.assertEqual([set([self.deps_a['0002_a']]),
                          set([self.deps_c['0005_c'],
                               self.deps_b['0002_b'],
                               self.deps_a['0003_a']]),
                          set([self.deps_b['0003_b'],
                               self.deps_a['0004_a']]),
                          set([self.deps_a['0005_a']]),
                          set([])],
                         [m.dependents for m in self.deps_a])
        self.assertEqual([set([self.deps_b['0002_b']]),
                          set([self.deps_b['0003_b']]),
                          set([self.deps_b['0004_b'],
                               self.deps_a['0004_a']]),
                          set([self.deps_b['0005_b']]),
                          set([])],
                         [m.dependents for m in self.deps_b])
        self.assertEqual([set([self.deps_c['0002_c']]),
                          set([self.deps_c['0003_c']]),
                          set([self.deps_c['0004_c']]),
                          set([self.deps_c['0005_c']]),
                          set([])],
                         [m.dependents for m in self.deps_c])

    def test_forwards_plan(self):
        # Plans include cross-app prerequisites in application order.
        self.assertEqual([[self.deps_a['0001_a']],
                          [self.deps_a['0001_a'],
                           self.deps_a['0002_a']],
                          [self.deps_a['0001_a'],
                           self.deps_a['0002_a'],
                           self.deps_a['0003_a']],
                          [self.deps_b['0001_b'],
                           self.deps_a['0001_a'],
                           self.deps_a['0002_a'],
                           self.deps_b['0002_b'],
                           self.deps_a['0003_a'],
                           self.deps_b['0003_b'],
                           self.deps_a['0004_a']],
                          [self.deps_b['0001_b'],
                           self.deps_a['0001_a'],
                           self.deps_a['0002_a'],
                           self.deps_b['0002_b'],
                           self.deps_a['0003_a'],
                           self.deps_b['0003_b'],
                           self.deps_a['0004_a'],
                           self.deps_a['0005_a']]],
                         [m.forwards_plan() for m in self.deps_a])
        self.assertEqual([[self.deps_b['0001_b']],
                          [self.deps_b['0001_b'],
                           self.deps_a['0001_a'],
                           self.deps_a['0002_a'],
                           self.deps_b['0002_b']],
                          [self.deps_b['0001_b'],
                           self.deps_a['0001_a'],
                           self.deps_a['0002_a'],
                           self.deps_b['0002_b'],
                           self.deps_a['0003_a'],
                           self.deps_b['0003_b']],
                          [self.deps_b['0001_b'],
                           self.deps_a['0001_a'],
                           self.deps_a['0002_a'],
                           self.deps_b['0002_b'],
                           self.deps_a['0003_a'],
                           self.deps_b['0003_b'],
                           self.deps_b['0004_b']],
                          [self.deps_b['0001_b'],
                           self.deps_a['0001_a'],
                           self.deps_a['0002_a'],
                           self.deps_b['0002_b'],
                           self.deps_a['0003_a'],
                           self.deps_b['0003_b'],
                           self.deps_b['0004_b'],
                           self.deps_b['0005_b']]],
                         [m.forwards_plan() for m in self.deps_b])
        self.assertEqual([[self.deps_c['0001_c']],
                          [self.deps_c['0001_c'],
                           self.deps_c['0002_c']],
                          [self.deps_c['0001_c'],
                           self.deps_c['0002_c'],
                           self.deps_c['0003_c']],
                          [self.deps_c['0001_c'],
                           self.deps_c['0002_c'],
                           self.deps_c['0003_c'],
                           self.deps_c['0004_c']],
                          [self.deps_c['0001_c'],
                           self.deps_c['0002_c'],
                           self.deps_c['0003_c'],
                           self.deps_c['0004_c'],
                           self.deps_a['0001_a'],
                           self.deps_a['0002_a'],
                           self.deps_c['0005_c']]],
                         [m.forwards_plan() for m in self.deps_c])

    def test_backwards_plan(self):
        # Backwards plans unwind dependents before their dependencies.
        self.assertEqual([
            [
                self.deps_c['0005_c'],
                self.deps_b['0005_b'],
                self.deps_b['0004_b'],
                self.deps_a['0005_a'],
                self.deps_a['0004_a'],
                self.deps_b['0003_b'],
                self.deps_b['0002_b'],
                self.deps_a['0003_a'],
                self.deps_a['0002_a'],
                self.deps_a['0001_a'],
            ],
            [
                self.deps_c['0005_c'],
                self.deps_b['0005_b'],
                self.deps_b['0004_b'],
                self.deps_a['0005_a'],
                self.deps_a['0004_a'],
                self.deps_b['0003_b'],
                self.deps_b['0002_b'],
                self.deps_a['0003_a'],
                self.deps_a['0002_a'],
            ],
            [
                self.deps_b['0005_b'],
                self.deps_b['0004_b'],
                self.deps_a['0005_a'],
                self.deps_a['0004_a'],
                self.deps_b['0003_b'],
                self.deps_a['0003_a'],
            ],
            [
                self.deps_a['0005_a'],
                self.deps_a['0004_a'],
            ],
            [
                self.deps_a['0005_a'],
            ]
        ], [m.backwards_plan() for m in self.deps_a])
        self.assertEqual([
            [
                self.deps_b['0005_b'],
                self.deps_b['0004_b'],
                self.deps_a['0005_a'],
                self.deps_a['0004_a'],
                self.deps_b['0003_b'],
                self.deps_b['0002_b'],
                self.deps_b['0001_b'],
            ],
            [
                self.deps_b['0005_b'],
                self.deps_b['0004_b'],
                self.deps_a['0005_a'],
                self.deps_a['0004_a'],
                self.deps_b['0003_b'],
                self.deps_b['0002_b'],
            ],
            [
                self.deps_b['0005_b'],
                self.deps_b['0004_b'],
                self.deps_a['0005_a'],
                self.deps_a['0004_a'],
                self.deps_b['0003_b'],
            ],
            [
                self.deps_b['0005_b'],
                self.deps_b['0004_b'],
            ],
            [
                self.deps_b['0005_b'],
            ],
        ], [m.backwards_plan() for m in self.deps_b])
        self.assertEqual([
            [
                self.deps_c['0005_c'],
                self.deps_c['0004_c'],
                self.deps_c['0003_c'],
                self.deps_c['0002_c'],
                self.deps_c['0001_c'],
            ],
            [
                self.deps_c['0005_c'],
                self.deps_c['0004_c'],
                self.deps_c['0003_c'],
                self.deps_c['0002_c'],
            ],
            [
                self.deps_c['0005_c'],
                self.deps_c['0004_c'],
                self.deps_c['0003_c'],
            ],
            [
                self.deps_c['0005_c'],
                self.deps_c['0004_c'],
            ],
            [self.deps_c['0005_c']]
        ], [m.backwards_plan() for m in self.deps_c])
class TestCircularDependencies(Monkeypatcher):
    """Circular dependencies between apps must be detected, not looped."""

    installed_apps = ["circular_a", "circular_b"]

    def test_plans(self):
        Migrations.calculate_dependencies(force=True)
        # Planning in either direction, from either side of the cycle,
        # must raise CircularDependency.
        for app_name in ("circular_a", "circular_b"):
            last_migration = Migrations(app_name)[-1]
            self.assertRaises(
                exceptions.CircularDependency,
                last_migration.forwards_plan,
            )
            self.assertRaises(
                exceptions.CircularDependency,
                last_migration.backwards_plan,
            )
class TestMigrations(Monkeypatcher):
    """Tests for the Migrations container: discovery, lookup and
    prefix-based guessing of migration names."""

    installed_apps = ["fakeapp", "otherfakeapp"]

    def test_all(self):
        M1 = Migrations(__import__("fakeapp", {}, {}, ['']))
        M2 = Migrations(__import__("otherfakeapp", {}, {}, ['']))
        self.assertEqual(
            [M1, M2],
            list(all_migrations()),
        )

    def test(self):
        # Migrations() accepts a module, an app label, or an app object
        # and resolves them all to the same instance.
        M1 = Migrations(__import__("fakeapp", {}, {}, ['']))
        self.assertEqual(M1, Migrations("fakeapp"))
        self.assertEqual(M1, Migrations(self.create_fake_app("fakeapp")))

    def test_application(self):
        fakeapp = Migrations("fakeapp")
        application = __import__("fakeapp", {}, {}, [''])
        self.assertEqual(application, fakeapp.application)

    def test_migration(self):
        # Can't use vanilla import, modules beginning with numbers aren't in grammar
        M1 = __import__("fakeapp.migrations.0001_spam", {}, {}, ['Migration']).Migration
        M2 = __import__("fakeapp.migrations.0002_eggs", {}, {}, ['Migration']).Migration
        migration = Migrations('fakeapp')
        self.assertEqual(M1, migration['0001_spam'].migration().Migration)
        self.assertEqual(M2, migration['0002_eggs'].migration().Migration)
        self.assertRaises(exceptions.UnknownMigration,
                          migration['0001_jam'].migration)

    def test_guess_migration(self):
        # Can't use vanilla import, modules beginning with numbers aren't in grammar
        M1 = __import__("fakeapp.migrations.0001_spam", {}, {}, ['Migration']).Migration
        migration = Migrations('fakeapp')
        # Any unambiguous prefix of the name resolves to the migration...
        self.assertEqual(M1, migration.guess_migration("0001_spam").migration().Migration)
        self.assertEqual(M1, migration.guess_migration("0001_spa").migration().Migration)
        self.assertEqual(M1, migration.guess_migration("0001_sp").migration().Migration)
        self.assertEqual(M1, migration.guess_migration("0001_s").migration().Migration)
        self.assertEqual(M1, migration.guess_migration("0001_").migration().Migration)
        self.assertEqual(M1, migration.guess_migration("0001").migration().Migration)
        # ...while non-prefixes, ambiguous prefixes and over-long names fail.
        self.assertRaises(exceptions.UnknownMigration,
                          migration.guess_migration, "0001-spam")
        self.assertRaises(exceptions.MultiplePrefixMatches,
                          migration.guess_migration, "000")
        self.assertRaises(exceptions.MultiplePrefixMatches,
                          migration.guess_migration, "")
        self.assertRaises(exceptions.UnknownMigration,
                          migration.guess_migration, "0001_spams")
        self.assertRaises(exceptions.UnknownMigration,
                          migration.guess_migration, "0001_jam")

    def test_app_label(self):
        names = ['fakeapp', 'otherfakeapp']
        self.assertEqual(names,
                         [Migrations(n).app_label() for n in names])

    def test_full_name(self):
        names = ['fakeapp', 'otherfakeapp']
        self.assertEqual([n + '.migrations' for n in names],
                         [Migrations(n).full_name() for n in names])
class TestMigrationLogic(Monkeypatcher):
"""
Tests if the various logic functions in migration actually work.
"""
installed_apps = ["fakeapp", "otherfakeapp"]
def assertListEqual(self, list1, list2):
list1 = list(list1)
list2 = list(list2)
list1.sort()
list2.sort()
return self.assertEqual(list1, list2)
def test_find_ghost_migrations(self):
pass
def test_apply_migrations(self):
MigrationHistory.objects.all().delete()
migrations = Migrations("fakeapp")
# We should start with no migrations
self.assertEqual(list(MigrationHistory.objects.all()), [])
# Apply them normally
migrate_app(migrations, target_name=None, fake=False,
load_initial_data=True)
# We should finish with all migrations
self.assertListEqual(
((u"fakeapp", u"0001_spam"),
(u"fakeapp", u"0002_eggs"),
(u"fakeapp", u"0003_alter_spam"),),
MigrationHistory.objects.values_list("app_name", "migration"),
)
# Now roll them backwards
migrate_app(migrations, target_name="zero", fake=False)
# Finish with none
self.assertEqual(list(MigrationHistory.objects.all()), [])
def test_migration_merge_forwards(self):
MigrationHistory.objects.all().delete()
migrations = Migrations("fakeapp")
# We should start with no migrations
self.assertEqual(list(MigrationHistory.objects.all()), [])
# Insert one in the wrong order
MigrationHistory.objects.create(app_name = "fakeapp",
migration = "0002_eggs",
applied = datetime.datetime.now())
# Did it go in?
self.assertListEqual(
((u"fakeapp", u"0002_eggs"),),
MigrationHistory.objects.values_list("app_name", "migration"),
)
# Apply them normally
self.assertRaises(exceptions.InconsistentMigrationHistory,
migrate_app,
migrations, target_name=None, fake=False)
self.assertRaises(exceptions.InconsistentMigrationHistory,
migrate_app,
migrations, target_name='zero', fake=False)
try:
migrate_app(migrations, target_name=None, fake=False)
except exceptions.InconsistentMigrationHistory, e:
self.assertEqual(
[
(
migrations['0002_eggs'],
migrations['0001_spam'],
)
],
e.problems,
)
try:
migrate_app(migrations, target_name="zero", fake=False)
except exceptions.InconsistentMigrationHistory, e:
self.assertEqual(
[
(
migrations['0002_eggs'],
migrations['0001_spam'],
)
],
e.problems,
)
# Nothing should have changed (no merge mode!)
self.assertListEqual(
((u"fakeapp", u"0002_eggs"),),
MigrationHistory.objects.values_list("app_name", "migration"),
)
# Apply with merge
migrate_app(migrations, target_name=None, merge=True, fake=False)
# We should finish with all migrations
self.assertListEqual(
((u"fakeapp", u"0001_spam"),
(u"fakeapp", u"0002_eggs"),
(u"fakeapp", u"0003_alter_spam"),),
MigrationHistory.objects.values_list("app_name", "migration"),
)
# Now roll them backwards
migrate_app(migrations, target_name="0002", fake=False)
migrate_app(migrations, target_name="0001", fake=True)
migrate_app(migrations, target_name="zero", fake=False)
# Finish with none
self.assertEqual(list(MigrationHistory.objects.all()), [])
def test_alter_column_null(self):
def null_ok():
from django.db import connection, transaction
# the DBAPI introspection module fails on postgres NULLs.
cursor = connection.cursor()
# SQLite has weird now()
if db.backend_name == "sqlite3":
now_func = "DATETIME('NOW')"
else:
now_func = "NOW()"
try:
cursor.execute("INSERT INTO southtest_spam (id, weight, expires, name) VALUES (100, 10.1, %s, NULL);" % now_func)
except:
transaction.rollback()
return False
else:
cursor.execute("DELETE FROM southtest_spam")
transaction.commit()
return True
MigrationHistory.objects.all().delete()
migrations = Migrations("fakeapp")
# by default name is NOT NULL
migrate_app(migrations, target_name="0002", fake=False)
self.failIf(null_ok())
self.assertListEqual(
((u"fakeapp", u"0001_spam"),
(u"fakeapp", u"0002_eggs"),),
MigrationHistory.objects.values_list("app_name", "migration"),
)
# after 0003, it should be NULL
migrate_app(migrations, target_name="0003", fake=False)
self.assert_(null_ok())
self.assertListEqual(
((u"fakeapp", u"0001_spam"),
(u"fakeapp", u"0002_eggs"),
(u"fakeapp", u"0003_alter_spam"),),
MigrationHistory.objects.values_list("app_name", "migration"),
)
# make sure it is NOT NULL again
migrate_app(migrations, target_name="0002", fake=False)
self.failIf(null_ok(), 'name not null after migration')
self.assertListEqual(
((u"fakeapp", u"0001_spam"),
(u"fakeapp", u"0002_eggs"),),
MigrationHistory.objects.values_list("app_name", "migration"),
)
# finish with no migrations, otherwise other tests fail...
migrate_app(migrations, target_name="zero", fake=False)
self.assertEqual(list(MigrationHistory.objects.all()), [])
def test_dependencies(self):
fakeapp = Migrations("fakeapp")
otherfakeapp = Migrations("otherfakeapp")
# Test a simple path
self.assertEqual([fakeapp['0001_spam'],
fakeapp['0002_eggs'],
fakeapp['0003_alter_spam']],
fakeapp['0003_alter_spam'].forwards_plan())
# And a complex one.
self.assertEqual(
[
fakeapp['0001_spam'],
otherfakeapp['0001_first'],
otherfakeapp['0002_second'],
fakeapp['0002_eggs'],
fakeapp['0003_alter_spam'],
otherfakeapp['0003_third']
],
otherfakeapp['0003_third'].forwards_plan(),
)
class TestMigrationUtils(Monkeypatcher):
    """Checks for the small helper utilities used by the migration machinery."""

    installed_apps = ["fakeapp", "otherfakeapp"]

    def test_get_app_label(self):
        """get_app_label should return the dotted-path segment before ``models``."""
        for expected, module_name in (
            ("southtest", "southtest.models"),
            ("baz", "foo.bar.baz.models"),
        ):
            fake_app = self.create_fake_app(module_name)
            self.assertEqual(expected, get_app_label(fake_app))
class TestUtils(unittest.TestCase):
    """Unit tests for the pure helper functions ``flatten`` and ``depends``."""

    def test_flatten(self):
        """flatten should collapse arbitrarily nested iterators and lists."""
        self.assertEqual([], list(flatten(iter([]))))
        self.assertEqual([], list(flatten(iter([iter([]), ]))))
        self.assertEqual([1], list(flatten(iter([1]))))
        self.assertEqual([1, 2], list(flatten(iter([1, 2]))))
        self.assertEqual([1, 2], list(flatten(iter([iter([1]), 2]))))
        self.assertEqual([1, 2], list(flatten(iter([iter([1, 2])]))))
        self.assertEqual([1, 2, 3], list(flatten(iter([iter([1, 2]), 3]))))
        self.assertEqual([1, 2, 3],
                         list(flatten(iter([iter([1]), iter([2]), 3]))))
        self.assertEqual([1, 2, 3],
                         list(flatten([[1], [2], 3])))

    def test_depends(self):
        """depends should return a dependency-first ordering ending at the target.

        Each ``graph`` dict maps node -> list of direct dependencies; the
        second argument to ``depends`` is a lookup callable over that graph.
        """
        graph = {'A1': []}
        self.assertEqual(['A1'],
                         depends('A1', lambda n: graph[n]))
        graph = {'A1': [],
                 'A2': ['A1'],
                 'A3': ['A2']}
        self.assertEqual(['A1', 'A2', 'A3'],
                         depends('A3', lambda n: graph[n]))
        graph = {'A1': [],
                 'A2': ['A1'],
                 'A3': ['A2', 'A1']}
        self.assertEqual(['A1', 'A2', 'A3'],
                         depends('A3', lambda n: graph[n]))
        graph = {'A1': [],
                 'A2': ['A1'],
                 'A3': ['A2', 'A1', 'B1'],
                 'B1': []}
        self.assertEqual(
            ['B1', 'A1', 'A2', 'A3'],
            depends('A3', lambda n: graph[n]),
        )
        graph = {'A1': [],
                 'A2': ['A1'],
                 'A3': ['A2', 'A1', 'B2'],
                 'B1': [],
                 'B2': ['B1']}
        self.assertEqual(
            ['B1', 'B2', 'A1', 'A2', 'A3'],
            depends('A3', lambda n: graph[n]),
        )
        graph = {'A1': [],
                 'A2': ['A1', 'B1'],
                 'A3': ['A2'],
                 'B1': ['A1']}
        self.assertEqual(['A1', 'B1', 'A2', 'A3'],
                         depends('A3', lambda n: graph[n]))
        graph = {'A1': [],
                 'A2': ['A1'],
                 'A3': ['A2', 'A1', 'B2'],
                 'B1': [],
                 'B2': ['B1', 'C1'],
                 'C1': ['B1']}
        self.assertEqual(
            ['B1', 'C1', 'B2', 'A1', 'A2', 'A3'],
            depends('A3', lambda n: graph[n]),
        )
        # Diamond-heavy graph: shared dependencies must appear exactly once.
        graph = {'A1': [],
                 'A2': ['A1'],
                 'A3': ['A2', 'B2', 'A1', 'C1'],
                 'B1': ['A1'],
                 'B2': ['B1', 'C2', 'A1'],
                 'C1': ['B1'],
                 'C2': ['C1', 'A1'],
                 'C3': ['C2']}
        self.assertEqual(
            ['A1', 'B1', 'C1', 'C2', 'B2', 'A2', 'A3'],
            depends('A3', lambda n: graph[n]),
        )

    def assertCircularDependency(self, trace, target, graph):
        "Custom assertion that checks a circular dependency is detected correctly."
        # First: resolving the target must raise CircularDependency at all.
        self.assertRaises(
            exceptions.CircularDependency,
            depends,
            target,
            lambda n: graph[n],
        )
        # Second: the exception's trace must match the expected cycle path.
        try:
            depends(target, lambda n: graph[n])
        except exceptions.CircularDependency, e:
            self.assertEqual(trace, e.trace)

    def test_depends_cycle(self):
        """depends must detect cycles and report the path that closes the loop."""
        # Trivial self-cycle.
        graph = {'A1': ['A1']}
        self.assertCircularDependency(
            ['A1', 'A1'],
            'A1',
            graph,
        )
        graph = {'A1': [],
                 'A2': ['A1', 'A2'],
                 'A3': ['A2']}
        self.assertCircularDependency(
            ['A1', 'A2', 'A1'],
            'A3',
            graph,
        )
        graph = {'A1': [],
                 'A2': ['A1'],
                 'A3': ['A2', 'A3'],
                 'A4': ['A3']}
        self.assertCircularDependency(
            ['A3', 'A2', 'A1', 'A3'],
            'A4',
            graph,
        )
        # Two-node mutual cycle.
        graph = {'A1': ['B1'],
                 'B1': ['A1']}
        self.assertCircularDependency(
            ['A1', 'B1', 'A1'],
            'A1',
            graph,
        )
        graph = {'A1': [],
                 'A2': ['A1', 'B2'],
                 'A3': ['A2'],
                 'B1': [],
                 'B2': ['B1', 'A2'],
                 'B3': ['B2']}
        self.assertCircularDependency(
            ['A2', 'A1', 'B2', 'A2'],
            'A3',
            graph,
        )
        graph = {'A1': [],
                 'A2': ['A1', 'B3'],
                 'A3': ['A2'],
                 'B1': [],
                 'B2': ['B1', 'A2'],
                 'B3': ['B2']}
        self.assertCircularDependency(
            ['B2', 'A2', 'A1', 'B3', 'B2'],
            'A3',
            graph,
        )
        graph = {'A1': [],
                 'A2': ['A1'],
                 'A3': ['A2', 'B2'],
                 'A4': ['A3'],
                 'B1': ['A3'],
                 'B2': ['B1']}
        self.assertCircularDependency(
            ['A1', 'B2', 'B1', 'A3', 'A2', 'A1'],
            'A4',
            graph,
        )
| defcube/django-south | south/tests/logic.py | Python | apache-2.0 | 32,253 |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class authenticationpolicylabel(base_resource) :
    """ Configuration for authentication policy label resource.

    Auto-generated NITRO resource wrapper: private ``_xxx`` attributes mirror
    the appliance's fields, exposed through properties, with classmethods for
    the add/delete/rename/get/count API operations.
    """
    def __init__(self) :
        # Writable configuration fields.
        self._labelname = ""
        self._newname = ""
        # Read-only statistics/state reported by the appliance.
        self._numpol = 0
        self._hits = 0
        self._policyname = ""
        self._priority = 0
        self._gotopriorityexpression = ""
        self._flowtype = 0
        self._description = ""
        # Record count returned by count()/count_filtered() queries.
        self.___count = 0

    @property
    def labelname(self) :
        ur"""Name for the new authentication policy label.
        Must begin with a letter, number, or the underscore character (_), and must contain only letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at (@), equals (=), colon (:), and underscore characters.
        The following requirement applies only to the NetScaler CLI:
        If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my authentication policy label" or 'authentication policy label').
        """
        try :
            return self._labelname
        except Exception as e:
            raise e

    @labelname.setter
    def labelname(self, labelname) :
        ur"""Name for the new authentication policy label.
        Must begin with a letter, number, or the underscore character (_), and must contain only letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at (@), equals (=), colon (:), and underscore characters.
        The following requirement applies only to the NetScaler CLI:
        If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my authentication policy label" or 'authentication policy label').
        """
        try :
            self._labelname = labelname
        except Exception as e:
            raise e

    @property
    def newname(self) :
        ur"""The new name of the auth policy label.<br/>Minimum length = 1.
        """
        try :
            return self._newname
        except Exception as e:
            raise e

    @newname.setter
    def newname(self, newname) :
        ur"""The new name of the auth policy label.<br/>Minimum length = 1
        """
        try :
            self._newname = newname
        except Exception as e:
            raise e

    # The remaining properties are read-only values returned by the appliance.
    @property
    def numpol(self) :
        ur"""Number of polices bound to label.
        """
        try :
            return self._numpol
        except Exception as e:
            raise e

    @property
    def hits(self) :
        ur"""Number of times policy label was invoked.
        """
        try :
            return self._hits
        except Exception as e:
            raise e

    @property
    def policyname(self) :
        ur"""Name of the authentication policy to bind to the policy label.
        """
        try :
            return self._policyname
        except Exception as e:
            raise e

    @property
    def priority(self) :
        ur"""Specifies the priority of the policy.
        """
        try :
            return self._priority
        except Exception as e:
            raise e

    @property
    def gotopriorityexpression(self) :
        ur"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
        """
        try :
            return self._gotopriorityexpression
        except Exception as e:
            raise e

    @property
    def flowtype(self) :
        ur"""Flowtype of the bound authentication policy.
        """
        try :
            return self._flowtype
        except Exception as e:
            raise e

    @property
    def description(self) :
        ur"""Description of the policylabel.
        """
        try :
            return self._description
        except Exception as e:
            raise e

    def _get_nitro_response(self, service, response) :
        ur""" converts nitro response into object and returns the object array in case of get request.
        """
        try :
            result = service.payload_formatter.string_to_resource(authenticationpolicylabel_response, response, self.__class__.__name__)
            if(result.errorcode != 0) :
                # Error code 444 triggers clearing the client session before
                # the error is surfaced to the caller.
                if (result.errorcode == 444) :
                    service.clear_session(self)
                if result.severity :
                    if (result.severity == "ERROR") :
                        raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
                else :
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            return result.authenticationpolicylabel
        except Exception as e :
            raise e

    def _get_object_name(self) :
        ur""" Returns the value of object identifier argument
        """
        try :
            if self.labelname is not None :
                return str(self.labelname)
            return None
        except Exception as e :
            raise e

    @classmethod
    def add(cls, client, resource) :
        ur""" Use this API to add authenticationpolicylabel.
        """
        try :
            # Accepts either a single resource or a list of resources.
            if type(resource) is not list :
                addresource = authenticationpolicylabel()
                addresource.labelname = resource.labelname
                return addresource.add_resource(client)
            else :
                if (resource and len(resource) > 0) :
                    addresources = [ authenticationpolicylabel() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        addresources[i].labelname = resource[i].labelname
                result = cls.add_bulk_request(client, addresources)
                return result
        except Exception as e :
            raise e

    @classmethod
    def delete(cls, client, resource) :
        ur""" Use this API to delete authenticationpolicylabel.
        """
        try :
            # ``resource`` may be a resource object, a label-name string, or a
            # list of either; each branch normalizes to resource objects.
            if type(resource) is not list :
                deleteresource = authenticationpolicylabel()
                if type(resource) != type(deleteresource):
                    deleteresource.labelname = resource
                else :
                    deleteresource.labelname = resource.labelname
                return deleteresource.delete_resource(client)
            else :
                if type(resource[0]) != cls :
                    if (resource and len(resource) > 0) :
                        deleteresources = [ authenticationpolicylabel() for _ in range(len(resource))]
                        for i in range(len(resource)) :
                            deleteresources[i].labelname = resource[i]
                else :
                    if (resource and len(resource) > 0) :
                        deleteresources = [ authenticationpolicylabel() for _ in range(len(resource))]
                        for i in range(len(resource)) :
                            deleteresources[i].labelname = resource[i].labelname
                result = cls.delete_bulk_request(client, deleteresources)
                return result
        except Exception as e :
            raise e

    @classmethod
    def rename(cls, client, resource, new_labelname) :
        ur""" Use this API to rename a authenticationpolicylabel resource.
        """
        try :
            renameresource = authenticationpolicylabel()
            if type(resource) == cls :
                renameresource.labelname = resource.labelname
            else :
                renameresource.labelname = resource
            return renameresource.rename_resource(client,new_labelname)
        except Exception as e :
            raise e

    @classmethod
    def get(cls, client, name="", option_="") :
        ur""" Use this API to fetch all the authenticationpolicylabel resources that are configured on netscaler.
        """
        try :
            if not name :
                # No name: fetch every configured policy label.
                obj = authenticationpolicylabel()
                response = obj.get_resources(client, option_)
            else :
                if type(name) != cls :
                    if type(name) is not list :
                        obj = authenticationpolicylabel()
                        obj.labelname = name
                        response = obj.get_resource(client, option_)
                    else :
                        # NOTE(review): if ``name`` is an empty list, neither
                        # branch assigns ``response`` — the final return would
                        # raise UnboundLocalError. Verify callers never pass [].
                        if name and len(name) > 0 :
                            response = [authenticationpolicylabel() for _ in range(len(name))]
                            obj = [authenticationpolicylabel() for _ in range(len(name))]
                            for i in range(len(name)) :
                                obj[i] = authenticationpolicylabel()
                                obj[i].labelname = name[i]
                                response[i] = obj[i].get_resource(client, option_)
            return response
        except Exception as e :
            raise e

    @classmethod
    def get_filtered(cls, client, filter_) :
        ur""" Use this API to fetch filtered set of authenticationpolicylabel resources.
        filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            obj = authenticationpolicylabel()
            option_ = options()
            option_.filter = filter_
            response = obj.getfiltered(client, option_)
            return response
        except Exception as e :
            raise e

    @classmethod
    def count(cls, client) :
        ur""" Use this API to count the authenticationpolicylabel resources configured on NetScaler.
        """
        try :
            obj = authenticationpolicylabel()
            option_ = options()
            option_.count = True
            response = obj.get_resources(client, option_)
            if response :
                # The count is carried on the first record's ___count field.
                return response[0].__dict__['___count']
            return 0
        except Exception as e :
            raise e

    @classmethod
    def count_filtered(cls, client, filter_) :
        ur""" Use this API to count filtered the set of authenticationpolicylabel resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            obj = authenticationpolicylabel()
            option_ = options()
            option_.count = True
            option_.filter = filter_
            response = obj.getfiltered(client, option_)
            if response :
                return response[0].__dict__['___count']
            return 0
        except Exception as e :
            raise e
class authenticationpolicylabel_response(base_response) :
    """NITRO response envelope carrying authenticationpolicylabel records.

    ``length`` is the number of resource records expected in the response;
    the payload formatter fills the pre-allocated objects in place.
    """
    def __init__(self, length=1) :
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # One resource object per expected record. (The original assigned
        # an empty list first and immediately overwrote it — dead code.)
        self.authenticationpolicylabel = [authenticationpolicylabel() for _ in range(length)]
| benfinke/ns_python | nssrc/com/citrix/netscaler/nitro/resource/config/authentication/authenticationpolicylabel.py | Python | apache-2.0 | 9,779 |
'''
eqtime: 30
'''
def main():
    # Felix CO2 laser extraction script. ``analysis_type``, ``position``,
    # ``duration``, ``cleanup``, ``beam_diameter`` and
    # ``disable_between_positions`` are injected into the script namespace by
    # the pyscript runtime — they are not defined in this file.
    info('Felix CO2 analysis')
    gosub('felix:WaitForCO2Access')
    gosub('felix:PrepareForCO2Analysis')
    set_motor('beam',beam_diameter)
    if analysis_type=='blank':
        # Blank run: no lasing; just isolate the line and wait out the time.
        info('is blank. not heating')
        '''
        sleep cumulative time to account for blank
        during a multiple position analysis
        '''
        close(description='Bone to Turbo')
        close('A')
        close('C')
        open('F')
        numPositions=len(position)
        sleep(duration*max(1,numPositions))
    else:
        '''
        this is the most generic what to move and fire the laser
        position is always a list even if only one hole is specified
        '''
        enable()
        for p_i in position:
            '''
            position the laser at p_i, p_i can be an holenumber or (x,y)
            '''
            move_to_position(p_i)
            sleep(5)
            close(description='Bone to Turbo')
            do_extraction()
            # Optionally stop lasing between holes of a multi-position run.
            if disable_between_positions:
                end_extract()
        # NOTE(review): indentation was lost in this copy; end_extract()/
        # disable() are reconstructed as running once after the position
        # loop — confirm against the original script.
        end_extract()
        disable()
    info('cleaning gas {} seconds'.format(cleanup))
    sleep(cleanup)
def do_extraction():
    """Run the heating step for one position.

    Either ramp the laser power up to ``extract_value`` at ``ramp_rate``
    (then trace ``pattern`` and wait out the remainder of ``duration``), or
    hold a fixed setpoint for the whole interval. ``ramp_rate``,
    ``extract_value``, ``duration`` and ``pattern`` are injected into the
    script namespace by the pyscript runtime.
    """
    if ramp_rate>0:
        '''
        style 1.
        '''
        # begin_interval(duration)
        # info('ramping to {} at {} {}/s'.format(extract_value, ramp_rate, extract_units)
        # ramp(setpoint=extract_value, rate=ramp_rate)
        # complete_interval()
        '''
        style 2.
        '''
        elapsed=ramp(setpoint=extract_value, rate=ramp_rate)
        pelapsed=execute_pattern(pattern)
        # BUG FIX: the original called sleep(min(0, ...)), which is never
        # positive, so the leftover duration was never waited out. max(0, ...)
        # sleeps for the remaining time (0 if ramp+pattern already overran).
        sleep(max(0, duration-elapsed-pelapsed))
    else:
        begin_interval(duration)
        info('set extract to {}'.format(extract_value))
        extract(extract_value)
        sleep(2)
        if pattern:
            info('executing pattern {}'.format(pattern))
            execute_pattern(pattern)
        complete_interval()
| USGSDenverPychron/pychron | docs/user_guide/operation/scripts/examples/helix/extraction/felix_co2.py | Python | apache-2.0 | 2,035 |
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
class TestVariable(unittest.TestCase):
    """Unit tests for google.cloud.runtimeconfig.variable.Variable."""

    # Fixture identifiers and the REST path they combine into.
    PROJECT = 'PROJECT'
    CONFIG_NAME = 'config_name'
    VARIABLE_NAME = 'variable_name'
    PATH = 'projects/%s/configs/%s/variables/%s' % (
        PROJECT, CONFIG_NAME, VARIABLE_NAME)

    @staticmethod
    def _get_target_class():
        # Imported lazily so import errors surface inside the test, not at
        # module collection time.
        from google.cloud.runtimeconfig.variable import Variable
        return Variable

    def _make_one(self, *args, **kw):
        # Convenience constructor for the class under test.
        return self._get_target_class()(*args, **kw)

    def _verifyResourceProperties(self, variable, resource):
        # Assert that ``variable`` reflects the API ``resource`` dict:
        # present keys must match, absent keys must map to None.
        import base64
        from google.cloud._helpers import _rfc3339_to_datetime
        if 'name' in resource:
            self.assertEqual(variable.full_name, resource['name'])
        if 'value' in resource:
            # API values are base64-encoded bytes.
            self.assertEqual(
                variable.value, base64.b64decode(resource['value']))
        else:
            self.assertIsNone(variable.value)
        if 'state' in resource:
            self.assertEqual(variable.state, resource['state'])
        if 'updateTime' in resource:
            self.assertEqual(
                variable.update_time,
                _rfc3339_to_datetime(resource['updateTime']))
        else:
            self.assertIsNone(variable.update_time)

    def test_ctor(self):
        """Constructor wires name, full_name, path and client."""
        from google.cloud.runtimeconfig.config import Config
        client = _Client(project=self.PROJECT)
        config = Config(name=self.CONFIG_NAME, client=client)
        variable = self._make_one(name=self.VARIABLE_NAME, config=config)
        self.assertEqual(variable.name, self.VARIABLE_NAME)
        self.assertEqual(variable.full_name, self.PATH)
        self.assertEqual(variable.path, '/%s' % (self.PATH,))
        self.assertIs(variable.client, client)

    def test_ctor_w_no_name(self):
        """full_name must raise ValueError when the variable has no name."""
        from google.cloud.runtimeconfig.config import Config
        client = _Client(project=self.PROJECT)
        config = Config(name=self.CONFIG_NAME, client=client)
        variable = self._make_one(name=None, config=config)
        with self.assertRaises(ValueError):
            getattr(variable, 'full_name')

    def test_exists_miss_w_bound_client(self):
        """exists() is False when the API returns no response (bound client)."""
        from google.cloud.runtimeconfig.config import Config
        conn = _Connection()
        client = _Client(project=self.PROJECT, connection=conn)
        config = Config(name=self.CONFIG_NAME, client=client)
        variable = self._make_one(name=self.VARIABLE_NAME, config=config)
        self.assertFalse(variable.exists())
        # exists() should issue a minimal GET asking only for the name field.
        self.assertEqual(len(conn._requested), 1)
        req = conn._requested[0]
        self.assertEqual(req['method'], 'GET')
        self.assertEqual(req['path'], '/%s' % (self.PATH,))
        self.assertEqual(req['query_params'], {'fields': 'name'})

    def test_exists_hit_w_alternate_client(self):
        """exists(client=...) must use the explicit client, not the bound one."""
        from google.cloud.runtimeconfig.config import Config
        conn1 = _Connection()
        CLIENT1 = _Client(project=self.PROJECT, connection=conn1)
        CONFIG1 = Config(name=self.CONFIG_NAME, client=CLIENT1)
        conn2 = _Connection({})
        CLIENT2 = _Client(project=self.PROJECT, connection=conn2)
        variable = self._make_one(name=self.VARIABLE_NAME, config=CONFIG1)
        self.assertTrue(variable.exists(client=CLIENT2))
        # The bound client's connection must remain untouched.
        self.assertEqual(len(conn1._requested), 0)
        self.assertEqual(len(conn2._requested), 1)
        req = conn2._requested[0]
        self.assertEqual(req['method'], 'GET')
        self.assertEqual(req['path'], '/%s' % (self.PATH,))
        self.assertEqual(req['query_params'], {'fields': 'name'})

    def test_reload_w_bound_client(self):
        """reload() populates properties from the API resource."""
        from google.cloud.runtimeconfig.config import Config
        RESOURCE = {
            'name': self.PATH,
            'value': 'bXktdmFyaWFibGUtdmFsdWU=',  # base64 my-variable-value
            'updateTime': '2016-04-14T21:21:54.5000Z',
            'state': 'VARIABLE_STATE_UNSPECIFIED',
        }
        conn = _Connection(RESOURCE)
        client = _Client(project=self.PROJECT, connection=conn)
        config = Config(name=self.CONFIG_NAME, client=client)
        variable = self._make_one(name=self.VARIABLE_NAME, config=config)
        variable.reload()
        self.assertEqual(len(conn._requested), 1)
        req = conn._requested[0]
        self.assertEqual(req['method'], 'GET')
        self.assertEqual(req['path'], '/%s' % (self.PATH,))
        self._verifyResourceProperties(variable, RESOURCE)

    def test_reload_w_empty_resource(self):
        """reload() with an empty resource clears values but keeps the name."""
        from google.cloud.runtimeconfig.config import Config
        RESOURCE = {}
        conn = _Connection(RESOURCE)
        client = _Client(project=self.PROJECT, connection=conn)
        config = Config(name=self.CONFIG_NAME, client=client)
        variable = self._make_one(name=self.VARIABLE_NAME, config=config)
        variable.reload()
        # Name should not be overwritten.
        self.assertEqual(self.VARIABLE_NAME, variable.name)
        self.assertEqual(len(conn._requested), 1)
        req = conn._requested[0]
        self.assertEqual(req['method'], 'GET')
        self.assertEqual(req['path'], '/%s' % (self.PATH,))
        self._verifyResourceProperties(variable, RESOURCE)

    def test_reload_w_alternate_client(self):
        """reload(client=...) must use the explicit client, not the bound one."""
        from google.cloud.runtimeconfig.config import Config
        RESOURCE = {
            'name': self.PATH,
            'value': 'bXktdmFyaWFibGUtdmFsdWU=',  # base64 my-variable-value
            'updateTime': '2016-04-14T21:21:54.5000Z',
            'state': 'VARIABLE_STATE_UNSPECIFIED',
        }
        conn1 = _Connection()
        CLIENT1 = _Client(project=self.PROJECT, connection=conn1)
        CONFIG1 = Config(name=self.CONFIG_NAME, client=CLIENT1)
        conn2 = _Connection(RESOURCE)
        CLIENT2 = _Client(project=self.PROJECT, connection=conn2)
        variable = self._make_one(name=self.VARIABLE_NAME, config=CONFIG1)
        variable.reload(client=CLIENT2)
        self.assertEqual(len(conn1._requested), 0)
        self.assertEqual(len(conn2._requested), 1)
        req = conn2._requested[0]
        self.assertEqual(req['method'], 'GET')
        self.assertEqual(req['path'], '/%s' % (self.PATH,))
        self._verifyResourceProperties(variable, RESOURCE)
class _Client(object):
_connection = None
def __init__(self, project, connection=None):
self.project = project
self._connection = connection
class _Connection(object):
def __init__(self, *responses):
self._responses = responses
self._requested = []
def api_request(self, **kw):
from google.cloud.exceptions import NotFound
self._requested.append(kw)
try:
response, self._responses = self._responses[0], self._responses[1:]
except:
raise NotFound('miss')
else:
return response
| quom/google-cloud-python | runtimeconfig/unit_tests/test_variable.py | Python | apache-2.0 | 7,382 |
from __future__ import absolute_import, unicode_literals
import logging
import os
from ...platformdirs import user_config_dir
from ..info import PY3
from ..util import ConfigParser
from ..util.path import Path
from ..util.six import ensure_str
from .convert import convert
class IniConfig(object):
    """Reads virtualenv defaults from an INI file.

    The file location comes from the ``VIRTUALENV_CONFIG_FILE`` environment
    variable, falling back to ``virtualenv.ini`` in the platform user config
    directory. Parse/IO failures are logged, never raised.
    """

    VIRTUALENV_CONFIG_FILE_ENV_VAR = ensure_str("VIRTUALENV_CONFIG_FILE")
    # Maps has_config_file tri-state (None/True/False) to a display word.
    STATE = {None: "failed to parse", True: "active", False: "missing"}

    section = "virtualenv"  # the only INI section consulted

    def __init__(self, env=None):
        env = os.environ if env is None else env
        config_file = env.get(self.VIRTUALENV_CONFIG_FILE_ENV_VAR, None)
        # Remember whether the path was explicit, for the epilog message.
        self.is_env_var = config_file is not None
        config_file = (
            Path(config_file)
            if config_file is not None
            else Path(user_config_dir(appname="virtualenv", appauthor="pypa")) / "virtualenv.ini"
        )
        self.config_file = config_file
        self._cache = {}  # (key, as_type) -> (value, source) or None

        exception = None
        self.has_config_file = None
        try:
            self.has_config_file = self.config_file.exists()
        except OSError as exc:
            exception = exc
        else:
            if self.has_config_file:
                self.config_file = self.config_file.resolve()
                self.config_parser = ConfigParser.ConfigParser()
                try:
                    self._load()
                    self.has_virtualenv_section = self.config_parser.has_section(self.section)
                except Exception as exc:
                    exception = exc
        if exception is not None:
            # Config problems must never break virtualenv itself.
            logging.error("failed to read config file %s because %r", config_file, exception)

    def _load(self):
        # ``read_file`` on Python 3, ``readfp`` on Python 2.
        with self.config_file.open("rt") as file_handler:
            reader = getattr(self.config_parser, "read_file" if PY3 else "readfp")
            reader(file_handler)

    def get(self, key, as_type):
        """Return ``(value, source)`` for ``key`` coerced to ``as_type``,
        or None if the key is missing/unparseable. Results are memoized."""
        cache_key = key, as_type
        if cache_key in self._cache:
            return self._cache[cache_key]
        # noinspection PyBroadException
        try:
            source = "file"
            raw_value = self.config_parser.get(self.section, key.lower())
            value = convert(raw_value, as_type, source)
            result = value, source
        except Exception:
            # Missing option or conversion failure — treated as "not set".
            result = None
        self._cache[cache_key] = result
        return result

    def __bool__(self):
        # Truthy only when the file exists AND has a [virtualenv] section.
        return bool(self.has_config_file) and bool(self.has_virtualenv_section)

    @property
    def epilog(self):
        # Human-readable status line appended to --help output.
        msg = "{}config file {} {} (change{} via env var {})"
        return msg.format(
            "\n",
            self.config_file,
            self.STATE[self.has_config_file],
            "d" if self.is_env_var else "",
            self.VIRTUALENV_CONFIG_FILE_ENV_VAR,
        )
| pybuilder/pybuilder | src/main/python/pybuilder/_vendor/virtualenv/config/ini.py | Python | apache-2.0 | 2,807 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Momentum."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
class MomentumOptimizerTest(tf.test.TestCase):
  def testBasic(self):
    """Dense updates across dtypes: momentum slots are created and, over two
    steps, accumulators follow m = momentum*m + g while vars follow
    v -= lr * m."""
    for dtype in [tf.half, tf.float32, tf.float64]:
      with self.test_session():
        var0 = tf.Variable([1.0, 2.0], dtype=dtype)
        var1 = tf.Variable([3.0, 4.0], dtype=dtype)
        grads0 = tf.constant([0.1, 0.1], dtype=dtype)
        grads1 = tf.constant([0.01, 0.01], dtype=dtype)
        mom_opt = tf.train.MomentumOptimizer(learning_rate=2.0, momentum=0.9)
        mom_update = mom_opt.apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        tf.initialize_all_variables().run()
        # Check we have slots
        self.assertEqual(["momentum"], mom_opt.get_slot_names())
        slot0 = mom_opt.get_slot(var0, "momentum")
        self.assertEquals(slot0.get_shape(), var0.get_shape())
        # Slot variables must not be trainable.
        self.assertFalse(slot0 in tf.trainable_variables())
        slot1 = mom_opt.get_slot(var1, "momentum")
        self.assertEquals(slot1.get_shape(), var1.get_shape())
        self.assertFalse(slot1 in tf.trainable_variables())
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        # Step 1: the momentum accumulators where 0. So we should see a normal
        # update: v -= grad * learning_rate
        mom_update.run()
        # Check that the momentum accumulators have been updated.
        self.assertAllCloseAccordingToType(np.array([0.1, 0.1]), slot0.eval())
        self.assertAllCloseAccordingToType(np.array([0.01, 0.01]), slot1.eval())
        # Check that the parameters have been updated.
        self.assertAllCloseAccordingToType(np.array([1.0 - (0.1 * 2.0),
                                                     2.0 - (0.1 * 2.0)]),
                                           var0.eval())
        self.assertAllCloseAccordingToType(np.array([3.0 - (0.01 * 2.0),
                                                     4.0 - (0.01 * 2.0)]),
                                           var1.eval())
        # Step 2: the momentum accumulators contain the previous update.
        mom_update.run()
        # Check that the momentum accumulators have been updated.
        self.assertAllCloseAccordingToType(
            np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),
            slot0.eval())
        self.assertAllCloseAccordingToType(
            np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
            slot1.eval())
        # Check that the parameters have been updated.
        self.assertAllCloseAccordingToType(
            np.array([1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
                      2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)]),
            var0.eval())
        # 2.98 / 3.98 are the pre-computed values of var1 after step 1.
        self.assertAllCloseAccordingToType(
            np.array([2.98 - ((0.9 * 0.01 + 0.01) * 2.0),
                      3.98 - ((0.9 * 0.01 + 0.01) * 2.0)]),
            var1.eval())
  def testTensorLearningRateAndMomentum(self):
    """Same scenario as testBasic, but learning_rate and momentum are
    supplied as tf.constant tensors instead of Python floats."""
    for dtype in [tf.half, tf.float32, tf.float64]:
      with self.test_session():
        var0 = tf.Variable([1.0, 2.0], dtype=dtype)
        var1 = tf.Variable([3.0, 4.0], dtype=dtype)
        grads0 = tf.constant([0.1, 0.1], dtype=dtype)
        grads1 = tf.constant([0.01, 0.01], dtype=dtype)
        mom_opt = tf.train.MomentumOptimizer(
            learning_rate=tf.constant(2.0), momentum=tf.constant(0.9))
        mom_update = mom_opt.apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        tf.initialize_all_variables().run()
        # Check we have slots
        self.assertEqual(["momentum"], mom_opt.get_slot_names())
        slot0 = mom_opt.get_slot(var0, "momentum")
        self.assertEquals(slot0.get_shape(), var0.get_shape())
        self.assertFalse(slot0 in tf.trainable_variables())
        slot1 = mom_opt.get_slot(var1, "momentum")
        self.assertEquals(slot1.get_shape(), var1.get_shape())
        self.assertFalse(slot1 in tf.trainable_variables())
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        # Step 1: the momentum accumulators where 0. So we should see a normal
        # update: v -= grad * learning_rate
        mom_update.run()
        # Check that the momentum accumulators have been updated.
        self.assertAllCloseAccordingToType(np.array([0.1, 0.1]), slot0.eval())
        self.assertAllCloseAccordingToType(np.array([0.01, 0.01]), slot1.eval())
        # Check that the parameters have been updated.
        self.assertAllCloseAccordingToType(np.array([1.0 - (0.1 * 2.0),
                                                     2.0 - (0.1 * 2.0)]),
                                           var0.eval())
        self.assertAllCloseAccordingToType(np.array([3.0 - (0.01 * 2.0),
                                                     4.0 - (0.01 * 2.0)]),
                                           var1.eval())
        # Step 2: the momentum accumulators contain the previous update.
        mom_update.run()
        # Check that the momentum accumulators have been updated.
        self.assertAllCloseAccordingToType(
            np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),
            slot0.eval())
        self.assertAllCloseAccordingToType(
            np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
            slot1.eval())
        # Check that the parameters have been updated.
        self.assertAllCloseAccordingToType(
            np.array([1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
                      2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)]),
            var0.eval())
        # 2.98 / 3.98 are the pre-computed values of var1 after step 1.
        self.assertAllCloseAccordingToType(
            np.array([2.98 - ((0.9 * 0.01 + 0.01) * 2.0),
                      3.98 - ((0.9 * 0.01 + 0.01) * 2.0)]),
            var1.eval())
  def _dbParamsMom01(self):
    """Return dist-belief momentum values.

    Return values been generated from the dist-belief momentum unittest,
    running with a learning rate of 0.1 and a momentum of 0.1.

    These values record how a parameter vector of size 10, initialized with 0.0,
    gets updated with 10 consecutive momentum steps.  It uses random gradients.

    Returns:
      db_grad: The gradients to apply
      db_out: The parameters after the momentum update.
    """
    # Golden data copied verbatim from the dist-belief reference run —
    # do not reformat or round these values.
    db_grad = [[]] * 10
    db_out = [[]] * 10
    # pylint: disable=line-too-long
    db_grad[0] = [0.00096264342, 0.17914793, 0.93945462, 0.41396621, 0.53037018, 0.93197989, 0.78648776, 0.50036013, 0.55345792, 0.96722615]
    db_out[0] = [-9.6264346e-05, -0.017914793, -0.093945466, -0.041396622, -0.053037018, -0.093197994, -0.078648776, -0.050036013, -0.055345792, -0.096722618]
    db_grad[1] = [0.17075552, 0.88821375, 0.20873757, 0.25236958, 0.57578111, 0.15312378, 0.5513742, 0.94687688, 0.16012503, 0.22159521]
    db_out[1] = [-0.017181443, -0.10852765, -0.12421377, -0.070773244, -0.11591884, -0.11783017, -0.14165108, -0.14972731, -0.076892875, -0.1285544]
    db_grad[2] = [0.35077485, 0.47304362, 0.44412705, 0.44368884, 0.078527533, 0.81223965, 0.31168157, 0.43203235, 0.16792089, 0.24644311]
    db_out[2] = [-0.053967446, -0.1648933, -0.1716533, -0.1180798, -0.13005978, -0.20151734, -0.17911947, -0.20289968, -0.095839672, -0.15638189]
    db_grad[3] = [0.9694621, 0.75035888, 0.28171822, 0.83813518, 0.53807181, 0.3728098, 0.81454384, 0.03848977, 0.89759839, 0.93665648]
    db_out[3] = [-0.15459226, -0.24556576, -0.20456907, -0.20662397, -0.18528105, -0.24716705, -0.2643207, -0.21206589, -0.18749419, -0.2528303]
    db_grad[4] = [0.38578293, 0.8536852, 0.88722926, 0.66276771, 0.13678469, 0.94036359, 0.69107032, 0.81897682, 0.5433259, 0.67860287]
    db_out[4] = [-0.20323303, -0.33900154, -0.29658359, -0.28175515, -0.20448165, -0.34576839, -0.34194785, -0.29488021, -0.25099224, -0.33033544]
    db_grad[5] = [0.27885768, 0.76100707, 0.24625534, 0.81354135, 0.18959245, 0.48038563, 0.84163809, 0.41172323, 0.83259648, 0.44941229]
    db_out[5] = [-0.23598288, -0.42444581, -0.33041057, -0.3706224, -0.22536094, -0.40366709, -0.43387437, -0.34433398, -0.34060168, -0.38302717]
    db_grad[6] = [0.27233034, 0.056316052, 0.5039115, 0.24105175, 0.35697976, 0.75913221, 0.73577434, 0.16014607, 0.57500273, 0.071136251]
    db_out[6] = [-0.26649091, -0.43862185, -0.38418442, -0.40361428, -0.26314685, -0.48537019, -0.51664448, -0.36529395, -0.40706289, -0.39540997]
    db_grad[7] = [0.58697265, 0.2494842, 0.08106143, 0.39954534, 0.15892942, 0.12683646, 0.74053431, 0.16033, 0.66625422, 0.73515922]
    db_out[7] = [-0.32823896, -0.46498787, -0.39766794, -0.446868, -0.28281838, -0.50622416, -0.59897494, -0.38342294, -0.48033443, -0.47016418]
    db_grad[8] = [0.8215279, 0.41994119, 0.95172721, 0.68000203, 0.79439718, 0.43384039, 0.55561525, 0.22567581, 0.93331909, 0.29438227]
    db_out[8] = [-0.41656655, -0.50961858, -0.49418902, -0.51919359, -0.36422527, -0.55169362, -0.6627695, -0.40780342, -0.58099347, -0.50707781]
    db_grad[9] = [0.68297005, 0.67758518, 0.1748755, 0.13266537, 0.70697063, 0.055731893, 0.68593478, 0.50580865, 0.12602448, 0.093537711]
    db_out[9] = [-0.49369633, -0.58184016, -0.52132869, -0.5396927, -0.44306302, -0.56181377, -0.73774242, -0.46082234, -0.60366184, -0.52012295]
    # pylint: enable=line-too-long
    return db_grad, db_out
def testLikeDistBeliefMom01(self):
with self.test_session():
db_grad, db_out = self._dbParamsMom01()
num_samples = len(db_grad)
var0 = tf.Variable([0.0] * num_samples)
grads0 = tf.constant([0.0] * num_samples)
mom_opt = tf.train.MomentumOptimizer(learning_rate=0.1, momentum=0.1)
mom_update = mom_opt.apply_gradients(zip([grads0], [var0]))
tf.initialize_all_variables().run()
for i in xrange(num_samples):
mom_update.run(feed_dict={grads0: db_grad[i]})
self.assertAllClose(np.array(db_out[i]), var0.eval())
  def testSparse(self):
    """Momentum with IndexedSlices gradients only updates the touched rows."""
    for dtype in [tf.half, tf.float32, tf.float64]:
      with self.test_session():
        var0 = tf.Variable(tf.zeros([4, 2], dtype=dtype))
        var1 = tf.Variable(tf.constant(1.0, dtype, [4, 2]))
        # Sparse gradient touching only row 1 of var0.
        grads0 = tf.IndexedSlices(tf.constant([[.1, .1]], dtype=dtype),
                                  tf.constant([1]),
                                  tf.constant([4, 2]))
        # Sparse gradient touching rows 2 and 3 of var1.
        grads1 = tf.IndexedSlices(tf.constant([[.01, .01], [.01, .01]],
                                              dtype=dtype),
                                  tf.constant([2, 3]),
                                  tf.constant([4, 2]))
        mom_opt = tf.train.MomentumOptimizer(learning_rate=2.0, momentum=0.9)
        mom_update = mom_opt.apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        tf.initialize_all_variables().run()
        # Check we have slots
        self.assertEqual(["momentum"], mom_opt.get_slot_names())
        slot0 = mom_opt.get_slot(var0, "momentum")
        self.assertEquals(slot0.get_shape(), var0.get_shape())
        slot1 = mom_opt.get_slot(var1, "momentum")
        self.assertEquals(slot1.get_shape(), var1.get_shape())
        # Fetch params to validate initial values
        self.assertAllClose([0, 0], var0.eval()[0])
        self.assertAllClose([0, 0], var0.eval()[1])
        self.assertAllClose([1, 1], var1.eval()[2])
        # Step 1: the momentum accumulators are 0. So we should see a normal
        # update: v -= grad * learning_rate
        mom_update.run()
        # Check that the momentum accumulators have been updated.
        # Row 0 of var0 received no gradient, so its accumulator stays 0.
        self.assertAllCloseAccordingToType(
            np.array([0, 0]), slot0.eval()[0])
        self.assertAllCloseAccordingToType(
            np.array([.1, .1]), slot0.eval()[1])
        self.assertAllCloseAccordingToType(
            np.array([.01, .01]), slot1.eval()[2])
        # Check that the parameters have been updated.
        # Untouched rows keep their initial values.
        self.assertAllCloseAccordingToType(np.array([0, 0]), var0.eval()[0])
        self.assertAllCloseAccordingToType(np.array([- (0.1 * 2.0),
                                                     - (0.1 * 2.0)]),
                                           var0.eval()[1])
        self.assertAllCloseAccordingToType(np.array([1.0 - (0.01 * 2.0),
                                                     1.0 - (0.01 * 2.0)]),
                                           var1.eval()[2])
        # Step 2: the momentum accumulators contain the previous update.
        mom_update.run()
        # Check that the momentum accumulators have been updated.
        self.assertAllClose(np.array([0, 0]), slot0.eval()[0])
        self.assertAllCloseAccordingToType(np.array([(0.9 * 0.1 + 0.1),
                                                     (0.9 * 0.1 + 0.1)]),
                                           slot0.eval()[1])
        self.assertAllCloseAccordingToType(np.array([(0.9 * 0.01 + 0.01),
                                                     (0.9 * 0.01 + 0.01)]),
                                           slot1.eval()[2])
        # Check that the parameters have been updated.
        self.assertAllClose(np.array([0, 0]), var0.eval()[0])
        self.assertAllCloseAccordingToType(
            np.array([- (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
                      - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)]),
            var0.eval()[1])
        self.assertAllCloseAccordingToType(
            np.array([0.98 - ((0.9 * 0.01 + 0.01) * 2.0),
                      0.98 - ((0.9 * 0.01 + 0.01) * 2.0)]),
            var1.eval()[2])
  def testSharing(self):
    """Two apply_gradients ops from one optimizer share the same slot state."""
    for dtype in [tf.half, tf.float32, tf.float64]:
      with self.test_session():
        var0 = tf.Variable([1.0, 2.0], dtype=dtype)
        var1 = tf.Variable([3.0, 4.0], dtype=dtype)
        grads0 = tf.constant([0.1, 0.1], dtype=dtype)
        grads1 = tf.constant([0.01, 0.01], dtype=dtype)
        mom_opt = tf.train.MomentumOptimizer(learning_rate=2.0, momentum=0.9)
        # Two separate update ops built from the same optimizer instance;
        # both must operate on the same momentum slots.
        mom_update1 = mom_opt.apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        mom_update2 = mom_opt.apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        tf.initialize_all_variables().run()
        self.assertEqual(["momentum"], mom_opt.get_slot_names())
        slot0 = mom_opt.get_slot(var0, "momentum")
        self.assertEquals(slot0.get_shape(), var0.get_shape())
        slot1 = mom_opt.get_slot(var1, "momentum")
        self.assertEquals(slot1.get_shape(), var1.get_shape())
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        # Step 1: the momentum accumulators where 0. So we should see a normal
        # update: v -= grad * learning_rate
        mom_update1.run()
        # Check that the momentum accumulators have been updated.
        self.assertAllCloseAccordingToType(np.array([0.1, 0.1]), slot0.eval())
        self.assertAllCloseAccordingToType(np.array([0.01, 0.01]), slot1.eval())
        # Check that the parameters have been updated.
        self.assertAllCloseAccordingToType(np.array([1.0 - (0.1 * 2.0),
                                                     2.0 - (0.1 * 2.0)]),
                                           var0.eval())
        self.assertAllCloseAccordingToType(np.array([3.0 - (0.01 * 2.0),
                                                     4.0 - (0.01 * 2.0)]),
                                           var1.eval())
        # Step 2: the second momentum accumulators contain the previous update.
        # Running the *second* op continues from the shared slot state.
        mom_update2.run()
        # Check that the momentum accumulators have been updated.
        self.assertAllCloseAccordingToType(
            np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),
            slot0.eval())
        self.assertAllCloseAccordingToType(
            np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
            slot1.eval())
        # Check that the parameters have been updated.
        self.assertAllCloseAccordingToType(
            np.array([1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
                      2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)]),
            var0.eval())
        self.assertAllCloseAccordingToType(
            np.array([2.98 - ((0.9 * 0.01 + 0.01) * 2.0),
                      3.98 - ((0.9 * 0.01 + 0.01) * 2.0)]),
            var1.eval())
# Run the TensorFlow test runner when executed as a script.
if __name__ == "__main__":
  tf.test.main()
| sachinpro/sachinpro.github.io | tensorflow/python/training/momentum_test.py | Python | apache-2.0 | 17,251 |
# (c) Copyright 2014 Brocade Communications Systems Inc.
# All Rights Reserved.
#
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
ZoneManager is responsible to manage access control using FC zoning
when zoning mode is set as 'fabric'.
ZoneManager provides interfaces to add connection and remove connection
for given initiator and target list associated with a FC volume attach and
detach operation.
**Related Flags**
:zone_driver: Used by:class:`ZoneManager`.
Defaults to
`cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver.BrcdFCZoneDriver`
:zoning_policy: Used by: class: 'ZoneManager'. Defaults to 'none'
"""
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
import six
from cinder import exception
from cinder.i18n import _, _LI
from cinder.volume import configuration as config
from cinder.zonemanager import fc_common
LOG = logging.getLogger(__name__)

# Options registered under the [fc-zone-manager] section. They select the
# vendor zone driver, the zoning policy, the fabrics to manage and the SAN
# lookup service implementation.
zone_manager_opts = [
    cfg.StrOpt('zone_driver',
               default='cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver'
               '.BrcdFCZoneDriver',
               help='FC Zone Driver responsible for zone management'),
    cfg.StrOpt('zoning_policy',
               default='initiator-target',
               help='Zoning policy configured by user; valid values include '
               '"initiator-target" or "initiator"'),
    cfg.StrOpt('fc_fabric_names',
               help='Comma separated list of Fibre Channel fabric names.'
               ' This list of names is used to retrieve other SAN credentials'
               ' for connecting to each SAN fabric'),
    cfg.StrOpt('fc_san_lookup_service',
               default='cinder.zonemanager.drivers.brocade'
               '.brcd_fc_san_lookup_service.BrcdFCSanLookupService',
               help='FC SAN Lookup Service'),
]

CONF = cfg.CONF
CONF.register_opts(zone_manager_opts, group='fc-zone-manager')
class ZoneManager(fc_common.FCCommon):
    """Manages Connection control during attach/detach.

    Singleton that loads the configured vendor FC zone driver and delegates
    add/delete connection-control requests to it, fabric by fabric.

    Version History:
        1.0 - Initial version
        1.0.1 - Added __new__ for singleton
    """

    VERSION = "1.0.1"

    driver = None
    fabric_names = []

    def __new__(class_, *args, **kwargs):
        # Singleton: one ZoneManager per process.
        # NOTE: object.__new__ must be called with the class only; forwarding
        # *args/**kwargs raises TypeError on Python 3 (and is deprecated on
        # Python 2.6+) because object.__new__ takes no extra arguments.
        if not hasattr(class_, "_instance"):
            class_._instance = object.__new__(class_)
        return class_._instance

    def __init__(self, **kwargs):
        """Load the driver from the one specified in args, or from flags."""
        super(ZoneManager, self).__init__(**kwargs)
        self.configuration = config.Configuration(zone_manager_opts,
                                                  'fc-zone-manager')
        self._build_driver()

    def _build_driver(self):
        """Instantiate the configured vendor-specific FC zone driver."""
        zone_driver = self.configuration.zone_driver
        LOG.debug("Zone Driver from config: {%s}", zone_driver)

        # Initialize vendor specific implementation of FCZoneDriver
        self.driver = importutils.import_object(
            zone_driver,
            configuration=self.configuration)

    def get_zoning_state_ref_count(self, initiator_wwn, target_wwn):
        """Zone management state check.

        Performs state check for given I-T pair to return the current count of
        active attach for the pair.
        """
        # TODO(sk): ref count state management
        count = 0
        # check the state for I-T pair
        return count

    def add_connection(self, initiator_target_map):
        """Add connection control.

        Adds connection control for the given initiator target map.
        initiator_target_map - each initiator WWN mapped to a list of one
        or more target WWN:

        eg:
        {
            '10008c7cff523b01': ['20240002ac000a50', '20240002ac000a40']
        }

        :raises ZoneManagerException: if any fabric update fails.
        """
        connected_fabric = None
        try:
            for initiator, target_list in initiator_target_map.items():
                LOG.debug("Target List: %s", target_list)

                # get SAN context for the target list
                fabric_map = self.get_san_context(target_list)
                LOG.debug("Fabric Map after context lookup: %s", fabric_map)

                # iterate over each SAN and apply connection control
                for fabric, t_list in fabric_map.items():
                    connected_fabric = fabric
                    # get valid I-T map to add connection control
                    i_t_map = {initiator: t_list}
                    valid_i_t_map = self.get_valid_initiator_target_map(
                        i_t_map, True)
                    LOG.info(_LI("Final filtered map for fabric: %s"),
                             valid_i_t_map)

                    # Call driver to add connection control
                    self.driver.add_connection(fabric, valid_i_t_map)

            LOG.info(_LI("Add Connection: Finished iterating "
                         "over all target list"))
        except Exception as e:
            msg = _("Failed adding connection for fabric=%(fabric)s: "
                    "Error: %(err)s") % {'fabric': connected_fabric,
                                         'err': six.text_type(e)}
            LOG.error(msg)
            raise exception.ZoneManagerException(reason=msg)

    def delete_connection(self, initiator_target_map):
        """Delete connection.

        Updates/deletes connection control for the given initiator target map.
        initiator_target_map - each initiator WWN mapped to a list of one
        or more target WWN:

        eg:
        {
            '10008c7cff523b01': ['20240002ac000a50', '20240002ac000a40']
        }

        :raises ZoneManagerException: if any fabric update fails.
        """
        connected_fabric = None
        try:
            for initiator, target_list in initiator_target_map.items():
                LOG.info(_LI("Delete connection Target List: %s"),
                         target_list)

                # get SAN context for the target list
                fabric_map = self.get_san_context(target_list)
                LOG.debug("Delete connection Fabric Map from SAN "
                          "context: %s", fabric_map)

                # iterate over each SAN and apply connection control
                for fabric, t_list in fabric_map.items():
                    connected_fabric = fabric
                    # get valid I-T map to add connection control
                    i_t_map = {initiator: t_list}
                    valid_i_t_map = self.get_valid_initiator_target_map(
                        i_t_map, False)
                    LOG.info(_LI("Final filtered map for delete "
                                 "connection: %s"), valid_i_t_map)

                    # Call driver to delete connection control
                    if len(valid_i_t_map) > 0:
                        self.driver.delete_connection(fabric, valid_i_t_map)

            LOG.debug("Delete Connection - Finished iterating over all"
                      " target list")
        except Exception as e:
            msg = _("Failed removing connection for fabric=%(fabric)s: "
                    "Error: %(err)s") % {'fabric': connected_fabric,
                                         'err': six.text_type(e)}
            LOG.error(msg)
            raise exception.ZoneManagerException(reason=msg)

    def get_san_context(self, target_wwn_list):
        """SAN lookup for end devices.

        Look up each SAN configured and return a map of SAN (fabric IP)
        to list of target WWNs visible to the fabric.
        """
        fabric_map = self.driver.get_san_context(target_wwn_list)
        LOG.debug("Got SAN context: %s", fabric_map)
        return fabric_map

    def get_valid_initiator_target_map(self, initiator_target_map,
                                       add_control):
        """Reference count check for end devices.

        Looks up the reference count for each initiator-target pair from the
        map and returns a filtered list based on the operation type
        add_control - operation type can be true for add connection control
        and false for remove connection control

        BUG FIX: the previous implementation removed entries from the target
        list while iterating over the same list, which skips the element
        following each removal, and it also mutated the caller's input lists.
        The filter now builds a new list instead.
        """
        filtered_i_t_map = {}
        for initiator, t_list in initiator_target_map.items():
            valid_targets = []
            for target in t_list:
                count = self.get_zoning_state_ref_count(initiator, target)
                if add_control:
                    # Skip targets that already have an active attachment.
                    if count > 0:
                        continue
                    # update count = count + 1
                else:
                    # Skip targets still referenced by other attachments.
                    if count > 1:
                        continue
                    # update count = count - 1
                valid_targets.append(target)
            if valid_targets:
                filtered_i_t_map[initiator] = valid_targets
            else:
                LOG.info(_LI("No targets to add or remove connection for "
                             "I: %s"), initiator)
        return filtered_i_t_map
| tobegit3hub/cinder_docker | cinder/zonemanager/fc_zone_manager.py | Python | apache-2.0 | 9,631 |
# Copyright 2013 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import pretend
from warehouse import serving
from warehouse.serving import WSGIRequestHandler
def test_request_handler_log(monkeypatch):
    """log() should prefix the client address and delegate to serving._log."""
    log_recorder = pretend.call_recorder(lambda *a, **kw: None)
    address_recorder = pretend.call_recorder(lambda: "127.0.0.1")

    # Stub out the module-level logger and skip the socket-touching __init__.
    monkeypatch.setattr(serving, "_log", log_recorder)
    monkeypatch.setattr(WSGIRequestHandler, "__init__", lambda *a, **kw: None)

    handler = WSGIRequestHandler()
    handler.address_string = address_recorder
    handler.log("info", "test message")

    assert log_recorder.calls == [
        pretend.call("info", "127.0.0.1 - test message\n")
    ]
    assert address_recorder.calls == [pretend.call()]
| robhudson/warehouse | tests/test_serving.py | Python | apache-2.0 | 1,290 |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class clusterinstance_binding(base_resource):
""" Binding class showing the resources that can be bound to clusterinstance_binding.
"""
def __init__(self) :
self._clid = 0
self.clusterinstance_clusternode_binding = []
@property
def clid(self) :
ur"""Unique number that identifies the cluster.<br/>Minimum value = 1<br/>Maximum value = 16.
"""
try :
return self._clid
except Exception as e:
raise e
@clid.setter
def clid(self, clid) :
ur"""Unique number that identifies the cluster.<br/>Minimum value = 1<br/>Maximum value = 16
"""
try :
self._clid = clid
except Exception as e:
raise e
@property
def clusterinstance_clusternode_bindings(self) :
ur"""clusternode that can be bound to clusterinstance.
"""
try :
return self._clusterinstance_clusternode_binding
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(clusterinstance_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.clusterinstance_binding
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.clid is not None :
return str(self.clid)
return None
except Exception as e :
raise e
@classmethod
def get(self, service, clid) :
ur""" Use this API to fetch clusterinstance_binding resource.
"""
try :
if type(clid) is not list :
obj = clusterinstance_binding()
obj.clid = clid
response = obj.get_resource(service)
else :
if clid and len(clid) > 0 :
obj = [clusterinstance_binding() for _ in range(len(clid))]
for i in range(len(clid)) :
obj[i].clid = clid[i];
response[i] = obj[i].get_resource(service)
return response
except Exception as e:
raise e
class clusterinstance_binding_response(base_response) :
	""" Nitro response envelope for clusterinstance_binding requests. """
	def __init__(self, length=1) :
		self.errorcode = 0
		self.message = ""
		self.severity = ""
		self.sessionid = ""
		# Pre-allocate `length` result objects. (The previous duplicate
		# `= []` assignment was dead code, immediately overwritten here.)
		self.clusterinstance_binding = [clusterinstance_binding() for _ in range(length)]
'''
Created on Jul 19, 2010
@author: jnaous
'''
from django.core.urlresolvers import reverse
from django.test import Client
from common.tests.client import test_get_and_post_form
from django.contrib.auth.models import User
from pyquery import PyQuery as pq
from openflow.plugin.models import OpenFlowInterface, NonOpenFlowConnection
from geni.planetlab.models import PlanetLabNode
try:
from setup_expedient_params import \
SUPERUSER_USERNAME, SUPERUSER_PASSWORD,\
USER_INFO,\
PL_AGGREGATE_INFO,\
OF_AGGREGATE_INFO,\
OF_PL_CONNECTIONS
except ImportError:
print """
Could not import setup_om_params module. Make sure this
module exists and that it contains the following variables:
SUPERUSER_USERNAME, SUPERUSER_PASSWORD,
CH_PASSWORD, CH_USERNAME
"""
raise
def run():
    """Populate a fresh Expedient instance with aggregates, users and projects.

    Side-effecting setup script: logs in as the superuser, registers the
    PlanetLab and OpenFlow aggregates, records the non-OpenFlow connections,
    then creates each configured user together with that user's projects.
    """
    client = Client()
    client.login(username=SUPERUSER_USERNAME,
                 password=SUPERUSER_PASSWORD)
    # Add all planetlab aggregates
    for pl_agg in PL_AGGREGATE_INFO:
        print "adding pl agg %s" % pl_agg["url"]
        response = test_get_and_post_form(
            client,
            reverse("planetlab_aggregate_create"),
            pl_agg,
        )
        print "got response %s" % response
        # A 302 redirect indicates the creation form was accepted.
        assert response.status_code == 302
    for of_agg in OF_AGGREGATE_INFO:
        print "adding of agg %s" % of_agg["url"]
        response = test_get_and_post_form(
            client,
            reverse("openflow_aggregate_create"),
            of_agg,
            del_params=["verify_certs"],
        )
        assert response.status_code == 302
    for cnxn_tuple in OF_PL_CONNECTIONS:
        print "adding cnxn %s" % (cnxn_tuple,)
        # Tuple layout: (switch datapath id, port number, PlanetLab node name).
        NonOpenFlowConnection.objects.get_or_create(
            of_iface=OpenFlowInterface.objects.get(
                switch__datapath_id=cnxn_tuple[0],
                port_num=cnxn_tuple[1],
            ),
            resource=PlanetLabNode.objects.get(name=cnxn_tuple[2]),
        )
    client.logout()
    for username, info in USER_INFO.items():
        # create user
        User.objects.create_user(
            username=username, email=info["email"], password=info["password"])
        client.login(username=username, password=info["password"])
        # create project and slice
        for project in info["projects"]:
            response = test_get_and_post_form(
                client, reverse("project_create"),
                params=dict(
                    name=project["name"],
                    description=project["description"],
                ),
            )
            assert response.status_code == 302
# This code is missing the project id. Need to get somehow to use reverse.
#            for slice in project["slices"]:
#                response = test_get_and_post_form(
#                    client, reverse("slice_create"),
#                    params=dict(
#                        name=slice["name"],
#                        description=slice["description"],
#                    ),
#                )
#                assert response.status_code == 302
        client.logout()
| ict-felix/stack | vt_manager_kvm/src/python/scripts/setup_ch.py | Python | apache-2.0 | 3,204 |
# Copyright (c) 2014, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sahara.plugins.mapr.versions.base_cluster_configurer as bcc
class ClusterConfigurer(bcc.BaseClusterConfigurer):
    """Cluster configurer for the MapR 4.0.1 / MRv2 (YARN) plugin version."""

    def get_hadoop_conf_dir(self):
        """Return the Hadoop 2.4.1 configuration directory on MapR nodes."""
        return '/opt/mapr/hadoop/hadoop-2.4.1/etc/hadoop'

    def is_node_awareness_enabled(self):
        # Node/topology awareness is disabled for this plugin version.
        return False
| citrix-openstack-build/sahara | sahara/plugins/mapr/versions/v4_0_1_mrv2/cluster_configurer.py | Python | apache-2.0 | 875 |
"""
Support for displaying the current CPU speed.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.cpuspeed/
"""
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
# PyPI dependency installed on demand by Home Assistant.
REQUIREMENTS = ['py-cpuinfo==3.2.0']
_LOGGER = logging.getLogger(__name__)
# Keys for the extra state attributes exposed by the sensor.
ATTR_BRAND = 'Brand'
ATTR_HZ = 'GHz Advertised'
ATTR_ARCH = 'arch'
DEFAULT_NAME = 'CPU speed'
ICON = 'mdi:pulse'
# Extend the base sensor schema with an optional friendly name.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
# pylint: disable=unused-variable
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the CPU speed sensor from the platform configuration."""
    add_devices([CpuSpeedSensor(config.get(CONF_NAME))])
class CpuSpeedSensor(Entity):
    """Representation of a CPU sensor."""

    def __init__(self, name):
        """Initialize the sensor."""
        self._name = name
        self._state = None
        self._unit_of_measurement = 'GHz'
        # Populate self.info and self._state immediately so the entity has a
        # valid state before the first scheduled poll.
        self.update()

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        return self._unit_of_measurement

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        # Implicitly returns None unless update() populated self.info.
        if self.info is not None:
            return {
                ATTR_ARCH: self.info['arch'],
                ATTR_BRAND: self.info['brand'],
                # hz_advertised_raw[0] holds the raw Hz value; convert to GHz.
                ATTR_HZ: round(self.info['hz_advertised_raw'][0]/10**9, 2)
            }

    @property
    def icon(self):
        """Return the icon to use in the frontend, if any."""
        return ICON

    def update(self):
        """Get the latest data and updates the state."""
        # Imported lazily so py-cpuinfo is only required once the sensor runs.
        from cpuinfo import cpuinfo
        self.info = cpuinfo.get_cpu_info()
        # hz_actual_raw[0] holds the raw Hz value; report it in GHz.
        self._state = round(float(self.info['hz_actual_raw'][0])/10**9, 2)
| JshWright/home-assistant | homeassistant/components/sensor/cpuspeed.py | Python | apache-2.0 | 2,256 |
#!/usr/bin/python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This fuzzer is an example of native extension fuzzing with coverage.
This requires that the fuzzer be built with coverage:
see build_install_ujson.sh.
The fuzzer should then be executed under ASAN.
As an example, this is the run command under the author's machine:
LD_PRELOAD="/usr/lib/llvm-9/lib/clang/9.0.1/lib/linux/libclang_rt.asan-x86_64.so
$(python3 -c "import atheris; print(atheris.path())")" python3
./ujson_fuzzer.py -detect_leaks=0
This fuzzer is provided mainly as an example for how to deal with native
coverage.
"""
import sys
import atheris
import ujson
def TestOneInput(input_bytes):
  """Round-trips fuzzer-provided text through ujson loads/dumps."""
  fdp = atheris.FuzzedDataProvider(input_bytes)
  text = fdp.ConsumeUnicode(sys.maxsize)

  try:
    parsed = ujson.loads(text)
  except ValueError:
    # Not valid JSON; nothing further to exercise.
    return

  # We make sure there's no error in encoding, but we don't actually compare
  # (encoded == original) because it's not entirely preserving. For example,
  # it does not preserve whitespace.
  ujson.dumps(parsed)
def main():
  """Configure atheris with the fuzz target and run the fuzzing loop."""
  # Since everything interesting in this fuzzer is in native code, we can
  # disable Python coverage to improve performance and reduce coverage noise.
  atheris.Setup(sys.argv, TestOneInput, enable_python_coverage=False)
  atheris.Fuzz()


if __name__ == "__main__":
  main()
| skia-dev/oss-fuzz | projects/ujson/ujson_fuzzer.py | Python | apache-2.0 | 1,921 |
from unittest import TestCase
from pandas import Timestamp, Timedelta
from zipline.utils.cache import CachedObject, Expired, ExpiringCache
class CachedObjectTestCase(TestCase):
    """Unit tests for CachedObject expiry semantics."""

    def test_cached_object(self):
        deadline = Timestamp('2014')
        one_minute = Timedelta('1 minute')
        wrapped = CachedObject(1, deadline)

        # Unwrapping strictly before or exactly at the expiry succeeds.
        self.assertEqual(wrapped.unwrap(deadline - one_minute), 1)
        self.assertEqual(wrapped.unwrap(deadline), 1)

        # One minute past the deadline the object reports Expired, carrying
        # the expiry timestamp in the exception args.
        with self.assertRaises(Expired) as e:
            wrapped.unwrap(deadline + one_minute)
        self.assertEqual(e.exception.args, (deadline,))

    def test_expired(self):
        always_expired = CachedObject.expired()
        for dt in (Timestamp.min, Timestamp.now(), Timestamp.max):
            with self.assertRaises(Expired):
                always_expired.unwrap(dt)
class ExpiringCacheTestCase(TestCase):
    """Unit tests for ExpiringCache get/set expiry behavior."""

    def test_expiring_cache(self):
        minute = Timedelta('1 minute')
        expiry_foo = Timestamp('2014')
        expiry_bar = Timestamp('2015')
        unused_expiry = Timestamp('2016')

        cache = ExpiringCache()
        cache.set('foo', 1, expiry_foo)
        cache.set('bar', 2, expiry_bar)

        self.assertEqual(cache.get('foo', expiry_foo - minute), 1)
        # Lookup exactly at the expiry is still allowed.
        self.assertEqual(cache.get('foo', expiry_foo), 1)

        with self.assertRaises(KeyError) as e:
            cache.get('foo', expiry_foo + minute)
        self.assertEqual(e.exception.args, ('foo',))

        # The expired entry was evicted, so earlier timestamps now miss too.
        with self.assertRaises(KeyError) as e:
            cache.get('foo', expiry_foo - minute)
        self.assertEqual(e.exception.args, ('foo',))

        # 'bar' expires a year later, so it is still present one minute past
        # expiry_foo (the original named this timestamp after_2 although it
        # was derived from expiry_1).
        self.assertEqual(cache.get('bar', expiry_foo + minute), 2)

        # Keys that were never stored raise a similar KeyError.
        with self.assertRaises(KeyError) as e:
            cache.get('baz', unused_expiry)
        self.assertEqual(e.exception.args, ('baz',))
| humdings/zipline | tests/utils/test_cache.py | Python | apache-2.0 | 2,166 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import horizon
from horizon.dashboards.nova import dashboard
class InstancesAndVolumes(horizon.Panel):
    """Panel grouping instance and volume views on the Nova dashboard."""
    # Display name and URL slug for this panel.
    name = "Instances & Volumes"
    slug = 'instances_and_volumes'


# Make the panel discoverable by registering it with the Nova dashboard.
dashboard.Nova.register(InstancesAndVolumes)
| andrewsmedina/horizon | horizon/horizon/dashboards/nova/instances_and_volumes/panel.py | Python | apache-2.0 | 870 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from tempest.api.image import base
from tempest.common.utils import data_utils
from tempest import exceptions
from tempest.test import attr
class ImagesTagsNegativeTest(base.BaseV2ImageTest):
    """Negative tests for image tag operations in the Image API v2."""

    @attr(type=['negative', 'gate'])
    def test_update_tags_for_non_existing_image(self):
        # Adding a tag to a random, non-existent image id must return 404.
        new_tag = data_utils.rand_name('tag-')
        bogus_image_id = str(uuid.uuid4())
        self.assertRaises(exceptions.NotFound,
                          self.client.add_image_tag,
                          bogus_image_id, new_tag)

    @attr(type=['negative', 'gate'])
    def test_delete_non_existing_tag(self):
        # Deleting a tag that was never set on an existing image must 404.
        resp, body = self.create_image(container_format='bare',
                                       disk_format='raw',
                                       is_public=True,
                                       )
        image_id = body['id']
        self.addCleanup(self.client.delete_image, image_id)
        missing_tag = data_utils.rand_name('non-exist-tag-')
        self.assertRaises(exceptions.NotFound,
                          self.client.delete_image_tag,
                          image_id, missing_tag)
| eltonkevani/tempest_el_env | tempest/api/image/v2/test_images_tags_negative.py | Python | apache-2.0 | 1,762 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests add_loss API correctness."""
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.keras import Input
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import layers
from tensorflow.python.keras import losses
from tensorflow.python.keras import Model
from tensorflow.python.keras import optimizer_v2
from tensorflow.python.keras import Sequential
from tensorflow.python.keras import testing_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.rmsprop import RMSPropOptimizer
# Short aliases for the class-based and functional MAE losses used throughout
# the tests below.
MAE = losses.MeanAbsoluteError
mae = losses.mean_absolute_error
def get_ctl_train_step(model):
  """Builds a custom-training-loop step fn minimizing sum(model.losses).

  Returns a function taking (x, y[, w]) that runs one SGD(0.05) step on the
  model's trainable weights and returns the scalar loss.
  """
  optimizer = optimizer_v2.gradient_descent.SGD(0.05)

  def train_step(x, y, w=None):
    with backprop.GradientTape() as tape:
      # The sample-weight input is optional; feed it only when provided.
      model([x, y] if w is None else [x, y, w])
      loss = math_ops.reduce_sum(model.losses)
    variables = model.trainable_weights
    gradients = tape.gradient(loss, variables)
    optimizer.apply_gradients(zip(gradients, variables))
    return loss

  return train_step
# TODO(psv): Add tests cases where a model is used in loss function but is
# not part of the training model.
class TestAddLossCorrectness(keras_parameterized.TestCase):
  """Correctness tests for losses registered via `add_loss`.

  Covers `Model.fit`, custom training loops (CTL), sample weights,
  activity regularizers, shared layers, and loss collection/clearing.
  """

  def setUp(self):
    super(TestAddLossCorrectness, self).setUp()
    # Small fixed regression fixture shared by most tests below.
    self.x = np.array([[0.], [1.], [2.]], dtype='float32')
    self.y = np.array([[0.5], [2.], [3.5]], dtype='float32')
    self.w = np.array([[1.25], [0.5], [1.25]], dtype='float32')

  @keras_parameterized.run_all_keras_modes
  def test_loss_on_model_fit(self):
    """Two equivalent add_loss forms (loss object and op) train via fit."""
    inputs = Input(shape=(1,))
    targets = Input(shape=(1,))
    outputs = testing_utils.Bias()(inputs)
    model = Model([inputs, targets], outputs)
    model.add_loss(MAE()(targets, outputs))
    model.add_loss(math_ops.reduce_mean(mae(targets, outputs)))
    model.compile(
        optimizer_v2.gradient_descent.SGD(0.05),
        run_eagerly=testing_utils.should_run_eagerly())

    history = model.fit([self.x, self.y], batch_size=3, epochs=5)
    self.assertAllClose(history.history['loss'], [2., 1.8, 1.6, 1.4, 1.2], 1e-3)

  @keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])
  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_loss_callable_on_model_fit(self):
    """A zero-argument callable loss is re-evaluated each step."""
    model = testing_utils.get_model_from_layers([testing_utils.Bias()],
                                                input_shape=(1,))

    def callable_loss():
      return math_ops.reduce_sum(model.weights)

    model.add_loss(callable_loss)
    model.compile(
        optimizer_v2.gradient_descent.SGD(0.1),
        run_eagerly=testing_utils.should_run_eagerly())

    history = model.fit(self.x, batch_size=3, epochs=5)
    self.assertAllClose(history.history['loss'], [0., -.1, -.2, -.3, -.4], 1e-3)

  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_loss_on_model_ctl(self):
    """Same losses as `test_loss_on_model_fit`, driven by a custom loop."""
    def get_model_and_train_step():
      inputs = Input(shape=(1,))
      targets = Input(shape=(1,))
      outputs = testing_utils.Bias()(inputs)
      model = Model([inputs, targets], outputs)
      model.add_loss(MAE()(targets, outputs))
      model.add_loss(math_ops.reduce_mean(mae(targets, outputs)))
      return get_ctl_train_step(model)

    train_step = get_model_and_train_step()
    loss = [train_step(self.x, self.y) for _ in range(5)]
    self.assertAllClose(loss, [2., 1.8, 1.6, 1.4, 1.2], 1e-3)

    # Same expectation when the step is wrapped in a tf.function.
    train_step = def_function.function(get_model_and_train_step())
    loss = [train_step(self.x, self.y) for _ in range(5)]
    self.assertAllClose(loss, [2., 1.8, 1.6, 1.4, 1.2], 1e-3)

  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_loss_callable_on_model_ctl(self):
    """Callable loss in a custom training loop, eager and tf.function."""
    def get_model_and_train_step():
      inputs = Input(shape=(1,))
      targets = Input(shape=(1,))
      outputs = testing_utils.Bias()(inputs)
      model = Model([inputs, targets], outputs)

      def callable_loss():
        return math_ops.reduce_sum(model.weights)

      model.add_loss(callable_loss)
      return get_ctl_train_step(model)

    train_step = get_model_and_train_step()
    loss = [train_step(self.x, self.y) for _ in range(5)]
    self.assertAllClose(loss, [0., -0.05, -0.1, -0.15, -0.2], 1e-3)

    train_step = def_function.function(get_model_and_train_step())
    loss = [train_step(self.x, self.y) for _ in range(5)]
    self.assertAllClose(loss, [0., -0.05, -0.1, -0.15, -0.2], 1e-3)

  @keras_parameterized.run_all_keras_modes
  def test_loss_with_sample_weight_on_model_fit(self):
    """Sample weights flow through both loss-object and op-based losses."""
    inputs = Input(shape=(1,))
    targets = Input(shape=(1,))
    sw = Input(shape=(1,))
    outputs = testing_utils.Bias()(inputs)
    model = Model([inputs, targets, sw], outputs)
    model.add_loss(MAE()(targets, outputs, sw))
    model.add_loss(3 * math_ops.reduce_mean(sw * mae(targets, outputs)))
    model.compile(
        optimizer_v2.gradient_descent.SGD(0.025),
        run_eagerly=testing_utils.should_run_eagerly())

    history = model.fit([self.x, self.y, self.w], batch_size=3, epochs=5)
    self.assertAllClose(history.history['loss'], [4., 3.6, 3.2, 2.8, 2.4], 1e-3)

  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_loss_with_sample_weight_on_model_ctl(self):
    """Sample-weighted losses in a custom training loop."""
    def get_model_and_train_step():
      inputs = Input(shape=(1,))
      targets = Input(shape=(1,))
      sw = Input(shape=(1,))
      outputs = testing_utils.Bias()(inputs)
      model = Model([inputs, targets, sw], outputs)
      model.add_loss(MAE()(targets, outputs, sw))
      model.add_loss(math_ops.reduce_mean(sw * mae(targets, outputs)))
      return get_ctl_train_step(model)

    train_step = get_model_and_train_step()
    loss = [train_step(self.x, self.y, self.w) for _ in range(5)]
    self.assertAllClose(loss, [2., 1.8, 1.6, 1.4, 1.2], 1e-3)

    train_step = def_function.function(get_model_and_train_step())
    loss = [train_step(self.x, self.y, self.w) for _ in range(5)]
    self.assertAllClose(loss, [2., 1.8, 1.6, 1.4, 1.2], 1e-3)

  @keras_parameterized.run_all_keras_modes
  def test_loss_with_sample_weight_in_model_call(self):
    """add_loss called from a subclassed Model's `call`."""
    class MyModel(Model):

      def __init__(self):
        super(MyModel, self).__init__()
        self.bias = testing_utils.Bias()

      def call(self, inputs):
        # inputs = [x, targets, sample_weights]
        outputs = self.bias(inputs[0])
        self.add_loss(MAE()(inputs[1], outputs, inputs[2]))
        self.add_loss(math_ops.reduce_mean(inputs[2] * mae(inputs[1], outputs)))
        return outputs

    model = MyModel()
    model.predict([self.x, self.y, self.w])
    model.compile(
        optimizer_v2.gradient_descent.SGD(0.05),
        run_eagerly=testing_utils.should_run_eagerly())

    history = model.fit([self.x, self.y, self.w], batch_size=3, epochs=5)
    self.assertEqual(len(model.losses), 2)
    self.assertAllClose(history.history['loss'], [2., 1.8, 1.6, 1.4, 1.2], 1e-3)

    eval_out = model.evaluate([self.x, self.y, self.w])
    self.assertAlmostEqual(eval_out, 1.0, 3)

  @keras_parameterized.run_all_keras_modes
  def test_loss_with_sample_weight_in_layer_call(self):
    """add_loss called from a custom Layer's `call`."""
    class MyLayer(layers.Layer):

      def __init__(self):
        super(MyLayer, self).__init__()
        self.bias = testing_utils.Bias()

      def call(self, inputs):
        out = self.bias(inputs[0])
        self.add_loss(MAE()(inputs[1], out, inputs[2]))
        self.add_loss(math_ops.reduce_mean(inputs[2] * mae(inputs[1], out)))
        return out

    inputs = Input(shape=(1,))
    targets = Input(shape=(1,))
    sw = Input(shape=(1,))

    outputs = MyLayer()([inputs, targets, sw])
    model = Model([inputs, targets, sw], outputs)
    model.predict([self.x, self.y, self.w])
    model.compile(
        optimizer_v2.gradient_descent.SGD(0.05),
        run_eagerly=testing_utils.should_run_eagerly())

    history = model.fit([self.x, self.y, self.w], batch_size=3, epochs=5)
    self.assertAllClose(history.history['loss'], [2., 1.8, 1.6, 1.4, 1.2], 1e-3)

    output = model.evaluate([self.x, self.y, self.w])
    self.assertAlmostEqual(output, 1.0, 3)

    output = model.test_on_batch([self.x, self.y, self.w])
    self.assertAlmostEqual(output, 1.0, 3)

  @keras_parameterized.run_all_keras_modes
  def test_loss_on_layer(self):
    """A layer-level add_loss is collected by the enclosing Model."""
    class MyLayer(layers.Layer):

      def call(self, inputs):
        self.add_loss(math_ops.reduce_sum(inputs))
        return inputs

    inputs = Input((3,))
    layer = MyLayer()
    outputs = layer(inputs)
    model = Model(inputs, outputs)
    self.assertEqual(len(model.losses), 1)
    model.compile(
        'sgd',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly())
    loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
    # mse is 0 (targets == inputs), so the loss is the added reduce_sum.
    self.assertEqual(loss, 2 * 3)

  @keras_parameterized.run_all_keras_modes
  @keras_parameterized.run_with_all_model_types
  def test_activity_regularizer(self):
    """An l2 activity regularizer must increase the evaluated loss."""
    loss = {}
    for reg in [None, 'l2']:
      model_layers = [
          layers.Dense(
              10,
              activation='relu',
              activity_regularizer=reg,
              kernel_initializer='ones',
              use_bias=False),
          layers.Dense(
              1,
              activation='sigmoid',
              kernel_initializer='ones',
              use_bias=False),
      ]

      model = testing_utils.get_model_from_layers(
          model_layers, input_shape=(10,))

      x = np.ones((10, 10), 'float32')
      y = np.zeros((10, 1), 'float32')

      optimizer = RMSPropOptimizer(learning_rate=0.001)
      model.compile(
          optimizer,
          'binary_crossentropy',
          run_eagerly=testing_utils.should_run_eagerly())
      model.fit(x, y, batch_size=2, epochs=5)
      loss[reg] = model.evaluate(x, y)
    self.assertLess(loss[None], loss['l2'])

  @keras_parameterized.run_all_keras_modes
  @keras_parameterized.run_with_all_model_types
  def test_activity_regularizer_loss_value(self):
    """With zero kernel and unit bias, the l2 activity loss is exactly 0.01."""
    layer = layers.Dense(
        1,
        kernel_initializer='zeros',
        bias_initializer='ones',
        activity_regularizer='l2')

    model = testing_utils.get_model_from_layers([layer], input_shape=(10,))

    x = np.ones((10, 10), 'float32')
    optimizer = RMSPropOptimizer(learning_rate=0.001)
    model.compile(
        optimizer,
        run_eagerly=testing_utils.should_run_eagerly())
    loss = model.test_on_batch(x)
    self.assertAlmostEqual(0.01, loss, places=4)

  @keras_parameterized.run_all_keras_modes
  def test_activity_regularizer_batch_independent(self):
    """Activity regularization must not scale with the batch size."""
    inputs = layers.Input(shape=(10,))
    x = layers.Dense(10, activation='relu', activity_regularizer='l2')(inputs)
    outputs = layers.Dense(1, activation='sigmoid')(x)
    model = Model(inputs, outputs)

    optimizer = RMSPropOptimizer(learning_rate=0.001)
    model.compile(
        optimizer,
        run_eagerly=testing_utils.should_run_eagerly())

    loss_small_batch = model.test_on_batch(np.ones((10, 10), 'float32'))
    loss_big_batch = model.test_on_batch(np.ones((20, 10), 'float32'))
    self.assertAlmostEqual(loss_small_batch, loss_big_batch, places=4)

  @keras_parameterized.run_all_keras_modes
  def test_with_shared_layer(self):
    """A layer shared at two depths contributes one loss per call."""
    class LayerWithLoss(layers.Layer):

      def call(self, inputs):
        self.add_loss(math_ops.reduce_sum(inputs), inputs=inputs)
        return inputs * 2

    shared_layer = LayerWithLoss()

    m = Sequential([shared_layer])
    m2 = Sequential([shared_layer, m])
    m2(array_ops.constant([1, 2, 3]))
    self.assertEqual(len(m2.losses), 2)
    # First call sees [1,2,3] (sum 6); second sees the doubled output (sum 12).
    self.assertAllClose(m2.losses, [6, 12])

  @keras_parameterized.run_all_keras_modes
  def test_with_shared_nested_layer(self):
    """Same as above, with the loss layer nested one level deeper."""
    class LayerWithLoss(layers.Layer):

      def call(self, inputs):
        self.add_loss(math_ops.reduce_sum(inputs), inputs=inputs)
        return inputs * 2

    class LayerWithNestedLayerWithLoss(layers.Layer):

      def __init__(self):
        super(LayerWithNestedLayerWithLoss, self).__init__()
        self.loss_layer = LayerWithLoss()

      def call(self, inputs):
        return self.loss_layer(inputs)

    shared_layer = LayerWithNestedLayerWithLoss()

    m = Sequential([shared_layer])
    m2 = Sequential([shared_layer, m])
    m2(array_ops.constant([1, 2, 3]))
    self.assertEqual(len(m2.losses), 2)
    self.assertAllClose(m2.losses, [6, 12])

  @keras_parameterized.run_all_keras_modes
  def test_clear_losses(self):
    """Eager losses are cleared per `__call__`; graph losses accumulate."""
    class LayerWithSharedNestedLossLayer(layers.Layer):

      def __init__(self):
        super(LayerWithSharedNestedLossLayer, self).__init__()
        self.loss_layer = layers.ActivityRegularization(l2=0.001)
        self.add_weight(shape=(1,), regularizer='l2')

      def call(self, x):
        # The inner regularization layer is applied twice on purpose.
        x = self.loss_layer(x)
        return self.loss_layer(x)

    inputs = Input(shape=(1,))
    l = LayerWithSharedNestedLossLayer()  # Weight loss + 2 activity losses.
    x1 = array_ops.ones((1, 1))
    _ = l(x1)
    if not context.executing_eagerly():
      self.assertEqual(len(l.get_losses_for(x1)), 2)
      self.assertEqual(len(l.get_losses_for(None)), 1)

    x2 = array_ops.ones((1, 1))
    _ = l(x2)
    if not context.executing_eagerly():
      self.assertEqual(len(l.get_losses_for(x1)), 2)
      self.assertEqual(len(l.get_losses_for(x2)), 2)
      self.assertEqual(len(l.get_losses_for(None)), 1)

    outputs = l(inputs)
    model = Model(inputs, outputs)
    if not context.executing_eagerly():
      self.assertEqual(len(model.losses), 7)
      self.assertEqual(len(l.get_losses_for(x1)), 2)
      self.assertEqual(len(l.get_losses_for(x2)), 2)
      self.assertEqual(len(l.get_losses_for(None)), 1)

    x3 = array_ops.ones((1, 1))
    model(x3)
    x4 = array_ops.ones((1, 1))
    model(x4)
    if context.executing_eagerly():
      # Eager losses are cleared every `__call__`.
      self.assertEqual(len(model.losses), 3)
    else:
      self.assertEqual(len(model.losses), 11)
      self.assertEqual(len(model.get_losses_for(x3)), 2)
      self.assertEqual(len(model.get_losses_for(x4)), 2)
      self.assertEqual(len(model.get_losses_for(None)), 1)

  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_invalid_constant_input(self):
    """Plain Python scalars are rejected by add_loss in v2."""
    inputs = Input(shape=(1,))
    outputs = testing_utils.Bias()(inputs)
    model = Model(inputs, outputs)
    with self.assertRaisesRegex(
        ValueError,
        'Expected a symbolic Tensors or a callable for the loss value'):
      model.add_loss(1.)

  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_invalid_variable_input(self):
    """Bare variables are rejected by add_loss in v2."""
    inputs = Input(shape=(1,))
    outputs = testing_utils.Bias()(inputs)
    model = Model(inputs, outputs)
    with self.assertRaisesRegex(
        ValueError,
        'Expected a symbolic Tensors or a callable for the loss value'):
      model.add_loss(model.weights[0])

  @keras_parameterized.run_all_keras_modes
  def test_add_entropy_loss_on_functional_model(self):
    """Gradients must exist for all variables when fitting an added loss."""
    inputs = Input(shape=(1,))
    targets = Input(shape=(1,))
    outputs = testing_utils.Bias()(inputs)
    model = Model([inputs, targets], outputs)
    model.add_loss(losses.binary_crossentropy(targets, outputs))
    model.compile('sgd', run_eagerly=testing_utils.should_run_eagerly())
    with test.mock.patch.object(logging, 'warning') as mock_log:
      model.fit([self.x, self.y], batch_size=3, epochs=5)
      self.assertNotIn('Gradients do not exist for variables',
                       str(mock_log.call_args))
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
| sarvex/tensorflow | tensorflow/python/keras/tests/add_loss_correctness_test.py | Python | apache-2.0 | 16,393 |
"""
HSA driver bridge implementation
"""
from collections.abc import Sequence
import sys
import atexit
import os
import ctypes
import struct
import traceback
import weakref
import logging
from contextlib import contextmanager
from collections import defaultdict, deque
from functools import total_ordering
from numba import mviewbuf
from numba.core import utils, config
from .error import HsaSupportError, HsaDriverError, HsaApiError
from numba.roc.hsadrv import enums, enums_ext, drvapi
import numpy as np
# Module-level logger used to trace driver API calls.
_logger = logging.getLogger(__name__)


class HsaKernelTimedOut(HsaDriverError):
    """Driver error for a kernel that appears to have timed out.

    NOTE(review): no raise site is visible in this module; the semantics
    are inferred from the name -- confirm against the callers.
    """
    pass
def _device_type_to_string(device):
try:
return ['CPU', 'GPU', 'DSP'][device]
except IndexError:
return 'Unknown'
# Default install location of the ROCm HSA runtime shared library.
DEFAULT_HSA_DRIVER = '/opt/rocm/lib/libhsa-runtime64.so'


def _find_driver():
    """Locate and load the HSA runtime shared library.

    Honours the NUMBA_HSA_DRIVER environment variable (set it to '0' to
    force a "driver not found" failure). Returns a ctypes CDLL handle on
    success; raises HsaSupportError with collected details on failure.
    """
    envpath = os.environ.get('NUMBA_HSA_DRIVER', DEFAULT_HSA_DRIVER)
    if envpath == '0':
        # Force fail
        _raise_driver_not_found()
    # Determine DLL type: only 64-bit non-Windows/non-macOS is supported.
    if (struct.calcsize('P') != 8
            or sys.platform == 'win32'
            or sys.platform == 'darwin'):
        _raise_platform_not_supported()
    else:
        # Assume to be *nix like and 64 bit
        dlloader = ctypes.CDLL
        dldir = ['/usr/lib', '/usr/lib64']
        dlname = 'libhsa-runtime64.so'
    # NOTE(review): os.environ.get above always returns a non-None value
    # because DEFAULT_HSA_DRIVER is supplied as the default, so the
    # `else` branch below is effectively dead code.
    if envpath is not None:
        try:
            envpath = os.path.abspath(envpath)
        except ValueError:
            raise HsaSupportError("NUMBA_HSA_DRIVER %s is not a valid path" %
                                  envpath)
        if not os.path.isfile(envpath):
            raise HsaSupportError("NUMBA_HSA_DRIVER %s is not a valid file "
                                  "path. Note it must be a filepath of the .so/"
                                  ".dll/.dylib or the driver" % envpath)
        candidates = [envpath]
    else:
        # First search for the name in the default library path.
        # If that is not found, try the specific path.
        candidates = [dlname] + [os.path.join(x, dlname) for x in dldir]
    # Load the driver; Collect driver error information
    path_not_exist = []
    driver_load_error = []
    for path in candidates:
        try:
            dll = dlloader(path)
        except OSError as e:
            # Problem opening the DLL
            path_not_exist.append(not os.path.isfile(path))
            driver_load_error.append(e)
        else:
            return dll
    # Problem loading driver: distinguish "no file found anywhere" from
    # "a file was found but failed to load".
    if all(path_not_exist):
        _raise_driver_not_found()
    else:
        errmsg = '\n'.join(str(e) for e in driver_load_error)
        _raise_driver_error(errmsg)
# Message template for unsupported platforms ({0} is sys.platform).
PLATFORM_NOT_SUPPORTED_ERROR = """
HSA is not currently supported on this platform ({0}).
"""


def _raise_platform_not_supported():
    """Raise HsaSupportError naming the current (unsupported) platform."""
    raise HsaSupportError(PLATFORM_NOT_SUPPORTED_ERROR.format(sys.platform))
# Message shown when no candidate runtime library file exists.
DRIVER_NOT_FOUND_MSG = """
The HSA runtime library cannot be found.
If you are sure that the HSA is installed, try setting environment
variable NUMBA_HSA_DRIVER with the file path of the HSA runtime shared
library.
"""


def _raise_driver_not_found():
    """Raise HsaSupportError indicating the runtime library is missing."""
    raise HsaSupportError(DRIVER_NOT_FOUND_MSG)
# Message template for a library that exists but failed to load (%s is
# the collected loader error text).
DRIVER_LOAD_ERROR_MSG = """
A HSA runtime library was found, but failed to load with error:
%s
"""


def _raise_driver_error(e):
    """Raise HsaSupportError wrapping the loader error text `e`."""
    raise HsaSupportError(DRIVER_LOAD_ERROR_MSG % e)
# Message template used when a looked-up symbol is absent from the
# loaded runtime library (%s is the function name).
MISSING_FUNCTION_ERRMSG = """driver missing function: %s.
"""
class Recycler(object):
    """Deferred, batched resource reclamation.

    Objects handed to `free` are parked in a garbage list; their
    `_finalizer` callables are invoked in batches once more than ten
    objects are pending, or all at once via `drain` at teardown.
    """

    def __init__(self):
        self._garbage = []
        self.enabled = True

    def free(self, obj):
        """Park `obj` for later finalization and run a service pass."""
        self._garbage.append(obj)
        self.service()

    def _cleanup(self):
        # Invoke each parked object's finalizer, then forget them all.
        for item in self._garbage:
            item._finalizer(item)
        del self._garbage[:]

    def service(self):
        """Finalize pending objects once the backlog exceeds ten."""
        if self.enabled and len(self._garbage) > 10:
            self._cleanup()

    def drain(self):
        """Finalize everything immediately and disable further batching."""
        self._cleanup()
        self.enabled = False
# The Driver ###########################################################

class Driver(object):
    """
    Driver API functions are lazily bound.

    Singleton facade over the HSA runtime: attribute access resolves
    either a system property (via `_hsa_properties`) or a driver API
    function (via `drvapi.API_PROTOTYPES`), binding it on first use.
    """
    _singleton = None
    _agent_map = None
    _api_prototypes = drvapi.API_PROTOTYPES  # avoid premature GC at exit

    # System-wide properties exposed as attributes: name -> (enum, ctype).
    _hsa_properties = {
        'version_major': (enums.HSA_SYSTEM_INFO_VERSION_MAJOR, ctypes.c_uint16),
        'version_minor': (enums.HSA_SYSTEM_INFO_VERSION_MINOR, ctypes.c_uint16),
        'timestamp': (enums.HSA_SYSTEM_INFO_TIMESTAMP, ctypes.c_uint64),
        'timestamp_frequency': (enums.HSA_SYSTEM_INFO_TIMESTAMP_FREQUENCY, ctypes.c_uint16),
        'signal_max_wait': (enums.HSA_SYSTEM_INFO_SIGNAL_MAX_WAIT, ctypes.c_uint64),
    }

    def __new__(cls):
        # Enforce the singleton: reuse the existing instance if present.
        obj = cls._singleton
        if obj is not None:
            return obj
        else:
            obj = object.__new__(cls)
            cls._singleton = obj
            return obj

    def __init__(self):
        try:
            if config.DISABLE_HSA:
                raise HsaSupportError("HSA disabled by user")
            self.lib = _find_driver()
            self.is_initialized = False
            self.initialization_error = None
        except HsaSupportError as e:
            # Defer the failure: it is re-raised when an API is accessed.
            self.is_initialized = True
            self.initialization_error = e
        self._agent_map = None
        self._programs = {}
        self._recycler = Recycler()
        self._active_streams = weakref.WeakSet()

    def _initialize_api(self):
        """Call hsa_init once and register the runtime teardown hook."""
        if self.is_initialized:
            return
        self.is_initialized = True
        try:
            self.hsa_init()
        except HsaApiError as e:
            self.initialization_error = e
            raise HsaDriverError("Error at driver init: \n%s:" % e)
        else:
            @atexit.register
            def shutdown():
                # Release agent resources, then drain the recycler.
                try:
                    for agent in self.agents:
                        agent.release()
                except AttributeError:
                    # this is because no agents initialised
                    # so self.agents isn't present
                    pass
                else:
                    self._recycler.drain()

    def _initialize_agents(self):
        """Enumerate all HSA agents once and cache them in `_agent_map`."""
        if self._agent_map is not None:
            return
        self._initialize_api()
        agent_ids = []

        def on_agent(agent_id, ctxt):
            agent_ids.append(agent_id)
            return enums.HSA_STATUS_SUCCESS

        callback = drvapi.HSA_ITER_AGENT_CALLBACK_FUNC(on_agent)
        self.hsa_iterate_agents(callback, None)
        agent_map = dict((agent_id, Agent(agent_id)) for agent_id in agent_ids)
        self._agent_map = agent_map

    @property
    def is_available(self):
        # True when hsa_init succeeded (or will not be attempted again).
        self._initialize_api()
        return self.initialization_error is None

    @property
    def agents(self):
        # All known agents (lazily enumerated).
        self._initialize_agents()
        return self._agent_map.values()

    def create_program(self, model=enums.HSA_MACHINE_MODEL_LARGE,
                       profile=enums.HSA_PROFILE_FULL,
                       rounding_mode=enums.HSA_DEFAULT_FLOAT_ROUNDING_MODE_DEFAULT,
                       options=None):
        """Create a finalizer program; `options` is not supported (None)."""
        program = drvapi.hsa_ext_program_t()
        assert options is None
        self.hsa_ext_program_create(model, profile, rounding_mode,
                                    options, ctypes.byref(program))
        return Program(program)

    def create_signal(self, initial_value, consumers=None):
        """Create an HSA signal visible to `consumers` (default: all agents)."""
        if consumers is None:
            consumers = tuple(self.agents)
        consumers_len = len(consumers)
        consumers_type = drvapi.hsa_agent_t * consumers_len
        consumers = consumers_type(*[c._id for c in consumers])
        result = drvapi.hsa_signal_t()
        self.hsa_signal_create(initial_value, consumers_len, consumers,
                               ctypes.byref(result))
        return Signal(result.value)

    def __getattr__(self, fname):
        """Resolve `fname` as a system property or a driver API function.

        API functions are bound from the shared library, wrapped with a
        debug log, and cached on the instance so subsequent lookups are
        plain attribute accesses.
        """
        # Initialize driver
        self._initialize_api()

        # First try if it is an hsa property
        try:
            enum, typ = self._hsa_properties[fname]
            result = typ()
            self.hsa_system_get_info(enum, ctypes.byref(result))
            return result.value
        except KeyError:
            pass

        # if not a property... try if it is an api call
        try:
            proto = self._api_prototypes[fname]
        except KeyError:
            raise AttributeError(fname)

        if self.initialization_error is not None:
            raise HsaSupportError("Error at driver init: \n%s:" %
                                  self.initialization_error)

        # Find function in driver library
        libfn = self._find_api(fname)

        for key, val in proto.items():
            setattr(libfn, key, val)

        def driver_wrapper(fn):
            def wrapped(*args, **kwargs):
                _logger.debug('call driver api: %s', fname)
                return fn(*args, **kwargs)
            return wrapped

        retval = driver_wrapper(libfn)
        setattr(self, fname, retval)
        return retval

    def _find_api(self, fname):
        """Look up `fname` in the loaded library; stub it out if absent."""
        # Try regular
        try:
            return getattr(self.lib, fname)
        except AttributeError:
            pass

        # Not found.
        # Delay missing function error to use
        def absent_function(*args, **kws):
            raise HsaDriverError(MISSING_FUNCTION_ERRMSG % fname)

        setattr(self, fname, absent_function)
        return absent_function

    @property
    def components(self):
        """Returns a ordered list of components

        The first device should be picked first
        """
        return list(filter(lambda a: a.is_component, reversed(sorted(
            self.agents))))

    def create_stream(self):
        """Create a Stream and track it for `implicit_sync`."""
        st = Stream()
        self._active_streams.add(st)
        return st

    def implicit_sync(self):
        """
        Implicit synchronization for all asynchronous streams
        across all devices.
        """
        _logger.info("implicit sync")
        for st in self._active_streams:
            st.synchronize()
hsa = Driver()
class HsaWrapper(object):
    """Base class for thin OO wrappers over HSA handles.

    Subclasses declare `_hsa_properties` (attribute name -> (info enum,
    ctypes type)) and `_hsa_info_function` (name of the hsa get-info
    call); attribute access is then translated into the corresponding
    driver query against `self._id`.
    """

    def __getattr__(self, fname):
        try:
            enum, typ = self._hsa_properties[fname]
        except KeyError:
            raise AttributeError(
                "%r object has no attribute %r" % (self.__class__, fname))
        func = getattr(hsa, self._hsa_info_function)
        result = typ()
        is_array_type = hasattr(typ, '_length_')
        # if the result is not ctypes array, get a reference)
        result_buff = result if is_array_type else ctypes.byref(result)
        func(self._id, enum, result_buff)
        if not is_array_type or typ._type_ == ctypes.c_char:
            # char arrays decode to a (bytes) value; scalars to .value.
            return result.value
        else:
            return list(result)

    def __dir__(self):
        # Bugfix: the previous implementation concatenated a list with
        # dict-view objects, which raises TypeError on Python 3. Build
        # the union of the name sets instead.
        return sorted(set(dir(type(self))) |
                      set(self.__dict__) |
                      set(self._hsa_properties))
@total_ordering
class Agent(HsaWrapper):
    """Abstracts a HSA compute agent.

    This will wrap and provide an OO interface for hsa_agent_t C-API elements
    """

    # Note this will be handled in a rather unconventional way. When agents get
    # initialized by the driver, a set of instances for all the available agents
    # will be created. After that creation, the __new__ and __init__ methods will
    # be replaced, and the constructor will act as a mapping from an agent_id to
    # the equivalent Agent object. Any attempt to create an Agent with a non
    # existing agent_id will result in an error.
    #
    # the logic for this resides in Driver._initialize_agents

    _hsa_info_function = 'hsa_agent_get_info'
    _hsa_properties = {
        'name': (enums.HSA_AGENT_INFO_NAME, ctypes.c_char * 64),
        'vendor_name': (enums.HSA_AGENT_INFO_VENDOR_NAME, ctypes.c_char * 64),
        'feature': (enums.HSA_AGENT_INFO_FEATURE, drvapi.hsa_agent_feature_t),
        'wavefront_size': (
            enums.HSA_AGENT_INFO_WAVEFRONT_SIZE, ctypes.c_uint32),
        'workgroup_max_dim': (
            enums.HSA_AGENT_INFO_WORKGROUP_MAX_DIM, ctypes.c_uint16 * 3),
        'grid_max_dim': (enums.HSA_AGENT_INFO_GRID_MAX_DIM, drvapi.hsa_dim3_t),
        'grid_max_size': (enums.HSA_AGENT_INFO_GRID_MAX_SIZE, ctypes.c_uint32),
        'fbarrier_max_size': (
            enums.HSA_AGENT_INFO_FBARRIER_MAX_SIZE, ctypes.c_uint32),
        'queues_max': (enums.HSA_AGENT_INFO_QUEUES_MAX, ctypes.c_uint32),
        'queue_max_size': (
            enums.HSA_AGENT_INFO_QUEUE_MAX_SIZE, ctypes.c_uint32),
        'queue_type': (
            enums.HSA_AGENT_INFO_QUEUE_TYPE, drvapi.hsa_queue_type_t),
        'node': (enums.HSA_AGENT_INFO_NODE, ctypes.c_uint32),
        '_device': (enums.HSA_AGENT_INFO_DEVICE, drvapi.hsa_device_type_t),
        'cache_size': (enums.HSA_AGENT_INFO_CACHE_SIZE, ctypes.c_uint32 * 4),
        'isa': (enums.HSA_AGENT_INFO_ISA, drvapi.hsa_isa_t),
    }

    def __init__(self, agent_id):
        # This init will only happen when initializing the agents. After
        # the agent initialization the instances of this class are considered
        # initialized and locked, so this method will be removed.
        self._id = agent_id
        self._recycler = hsa._recycler
        self._queues = set()
        self._initialize_regions()
        self._initialize_mempools()

    @property
    def device(self):
        """Human-readable device type ('CPU'/'GPU'/'DSP'/'Unknown')."""
        return _device_type_to_string(self._device)

    @property
    def is_component(self):
        """True when this agent supports kernel dispatch."""
        return (self.feature & enums.HSA_AGENT_FEATURE_KERNEL_DISPATCH) != 0

    @property
    def regions(self):
        """Memory regions owned by this agent (a `_RegionList`)."""
        return self._regions

    @property
    def mempools(self):
        """AMD memory pools owned by this agent (a `_RegionList`)."""
        return self._mempools

    @property
    def wavebits(self):
        """
        log2(wavefront_size)
        """
        # assume wavefront_size will always be a power of 2
        return bin(self.wavefront_size)[::-1].index('1')

    def _initialize_regions(self):
        """Enumerate this agent's memory regions into `self._regions`."""
        region_ids = []

        def on_region(region_id, ctxt):
            region_ids.append(region_id)
            return enums.HSA_STATUS_SUCCESS

        callback = drvapi.HSA_AGENT_ITERATE_REGIONS_CALLBACK_FUNC(on_region)
        hsa.hsa_agent_iterate_regions(self._id, callback, None)
        self._regions = _RegionList([MemRegion.instance_for(self, region_id)
                                     for region_id in region_ids])

    def _initialize_mempools(self):
        """Enumerate this agent's AMD memory pools into `self._mempools`."""
        mempool_ids = []

        def on_region(_id, ctxt=None):
            mempool_ids.append(_id)
            return enums.HSA_STATUS_SUCCESS

        callback = drvapi.HSA_AMD_AGENT_ITERATE_MEMORY_POOLS_CALLBACK(on_region)
        hsa.hsa_amd_agent_iterate_memory_pools(self._id, callback, None)
        self._mempools = _RegionList([MemPool.instance_for(self, mempool_id)
                                      for mempool_id in mempool_ids])

    def _create_queue(self, size, callback=None, data=None,
                      private_segment_size=None, group_segment_size=None,
                      queue_type=None):
        """Create an HSA queue of `size` packets; returns a weak proxy.

        The agent keeps the strong reference and destroys the queue at
        release time. Segment sizes default to the runtime's defaults
        (encoded as uint32 -1).
        """
        assert queue_type is not None
        assert size <= self.queue_max_size
        cb_typ = drvapi.HSA_QUEUE_CALLBACK_FUNC
        cb = ctypes.cast(None, cb_typ) if callback is None else cb_typ(callback)
        result = ctypes.POINTER(drvapi.hsa_queue_t)()
        private_segment_size = (ctypes.c_uint32(-1)
                                if private_segment_size is None
                                else private_segment_size)
        group_segment_size = (ctypes.c_uint32(-1)
                              if group_segment_size is None
                              else group_segment_size)
        hsa.hsa_queue_create(self._id, size, queue_type, cb, data,
                             private_segment_size, group_segment_size,
                             ctypes.byref(result))

        q = Queue(self, result)
        self._queues.add(q)
        return weakref.proxy(q)

    def create_queue_single(self, *args, **kwargs):
        """Create a single-producer queue (see `_create_queue`)."""
        kwargs['queue_type'] = enums.HSA_QUEUE_TYPE_SINGLE
        return self._create_queue(*args, **kwargs)

    def create_queue_multi(self, *args, **kwargs):
        """Create a multi-producer queue (see `_create_queue`)."""
        kwargs['queue_type'] = enums.HSA_QUEUE_TYPE_MULTI
        return self._create_queue(*args, **kwargs)

    def release(self):
        """
        Release all resources

        Called at system teardown
        """
        for q in list(self._queues):
            q.release()

    def release_queue(self, queue):
        """Detach `queue` and hand it to the recycler for destruction."""
        self._queues.remove(queue)
        self._recycler.free(queue)

    def __repr__(self):
        return "<HSA agent ({0}): {1} {2} '{3}'{4}>".format(self._id,
                                                            self.device,
                                                            self.vendor_name,
                                                            self.name,
                                                            " (component)" if self.is_component else "")

    def _rank(self):
        # Ordering key: dispatch-capable agents first, then grid size,
        # then device type.
        return (self.is_component, self.grid_max_size, self._device)

    def __lt__(self, other):
        # Bugfix: this previously tested `isinstance(self, Agent)`, which
        # is always true, so comparing against a non-Agent raised
        # AttributeError instead of returning NotImplemented.
        if isinstance(other, Agent):
            return self._rank() < other._rank()
        else:
            return NotImplemented

    def __eq__(self, other):
        # Bugfix: same `self` vs `other` isinstance mix-up as __lt__.
        if isinstance(other, Agent):
            return self._rank() == other._rank()
        else:
            return NotImplemented

    def __hash__(self):
        return hash(self._rank())

    def create_context(self):
        """Create a Context bound to this agent."""
        return Context(self)
class _RegionList(Sequence):
__slots__ = '_all', 'globals', 'readonlys', 'privates', 'groups'
def __init__(self, lst):
self._all = tuple(lst)
self.globals = tuple(x for x in lst if x.kind == 'global')
self.readonlys = tuple(x for x in lst if x.kind == 'readonly')
self.privates = tuple(x for x in lst if x.kind == 'private')
self.groups = tuple(x for x in lst if x.kind == 'group')
def __len__(self):
return len(self._all)
def __contains__(self, item):
return item in self._all
def __reversed__(self):
return reversed(self._all)
def __getitem__(self, idx):
return self._all[idx]
class MemPool(HsaWrapper):
    """Abstracts a HSA mem pool.

    This will wrap and provide an OO interface for hsa_amd_memory_pool_t
    C-API elements
    """
    _hsa_info_function = 'hsa_amd_memory_pool_get_info'
    # Attributes resolved via HsaWrapper.__getattr__: name -> (enum, ctype).
    _hsa_properties = {
        'segment': (
            enums_ext.HSA_AMD_MEMORY_POOL_INFO_SEGMENT,
            drvapi.hsa_amd_segment_t
        ),
        '_flags': (
            enums_ext.HSA_AMD_MEMORY_POOL_INFO_GLOBAL_FLAGS,
            ctypes.c_uint32
        ),
        'size': (enums_ext.HSA_AMD_MEMORY_POOL_INFO_SIZE,
                 ctypes.c_size_t),
        'alloc_allowed': (enums_ext.HSA_AMD_MEMORY_POOL_INFO_RUNTIME_ALLOC_ALLOWED,
                          ctypes.c_bool),
        'alloc_granule': (enums_ext.HSA_AMD_MEMORY_POOL_INFO_RUNTIME_ALLOC_GRANULE,
                          ctypes.c_size_t),
        'alloc_alignment': (enums_ext.HSA_AMD_MEMORY_POOL_INFO_RUNTIME_ALLOC_ALIGNMENT,
                            ctypes.c_size_t),
        'accessible_by_all': (enums_ext.HSA_AMD_MEMORY_POOL_INFO_ACCESSIBLE_BY_ALL,
                              ctypes.c_bool),
    }

    # Segment enum -> kind string used by _RegionList grouping.
    _segment_name_map = {
        enums_ext.HSA_AMD_SEGMENT_GLOBAL: 'global',
        enums_ext.HSA_AMD_SEGMENT_READONLY: 'readonly',
        enums_ext.HSA_AMD_SEGMENT_PRIVATE: 'private',
        enums_ext.HSA_AMD_SEGMENT_GROUP: 'group',
    }

    def __init__(self, agent, pool):
        """Do not instantiate MemPool objects directly, use the factory class
        method 'instance_for' to ensure MemPool identity"""
        self._id = pool
        self._owner_agent = agent
        self._as_parameter_ = self._id

    @property
    def kind(self):
        # Kind string ('global'/'readonly'/'private'/'group').
        return self._segment_name_map[self.segment]

    @property
    def agent(self):
        # The agent that owns this pool.
        return self._owner_agent

    def supports(self, check_flag):
        """
        Determines if a given feature is supported by this MemRegion.
        Feature flags are found in "./enums_ext.py" under:

        * hsa_amd_memory_pool_global_flag_t

        Params:
        check_flag: Feature flag to test
        """
        # Flags are only meaningful for the global segment.
        if self.kind == 'global':
            return self._flags & check_flag
        else:
            return False

    def allocate(self, nbytes):
        """Allocate `nbytes` from this pool; returns a ctypes void pointer."""
        assert self.alloc_allowed
        assert nbytes >= 0
        buff = ctypes.c_void_p()
        flags = ctypes.c_uint32(0)  # From API docs "Must be 0"!
        hsa.hsa_amd_memory_pool_allocate(self._id, nbytes, flags, ctypes.byref(buff))
        if buff.value is None:
            raise HsaDriverError("Failed to allocate from {}".format(self))
        return buff

    # Identity cache: one MemPool instance per pool id.
    _instance_dict = {}

    @classmethod
    def instance_for(cls, owner, _id):
        """Return the canonical MemPool for `_id`, creating it if needed."""
        try:
            return cls._instance_dict[_id]
        except KeyError:
            new_instance = cls(owner, _id)
            cls._instance_dict[_id] = new_instance
            return new_instance
class MemRegion(HsaWrapper):
    """Abstracts a HSA memory region.

    This will wrap and provide an OO interface for hsa_region_t C-API elements
    """
    _hsa_info_function = 'hsa_region_get_info'
    # Attributes resolved via HsaWrapper.__getattr__: name -> (enum, ctype).
    _hsa_properties = {
        'segment': (
            enums.HSA_REGION_INFO_SEGMENT,
            drvapi.hsa_region_segment_t
        ),
        '_flags': (
            enums.HSA_REGION_INFO_GLOBAL_FLAGS,
            drvapi.hsa_region_global_flag_t
        ),
        'host_accessible': (enums_ext.HSA_AMD_REGION_INFO_HOST_ACCESSIBLE,
                            ctypes.c_bool),
        'size': (enums.HSA_REGION_INFO_SIZE,
                 ctypes.c_size_t),
        'alloc_max_size': (enums.HSA_REGION_INFO_ALLOC_MAX_SIZE,
                           ctypes.c_size_t),
        'alloc_alignment': (enums.HSA_REGION_INFO_RUNTIME_ALLOC_ALIGNMENT,
                            ctypes.c_size_t),
        'alloc_granule': (enums.HSA_REGION_INFO_RUNTIME_ALLOC_GRANULE,
                          ctypes.c_size_t),
        'alloc_allowed': (enums.HSA_REGION_INFO_RUNTIME_ALLOC_ALLOWED,
                          ctypes.c_bool),
    }

    # Segment enum -> kind string used by _RegionList grouping.
    _segment_name_map = {
        enums.HSA_REGION_SEGMENT_GLOBAL: 'global',
        enums.HSA_REGION_SEGMENT_READONLY: 'readonly',
        enums.HSA_REGION_SEGMENT_PRIVATE: 'private',
        enums.HSA_REGION_SEGMENT_GROUP: 'group',
    }

    def __init__(self, agent, region_id):
        """Do not instantiate MemRegion objects directly, use the factory class
        method 'instance_for' to ensure MemRegion identity"""
        self._id = region_id
        self._owner_agent = agent
        self._as_parameter_ = self._id

    @property
    def kind(self):
        # Kind string ('global'/'readonly'/'private'/'group').
        return self._segment_name_map[self.segment]

    @property
    def agent(self):
        # The agent that owns this region.
        return self._owner_agent

    def supports(self, check_flag):
        """
        Determines if a given feature is supported by this MemRegion.
        Feature flags are found in "./enums.py" under:

        * hsa_region_global_flag_t

        Params:
        check_flag: Feature flag to test
        """
        # Flags are only meaningful for the global segment.
        if self.kind == 'global':
            return self._flags & check_flag
        else:
            return False

    def allocate(self, nbytes):
        """Allocate `nbytes` from this region; returns a ctypes void pointer."""
        assert self.alloc_allowed
        assert nbytes <= self.alloc_max_size
        assert nbytes >= 0
        buff = ctypes.c_void_p()
        hsa.hsa_memory_allocate(self._id, nbytes, ctypes.byref(buff))
        return buff

    def free(self, ptr):
        """Free memory previously allocated from an HSA region."""
        hsa.hsa_memory_free(ptr)

    # Identity cache: one MemRegion instance per region id.
    _instance_dict = {}

    @classmethod
    def instance_for(cls, owner, _id):
        """Return the canonical MemRegion for `_id`, creating it if needed."""
        try:
            return cls._instance_dict[_id]
        except KeyError:
            new_instance = cls(owner, _id)
            cls._instance_dict[_id] = new_instance
            return new_instance
class Queue(object):
    def __init__(self, agent, queue_ptr):
        """The id in a queue is a pointer to the queue object returned by hsa_queue_create.
        The Queue object has ownership on that queue object"""
        # Weak proxy back to the owning agent to avoid a reference cycle.
        self._agent = weakref.proxy(agent)
        self._id = queue_ptr
        self._as_parameter_ = self._id
        # Destructor invoked by the recycler once this queue is released.
        self._finalizer = hsa.hsa_queue_destroy
    def release(self):
        """Hand this queue back to its agent for deferred destruction."""
        self._agent.release_queue(self)
    def __getattr__(self, fname):
        # Delegate unknown attributes to the underlying hsa_queue_t struct.
        return getattr(self._id.contents, fname)
    @contextmanager
    def _get_packet(self, packet_type):
        """Reserve, zero and yield the next AQL packet slot as `packet_type`.

        The caller populates the packet inside the `with` body; on exit
        the doorbell signal is rung with the reserved index to publish
        the packet to the packet processor.
        """
        # Write AQL packet at the calculated queue index address
        queue_struct = self._id.contents
        # Ring-buffer mask; valid because HSA queue sizes are powers of 2.
        queue_mask = queue_struct.size - 1
        # All AQL packet flavours share the dispatch packet's size/layout.
        assert (ctypes.sizeof(packet_type) ==
                ctypes.sizeof(drvapi.hsa_kernel_dispatch_packet_t))
        packet_array_t = (packet_type * queue_struct.size)

        # Obtain the current queue write index
        index = hsa.hsa_queue_add_write_index_acq_rel(self._id, 1)
        # Busy-wait until the reserved slot is within one ring of the
        # consumer's read index (i.e. the slot is free to overwrite).
        while True:
            read_offset = hsa.hsa_queue_load_read_index_acquire(self._id)
            if read_offset <= index < read_offset + queue_struct.size:
                break

        queue_offset = index & queue_mask
        queue = packet_array_t.from_address(queue_struct.base_address)
        packet = queue[queue_offset]

        # zero init
        ctypes.memset(ctypes.addressof(packet), 0, ctypes.sizeof(packet_type))
        yield packet
        # Increment write index
        # Ring the doorbell
        hsa.hsa_signal_store_release(self._id.contents.doorbell_signal, index)
def insert_barrier(self, dep_signal):
with self._get_packet(drvapi.hsa_barrier_and_packet_t) as packet:
# Populate packet
packet.dep_signal0 = dep_signal._id
header = 0
header |= enums.HSA_FENCE_SCOPE_SYSTEM << enums.HSA_PACKET_HEADER_ACQUIRE_FENCE_SCOPE
header |= enums.HSA_FENCE_SCOPE_SYSTEM << enums.HSA_PACKET_HEADER_RELEASE_FENCE_SCOPE
header |= enums.HSA_PACKET_TYPE_BARRIER_AND << enums.HSA_PACKET_HEADER_TYPE
header |= 1 << enums.HSA_PACKET_HEADER_BARRIER
# Original example calls for an atomic store.
# Since we are on x86, store of aligned 16 bit is atomic.
# The C code is
# __atomic_store_n((uint16_t*)(&dispatch_packet->header), header, __ATOMIC_RELEASE);
packet.header = header
def dispatch(self, symbol, kernargs,
workgroup_size=None,
grid_size=None,
signal=None):
_logger.info("dispatch %s", symbol.name)
dims = len(workgroup_size)
assert dims == len(grid_size)
assert 0 < dims <= 3
assert grid_size >= workgroup_size
if workgroup_size > tuple(self._agent.workgroup_max_dim)[:dims]:
msg = "workgroupsize is too big {0} > {1}"
raise HsaDriverError(msg.format(workgroup_size,
tuple(self._agent.workgroup_max_dim)[:dims]))
s = signal if signal is not None else hsa.create_signal(1)
# Note: following vector_copy.c
with self._get_packet(drvapi.hsa_kernel_dispatch_packet_t) as packet:
# Populate packet
packet.setup |= dims << enums.HSA_KERNEL_DISPATCH_PACKET_SETUP_DIMENSIONS
packet.workgroup_size_x = workgroup_size[0]
packet.workgroup_size_y = workgroup_size[1] if dims > 1 else 1
packet.workgroup_size_z = workgroup_size[2] if dims > 2 else 1
packet.grid_size_x = grid_size[0]
packet.grid_size_y = grid_size[1] if dims > 1 else 1
packet.grid_size_z = grid_size[2] if dims > 2 else 1
packet.completion_signal = s._id
packet.kernel_object = symbol.kernel_object
packet.kernarg_address = (0 if kernargs is None
else kernargs.value)
packet.private_segment_size = symbol.private_segment_size
packet.group_segment_size = symbol.group_segment_size
header = 0
header |= enums.HSA_FENCE_SCOPE_SYSTEM << enums.HSA_PACKET_HEADER_ACQUIRE_FENCE_SCOPE
header |= enums.HSA_FENCE_SCOPE_SYSTEM << enums.HSA_PACKET_HEADER_RELEASE_FENCE_SCOPE
header |= enums.HSA_PACKET_TYPE_KERNEL_DISPATCH << enums.HSA_PACKET_HEADER_TYPE
# Original example calls for an atomic store.
# Since we are on x86, store of aligned 16 bit is atomic.
# The C code is
# __atomic_store_n((uint16_t*)(&dispatch_packet->header), header, __ATOMIC_RELEASE);
packet.header = header
# Wait on the dispatch completion signal
# synchronous if no signal was provided
if signal is None:
_logger.info('wait for synchronous kernel to complete')
timeout = 10
if not s.wait_until_ne_one(timeout=timeout):
msg = "Kernel timed out after {timeout} second"
raise HsaKernelTimedOut(msg.format(timeout=timeout))
def __dir__(self):
return sorted(set(dir(self._id.contents) +
self.__dict__.keys()))
def owned(self):
return ManagedQueueProxy(self)
class ManagedQueueProxy(object):
    """Forwarding proxy that keeps only a weak reference to a ``Queue``.

    Attribute access is transparently delegated to the referenced queue,
    so the proxy itself never keeps the queue alive.
    """

    def __init__(self, queue):
        self._queue = weakref.ref(queue)

    def __getattr__(self, item):
        target = self._queue()
        return getattr(target, item)
class Signal(object):
    """The id for the signal is going to be the hsa_signal_t returned by create_signal.
    Lifetime of the underlying signal will be tied with this object".
    Note that it is likely signals will have lifetime issues."""

    def __init__(self, signal_id):
        self._id = signal_id
        self._as_parameter_ = self._id
        # Destroy the low-level signal when this wrapper is collected.
        weakref.finalize(self, hsa.hsa_signal_destroy, self._id)

    def load_relaxed(self):
        # Read the signal value with relaxed memory ordering.
        return hsa.hsa_signal_load_relaxed(self._id)

    def load_acquire(self):
        # Read the signal value with acquire memory ordering.
        return hsa.hsa_signal_load_acquire(self._id)

    def wait_until_ne_one(self, timeout=None):
        """
        Returns a boolean to indicate whether the wait has timeout
        """
        one = 1
        mhz = 10 ** 6
        if timeout is None:
            # Infinite
            # NOTE: -1 wraps to UINT64_MAX when marshalled as unsigned.
            expire = -1  # UINT_MAX
        else:
            # timeout as seconds
            # NOTE(review): assumes hsa.timestamp_frequency is expressed in
            # MHz, so frequency * 1e6 yields timestamp ticks per second --
            # confirm against the runtime.
            expire = timeout * hsa.timestamp_frequency * mhz

        # XXX: use active wait instead of blocked seem to avoid hang in docker
        hsa.hsa_signal_wait_acquire(self._id, enums.HSA_SIGNAL_CONDITION_NE,
                                    one, expire,
                                    enums.HSA_WAIT_STATE_ACTIVE)
        # True when the signal actually changed; False means the wait
        # expired with the value still equal to one.
        return self.load_relaxed() != one
class BrigModule(object):
    """In-memory BRIG (HSAIL) module prepared for the finalizer API."""

    def __init__(self, brig_buffer):
        """
        Take a byte buffer of a Brig module
        """
        # Keep the ctypes buffer alive for the lifetime of this object;
        # self._id is a raw pointer into it.
        buf = ctypes.create_string_buffer(brig_buffer)
        self._buffer = buf
        self._id = ctypes.cast(ctypes.addressof(buf),
                               drvapi.hsa_ext_module_t)

    @classmethod
    def from_file(cls, file_name):
        """Load a BRIG module from the file at *file_name*."""
        with open(file_name, 'rb') as fin:
            buf = fin.read()
        # BUGFIX: construct via ``cls`` so subclasses of BrigModule get an
        # instance of the subclass rather than the base class.
        return cls(buf)

    def __len__(self):
        # NOTE(review): create_string_buffer appends a trailing NUL, so
        # this reports one byte more than the original brig_buffer --
        # confirm whether the finalizer relies on that.
        return len(self._buffer)

    def __repr__(self):
        return "<BrigModule id={0} size={1}bytes>".format(hex(id(self)),
                                                          len(self))
class Program(object):
    """HSAIL finalization program (``hsa_ext_program_t``) wrapper.

    Looks up the finalizer extension function table, creates the program,
    and ties destruction of the program to this object's lifetime.
    """

    def __init__(self, model=enums.HSA_MACHINE_MODEL_LARGE,
                 profile=enums.HSA_PROFILE_FULL,
                 rounding_mode=enums.HSA_DEFAULT_FLOAT_ROUNDING_MODE_DEFAULT,
                 options=None, version_major=1, version_minor=0):
        self._id = drvapi.hsa_ext_program_t()
        assert options is None

        def check_fptr_return(hsa_status):
            # Error check for calls made through the extension function
            # table, which bypass the library's normal checked wrappers.
            # BUGFIX: compare status codes by value with ``!=``; the
            # original used ``is not``, which tests object identity and
            # only works by accident for CPython's small-int cache.
            if hsa_status != enums.HSA_STATUS_SUCCESS:
                msg = ctypes.c_char_p()
                hsa.hsa_status_string(hsa_status, ctypes.byref(msg))
                _logger.info(msg.value.decode("utf-8"))
                exit(-hsa_status)

        support = ctypes.c_bool(0)
        hsa.hsa_system_extension_supported(enums.HSA_EXTENSION_FINALIZER,
                                           version_major,
                                           version_minor,
                                           ctypes.byref(support))

        assert support.value, ('HSA system extension %s.%s not supported' %
                               (version_major, version_minor))

        # struct of function pointers
        self._ftabl = drvapi.hsa_ext_finalizer_1_00_pfn_t()
        # populate struct
        hsa.hsa_system_get_extension_table(enums.HSA_EXTENSION_FINALIZER,
                                           version_major,
                                           version_minor,
                                           ctypes.byref(self._ftabl))

        ret = self._ftabl.hsa_ext_program_create(model, profile,
                                                 rounding_mode, options,
                                                 ctypes.byref(self._id))
        check_fptr_return(ret)

        self._as_parameter_ = self._id
        weakref.finalize(self, self._ftabl.hsa_ext_program_destroy,
                         self._id)

    def add_module(self, module):
        """Attach a BrigModule to the program prior to finalization."""
        self._ftabl.hsa_ext_program_add_module(self._id, module._id)

    def finalize(self, isa, callconv=0, options=None):
        """
        The program object is safe to be deleted after ``finalize``.

        Returns a CodeObject holding the finalized machine code for *isa*.
        """
        code_object = drvapi.hsa_code_object_t()
        control_directives = drvapi.hsa_ext_control_directives_t()
        # Zeroed control directives == "no directives".
        ctypes.memset(ctypes.byref(control_directives), 0,
                      ctypes.sizeof(control_directives))

        self._ftabl.hsa_ext_program_finalize(self._id,
                                             isa,
                                             callconv,
                                             control_directives,
                                             options,
                                             enums.HSA_CODE_OBJECT_TYPE_PROGRAM,
                                             ctypes.byref(code_object))

        return CodeObject(code_object)
class CodeObject(object):
    """Wrapper owning an ``hsa_code_object_t`` produced by finalization."""

    def __init__(self, code_object):
        self._id = code_object
        self._as_parameter_ = self._id
        # Destroy the code object when this wrapper is collected.
        weakref.finalize(self, hsa.hsa_code_object_destroy, self._id)
class Executable(object):
    """Wrapper around ``hsa_executable_t``.

    Typical life cycle: create, ``load`` one or more code objects,
    ``freeze``, then ``get_symbol`` to obtain dispatchable kernels.
    """

    def __init__(self):
        ex = drvapi.hsa_executable_t()
        hsa.hsa_executable_create(enums.HSA_PROFILE_FULL,
                                  enums.HSA_EXECUTABLE_STATE_UNFROZEN,
                                  None,
                                  ctypes.byref(ex))
        self._id = ex
        self._as_parameter_ = self._id
        # Destroy the executable when this wrapper is collected.
        weakref.finalize(self, hsa.hsa_executable_destroy, self._id)

    def load(self, agent, code_object):
        # Attach *code_object* to this executable for *agent*.
        hsa.hsa_executable_load_code_object(self._id, agent._id,
                                            code_object._id, None)

    def freeze(self):
        """Freeze executable before we can query for symbol"""
        hsa.hsa_executable_freeze(self._id, None)

    def get_symbol(self, agent, name):
        # Look up the kernel symbol *name* (encoded as ASCII) for *agent*.
        symbol = drvapi.hsa_executable_symbol_t()
        hsa.hsa_executable_get_symbol(self._id, None,
                                      ctypes.create_string_buffer(
                                          name.encode('ascii')),
                                      agent._id, 0,
                                      ctypes.byref(symbol))
        return Symbol(name, symbol)
class Symbol(HsaWrapper):
    """Executable (kernel) symbol.

    The attributes listed in ``_hsa_properties`` are resolved lazily via
    ``hsa_executable_symbol_get_info`` by the HsaWrapper machinery.
    """
    _hsa_info_function = 'hsa_executable_symbol_get_info'
    _hsa_properties = {
        'kernel_object': (
            enums.HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_OBJECT,
            ctypes.c_uint64,
        ),
        'kernarg_segment_size': (
            enums.HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_KERNARG_SEGMENT_SIZE,
            ctypes.c_uint32,
        ),
        'group_segment_size': (
            enums.HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_GROUP_SEGMENT_SIZE,
            ctypes.c_uint32,
        ),
        'private_segment_size': (
            enums.HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_PRIVATE_SEGMENT_SIZE,
            ctypes.c_uint32,
        ),
    }

    def __init__(self, name, symbol_id):
        # symbol_id: hsa_executable_symbol_t handle for this kernel.
        self._id = symbol_id
        self.name = name
class MemoryPointer(object):
    """Owner of a device memory allocation with manual reference counting.

    ``OwnedPointer`` handles created via ``own``/``view`` increment
    ``refct``; the allocation is released when the count drops to zero,
    or eagerly via ``free``.
    """
    # Marks this object as HSA memory (see is_device_memory).
    __hsa_memory__ = True

    def __init__(self, context, pointer, size, finalizer=None):
        assert isinstance(context, Context)
        self.context = context
        self.device_pointer = pointer
        self.size = size
        self._hsa_memsize_ = size
        self.finalizer = finalizer
        # "managed" means this object is responsible for releasing memory.
        self.is_managed = finalizer is not None
        self.is_alive = True
        self.refct = 0

    def __del__(self):
        try:
            if self.is_managed and self.is_alive:
                self.finalizer()
        except:
            # Never let an exception escape __del__; report and move on.
            traceback.print_exc()

    def own(self):
        """Return an OwnedPointer participating in this refcount."""
        return OwnedPointer(weakref.proxy(self))

    def free(self):
        """
        Forces the device memory to the trash.
        """
        if self.is_managed:
            if not self.is_alive:
                raise RuntimeError("Freeing dead memory")
            self.finalizer()
            self.is_alive = False

    def view(self):
        """Return an owned, unmanaged alias of this allocation."""
        pointer = self.device_pointer.value
        view = MemoryPointer(self.context, pointer, self.size)
        return OwnedPointer(weakref.proxy(self), view)

    @property
    def device_ctypes_pointer(self):
        # The raw ctypes pointer to the device memory.
        return self.device_pointer

    def allow_access_to(self, *agents):
        """
        Grant access to given *agents*.
        Upon return, only the listed-agents and the owner agent have direct
        access to this pointer.
        """
        ct = len(agents)
        if ct == 0:
            return
        agent_array = (ct * drvapi.hsa_agent_t)(*[a._id for a in agents])
        hsa.hsa_amd_agents_allow_access(ct, agent_array, None,
                                        self.device_pointer)
class HostMemory(mviewbuf.MemAlloc):
    """Host-accessible view over memory owned by another allocation.

    Holds a reference to *owner* so the backing allocation stays alive as
    long as this host view does.
    """

    def __init__(self, context, owner, pointer, size):
        self.context = context
        self.owned = owner
        self.size = size
        self.host_pointer = pointer
        self.handle = self.host_pointer

        # For buffer interface
        self._buflen_ = self.size
        self._bufptr_ = self.host_pointer.value

    def own(self):
        # Ownership already flows through ``self.owned``; nothing to wrap.
        return self
class OwnedPointer(object):
    """Refcounting handle over a MemoryPointer (or an unmanaged view)."""

    def __init__(self, memptr, view=None):
        # memptr is expected to be a weak proxy to a MemoryPointer.
        self._mem = memptr
        self._mem.refct += 1
        if view is None:
            self._view = self._mem
        else:
            # A view must never own/release the memory itself.
            assert not view.is_managed
            self._view = view

    def __del__(self):
        try:
            self._mem.refct -= 1
            assert self._mem.refct >= 0
            if self._mem.refct == 0:
                # Last owner gone: release the underlying allocation.
                self._mem.free()
        except ReferenceError:
            # The weakly-referenced MemoryPointer is already gone.
            pass
        except:
            # Never let an exception escape __del__; report and move on.
            traceback.print_exc()

    def __getattr__(self, fname):
        """Proxy MemoryPointer methods
        """
        return getattr(self._view, fname)
class Context(object):
    """
    A context is associated with a component.

    Parameters:
    agent the agent, an instance of the class Agent

    (The original class carried a second bare string literal here; it was
    a no-op statement, so both texts are merged into this docstring.)
    """
    # a weak set of active Stream objects
    _active_streams = weakref.WeakSet()

    def __init__(self, agent):
        self._agent = weakref.proxy(agent)

        if self._agent.is_component:  # only components have queues
            qs = agent.queue_max_size
            defq = self._agent.create_queue_multi(qs, callback=self._callback)
            self._defaultqueue = defq.owned()

        self.allocations = utils.UniqueDict()

        # get pools
        coarse_flag = enums_ext.HSA_AMD_MEMORY_POOL_GLOBAL_FLAG_COARSE_GRAINED
        fine_flag = enums_ext.HSA_AMD_MEMORY_POOL_GLOBAL_FLAG_FINE_GRAINED
        alloc_mps = [mp for mp in agent.mempools.globals if mp.alloc_allowed]

        self._coarsegrain_mempool = None
        self._finegrain_mempool = None
        for mp in alloc_mps:
            if mp.supports(coarse_flag):
                self._coarsegrain_mempool = mp
            if mp.supports(fine_flag):
                self._finegrain_mempool = mp

    def _callback(self, status, queue):
        # Queue error callback: report the error and abort the process --
        # queue errors are treated as fatal.
        drvapi._check_error(status, queue)
        sys.exit(1)

    @property
    def unproxy(self):
        # This is a trick to help handle weakproxy comparison with actual
        # instance.
        # See https://stackoverflow.com/a/49319989 for inspiration and the
        # whole page for more general discussion.
        return self

    @property
    def default_queue(self):
        return self._defaultqueue

    @property
    def agent(self):
        return self._agent

    @property
    def coarsegrain_mempool(self):
        if self._coarsegrain_mempool is None:
            msg = 'coarsegrain mempool is not available in {}'.format(self._agent)
            raise ValueError(msg)
        return self._coarsegrain_mempool

    @property
    def finegrain_mempool(self):
        if self._finegrain_mempool is None:
            msg = 'finegrain mempool is not available in {}'.format(self._agent)
            raise ValueError(msg)
        return self._finegrain_mempool

    def memalloc(self, nbytes, memTypeFlags=None, hostAccessible=True):
        """
        Allocates memory.
        Parameters:
        nbytes the number of bytes to allocate.
        memTypeFlags the flags for which the memory region must have
        support; due to the inherent rawness of the underlying call, the
        validity of the flag is not checked, cf. C language.
        hostAccessible boolean as to whether the region in which the
        allocation takes place should be host accessible
        """
        hw = self._agent.device
        all_reg = self._agent.regions
        flag_ok_r = list()  # regions which pass the memTypeFlags test
        regions = list()

        # don't support DSP
        if hw == "GPU" or hw == "CPU":
            # check user requested flags
            if memTypeFlags is not None:
                for r in all_reg:
                    count = 0
                    for flags in memTypeFlags:
                        if r.supports(flags):
                            count += 1
                    if count == len(memTypeFlags):
                        flag_ok_r.append(r)
            else:
                flag_ok_r = all_reg

            # check system required flags for allocation
            for r in flag_ok_r:
                # check the mem region is coarse grained if dGPU present
                # TODO: this probably ought to explicitly check for a dGPU.
                if (hw == "GPU" and
                        not r.supports(enums.HSA_REGION_GLOBAL_FLAG_COARSE_GRAINED)):
                    continue
                # check accessibility criteria
                if hostAccessible:
                    if r.host_accessible:
                        regions.append(r)
                else:
                    if not r.host_accessible:
                        regions.append(r)
        else:
            raise RuntimeError("Unknown device type string \"%s\"" % hw)

        assert len(regions) > 0, "No suitable memory regions found."

        # walk though valid regions trying to malloc until there's none left
        # NOTE(review): the loop variable is passed to MemRegion.instance_for
        # as an id; confirm whether agent.regions yields ids or region objects.
        mem = None
        for region_id in regions:
            try:
                mem = MemRegion.instance_for(self._agent, region_id)\
                    .allocate(nbytes)
            except HsaApiError:  # try next memory region if an allocation fails
                pass
            else:  # allocation succeeded, stop looking for memory
                break

        if mem is None:
            # BUGFIX: the original message used backslash continuations
            # *inside* the string literal, embedding long runs of spaces in
            # the error text; implicit concatenation keeps the text clean.
            raise RuntimeError("Memory allocation failed. No agent/region "
                               "combination could meet allocation restraints "
                               "(hardware = %s, size = %s, flags = %s)."
                               % (hw, nbytes, memTypeFlags))

        fin = _make_mem_finalizer(hsa.hsa_memory_free)
        ret = MemoryPointer(weakref.proxy(self), mem, nbytes,
                            finalizer=fin(self, mem))
        if mem.value is None:
            raise RuntimeError("MemoryPointer has no value")
        self.allocations[mem.value] = ret
        return ret.own()

    def mempoolalloc(self, nbytes, allow_access_to=(), finegrain=False):
        """
        Allocates memory in a memory pool.
        Parameters:
        *nbytes* the number of bytes to allocate.
        *allow_access_to* agents granted direct access to the allocation.
        *finegrain* whether to allocate from the fine-grained pool.
        """
        mempool = (self.finegrain_mempool
                   if finegrain
                   else self.coarsegrain_mempool)

        buff = mempool.allocate(nbytes)
        fin = _make_mem_finalizer(hsa.hsa_amd_memory_pool_free)
        mp = MemoryPointer(weakref.proxy(self), buff, nbytes,
                           finalizer=fin(self, buff))
        mp.allow_access_to(*allow_access_to)
        self.allocations[buff.value] = mp
        return mp.own()

    def memhostalloc(self, size, finegrain, allow_access_to):
        """Allocate pool memory and wrap it as host-accessible HostMemory."""
        mem = self.mempoolalloc(size, allow_access_to=allow_access_to,
                                finegrain=finegrain)
        return HostMemory(weakref.proxy(self), owner=mem,
                          pointer=mem.device_pointer, size=mem.size)
class Stream(object):
    """
    An asynchronous stream for async API.

    Completion signals of submitted async operations are queued here;
    ``synchronize`` drains the queue, waiting on each pending signal and
    firing any callbacks registered against it.
    """

    def __init__(self):
        self._signals = deque()
        self._callbacks = defaultdict(list)

    def _add_signal(self, signal):
        """
        Add a signal that corresponds to an async task.
        """
        # XXX: too many pending signals seem to cause async copy to hang
        if len(self._signals) > 100:
            self._sync(50)
        self._signals.append(signal)

    def _add_callback(self, callback):
        # Register *callback* to run once the most recent task completes.
        assert callable(callback)
        self._callbacks[self._get_last_signal()].append(callback)

    def _get_last_signal(self):
        """
        Get the last signal.
        """
        if not self._signals:
            return None
        return self._signals[-1]

    def synchronize(self):
        """
        Synchronize the stream.
        """
        self._sync(len(self._signals))

    def _sync(self, limit):
        # Drain up to *limit* pending signals, oldest first.
        processed = 0
        while self._signals and processed < limit:
            sig = self._signals.popleft()
            if sig.load_relaxed() == 1:
                # Still pending: block until the task flips the signal.
                sig.wait_until_ne_one()
            for cb in self._callbacks[sig]:
                cb()
            del self._callbacks[sig]
            processed += 1

    @contextmanager
    def auto_synchronize(self):
        '''
        A context manager that waits for all commands in this stream to execute
        and commits any pending memory transfers upon exiting the context.
        '''
        yield self
        self.synchronize()
def _make_mem_finalizer(dtor):
    """
    finalises memory
    Parameters:
    dtor a function that will delete/free held memory from a reference
    Returns:
    Finalising function (call it with (context, handle) to get the actual
    zero-argument finalizer closure)
    """
    def mem_finalize(context, handle):
        # Capture what the finalizer needs now, so ``core`` does not have
        # to keep the whole context alive.
        allocations = context.allocations
        sync = hsa.implicit_sync

        def core():
            _logger.info("Current allocations: %s", allocations)
            if allocations:
                # BUGFIX/consistency: use lazy %-style logging args like the
                # call above, instead of eagerly formatting with ``%``.
                _logger.info("Attempting delete on %s", handle.value)
                del allocations[handle.value]
            sync()  # implicit sync
            dtor(handle)
        return core
    return mem_finalize
def device_pointer(obj):
    "Get the device pointer as an integer"
    ptr = device_ctypes_pointer(obj)
    return ptr.value
def device_ctypes_pointer(obj):
    "Get the ctypes object for the device pointer"
    if obj is not None:
        require_device_memory(obj)
        return obj.device_ctypes_pointer
    # ``None`` maps to a NULL pointer.
    return c_void_p(0)
def is_device_memory(obj):
    """Return whether *obj* is recognized as an HSA memory object.

    Any object carrying a truthy ``__hsa_memory__`` attribute counts.
    Such objects are also expected to expose a ``device_pointer``
    attribute holding the integer address of the device memory, but that
    is not verified here.
    """
    return getattr(obj, '__hsa_memory__', False)
def require_device_memory(obj):
    """A sentry for methods that accept HSA memory object.
    """
    if is_device_memory(obj):
        return
    raise Exception("Not a HSA memory object.")
def host_pointer(obj):
    """
    NOTE: The underlying data pointer from the host data buffer is used and
    it should not be changed until the operation which can be asynchronous
    completes.
    """
    # Integers are assumed to already be raw pointer values.
    if isinstance(obj, int):
        return obj

    # np.void scalars expose a read-only buffer; request a writable view.
    forcewritable = isinstance(obj, np.void)
    return mviewbuf.memoryview_get_buffer(obj, forcewritable)
def host_to_dGPU(context, dst, src, size):
    """
    Copy data from a host memory region to a dGPU.
    Parameters:
    context the dGPU context
    dst a pointer to the destination location in dGPU memory
    src a pointer to the source location in host memory
    size the size (in bytes) of data to transfer; must be non-negative
    """
    _logger.info("CPU->dGPU")
    if size < 0:
        raise ValueError("Invalid size given: %s" % size)

    # Synchronous copy; returns when the transfer has completed.
    hsa.hsa_memory_copy(device_pointer(dst), host_pointer(src), size)
def dGPU_to_host(context, dst, src, size):
    """
    Copy data from a dGPU memory region to the host.
    (The docstring previously described the opposite direction; the code
    copies device -> host.)
    Parameters:
    context the dGPU context
    dst a pointer to the destination location in host memory
    src a pointer to the source location in dGPU memory
    size the size (in bytes) of data to transfer; must be non-negative
    """
    _logger.info("dGPU->CPU")
    if size < 0:
        raise ValueError("Invalid size given: %s" % size)

    # Synchronous copy; returns when the transfer has completed.
    hsa.hsa_memory_copy(host_pointer(dst), device_pointer(src), size)
def dGPU_to_dGPU(context, dst, src, size):
    """Synchronously copy *size* bytes between two dGPU memory locations."""
    _logger.info("dGPU->dGPU")
    if size < 0:
        raise ValueError("Invalid size given: %s" % size)

    hsa.hsa_memory_copy(device_pointer(dst), device_pointer(src), size)
def async_host_to_dGPU(dst_ctx, src_ctx, dst, src, size, stream):
    """Asynchronously copy *size* bytes from host *src* to dGPU *dst*.

    The copy is ordered on *stream*; completion is tracked by a signal
    appended to the stream (see async_copy_dgpu).
    """
    _logger.info("Async CPU->dGPU")
    async_copy_dgpu(dst_ctx=dst_ctx, src_ctx=src_ctx,
                    src=host_pointer(src), dst=device_pointer(dst),
                    size=size, stream=stream)
def async_dGPU_to_host(dst_ctx, src_ctx, dst, src, size, stream):
    """Asynchronously copy *size* bytes from dGPU *src* to host *dst*.

    The copy is ordered on *stream*; completion is tracked by a signal
    appended to the stream (see async_copy_dgpu).
    """
    _logger.info("Async dGPU->CPU")
    async_copy_dgpu(dst_ctx=dst_ctx, src_ctx=src_ctx,
                    dst=host_pointer(dst), src=device_pointer(src),
                    size=size, stream=stream)
def async_dGPU_to_dGPU(dst_ctx, src_ctx, dst, src, size, stream):
    """Asynchronously copy *size* bytes between two dGPU locations.

    The copy is ordered on *stream*; completion is tracked by a signal
    appended to the stream (see async_copy_dgpu).
    """
    _logger.info("Async dGPU->dGPU")
    async_copy_dgpu(dst_ctx=dst_ctx, src_ctx=src_ctx,
                    dst=device_pointer(dst), src=device_pointer(src),
                    size=size, stream=stream)
def async_copy_dgpu(dst_ctx, src_ctx, dst, src, size, stream):
    """Queue an asynchronous dGPU memory copy on *stream*.

    A fresh completion signal is created for the copy; if the stream
    already has a pending operation, its latest signal becomes a
    dependency so copies submitted on the same stream execute in order.
    """
    if size < 0:
        raise ValueError("Invalid size given: %s" % size)

    completion_signal = hsa.create_signal(1)
    dependent_signal = stream._get_last_signal()

    if dependent_signal is not None:
        # Chain after the previous operation on this stream.
        dsignal = drvapi.hsa_signal_t(dependent_signal._id)
        signals = (1, ctypes.byref(dsignal), completion_signal)
    else:
        signals = (0, None, completion_signal)

    hsa.hsa_amd_memory_async_copy(dst, dst_ctx._agent._id,
                                  src, src_ctx._agent._id,
                                  size, *signals)

    stream._add_signal(completion_signal)
def dgpu_count():
    """
    Returns the number of discrete GPUs present on the current machine.

    Returns 0 when agent enumeration fails (e.g. no HSA runtime present).
    """
    ngpus = 0
    try:
        for a in hsa.agents:
            if a.is_component and a.device == 'GPU':
                ngpus += 1
    except Exception:
        # BUGFIX: a bare ``except:`` also swallows KeyboardInterrupt and
        # SystemExit; only runtime failures should mean "no dGPUs".
        pass
    return ngpus
"""
True if a dGPU is present in the current machine.
"""
dgpu_present = dgpu_count() > 0
| stonebig/numba | numba/roc/hsadrv/driver.py | Python | bsd-2-clause | 51,876 |
#!/usr/bin/env python
#-*-*- encoding: utf-8 -*-*-
#
# Copyright (C) 2005 onwards University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Pablo Orduña <pablo@ordunya.com>
#
from __future__ import print_function, unicode_literals
import unittest
import time
import Queue
import test.unit.configuration as configuration_module
import test.unit.weblab.core.test_user_processor as UserProcessorTest
import voodoo.configuration as ConfigurationManager
import voodoo.sessions.manager as SessionManager
import voodoo.sessions.session_type as SessionType
import voodoo.sessions.session_id as SessionId
import weblab.core.user_processor as UserProcessor
import weblab.core.coordinator.store as TemporalInformationStore
import weblab.core.alive_users as AliveUsersCollection
class TimeModule(object):
    """Deterministic stand-in for the ``time`` module used in tests.

    The reported time is frozen: it starts at the real wall-clock time
    and only changes when ``set`` is called.
    """

    def __init__(self):
        self._frozen_now = time.time()

    def set(self, next_value):
        """Inject the value that subsequent ``time()`` calls will report."""
        self._frozen_now = next_value

    def time(self):
        """Return the currently injected timestamp."""
        return self._frozen_now
class DummyCoordinator(object):
    """Coordinator stub: no reservation is ever in post-reservation state."""

    def is_post_reservation(self, reservation_id):
        # Every reservation is reported as still active.
        return False
class AliveUsersCollectionTestCase(unittest.TestCase):
    """Tests for AliveUsersCollection session-expiration tracking.

    Uses a TimeModule stub so the collection's notion of "now" is fully
    controlled by each test.  (``assertEquals`` was replaced by
    ``assertEqual``; the former is a deprecated alias.)
    """

    def setUp(self):
        cfg_manager = ConfigurationManager.ConfigurationManager()
        cfg_manager.append_module(configuration_module)

        commands_store = TemporalInformationStore.CommandsTemporalInformationStore()

        locator = UserProcessorTest.FakeLocator(None)
        self.session_mgr = SessionManager.SessionManager(
                cfg_manager,
                SessionType.Memory,
                "foo"
            )
        coordinator = DummyCoordinator()
        self.finished_reservations_store = Queue.Queue()
        self.auc = AliveUsersCollection.AliveUsersCollection(
                        locator,
                        cfg_manager,
                        SessionType.Memory,
                        self.session_mgr,
                        coordinator,
                        commands_store,
                        self.finished_reservations_store
                )
        # Deterministic clock so expiration checks are reproducible.
        self.tm = TimeModule()
        self.auc._time_module = self.tm

    def test_add(self):
        """Adding the same session twice must be idempotent."""
        session_id = SessionId.SessionId("my session")
        self.auc.add_user(session_id)
        self.auc.add_user(session_id)  # no exception

    def test_remove(self):
        """Removing a session twice must be idempotent."""
        session_id = SessionId.SessionId("my session")
        self.auc.add_user(session_id)
        self.auc.remove_user(session_id)
        self.auc.remove_user(session_id)  # No exception

    def create_session(self, timestamp):
        """Create a session whose latest poll happened at *timestamp*."""
        session_id = self.session_mgr.create_session()
        self.session_mgr.modify_session(
            session_id,
            {
                'db_session_id': 'whatever',
                'session_polling': (
                    timestamp,
                    UserProcessor.UserProcessor.EXPIRATION_TIME_NOT_SET
                )
            }
        )
        return session_id

    def test_finished_sessions(self):
        """A session reported through the finished store expires at once."""
        session_id1 = self.create_session(self.tm.time())
        session_id2 = self.create_session(self.tm.time())

        self.auc.add_user(session_id1)
        self.auc.add_user(session_id2)

        expired_users = self.auc.check_expired_users()
        self.assertEqual(0, len(expired_users))

        self.finished_reservations_store.put(session_id1)

        expired_users = self.auc.check_expired_users()
        self.assertEqual(1, len(expired_users))
        self.assertEqual(session_id1, expired_users[0])

    def test_finished_sessions2(self):
        """Finished sessions are reported before poll-timeout expirations."""
        session_id1 = self.create_session(self.tm.time() - 3600)  # expired
        session_id2 = self.create_session(self.tm.time())

        self.auc.add_user(session_id1)
        self.auc.add_user(session_id2)

        self.finished_reservations_store.put(session_id2)

        expired_users = self.auc.check_expired_users()
        self.assertEqual(2, len(expired_users))
        self.assertEqual(session_id2, expired_users[0])
        self.assertEqual(session_id1, expired_users[1])

        expired_users = self.auc.check_expired_users()
        self.assertEqual(0, len(expired_users))

    def test_three_sessions_one_expired(self):
        """Only the timed-out session is reported, and only once."""
        session_id1 = self.create_session(self.tm.time())
        session_id2 = self.create_session(self.tm.time() - 3600)  # expired
        session_id3 = self.create_session(self.tm.time())

        self.auc.add_user(session_id1)
        self.auc.add_user(session_id2)
        self.auc.add_user(session_id3)

        expired_users = self.auc.check_expired_users()
        self.assertEqual(1, len(expired_users))
        self.assertEqual(session_id2, expired_users[0])

        expired_users = self.auc.check_expired_users()
        self.assertEqual(0, len(expired_users))

        # Some time passes with same results
        self.tm.set(self.tm.time() + self.auc._min_time_between_checks + 1)

        expired_users = self.auc.check_expired_users()
        self.assertEqual(0, len(expired_users))

    def test_three_sessions_one_expired_and_then_another_before_time_passes(self):
        """A session expiring between checks is only seen after the
        minimum inter-check interval has elapsed."""
        session_id1 = self.create_session(self.tm.time())
        session_id2 = self.create_session(self.tm.time() - 3600)  # expired
        session_id3 = self.create_session(self.tm.time())

        self.auc.add_user(session_id1)
        self.auc.add_user(session_id2)
        self.auc.add_user(session_id3)

        expired_users = self.auc.check_expired_users()
        self.assertEqual(1, len(expired_users))
        self.assertEqual(session_id2, expired_users[0])

        expired_users = self.auc.check_expired_users()
        self.assertEqual(0, len(expired_users))

        session = self.session_mgr.get_session(session_id3)
        session['session_polling'] = (
            self.tm.time() - 3600,  # Expired
            UserProcessor.UserProcessor.EXPIRATION_TIME_NOT_SET
        )
        self.session_mgr.modify_session(
            session_id3,
            session
        )

        # Still it doesn't find it!
        expired_users = self.auc.check_expired_users()
        self.assertEqual(0, len(expired_users))

        # Some time passes
        self.tm.set(self.tm.time() + self.auc._min_time_between_checks + 1)

        # And now it finds the new expired session
        expired_users = self.auc.check_expired_users()
        self.assertEqual(1, len(expired_users))
        self.assertEqual(session_id3, expired_users[0])
def suite():
    """Build the test suite for this module."""
    # unittest.makeSuite is deprecated (removed in Python 3.13); the
    # TestLoader API is the supported equivalent and returns the same
    # TestSuite contents.
    return unittest.TestLoader().loadTestsFromTestCase(
        AliveUsersCollectionTestCase)

if __name__ == '__main__':
    unittest.main()
| weblabdeusto/weblabdeusto | server/src/test/unit/weblab/core/test_alive_users.py | Python | bsd-2-clause | 6,851 |
import numpy as np
from scipy import linalg
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from nose.tools import assert_true
from mne.time_frequency.stft import stft, istft, stftfreq, stft_norm2
def test_stft():
    "Test stft and istft tight frame property"
    sfreq = 1000.  # Hz
    f = 7.  # Hz
    for T in [253, 256]:  # try with even and odd numbers
        # BUGFIX: ``np.float`` was a deprecated alias of the builtin
        # ``float`` and was removed in NumPy 1.24; use ``float`` directly.
        t = np.arange(T).astype(float)
        x = np.sin(2 * np.pi * f * t / sfreq)
        x = np.array([x, x + 1.])
        wsize = 128
        tstep = 4
        X = stft(x, wsize, tstep)
        xp = istft(X, tstep, Tx=T)

        freqs = stftfreq(wsize, sfreq=1000)

        # The dominant frequency bin should match the generated sinusoid.
        max_freq = freqs[np.argmax(np.sum(np.abs(X[0]) ** 2, axis=1))]

        assert_true(X.shape[1] == len(freqs))
        assert_true(np.all(freqs >= 0.))
        assert_true(np.abs(max_freq - f) < 1.)

        # Round-trip reconstruction should be (nearly) exact.
        assert_array_almost_equal(x, xp, decimal=6)

        # norm conservation thanks to tight frame property
        assert_almost_equal(np.sqrt(stft_norm2(X)),
                            [linalg.norm(xx) for xx in x], decimal=2)

    # Try with empty array
    x = np.zeros((0, T))
    X = stft(x, wsize, tstep)
    xp = istft(X, tstep, T)
    assert_true(xp.shape == x.shape)
| jaeilepp/eggie | mne/time_frequency/tests/test_stft.py | Python | bsd-2-clause | 1,272 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: create the 'users_externalaccount' table."""
        # Adding model 'ExternalAccount'
        db.create_table('users_externalaccount', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['users.UserProfile'])),
            ('username', self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True)),
            ('type', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
            ('privacy', self.gf('django.db.models.fields.PositiveIntegerField')(default=3)),
        ))
        # Emit the post-create signal so South/Django hooks can react.
        db.send_create_signal('users', ['ExternalAccount'])
def backwards(self, orm):
# Deleting model 'ExternalAccount'
db.delete_table('users_externalaccount')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'groups.group': {
'Meta': {'ordering': "['name']", 'object_name': 'Group'},
'always_auto_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'auto_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'irc_channel': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '63', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'steward': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['users.UserProfile']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'system': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'url': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'wiki': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'})
},
'groups.language': {
'Meta': {'ordering': "['name']", 'object_name': 'Language'},
'always_auto_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'auto_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'url': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'})
},
'groups.skill': {
'Meta': {'ordering': "['name']", 'object_name': 'Skill'},
'always_auto_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'auto_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'url': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'})
},
'users.externalaccount': {
'Meta': {'object_name': 'ExternalAccount'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'privacy': ('django.db.models.fields.PositiveIntegerField', [], {'default': '3'}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['users.UserProfile']"}),
'username': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'})
},
'users.usernameblacklist': {
'Meta': {'ordering': "['value']", 'object_name': 'UsernameBlacklist'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_regex': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'value': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'users.userprofile': {
'Meta': {'ordering': "['full_name']", 'object_name': 'UserProfile', 'db_table': "'profile'"},
'allows_community_sites': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'allows_mozilla_sites': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'basket_token': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'bio': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
'date_mozillian': ('django.db.models.fields.DateField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'date_vouched': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'members'", 'blank': 'True', 'to': "orm['groups.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ircname': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '63', 'blank': 'True'}),
'is_vouched': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'languages': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'members'", 'blank': 'True', 'to': "orm['groups.Language']"}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'photo': ('sorl.thumbnail.fields.ImageField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'privacy_bio': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_city': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_country': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_date_mozillian': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_email': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_full_name': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_groups': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_ircname': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_languages': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_photo': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_region': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_skills': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_timezone': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_title': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_tshirt': ('mozillians.users.models.PrivacyField', [], {'default': '1'}),
'privacy_vouched_by': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_website': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'region': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'skills': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'members'", 'blank': 'True', 'to': "orm['groups.Skill']"}),
'timezone': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'tshirt': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '70', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}),
'vouched_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'vouchees'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['users.UserProfile']", 'blank': 'True', 'null': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'})
}
}
complete_apps = ['users']
| justinpotts/mozillians | mozillians/users/migrations/0039_auto__add_externalaccount.py | Python | bsd-3-clause | 12,687 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add ``UserProfile.wiki_profile_url``.

    The ``models`` attribute below is South's "frozen ORM": a snapshot of
    every model this migration may touch, captured when the migration was
    generated. It is data consumed by South, not live model definitions.
    """

    def forwards(self, orm):
        """Apply the migration: add the ``wiki_profile_url`` column."""
        # Adding field 'UserProfile.wiki_profile_url'
        db.add_column('profiles_userprofile', 'wiki_profile_url', self.gf('django.db.models.fields.URLField')(default='', max_length=200), keep_default=False)

    def backwards(self, orm):
        """Revert the migration: drop the ``wiki_profile_url`` column."""
        # Deleting field 'UserProfile.wiki_profile_url'
        db.delete_column('profiles_userprofile', 'wiki_profile_url')

    # Frozen ORM snapshot -- auto-generated; do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'profiles.userprofile': {
            'Meta': {'object_name': 'UserProfile'},
            'added_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'users_added'", 'null': 'True', 'to': "orm['auth.User']"}),
            'bio': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
            'birth_date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
            'city': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '30'}),
            'country': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '30'}),
            'diaspora_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
            'display_name': ('django.db.models.fields.CharField', [], {'default': "''", 'unique': 'True', 'max_length': '15', 'blank': 'True'}),
            'facebook_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
            'gender': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'irc_channels': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
            'irc_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '30'}),
            'jabber_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'}),
            'lat': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
            'linkedin_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
            'local_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'}),
            'lon': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
            'mentor': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'mentors_users'", 'null': 'True', 'to': "orm['auth.User']"}),
            'mozillians_profile_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'personal_blog_feed': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
            'personal_website_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
            'private_email': ('django.db.models.fields.EmailField', [], {'default': "''", 'max_length': '75', 'null': 'True'}),
            'region': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '30'}),
            'registration_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'twitter_account': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '16', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}),
            'wiki_profile_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200'})
        }
    }

    complete_apps = ['profiles']
| chirilo/remo | remo/profiles/migrations/0014_auto__add_field_userprofile_wiki_profile_url.py | Python | bsd-3-clause | 6,986 |
from setuptools import (
    setup,
    find_packages,
)
import sys  # NOTE(review): not referenced anywhere below -- confirm before removing

# The long description published on PyPI is the project README, verbatim.
with open("README.md", "r") as fh:
    long_description = fh.read()

setup(
    name="beautifulsoup4",
    # NOTE: We can't import __version__ from bs4 because bs4/__init__.py is Python 2 code,
    # and converting it to Python 3 means going through this code to run 2to3.
    # So we have to specify it twice for the time being.
    version = '4.9.3',
    author="Leonard Richardson",
    author_email='leonardr@segfault.org',
    url="http://www.crummy.com/software/BeautifulSoup/bs4/",
    download_url = "http://www.crummy.com/software/BeautifulSoup/bs4/download/",
    description="Screen-scraping library",
    # Environment markers pick the soupsieve pin by Python major version;
    # the <2.0 cap applies only under Python 2.
    install_requires=[
        "soupsieve >1.2; python_version>='3.0'",
        "soupsieve >1.2, <2.0; python_version<'3.0'",
    ],
    long_description=long_description,
    long_description_content_type="text/markdown",
    license="MIT",
    packages=find_packages(exclude=['tests*']),
    # Optional parser backends, installable as extras.
    extras_require = {
        'lxml' : [ 'lxml'],
        'html5lib' : ['html5lib'],
    },
    # NOTE(review): use_2to3 was removed in setuptools 58; building this
    # package requires an older setuptools -- confirm the build environment.
    use_2to3 = True,
    classifiers=["Development Status :: 5 - Production/Stable",
                 "Intended Audience :: Developers",
                 "License :: OSI Approved :: MIT License",
                 "Programming Language :: Python",
                 "Programming Language :: Python :: 2.7",
                 'Programming Language :: Python :: 3',
                 "Topic :: Text Processing :: Markup :: HTML",
                 "Topic :: Text Processing :: Markup :: XML",
                 "Topic :: Text Processing :: Markup :: SGML",
                 "Topic :: Software Development :: Libraries :: Python Modules",
                 ],
)
| catapult-project/catapult | third_party/beautifulsoup4-4.9.3/setup.py | Python | bsd-3-clause | 1,713 |
from django.apps import AppConfig
class UserprofilesConfig(AppConfig):
    """Django application configuration for the ``userprofiles`` app."""

    # Dotted module path Django uses to register this application.
    name = 'userprofiles'
| robalford/reConstruct | userprofiles/apps.py | Python | bsd-3-clause | 99 |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from froi.algorithm import imtool
class IntersectDialog(QDialog):
    """A dialog for action of intersection.

    Lets the user pick a mask volume from the model, choose an output
    name, and store ``imtool.intersect(source, mask)`` as a new volume.
    """

    def __init__(self, model, parent=None):
        """Store the volume list *model*, then build widgets and wiring.

        model : the application's volume list model; read for volume names
            and data, and extended with the result volume.
        parent : optional parent widget, forwarded to QDialog.
        """
        super(IntersectDialog, self).__init__(parent)
        self._model = model
        self._init_gui()
        self._create_actions()

    def _init_gui(self):
        """Initialize GUI."""
        # set dialog title
        self.setWindowTitle("Intersect")

        # initialize widgets
        source_label = QLabel("Source")
        self.source_combo = QComboBox()
        mask_label = QLabel("Mask")
        self.mask_combo = QComboBox()
        vol_list = self._model.getItemList()
        self.source_combo.addItems(QStringList(vol_list))
        # Preselect the model's currently highlighted volume as the source.
        row = self._model.currentIndex().row()
        self.source_combo.setCurrentIndex(row)
        self.mask_combo.addItems(QStringList(vol_list))
        out_label = QLabel("Output volume name")
        self.out_edit = QLineEdit()

        # layout config
        # Source widgets exist but are not laid out: the source is fixed to
        # the model's current volume, so only the mask row is shown.
        grid_layout = QGridLayout()
        #grid_layout.addWidget(source_label, 0, 0)
        #grid_layout.addWidget(self.source_combo, 0, 1)
        grid_layout.addWidget(mask_label, 0, 0)
        grid_layout.addWidget(self.mask_combo, 0, 1)
        grid_layout.addWidget(out_label, 1, 0)
        grid_layout.addWidget(self.out_edit, 1, 1)

        # button config
        self.run_button = QPushButton("Run")
        self.cancel_button = QPushButton("Cancel")
        hbox_layout = QHBoxLayout()
        hbox_layout.addWidget(self.run_button)
        hbox_layout.addWidget(self.cancel_button)

        vbox_layout = QVBoxLayout()
        vbox_layout.addLayout(grid_layout)
        vbox_layout.addLayout(hbox_layout)
        self.setLayout(vbox_layout)

    def _create_actions(self):
        """Wire widget signals to their handlers."""
        # Changing either combo refreshes the suggested output name.
        self.source_combo.currentIndexChanged.connect(self._create_output)
        self.mask_combo.currentIndexChanged.connect(self._create_output)
        self.run_button.clicked.connect(self._run_intersect)
        # NOTE(review): clicked(bool) is connected directly to done(int), so
        # the button's "checked" flag becomes the dialog result code --
        # confirm this is intended rather than self.reject.
        self.cancel_button.clicked.connect(self.done)

    def _create_output(self):
        """Suggest an output name of the form '<source>_<mask>'."""
        source_name = self.source_combo.currentText()
        mask_name = self.mask_combo.currentText()
        output_name = '_'.join([str(source_name), str(mask_name)])
        self.out_edit.setText(output_name)

    def _run_intersect(self):
        """Run an intersecting processing."""
        vol_name = str(self.out_edit.text())
        if not vol_name:
            QMessageBox.critical(self, "No output volume name",
                                 "Please specify output volume's name!")
            return
        source_row = self.source_combo.currentIndex()
        mask_row = self.mask_combo.currentIndex()
        # Qt.UserRole + 4 is the custom model role used here to fetch the
        # raw volume data arrays.
        source_data = self._model.data(self._model.index(source_row),
                                       Qt.UserRole + 4)
        mask_data = self._model.data(self._model.index(mask_row),
                                     Qt.UserRole + 4)
        new_vol = imtool.intersect(source_data, mask_data)
        # NOTE(review): reaches into the model's private _data for the first
        # volume's header; assumes it matches the new volume -- confirm.
        self._model.addItem(new_vol,
                            None,
                            vol_name,
                            self._model._data[0].get_header(),
                            0, 100, 255, 'rainbow')
        self.done(0)
| sealhuang/FreeROI | froi/gui/component/intersectdialog.py | Python | bsd-3-clause | 3,390 |
"""Orthogonal matching pursuit algorithms
"""
# Author: Vlad Niculae
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import linalg
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from ..utils import array2d, as_float_array, check_arrays
from ..cross_validation import _check_cv as check_cv
from ..externals.joblib import Parallel, delayed
from ..utils.arrayfuncs import solve_triangular
# Shared warning text emitted when OMP stops early because the candidate
# atom is already selected / its inner product is negligible, or the
# selected atoms become linearly dependent.
premature = """ Orthogonal matching pursuit ended prematurely due to linear
dependence in the dictionary. The requested precision might not have been met.
"""
def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True,
                  return_path=False):
    """Orthogonal Matching Pursuit step using the Cholesky decomposition.

    Parameters
    ----------
    X : array, shape (n_samples, n_features)
        Input dictionary. Columns are assumed to have unit norm.

    y : array, shape (n_samples,)
        Input targets

    n_nonzero_coefs : int
        Targeted number of non-zero elements

    tol : float
        Targeted squared error, if not None overrides n_nonzero_coefs.

    copy_X : bool, optional
        Whether the design matrix X must be copied by the algorithm. A false
        value is only helpful if X is already Fortran-ordered, otherwise a
        copy is made anyway.

    return_path : bool, optional. Default: False
        Whether to return every value of the nonzero coefficients along the
        forward path. Useful for cross-validation.

    Returns
    -------
    gamma : array, shape (n_nonzero_coefs,)
        Non-zero elements of the solution

    idx : array, shape (n_nonzero_coefs,)
        Indices of the positions of the elements in gamma within the solution
        vector

    coef : array, shape (n_features, n_nonzero_coefs)
        The first k values of column k correspond to the coefficient value
        for the active features at that step. The lower left triangle contains
        garbage. Only returned if ``return_path=True``.
    """
    # Fortran (column-major) order so that column swaps below are cheap
    # contiguous BLAS operations.
    if copy_X:
        X = X.copy('F')
    else:  # even if we are allowed to overwrite, still copy it if bad order
        X = np.asfortranarray(X)

    min_float = np.finfo(X.dtype).eps
    nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (X,))
    potrs, = get_lapack_funcs(('potrs',), (X,))

    alpha = np.dot(X.T, y)  # correlations of each atom with the target
    residual = y
    gamma = np.empty(0)
    n_active = 0
    indices = np.arange(X.shape[1])  # keeping track of swapping

    max_features = X.shape[1] if tol is not None else n_nonzero_coefs
    # L holds the lower-triangular Cholesky factor of the active atoms' Gram
    # matrix, grown one row per selected atom.
    L = np.empty((max_features, max_features), dtype=X.dtype)
    L[0, 0] = 1.
    if return_path:
        coefs = np.empty_like(L)

    while True:
        # Greedy step: pick the atom most correlated with the residual.
        lam = np.argmax(np.abs(np.dot(X.T, residual)))
        if lam < n_active or alpha[lam] ** 2 < min_float:
            # atom already selected or inner product too small
            warnings.warn(premature, RuntimeWarning, stacklevel=2)
            break
        if n_active > 0:
            # Updates the Cholesky decomposition of X' X
            L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam])
            solve_triangular(L[:n_active, :n_active], L[n_active, :n_active])
            v = nrm2(L[n_active, :n_active]) ** 2
            if 1 - v <= min_float:  # selected atoms are dependent
                warnings.warn(premature, RuntimeWarning, stacklevel=2)
                break
            L[n_active, n_active] = np.sqrt(1 - v)
        # Move the selected atom into the active block (columns < n_active),
        # keeping alpha and indices in sync with the column order.
        X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam])
        alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active]
        indices[n_active], indices[lam] = indices[lam], indices[n_active]
        n_active += 1
        # solves LL'x = y as a composition of two triangular systems
        gamma, _ = potrs(L[:n_active, :n_active], alpha[:n_active], lower=True,
                         overwrite_b=False)
        if return_path:
            coefs[:n_active, n_active - 1] = gamma
        residual = y - np.dot(X[:, :n_active], gamma)
        if tol is not None and nrm2(residual) ** 2 <= tol:
            break
        elif n_active == max_features:
            break

    if return_path:
        return gamma, indices[:n_active], coefs[:, :n_active]
    else:
        return gamma, indices[:n_active]
def _gram_omp(Gram, Xy, n_nonzero_coefs, tol_0=None, tol=None,
              copy_Gram=True, copy_Xy=True, return_path=False):
    """Orthogonal Matching Pursuit step on a precomputed Gram matrix.

    This function uses the Cholesky decomposition method.

    Parameters
    ----------
    Gram : array, shape (n_features, n_features)
        Gram matrix of the input data matrix

    Xy : array, shape (n_features,)
        Input targets

    n_nonzero_coefs : int
        Targeted number of non-zero elements

    tol_0 : float
        Squared norm of y, required if tol is not None.

    tol : float
        Targeted squared error, if not None overrides n_nonzero_coefs.

    copy_Gram : bool, optional
        Whether the gram matrix must be copied by the algorithm. A false
        value is only helpful if it is already Fortran-ordered, otherwise a
        copy is made anyway.

    copy_Xy : bool, optional
        Whether the covariance vector Xy must be copied by the algorithm.
        If False, it may be overwritten.

    return_path : bool, optional. Default: False
        Whether to return every value of the nonzero coefficients along the
        forward path. Useful for cross-validation.

    Returns
    -------
    gamma : array, shape (n_nonzero_coefs,)
        Non-zero elements of the solution

    idx : array, shape (n_nonzero_coefs,)
        Indices of the positions of the elements in gamma within the solution
        vector

    coefs : array, shape (n_features, n_nonzero_coefs)
        The first k values of column k correspond to the coefficient value
        for the active features at that step. The lower left triangle contains
        garbage. Only returned if ``return_path=True``.
    """
    # Fortran order keeps the row/column swaps below cheap.
    Gram = Gram.copy('F') if copy_Gram else np.asfortranarray(Gram)
    if copy_Xy:
        Xy = Xy.copy()

    min_float = np.finfo(Gram.dtype).eps
    nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (Gram,))
    potrs, = get_lapack_funcs(('potrs',), (Gram,))

    indices = np.arange(len(Gram))  # keeping track of swapping
    alpha = Xy  # current correlations; updated in place each iteration
    tol_curr = tol_0
    delta = 0
    gamma = np.empty(0)
    n_active = 0

    max_features = len(Gram) if tol is not None else n_nonzero_coefs
    # L holds the lower-triangular Cholesky factor of the active atoms' Gram
    # submatrix, grown one row per selected atom.
    L = np.empty((max_features, max_features), dtype=Gram.dtype)
    L[0, 0] = 1.
    if return_path:
        coefs = np.empty_like(L)

    while True:
        # Greedy step: pick the atom most correlated with the residual.
        lam = np.argmax(np.abs(alpha))
        if lam < n_active or alpha[lam] ** 2 < min_float:
            # selected same atom twice, or inner product too small
            warnings.warn(premature, RuntimeWarning, stacklevel=2)
            break
        if n_active > 0:
            L[n_active, :n_active] = Gram[lam, :n_active]
            solve_triangular(L[:n_active, :n_active], L[n_active, :n_active])
            v = nrm2(L[n_active, :n_active]) ** 2
            if 1 - v <= min_float:  # selected atoms are dependent
                warnings.warn(premature, RuntimeWarning, stacklevel=2)
                break
            L[n_active, n_active] = np.sqrt(1 - v)
        # Move the selected atom into the active block: swap both the row
        # and the column of Gram, keeping indices and Xy in sync.
        Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam])
        Gram.T[n_active], Gram.T[lam] = swap(Gram.T[n_active], Gram.T[lam])
        indices[n_active], indices[lam] = indices[lam], indices[n_active]
        Xy[n_active], Xy[lam] = Xy[lam], Xy[n_active]
        n_active += 1
        # solves LL'x = y as a composition of two triangular systems
        gamma, _ = potrs(L[:n_active, :n_active], Xy[:n_active], lower=True,
                         overwrite_b=False)
        if return_path:
            coefs[:n_active, n_active - 1] = gamma
        beta = np.dot(Gram[:, :n_active], gamma)
        alpha = Xy - beta
        if tol is not None:
            # Incrementally track the residual's squared norm via the
            # inner product of gamma with beta.
            tol_curr += delta
            delta = np.inner(gamma, beta[:n_active])
            tol_curr -= delta
            if tol_curr <= tol:
                break
        elif n_active == max_features:
            break

    if return_path:
        return gamma, indices[:n_active], coefs[:, :n_active]
    else:
        return gamma, indices[:n_active]
def orthogonal_mp(X, y, n_nonzero_coefs=None, tol=None, precompute=False,
                  copy_X=True, return_path=False, precompute_gram=None):
    r"""Orthogonal Matching Pursuit (OMP)

    Solves n_targets Orthogonal Matching Pursuit problems.
    An instance of the problem has the form:

    When parametrized by the number of non-zero coefficients using
    `n_nonzero_coefs`:
    argmin ||y - X\gamma||^2 subject to ||\gamma||_0 <= n_{nonzero coefs}

    When parametrized by error using the parameter `tol`:
    argmin ||\gamma||_0 subject to ||y - X\gamma||^2 <= tol

    Parameters
    ----------
    X: array, shape (n_samples, n_features)
        Input data. Columns are assumed to have unit norm.

    y: array, shape (n_samples,) or (n_samples, n_targets)
        Input targets

    n_nonzero_coefs: int
        Desired number of non-zero entries in the solution. If None (by
        default) this value is set to 10% of n_features.

    tol: float
        Maximum norm of the residual. If not None, overrides n_nonzero_coefs.

    precompute: {True, False, 'auto'},
        Whether to perform precomputations. Improves performance when n_targets
        or n_samples is very large.

    copy_X: bool, optional
        Whether the design matrix X must be copied by the algorithm. A false
        value is only helpful if X is already Fortran-ordered, otherwise a
        copy is made anyway.

    return_path: bool, optional. Default: False
        Whether to return every value of the nonzero coefficients along the
        forward path. Useful for cross-validation.

    Returns
    -------
    coef: array, shape (n_features,) or (n_features, n_targets)
        Coefficients of the OMP solution. If `return_path=True`, this contains
        the whole coefficient path. In this case its shape is
        (n_features, n_features) or (n_features, n_targets, n_features) and
        iterating over the last axis yields coefficients in increasing order
        of active features.

    See also
    --------
    OrthogonalMatchingPursuit
    orthogonal_mp_gram
    lars_path
    decomposition.sparse_encode

    Notes
    -----
    Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
    Matching pursuits with time-frequency dictionaries, IEEE Transactions on
    Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
    (http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)

    This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
    M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
    Matching Pursuit Technical Report - CS Technion, April 2008.
    http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf

    """
    # Backward-compatible handling of the deprecated precompute_gram alias.
    if precompute_gram is not None:
        warnings.warn("precompute_gram will be removed in 0.15."
                      " Use the precompute parameter.",
                      DeprecationWarning, stacklevel=2)
        precompute = precompute_gram
        del precompute_gram
    X = array2d(X, order='F', copy=copy_X)
    copy_X = False  # X is already our own copy (or safely Fortran-ordered)
    y = np.asarray(y)
    if y.ndim == 1:
        # Promote single-target input to the (n_samples, 1) 2-D layout.
        y = y[:, np.newaxis]
    if y.shape[1] > 1:  # subsequent targets will be affected
        copy_X = True
    if n_nonzero_coefs is None and tol is None:
        # default for n_nonzero_coefs is 0.1 * n_features
        # but at least one.
        n_nonzero_coefs = max(int(0.1 * X.shape[1]), 1)
    if tol is not None and tol < 0:
        raise ValueError("Epsilon cannot be negative")
    if tol is None and n_nonzero_coefs <= 0:
        raise ValueError("The number of atoms must be positive")
    if tol is None and n_nonzero_coefs > X.shape[1]:
        raise ValueError("The number of atoms cannot be more than the number "
                         "of features")
    if precompute == 'auto':
        precompute = X.shape[0] > X.shape[1]
    if precompute:
        # Delegate to the Gram-based solver on X'X and X'y.
        G = np.dot(X.T, X)
        G = np.asfortranarray(G)
        Xy = np.dot(X.T, y)
        if tol is not None:
            norms_squared = np.sum((y ** 2), axis=0)
        else:
            norms_squared = None
        return orthogonal_mp_gram(G, Xy, n_nonzero_coefs, tol, norms_squared,
                                  copy_Gram=copy_X, copy_Xy=False)

    if return_path:
        coef = np.zeros((X.shape[1], y.shape[1], X.shape[1]))
    else:
        coef = np.zeros((X.shape[1], y.shape[1]))
    # Solve each target independently with the Cholesky-based OMP.
    for k in range(y.shape[1]):
        out = _cholesky_omp(X, y[:, k], n_nonzero_coefs, tol,
                            copy_X=copy_X, return_path=return_path)
        if return_path:
            _, idx, coefs = out
            # Trim the path axis to the number of atoms actually selected.
            coef = coef[:, :, :len(idx)]
            for n_active, x in enumerate(coefs.T):
                coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
        else:
            x, idx = out
            coef[idx, k] = x
    return np.squeeze(coef)
def orthogonal_mp_gram(Gram, Xy, n_nonzero_coefs=None, tol=None,
                       norms_squared=None, copy_Gram=True,
                       copy_Xy=True, return_path=False):
    """Gram Orthogonal Matching Pursuit (OMP)

    Solves n_targets Orthogonal Matching Pursuit problems using only
    the Gram matrix X.T * X and the product X.T * y.

    Parameters
    ----------
    Gram: array, shape (n_features, n_features)
        Gram matrix of the input data: X.T * X

    Xy: array, shape (n_features,) or (n_features, n_targets)
        Input targets multiplied by X: X.T * y

    n_nonzero_coefs: int
        Desired number of non-zero entries in the solution. If None (by
        default) this value is set to 10% of n_features.

    tol: float
        Maximum norm of the residual. If not None, overrides n_nonzero_coefs.

    norms_squared: array-like, shape (n_targets,)
        Squared L2 norms of the lines of y. Required if tol is not None.

    copy_Gram: bool, optional
        Whether the gram matrix must be copied by the algorithm. A false
        value is only helpful if it is already Fortran-ordered, otherwise a
        copy is made anyway.

    copy_Xy: bool, optional
        Whether the covariance vector Xy must be copied by the algorithm.
        If False, it may be overwritten.

    return_path: bool, optional. Default: False
        Whether to return every value of the nonzero coefficients along the
        forward path. Useful for cross-validation.

    Returns
    -------
    coef: array, shape (n_features,) or (n_features, n_targets)
        Coefficients of the OMP solution. If `return_path=True`, this contains
        the whole coefficient path. In this case its shape is
        (n_features, n_features) or (n_features, n_targets, n_features) and
        iterating over the last axis yields coefficients in increasing order
        of active features.

    See also
    --------
    OrthogonalMatchingPursuit
    orthogonal_mp
    lars_path
    decomposition.sparse_encode

    Notes
    -----
    Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
    Matching pursuits with time-frequency dictionaries, IEEE Transactions on
    Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
    (http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)

    This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
    M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
    Matching Pursuit Technical Report - CS Technion, April 2008.
    http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
    """
    Gram = array2d(Gram, order='F', copy=copy_Gram)
    Xy = np.asarray(Xy)
    if Xy.ndim > 1 and Xy.shape[1] > 1:
        # or subsequent target will be affected
        copy_Gram = True
    if Xy.ndim == 1:
        Xy = Xy[:, np.newaxis]
        if tol is not None:
            # keep one squared norm per target column
            norms_squared = [norms_squared]

    if n_nonzero_coefs is None and tol is None:
        n_nonzero_coefs = int(0.1 * len(Gram))
    # Parameter validation: a tolerance requires the precomputed norms,
    # and the sparsity level must be in (0, n_features].
    if tol is not None and norms_squared is None:
        raise ValueError('Gram OMP needs the precomputed norms in order '
                         'to evaluate the error sum of squares.')
    if tol is not None and tol < 0:
        raise ValueError("Epsilon cannot be negative")
    if tol is None and n_nonzero_coefs <= 0:
        raise ValueError("The number of atoms must be positive")
    if tol is None and n_nonzero_coefs > len(Gram):
        raise ValueError("The number of atoms cannot be more than the number "
                         "of features")

    if return_path:
        # One slice along the last axis per step of the forward path.
        coef = np.zeros((len(Gram), Xy.shape[1], len(Gram)))
    else:
        coef = np.zeros((len(Gram), Xy.shape[1]))

    # Solve one OMP problem per target column.
    for k in range(Xy.shape[1]):
        out = _gram_omp(Gram, Xy[:, k], n_nonzero_coefs,
                        norms_squared[k] if tol is not None else None, tol,
                        copy_Gram=copy_Gram, copy_Xy=copy_Xy,
                        return_path=return_path)
        if return_path:
            _, idx, coefs = out
            # Trim the path axis to the number of steps actually taken.
            coef = coef[:, :, :len(idx)]
            for n_active, x in enumerate(coefs.T):
                coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
        else:
            x, idx = out
            coef[idx, k] = x
    return np.squeeze(coef)
class OrthogonalMatchingPursuit(LinearModel, RegressorMixin):
    """Orthogonal Mathching Pursuit model (OMP)

    Parameters
    ----------
    n_nonzero_coefs : int, optional
        Desired number of non-zero entries in the solution. If None (by
        default) this value is set to 10% of n_features.

    tol : float, optional
        Maximum norm of the residual. If not None, overrides n_nonzero_coefs.

    fit_intercept : boolean, optional
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional
        If False, the regressors X are assumed to be already normalized.

    precompute : {True, False, 'auto'}, default 'auto'
        Whether to use a precomputed Gram and Xy matrix to speed up
        calculations. Improves performance when `n_targets` or `n_samples` is
        very large. Note that if you already have such matrices, you can pass
        them directly to the fit method.

    copy_X : bool, optional
        Whether the design matrix X must be copied by the algorithm. A false
        value is only helpful if X is already Fortran-ordered, otherwise a
        copy is made anyway.
        WARNING : will be deprecated in 0.15

    copy_Gram : bool, optional
        Whether the gram matrix must be copied by the algorithm. A false
        value is only helpful if X is already Fortran-ordered, otherwise a
        copy is made anyway.
        WARNING : will be deprecated in 0.15

    copy_Xy : bool, optional
        Whether the covariance vector Xy must be copied by the algorithm.
        If False, it may be overwritten.
        WARNING : will be deprecated in 0.15

    Attributes
    ----------
    `coef_` : array, shape (n_features,) or (n_features, n_targets)
        parameter vector (w in the formula)

    `intercept_` : float or array, shape (n_targets,)
        independent term in decision function.

    Notes
    -----
    Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
    Matching pursuits with time-frequency dictionaries, IEEE Transactions on
    Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
    (http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)

    This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
    M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
    Matching Pursuit Technical Report - CS Technion, April 2008.
    http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf

    See also
    --------
    orthogonal_mp
    orthogonal_mp_gram
    lars_path
    Lars
    LassoLars
    decomposition.sparse_encode
    """
    def __init__(self, copy_X=None, copy_Gram=None, copy_Xy=None,
                 n_nonzero_coefs=None, tol=None, fit_intercept=True,
                 normalize=True, precompute='auto', precompute_gram=None):
        # Parameters are stored unmodified; the deprecated ones
        # (copy_*, precompute_gram) are resolved inside fit().
        self.n_nonzero_coefs = n_nonzero_coefs
        self.tol = tol
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.precompute_gram = precompute_gram
        self.copy_Gram = copy_Gram
        self.copy_Xy = copy_Xy
        self.copy_X = copy_X

    def fit(self, X, y, Gram=None, Xy=None):
        """Fit the model using X, y as training data.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.

        y : array-like, shape (n_samples,) or (n_samples, n_targets)
            Target values.

        Gram : array-like, shape (n_features, n_features) (optional)
            Gram matrix of the input data: X.T * X
            WARNING : will be deprecated in 0.15

        Xy : array-like, shape (n_features,) or (n_features, n_targets)
            (optional)
            Input targets multiplied by X: X.T * y
            WARNING : will be deprecated in 0.15

        Returns
        -------
        self: object
            returns an instance of self.
        """
        X = array2d(X)
        y = np.asarray(y)
        n_features = X.shape[1]

        # Resolve each deprecated constructor parameter, warning only when
        # it was explicitly set (non-None).
        if self.precompute_gram is not None:
            warnings.warn("precompute_gram will be removed in 0.15."
                          " Use the precompute parameter.",
                          DeprecationWarning, stacklevel=2)
            precompute = self.precompute_gram
        else:
            precompute = self.precompute

        if self.copy_Gram is not None:
            warnings.warn("copy_Gram will be removed in 0.15."
                          " Use the orthogonal_mp function for"
                          " low level memory control.",
                          DeprecationWarning, stacklevel=2)
            copy_Gram = self.copy_Gram
        else:
            copy_Gram = True

        if self.copy_Xy is not None:
            warnings.warn("copy_Xy will be removed in 0.15."
                          " Use the orthogonal_mp function for"
                          " low level memory control.",
                          DeprecationWarning, stacklevel=2)
            copy_Xy = self.copy_Xy
        else:
            copy_Xy = True

        if self.copy_X is not None:
            warnings.warn("copy_X will be removed in 0.15."
                          " Use the orthogonal_mp function for"
                          " low level memory control.",
                          DeprecationWarning, stacklevel=2)
            copy_X = self.copy_X
        else:
            copy_X = True

        if Gram is not None:
            warnings.warn("Gram will be removed in 0.15."
                          " Use the orthogonal_mp function for"
                          " low level memory control.",
                          DeprecationWarning, stacklevel=2)

        if Xy is not None:
            warnings.warn("Xy will be removed in 0.15."
                          " Use the orthogonal_mp function for"
                          " low level memory control.",
                          DeprecationWarning, stacklevel=2)

        # Precomputed matrices are incompatible with centering/normalization:
        # discard them and recompute from X, y.
        if (Gram is not None or Xy is not None) and (self.fit_intercept
                                                     or self.normalize):
            warnings.warn('Mean subtraction (fit_intercept) and normalization '
                          'cannot be applied on precomputed Gram and Xy '
                          'matrices. Your precomputed values are ignored and '
                          'recomputed. To avoid this, do the scaling yourself '
                          'and call with fit_intercept and normalize set to '
                          'False.', RuntimeWarning, stacklevel=2)
            Gram, Xy = None, None

        if Gram is not None:
            precompute = Gram
        if Xy is not None and copy_Xy:
            Xy = Xy.copy()

        X, y, X_mean, y_mean, X_std, Gram, Xy = \
            _pre_fit(X, y, Xy, precompute, self.normalize, self.fit_intercept,
                     copy=copy_X)

        if y.ndim == 1:
            y = y[:, np.newaxis]

        if self.n_nonzero_coefs is None and self.tol is None:
            # default for n_nonzero_coefs is 0.1 * n_features
            # but at least one.
            self.n_nonzero_coefs_ = max(int(0.1 * n_features), 1)
        else:
            self.n_nonzero_coefs_ = self.n_nonzero_coefs

        if Gram is False:
            # Gram precomputation disabled: run OMP directly on X.
            self.coef_ = orthogonal_mp(X, y, self.n_nonzero_coefs_, self.tol,
                                       precompute=False, copy_X=copy_X).T
        else:
            norms_sq = np.sum(y ** 2, axis=0) if self.tol is not None else None
            self.coef_ = orthogonal_mp_gram(Gram, Xy, self.n_nonzero_coefs_,
                                            self.tol, norms_sq,
                                            copy_Gram, True).T

        self._set_intercept(X_mean, y_mean, X_std)
        return self
def _omp_path_residues(X_train, y_train, X_test, y_test, copy=True,
                       fit_intercept=True, normalize=True, max_iter=100):
    """Compute the residues on left-out data for a full LARS path

    Parameters
    -----------
    X_train : array, shape (n_samples, n_features)
        The data to fit the LARS on

    y_train : array, shape (n_samples)
        The target variable to fit LARS on

    X_test : array, shape (n_samples, n_features)
        The data to compute the residues on

    y_test : array, shape (n_samples)
        The target variable to compute the residues on

    copy : boolean, optional
        Whether X_train, X_test, y_train and y_test should be copied. If
        False, they may be overwritten.

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.

    max_iter : integer, optional
        Maximum numbers of iterations to perform, therefore maximum features
        to include. 100 by default.

    Returns
    -------
    residues: array, shape (n_samples, max_features)
        Residues of the prediction on the test data
    """
    if copy:
        # Centering/normalization below mutate the arrays in place.
        X_train = X_train.copy()
        y_train = y_train.copy()
        X_test = X_test.copy()
        y_test = y_test.copy()

    if fit_intercept:
        # Center both train and test data with the *training* statistics.
        X_mean = X_train.mean(axis=0)
        X_train -= X_mean
        X_test -= X_mean
        y_mean = y_train.mean(axis=0)
        y_train = as_float_array(y_train, copy=False)
        y_train -= y_mean
        y_test = as_float_array(y_test, copy=False)
        y_test -= y_mean

    if normalize:
        # Scale only columns with non-zero norm to avoid division by zero.
        norms = np.sqrt(np.sum(X_train ** 2, axis=0))
        nonzeros = np.flatnonzero(norms)
        X_train[:, nonzeros] /= norms[nonzeros]

    coefs = orthogonal_mp(X_train, y_train, n_nonzero_coefs=max_iter, tol=None,
                          precompute=False, copy_X=False,
                          return_path=True)
    if coefs.ndim == 1:
        coefs = coefs[:, np.newaxis]
    if normalize:
        # Undo the column scaling so coefficients apply to the raw X scale.
        coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]

    return np.dot(coefs.T, X_test.T) - y_test
class OrthogonalMatchingPursuitCV(LinearModel, RegressorMixin):
    """Cross-validated Orthogonal Mathching Pursuit model (OMP)

    Parameters
    ----------
    copy : bool, optional
        Whether the design matrix X must be copied by the algorithm. A false
        value is only helpful if X is already Fortran-ordered, otherwise a
        copy is made anyway.

    fit_intercept : boolean, optional
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional
        If False, the regressors X are assumed to be already normalized.

    max_iter : integer, optional
        Maximum numbers of iterations to perform, therefore maximum features
        to include. 10% of ``n_features`` but at least 5 if available.

    cv : cross-validation generator, optional
        see :mod:`sklearn.cross_validation`. If ``None`` is passed, default to
        a 5-fold strategy

    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs

    verbose : boolean or integer, optional
        Sets the verbosity amount

    Attributes
    ----------
    `n_nonzero_coefs_` : int
        Estimated number of non-zero coefficients giving the best mean
        squared error over the cross-validation folds.

    `coef_` : array, shape (n_features,) or (n_features, n_targets)
        parameter vector (w in the problem formulation).

    `intercept_` : float or array, shape (n_targets,)
        independent term in decision function.

    See also
    --------
    orthogonal_mp
    orthogonal_mp_gram
    lars_path
    Lars
    LassoLars
    OrthogonalMatchingPursuit
    LarsCV
    LassoLarsCV
    decomposition.sparse_encode
    """
    def __init__(self, copy=True, fit_intercept=True, normalize=True,
                 max_iter=None, cv=None, n_jobs=1, verbose=False):
        self.copy = copy
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.cv = cv
        self.n_jobs = n_jobs
        self.verbose = verbose

    def fit(self, X, y):
        """Fit the model using X, y as training data.

        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            Training data.

        y : array-like, shape [n_samples]
            Target values.

        Returns
        -------
        self : object
            returns an instance of self.
        """
        X = array2d(X)
        X, y = check_arrays(X, y)
        cv = check_cv(self.cv, X, y, classifier=False)
        # Default path length: 10% of n_features, at least 5, capped at
        # n_features.
        max_iter = (min(max(int(0.1 * X.shape[1]), 5), X.shape[1])
                    if not self.max_iter
                    else self.max_iter)
        # One residue path per CV fold, computed in parallel.
        cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
            delayed(_omp_path_residues)(
                X[train], y[train], X[test], y[test], self.copy,
                self.fit_intercept, self.normalize, max_iter)
            for train, test in cv)

        # Folds may stop early; compare them over the common path length.
        min_early_stop = min(fold.shape[0] for fold in cv_paths)
        mse_folds = np.array([(fold[:min_early_stop] ** 2).mean(axis=1)
                              for fold in cv_paths])
        # argmin is 0-based over path steps; +1 converts to a coefficient
        # count.
        best_n_nonzero_coefs = np.argmin(mse_folds.mean(axis=0)) + 1
        self.n_nonzero_coefs_ = best_n_nonzero_coefs
        # Refit on the full data with the selected sparsity level.
        omp = OrthogonalMatchingPursuit(n_nonzero_coefs=best_n_nonzero_coefs,
                                        copy_X=None,
                                        fit_intercept=self.fit_intercept,
                                        normalize=self.normalize)
        omp.fit(X, y)
        self.coef_ = omp.coef_
        self.intercept_ = omp.intercept_
        return self
| B3AU/waveTree | sklearn/linear_model/omp.py | Python | bsd-3-clause | 31,732 |
# -*- coding: utf-8 -*-
"""
logbook.testsuite
~~~~~~~~~~~~~~~~~
The logbook testsuite.
:copyright: (c) 2010 by Armin Ronacher, Georg Brandl.
:license: BSD, see LICENSE for more details.
"""
import sys
import unittest
import logbook
# Names of modules whose import failed; reported after the suite runs.
_skipped_modules = []
# Sentinel distinguishing "module absent" from "module set to None".
_missing = object()
# Decorator results: keep the test function, or replace it with None.
_func_ident = lambda f: f
_func_none = lambda f: None
class LogbookTestSuite(unittest.TestSuite):
    """Test suite that, after running, reports modules whose import failed."""

    def run(self, result):
        """Run the suite, then list every skipped module on stderr."""
        try:
            return unittest.TestSuite.run(self, result)
        finally:
            stream = sys.stderr
            stream.write('\n')
            for module_name in _skipped_modules:
                stream.write('*** Failed to import %s, tests skipped.\n'
                             % module_name)
class LogbookTestCase(unittest.TestCase):
    """Base test case that creates a new logbook Logger for each test."""

    def setUp(self):
        # A fresh Logger named 'testlogger' for every test method.
        self.log = logbook.Logger('testlogger')

# silence deprecation warning displayed on Py 3.2
LogbookTestCase.assert_ = LogbookTestCase.assertTrue
def make_fake_mail_handler(**kwargs):
    """Return a MailHandler that records mails instead of sending them.

    Keyword arguments are forwarded to the handler constructor; the
    level defaults to logbook.ERROR.
    """
    kwargs.setdefault('level', logbook.ERROR)

    class FakeMailHandler(logbook.MailHandler):
        """Stub handler: captured mails accumulate in the ``mails`` list."""
        mails = []

        def get_connection(self):
            return self

        def close_connection(self, con):
            pass

        def sendmail(self, fromaddr, recipients, mail):
            self.mails.append((fromaddr, recipients, mail))

    return FakeMailHandler('foo@example.com', ['bar@example.com'], **kwargs)
def skip_if(condition):
    """Decorator factory: keep the test when *condition* holds, drop it
    (replace it with None) otherwise."""
    return _func_ident if condition else _func_none
def require(name):
    """Decorator factory: drop the test unless module *name* is importable.

    A failed import is remembered in ``_skipped_modules`` so it is only
    attempted once and reported at the end of the run.
    """
    if name in _skipped_modules:
        return _func_none
    try:
        __import__(name)
    except ImportError:
        _skipped_modules.append(name)
        return _func_none
    else:
        return _func_ident
def missing(name):
    """Decorator: run the wrapped test with module *name* masked as None
    in ``sys.modules``, restoring the previous state afterwards."""
    def decorate(f):
        def wrapper(*args, **kwargs):
            previous = sys.modules.get(name, _missing)
            sys.modules[name] = None
            try:
                f(*args, **kwargs)
            finally:
                if previous is _missing:
                    del sys.modules[name]
                else:
                    sys.modules[name] = previous
        return wrapper
    return decorate
def suite():
    """Assemble and return the logbook test suite."""
    loader = unittest.TestLoader()
    tests = LogbookTestSuite()
    tests.addTests(loader.loadTestsFromName('logbook.testsuite.test_regular'))
    # The contextmanager tests use syntax that needs Python >= 2.5.
    if sys.version_info >= (2, 5):
        tests.addTests(loader.loadTestsFromName(
            'logbook.testsuite.test_contextmanager'))
    return tests
# Allow running the test suite directly as a script.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| chiehwen/logbook | logbook/testsuite/__init__.py | Python | bsd-3-clause | 2,534 |
from email.utils import formatdate
from typing import Optional, Type, TypeVar
from twisted.internet import defer
from twisted.internet.error import (
ConnectError,
ConnectionDone,
ConnectionLost,
ConnectionRefusedError,
DNSLookupError,
TCPTimedOutError,
TimeoutError,
)
from twisted.web.client import ResponseFailed
from scrapy import signals
from scrapy.crawler import Crawler
from scrapy.exceptions import IgnoreRequest, NotConfigured
from scrapy.http.request import Request
from scrapy.http.response import Response
from scrapy.settings import Settings
from scrapy.spiders import Spider
from scrapy.statscollectors import StatsCollector
from scrapy.utils.misc import load_object
HttpCacheMiddlewareTV = TypeVar("HttpCacheMiddlewareTV", bound="HttpCacheMiddleware")
class HttpCacheMiddleware:
    """Downloader middleware implementing an HTTP cache.

    Serves responses from a storage backend when the configured policy
    allows it, and stores freshly downloaded responses back into the
    cache. Policy and storage classes are loaded from the HTTPCACHE_*
    settings.
    """

    # Download failures for which a previously cached response may be
    # served as a fallback (see process_exception).
    DOWNLOAD_EXCEPTIONS = (defer.TimeoutError, TimeoutError, DNSLookupError,
                           ConnectionRefusedError, ConnectionDone, ConnectError,
                           ConnectionLost, TCPTimedOutError, ResponseFailed,
                           IOError)

    def __init__(self, settings: Settings, stats: StatsCollector) -> None:
        # Disable the whole middleware unless caching is switched on.
        if not settings.getbool('HTTPCACHE_ENABLED'):
            raise NotConfigured
        self.policy = load_object(settings['HTTPCACHE_POLICY'])(settings)
        self.storage = load_object(settings['HTTPCACHE_STORAGE'])(settings)
        self.ignore_missing = settings.getbool('HTTPCACHE_IGNORE_MISSING')
        self.stats = stats

    @classmethod
    def from_crawler(cls: Type[HttpCacheMiddlewareTV], crawler: Crawler) -> HttpCacheMiddlewareTV:
        """Build the middleware from a crawler and hook spider open/close."""
        o = cls(crawler.settings, crawler.stats)
        crawler.signals.connect(o.spider_opened, signal=signals.spider_opened)
        crawler.signals.connect(o.spider_closed, signal=signals.spider_closed)
        return o

    def spider_opened(self, spider: Spider) -> None:
        """Open the storage backend for *spider*."""
        self.storage.open_spider(spider)

    def spider_closed(self, spider: Spider) -> None:
        """Close the storage backend for *spider*."""
        self.storage.close_spider(spider)

    def process_request(self, request: Request, spider: Spider) -> Optional[Response]:
        """Return a fresh cached response, or None to let the download proceed."""
        if request.meta.get('dont_cache', False):
            return None

        # Skip uncacheable requests
        if not self.policy.should_cache_request(request):
            request.meta['_dont_cache'] = True  # flag as uncacheable
            return None

        # Look for cached response and check if expired
        cachedresponse = self.storage.retrieve_response(spider, request)
        if cachedresponse is None:
            self.stats.inc_value('httpcache/miss', spider=spider)
            if self.ignore_missing:
                self.stats.inc_value('httpcache/ignore', spider=spider)
                raise IgnoreRequest("Ignored request not in cache: %s" % request)
            return None  # first time request

        # Return cached response only if not expired
        cachedresponse.flags.append('cached')
        if self.policy.is_cached_response_fresh(cachedresponse, request):
            self.stats.inc_value('httpcache/hit', spider=spider)
            return cachedresponse

        # Keep a reference to cached response to avoid a second cache lookup on
        # process_response hook
        request.meta['cached_response'] = cachedresponse
        return None

    def process_response(self, request: Request, response: Response, spider: Spider) -> Response:
        """Validate the downloaded response against the cache and store it."""
        if request.meta.get('dont_cache', False):
            return response

        # Skip cached responses and uncacheable requests
        if 'cached' in response.flags or '_dont_cache' in request.meta:
            request.meta.pop('_dont_cache', None)
            return response

        # RFC2616 requires origin server to set Date header,
        # https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.18
        if 'Date' not in response.headers:
            response.headers['Date'] = formatdate(usegmt=True)

        # Do not validate first-hand responses
        cachedresponse = request.meta.pop('cached_response', None)
        if cachedresponse is None:
            self.stats.inc_value('httpcache/firsthand', spider=spider)
            self._cache_response(spider, response, request, cachedresponse)
            return response

        if self.policy.is_cached_response_valid(cachedresponse, response, request):
            self.stats.inc_value('httpcache/revalidate', spider=spider)
            return cachedresponse

        self.stats.inc_value('httpcache/invalidate', spider=spider)
        self._cache_response(spider, response, request, cachedresponse)
        return response

    def process_exception(
        self, request: Request, exception: Exception, spider: Spider
    ) -> Optional[Response]:
        """Fall back to the cached response when the download failed."""
        cachedresponse = request.meta.pop('cached_response', None)
        if cachedresponse is not None and isinstance(exception, self.DOWNLOAD_EXCEPTIONS):
            self.stats.inc_value('httpcache/errorrecovery', spider=spider)
            return cachedresponse
        return None

    def _cache_response(
        self, spider: Spider, response: Response, request: Request, cachedresponse: Optional[Response]
    ) -> None:
        """Store *response* if the policy deems it cacheable."""
        if self.policy.should_cache_response(response, request):
            self.stats.inc_value('httpcache/store', spider=spider)
            self.storage.store_response(spider, request, response)
        else:
            self.stats.inc_value('httpcache/uncacheable', spider=spider)
| starrify/scrapy | scrapy/downloadermiddlewares/httpcache.py | Python | bsd-3-clause | 5,481 |
"""SCons.Tool.mwcc
Tool-specific initialization for the Metrowerks CodeWarrior compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/mwcc.py 3897 2009/01/13 06:45:54 scons"
import os
import os.path
import string
import SCons.Util
def set_vars(env):
    """Set MWCW_VERSION, MWCW_VERSIONS, and some codewarrior environment vars

    MWCW_VERSIONS is set to a list of objects representing installed versions

    MWCW_VERSION is set to the version object that will be used for building.
    MWCW_VERSION can be set to a string during Environment
    construction to influence which version is chosen, otherwise
    the latest one from MWCW_VERSIONS is used.

    Returns true if at least one version is found, false otherwise
    """
    desired = env.get('MWCW_VERSION', '')

    # return right away if the variables are already set
    if isinstance(desired, MWVersion):
        return 1
    elif desired is None:
        return 0

    versions = find_versions()
    version = None

    if desired:
        # Pick the installed version whose string form matches the request.
        for v in versions:
            if str(v) == desired:
                version = v
    elif versions:
        # No preference given: use the most recently enumerated version.
        version = versions[-1]

    env['MWCW_VERSIONS'] = versions
    env['MWCW_VERSION'] = version

    if version is None:
        return 0

    env.PrependENVPath('PATH', version.clpath)
    env.PrependENVPath('PATH', version.dllpath)
    ENV = env['ENV']
    ENV['CWFolder'] = version.path
    ENV['LM_LICENSE_FILE'] = version.license
    # CodeWarrior expects '+'-prefixed, os.pathsep-joined search paths.
    # NOTE: string.join/map usage is Python 2 style, consistent with the
    # rest of this module.
    plus = lambda x: '+%s' % x
    ENV['MWCIncludes'] = string.join(map(plus, version.includes), os.pathsep)
    ENV['MWLibraries'] = string.join(map(plus, version.libs), os.pathsep)
    return 1
def find_versions():
    """Return a list of MWVersion objects representing installed versions"""
    versions = []

    ### This function finds CodeWarrior by reading from the registry on
    ### Windows. Some other method needs to be implemented for other
    ### platforms, maybe something that calls env.WhereIs('mwcc')

    if SCons.Util.can_read_reg:
        try:
            HLM = SCons.Util.HKEY_LOCAL_MACHINE
            product = 'SOFTWARE\\Metrowerks\\CodeWarrior\\Product Versions'
            product_key = SCons.Util.RegOpenKeyEx(HLM, product)

            i = 0
            # Enumerate subkeys until RegEnumKey raises RegError, which is
            # caught by the outer handler and ends the loop.
            while 1:
                name = product + '\\' + SCons.Util.RegEnumKey(product_key, i)
                name_key = SCons.Util.RegOpenKeyEx(HLM, name)

                try:
                    version = SCons.Util.RegQueryValueEx(name_key, 'VERSION')
                    path = SCons.Util.RegQueryValueEx(name_key, 'PATH')
                    mwv = MWVersion(version[0], path[0], 'Win32-X86')
                    versions.append(mwv)
                except SCons.Util.RegError:
                    # Entry without VERSION/PATH values: skip it.
                    pass

                i = i + 1

        except SCons.Util.RegError:
            # End of enumeration (or registry unreadable): done.
            pass

    return versions
class MWVersion:
    """One installed CodeWarrior version: its location and search paths."""

    def __init__(self, version, path, platform):
        self.version = version
        self.path = path
        self.platform = platform
        self.clpath = os.path.join(path, 'Other Metrowerks Tools',
                                   'Command Line Tools')
        self.dllpath = os.path.join(path, 'Bin')
        # The Metrowerks tools don't store any configuration data so they
        # are totally dumb when it comes to locating standard headers,
        # libraries, and other files, expecting all the information
        # to be handed to them in environment variables. The members set
        # below control what information scons injects into the environment
        ### The paths below give a normal build environment in CodeWarrior for
        ### Windows, other versions of CodeWarrior might need different paths.
        msl_dir = os.path.join(path, 'MSL')
        support_dir = os.path.join(path, '%s Support' % platform)
        self.license = os.path.join(path, 'license.dat')
        self.includes = [msl_dir, support_dir]
        self.libs = [msl_dir, support_dir]

    def __str__(self):
        return self.version
# Source suffixes dispatched to the C and C++ compile actions, respectively.
CSuffixes = ['.c', '.C']
CXXSuffixes = ['.cc', '.cpp', '.cxx', '.c++', '.C++']
def generate(env):
    """Add Builders and construction variables for the mwcc to an Environment."""
    import SCons.Defaults
    import SCons.Tool

    # Locate an installed CodeWarrior and export its environment variables.
    set_vars(env)

    static_obj, shared_obj = SCons.Tool.createObjBuilders(env)

    # Route C and C++ sources to the standard compile actions.
    for suffix in CSuffixes:
        static_obj.add_action(suffix, SCons.Defaults.CAction)
        shared_obj.add_action(suffix, SCons.Defaults.ShCAction)

    for suffix in CXXSuffixes:
        static_obj.add_action(suffix, SCons.Defaults.CXXAction)
        shared_obj.add_action(suffix, SCons.Defaults.ShCXXAction)

    # Common flags shared by all mwcc compile command lines.
    env['CCCOMFLAGS'] = '$CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS -nolink -o $TARGET $SOURCES'

    env['CC'] = 'mwcc'
    env['CCCOM'] = '$CC $CFLAGS $CCFLAGS $CCCOMFLAGS'

    env['CXX'] = 'mwcc'
    env['CXXCOM'] = '$CXX $CXXFLAGS $CCCOMFLAGS'

    # Shared-object compiles reuse the static-compile settings.
    env['SHCC'] = '$CC'
    env['SHCCFLAGS'] = '$CCFLAGS'
    env['SHCFLAGS'] = '$CFLAGS'
    env['SHCCCOM'] = '$SHCC $SHCFLAGS $SHCCFLAGS $CCCOMFLAGS'

    env['SHCXX'] = '$CXX'
    env['SHCXXFLAGS'] = '$CXXFLAGS'
    env['SHCXXCOM'] = '$SHCXX $SHCXXFLAGS $CCCOMFLAGS'

    env['CFILESUFFIX'] = '.c'
    env['CXXFILESUFFIX'] = '.cpp'
    env['CPPDEFPREFIX'] = '-D'
    env['CPPDEFSUFFIX'] = ''
    env['INCPREFIX'] = '-I'
    env['INCSUFFIX'] = ''

    #env['PCH'] = ?
    #env['PCHSTOP'] = ?
def exists(env):
    """Return a true value if a CodeWarrior installation can be configured."""
    return set_vars(env)
| kuiche/chromium | third_party/scons/scons-local/SCons/Tool/mwcc.py | Python | bsd-3-clause | 6,796 |
#!/usr/bin/python
"""
@brief QuickBot class for Beaglebone Black
@author Rowland O'Flaherty (rowlandoflaherty.com)
@date 02/07/2014
@version: 1.0
@copyright: Copyright (C) 2014, Georgia Tech Research Corporation
see the LICENSE file included with this software (see LINENSE file)
"""
from __future__ import division
import sys
import time
import re
import socket
import threading
import numpy as np
import Adafruit_BBIO.GPIO as GPIO
import Adafruit_BBIO.PWM as PWM
import Adafruit_BBIO.ADC as ADC
# Constants
# Indices into the per-wheel pairs (pins, pwm, encoder buffers).
LEFT = 0
RIGHT = 1
# Indices into pwmLimits ([min, max]).
MIN = 0
MAX = 1

DEBUG = False

ADCTIME = 0.001

## Tic toc constants
TICTOC_START = 0
TICTOC_COUNT = 0
TICTOC_MEAN = 0
TICTOC_MAX = -float('inf')
TICTOC_MIN = float('inf')

## Encoder buffer constants and variables
ENC_BUF_SIZE = 2**9

# Per-wheel circular buffers of encoder sample times and values.
ENC_IND = [0, 0]
ENC_TIME = [[0]*ENC_BUF_SIZE, [0]*ENC_BUF_SIZE]
ENC_VAL = [[0]*ENC_BUF_SIZE, [0]*ENC_BUF_SIZE]

# Serializes access to the ADC across threads.
ADC_LOCK = threading.Lock()

## Run variables
# Main-loop flag; writes are guarded by RUN_FLAG_LOCK (see QuickBot.run).
RUN_FLAG = True
RUN_FLAG_LOCK = threading.Lock()
class QuickBot():
    """The QuickBot Class"""

    # === Class Properties ===
    # NOTE(review): everything below is a *class* attribute, shared by all
    # instances; the mutable ones (lists, numpy arrays, the socket) would
    # be shared state if more than one QuickBot were created per process.

    # Parameters
    sampleTime = 20.0 / 1000.0  # main-loop period in seconds

    # Pins
    ledPin = 'USR1'

    # Motor Pins -- (LEFT, RIGHT)
    dir1Pin = ('P8_14', 'P8_12')
    dir2Pin = ('P8_16', 'P8_10')
    pwmPin = ('P9_16', 'P9_14')

    # ADC Pins
    irPin = ('P9_38', 'P9_40', 'P9_36', 'P9_35', 'P9_33')
    encoderPin = ('P9_39', 'P9_37')

    # Encoder counting parameter and variables
    ticksPerTurn = 16  # Number of ticks on encoder disc
    encWinSize = 2**5  # Should be power of 2
    minPWMThreshold = [45, 45]  # Threshold on the minimum value to turn wheel
    encTPrev = [0.0, 0.0]
    encThreshold = [0.0, 0.0]
    encTickState = [0, 0]
    encTickStateVec = np.zeros((2, encWinSize))

    # Constraints
    pwmLimits = [-100, 100]  # [min, max]

    # State PWM -- (LEFT, RIGHT)
    pwm = [0, 0]

    # State IR
    irVal = [0.0, 0.0, 0.0, 0.0, 0.0]
    ithIR = 0

    # State Encoder
    encTime = [0.0, 0.0]  # Last time encoders were read
    encPos = [0.0, 0.0]  # Last encoder tick position
    encVel = [0.0, 0.0]  # Last encoder tick velocity

    # Encoder counting parameters
    encCnt = 0  # Count of number times encoders have been read
    encSumN = [0, 0]  # Sum of total encoder samples
    encBufInd0 = [0, 0]  # Index of beginning of new samples in buffer
    encBufInd1 = [0, 0]  # Index of end of new samples in buffer
    encTimeWin = np.zeros((2, encWinSize))  # Moving window of encoder sample times
    encValWin = np.zeros((2, encWinSize))  # Moving window of encoder raw sample values
    encPWMWin = np.zeros((2, encWinSize))  # Moving window corresponding PWM input values
    encTau = [0.0, 0.0]  # Average sampling time of encoders

    ## Stats of encoder values while input = 0 and vel = 0
    encZeroCntMin = 2**4  # Min number of recorded values to start calculating stats
    encZeroMean = [0.0, 0.0]
    encZeroVar = [0.0, 0.0]
    encZeroCnt = [0, 0]
    encHighCnt = [0, 0]
    encLowCnt = [0, 0]
    encLowCntMin = 2

    # Variables
    ledFlag = True
    cmdBuffer = ''

    # UDP
    baseIP = '192.168.7.1'
    robotIP = '192.168.7.2'
    port = 5005
    robotSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    robotSocket.setblocking(False)
    def __init__(self, baseIP, robotIP):
        """Configure GPIO/PWM/ADC hardware and bind the UDP command socket.

        baseIP -- IP address of the controlling host.
        robotIP -- local address the robot socket binds to.
        """
        # Initialize GPIO pins
        GPIO.setup(self.dir1Pin[LEFT], GPIO.OUT)
        GPIO.setup(self.dir2Pin[LEFT], GPIO.OUT)
        GPIO.setup(self.dir1Pin[RIGHT], GPIO.OUT)
        GPIO.setup(self.dir2Pin[RIGHT], GPIO.OUT)
        GPIO.setup(self.ledPin, GPIO.OUT)

        # Initialize PWM pins: PWM.start(channel, duty, freq=2000, polarity=0)
        PWM.start(self.pwmPin[LEFT], 0)
        PWM.start(self.pwmPin[RIGHT], 0)

        # Set motor speed to 0
        self.setPWM([0, 0])

        # Initialize ADC
        ADC.setup()
        # Background thread that samples the wheel encoders.
        self.encoderRead = encoderRead(self.encoderPin)

        # Set IP addresses
        self.baseIP = baseIP
        self.robotIP = robotIP
        self.robotSocket.bind((self.robotIP, self.port))

        if DEBUG:
            ## Stats of encoder values while moving -- high, low, and all tick state
            self.encHighLowCntMin = 2**5  # Min number of recorded values to start calculating stats
            self.encHighMean = [0.0, 0.0]
            self.encHighVar = [0.0, 0.0]
            self.encHighTotalCnt = [0, 0]

            self.encLowMean = [0.0, 0.0]
            self.encLowVar = [0.0, 0.0]
            self.encLowTotalCnt = [0, 0]

            self.encNonZeroCntMin = 2**5
            self.encNonZeroMean = [0.0, 0.0]
            self.encNonZeroVar = [0.0, 0.0]
            self.encNonZeroCnt = [0, 0]

            # Record variables
            self.encRecSize = 2**13
            self.encRecInd = [0, 0]
            self.encTimeRec = np.zeros((2, self.encRecSize))
            self.encValRec = np.zeros((2, self.encRecSize))
            self.encPWMRec = np.zeros((2, self.encRecSize))
            self.encNNewRec = np.zeros((2, self.encRecSize))
            self.encPosRec = np.zeros((2, self.encRecSize))
            self.encVelRec = np.zeros((2, self.encRecSize))
            self.encTickStateRec = np.zeros((2, self.encRecSize))
            self.encThresholdRec = np.zeros((2, self.encRecSize))
# Getters and Setters
def setPWM(self, pwm):
    """Clamp the requested [left, right] PWM commands to self.pwmLimits
    and drive both motors.

    The sign of each clamped value selects the direction pins; the
    magnitude becomes the duty cycle.  A zero command drives both
    direction pins low and sets a 0% duty cycle.
    """
    # Clamp both channels first so self.pwm always holds the applied values.
    for ch in (LEFT, RIGHT):
        self.pwm[ch] = min(max(pwm[ch], self.pwmLimits[MIN]),
                           self.pwmLimits[MAX])
    # Drive each motor: direction pins encode the sign, duty the magnitude.
    for ch in (LEFT, RIGHT):
        duty = self.pwm[ch]
        if duty > 0:
            pin1Level, pin2Level = GPIO.LOW, GPIO.HIGH
        elif duty < 0:
            pin1Level, pin2Level = GPIO.HIGH, GPIO.LOW
        else:
            pin1Level, pin2Level = GPIO.LOW, GPIO.LOW
        GPIO.output(self.dir1Pin[ch], pin1Level)
        GPIO.output(self.dir2Pin[ch], pin2Level)
        PWM.set_duty_cycle(self.pwmPin[ch], abs(duty))
# Methods
def run(self):
    """Main control loop: call update() and toggle the board LED every
    self.sampleTime seconds until the global RUN_FLAG is cleared.

    Any exception clears RUN_FLAG (under its lock, so the encoder thread
    also stops) before re-raising; cleanup() runs only on a clean exit.
    """
    global RUN_FLAG
    self.encoderRead.start()  # start the background ADC-sampling thread
    try:
        while RUN_FLAG is True:
            self.update()
            # Flash BBB LED
            if self.ledFlag is True:
                self.ledFlag = False
                GPIO.output(self.ledPin, GPIO.HIGH)
            else:
                self.ledFlag = True
                GPIO.output(self.ledPin, GPIO.LOW)
            time.sleep(self.sampleTime)
    except:
        # Signal every other thread to stop before propagating the error.
        RUN_FLAG_LOCK.acquire()
        RUN_FLAG = False
        RUN_FLAG_LOCK.release()
        raise
    self.cleanup()
    return
def cleanup(self):
    """Stop the motors, close the command socket and release GPIO/PWM.

    In DEBUG builds, also dump the encoder record buffers to output.txt.
    """
    sys.stdout.write("Shutting down...")
    self.setPWM([0, 0])
    self.robotSocket.close()
    GPIO.cleanup()
    PWM.cleanup()
    if DEBUG:
        # tictocPrint()
        self.writeBuffersToFile()
    sys.stdout.write("Done\n")
def update(self):
    """One control-loop iteration: sample the next IR sensor, process any
    new encoder samples, then handle a pending UDP command if present."""
    self.readIRValues()
    self.readEncoderValues()
    self.parseCmdBuffer()
def parseCmdBuffer(self):
    """Read pending UDP data (non-blocking) and execute at most one
    '$CMD...*' message.

    Protocol: messages are framed between '$' and '*'.  CMD is 3+ capital
    letters, optionally followed by '=' (set) and/or '?' (query) and
    arguments.  Supported commands: CHECK, PWM, IRVAL, ENVAL, ENVEL,
    RESET, UPDATE, END.  Replies are sent to (self.baseIP, self.port).
    """
    global RUN_FLAG
    try:
        line = self.robotSocket.recv(1024)
    except socket.error as msg:
        # Non-blocking socket: no data available this cycle.
        return
    self.cmdBuffer += line
    bufferPattern = r'\$[^\$\*]*?\*'  # String contained within $ and * symbols with no $ or * symbols in it
    bufferRegex = re.compile(bufferPattern)
    bufferResult = bufferRegex.search(self.cmdBuffer)
    if bufferResult:
        msg = bufferResult.group()
        print msg
        # Buffer is discarded whole: any partial trailing message is dropped.
        self.cmdBuffer = ''
        msgPattern = r'\$(?P<CMD>[A-Z]{3,})(?P<SET>=?)(?P<QUERY>\??)(?(2)(?P<ARGS>.*)).*\*'
        msgRegex = re.compile(msgPattern)
        msgResult = msgRegex.search(msg)
        # NOTE(review): msgResult is None for a framed-but-malformed message,
        # which would raise AttributeError below -- confirm intended.
        if msgResult.group('CMD') == 'CHECK':
            self.robotSocket.sendto('Hello from QuickBot\n',(self.baseIP, self.port))
        elif msgResult.group('CMD') == 'PWM':
            if msgResult.group('QUERY'):
                self.robotSocket.sendto(str(self.pwm) + '\n',(self.baseIP, self.port))
            elif msgResult.group('SET') and msgResult.group('ARGS'):
                args = msgResult.group('ARGS')
                pwmArgPattern = r'(?P<LEFT>[-]?\d+),(?P<RIGHT>[-]?\d+)'
                pwmRegex = re.compile(pwmArgPattern)
                pwmResult = pwmRegex.match(args)
                if pwmResult:
                    pwm = [int(pwmRegex.match(args).group('LEFT')),
                           int(pwmRegex.match(args).group('RIGHT'))]
                    self.setPWM(pwm)
        elif msgResult.group('CMD') == 'IRVAL':
            if msgResult.group('QUERY'):
                reply = '[' + ', '.join(map(str, self.irVal)) + ']'
                print 'Sending: ' + reply
                self.robotSocket.sendto(reply + '\n', (self.baseIP, self.port))
        elif msgResult.group('CMD') == 'ENVAL':
            if msgResult.group('QUERY'):
                reply = '[' + ', '.join(map(str, self.encPos)) + ']'
                print 'Sending: ' + reply
                self.robotSocket.sendto(reply + '\n', (self.baseIP, self.port))
        elif msgResult.group('CMD') == 'ENVEL':
            if msgResult.group('QUERY'):
                reply = '[' + ', '.join(map(str, self.encVel)) + ']'
                print 'Sending: ' + reply
                self.robotSocket.sendto(reply + '\n', (self.baseIP, self.port))
        elif msgResult.group('CMD') == 'RESET':
            # Zero the tick counters (velocities are left untouched).
            self.encPos[LEFT] = 0.0
            self.encPos[RIGHT] = 0.0
            print 'Encoder values reset to [' + ', '.join(map(str, self.encVel)) + ']'
        elif msgResult.group('CMD') == 'UPDATE':
            # Combined set-PWM + report-position/velocity round trip.
            if msgResult.group('SET') and msgResult.group('ARGS'):
                args = msgResult.group('ARGS')
                pwmArgPattern = r'(?P<LEFT>[-]?\d+),(?P<RIGHT>[-]?\d+)'
                pwmRegex = re.compile(pwmArgPattern)
                pwmResult = pwmRegex.match(args)
                if pwmResult:
                    pwm = [int(pwmRegex.match(args).group('LEFT')),
                           int(pwmRegex.match(args).group('RIGHT'))]
                    self.setPWM(pwm)
                reply = '[' + ', '.join(map(str, self.encPos)) + ', ' \
                    + ', '.join(map(str, self.encVel)) + ']'
                print 'Sending: ' + reply
                self.robotSocket.sendto(reply + '\n', (self.baseIP, self.port))
        elif msgResult.group('CMD') == 'END':
            # Remote shutdown request: clear the shared run flag.
            RUN_FLAG_LOCK.acquire()
            RUN_FLAG = False
            RUN_FLAG_LOCK.release()
def readIRValues(self):
    """Sample one IR sensor per call (round-robin) into self.irVal.

    Readings >= 1100 are treated as invalid and replaced with the
    previous reading for that sensor.  Assumes 5 IR sensors -- the
    index wraps mod 5.
    """
    prevVal = self.irVal[self.ithIR]
    ADC_LOCK.acquire()
    self.irVal[self.ithIR] = ADC.read_raw(self.irPin[self.ithIR])
    time.sleep(ADCTIME)  # ADC needs a settling delay between reads
    ADC_LOCK.release()
    if self.irVal[self.ithIR] >= 1100:
        self.irVal[self.ithIR] = prevVal
    self.ithIR = ((self.ithIR+1) % 5)
def readEncoderValues(self):
    """Pull newly sampled encoder values from the global circular buffers
    (filled by the encoderRead thread) into this object's sliding windows,
    then run tick counting once enough samples have accumulated.

    In DEBUG builds, prints the encoder state every 10th call and appends
    each new sample to the record buffers for post-run analysis.
    """
    if DEBUG and (self.encCnt % 10) == 0:
        print "------------------------------------"
        print "EncPos: " + str(self.encPos)
        print "EncVel: " + str(self.encVel)
        print "***"
        print "Threshold: " + str(self.encThreshold)
        print "***"
        print "Zero Cnt: " + str(self.encZeroCnt)
        print "Zero Mean: " + str(self.encZeroMean)
        print "Zero Var: " + str(self.encZeroVar)
        print "***"
        print "NonZero Cnt: " + str(self.encNonZeroCnt)
        print "NonZero Mean: " + str(self.encNonZeroMean)
        print "NonZero Var: " + str(self.encNonZeroVar)
        print "***"
        print "High Cnt: " + str(self.encHighTotalCnt)
        print "High Mean: " + str(self.encHighMean)
        print "High Var: " + str(self.encHighVar)
        print "***"
        print "Low Cnt: " + str(self.encLowTotalCnt)
        print "Low Mean: " + str(self.encLowMean)
        print "Low Var: " + str(self.encLowVar)
    self.encCnt = self.encCnt + 1
    # Fill window
    for side in range(0, 2):
        self.encTime[side] = self.encTimeWin[side][-1]
        self.encBufInd0[side] = self.encBufInd1[side]
        self.encBufInd1[side] = ENC_IND[side]
        ind0 = self.encBufInd0[side]  # starting index
        ind1 = self.encBufInd1[side]  # ending index (this element is not included until the next update)
        if ind0 < ind1:
            # Contiguous run of new samples in the circular buffer.
            N = ind1 - ind0  # number of new elements
            self.encSumN[side] = self.encSumN[side] + N
            self.encTimeWin[side] = np.roll(self.encTimeWin[side], -N)
            self.encTimeWin[side, -N:] = ENC_TIME[side][ind0:ind1]
            self.encValWin[side] = np.roll(self.encValWin[side], -N)
            self.encValWin[side, -N:] = ENC_VAL[side][ind0:ind1]
            self.encPWMWin[side] = np.roll(self.encPWMWin[side], -N)
            self.encPWMWin[side, -N:] = [self.pwm[side]]*N
        elif ind0 > ind1:
            # The circular buffer wrapped around; copy the tail then the head.
            N = ENC_BUF_SIZE - ind0 + ind1  # number of new elements
            self.encSumN[side] = self.encSumN[side] + N
            self.encTimeWin[side] = np.roll(self.encTimeWin[side], -N)
            self.encValWin[side] = np.roll(self.encValWin[side], -N)
            self.encPWMWin[side] = np.roll(self.encPWMWin[side], -N)
            self.encPWMWin[side, -N:] = [self.pwm[side]]*N
            if ind1 == 0:
                self.encTimeWin[side, -N:] = ENC_TIME[side][ind0:]
                self.encValWin[side, -N:] = ENC_VAL[side][ind0:]
            else:
                self.encTimeWin[side, -N:-ind1] = ENC_TIME[side][ind0:]
                self.encValWin[side, -N:-ind1] = ENC_VAL[side][ind0:]
                self.encTimeWin[side, -ind1:] = ENC_TIME[side][0:ind1]
                self.encValWin[side, -ind1:] = ENC_VAL[side][0:ind1]
        if ind0 != ind1:
            # Running average of the span covered by the newest N samples.
            tauNew = self.encTimeWin[side,-1] - self.encTimeWin[side,-N]
            self.encTau[side] = tauNew / self.encCnt + self.encTau[side] * (self.encCnt-1)/self.encCnt  # Running average
            if self.encSumN[side] > self.encWinSize:
                self.countEncoderTicks(side)
            # Fill records
            if DEBUG:
                ind = self.encRecInd[side]
                if ind+N < self.encRecSize:
                    self.encTimeRec[side, ind:ind+N] = self.encTimeWin[side, -N:]
                    self.encValRec[side, ind:ind+N] = self.encValWin[side, -N:]
                    self.encPWMRec[side, ind:ind+N] = self.encPWMWin[side, -N:]
                    self.encNNewRec[side, ind:ind+N] = [N]*N
                    self.encPosRec[side, ind:ind+N] = [self.encPos[side]]*N
                    self.encVelRec[side, ind:ind+N] = [self.encVel[side]]*N
                    self.encTickStateRec[side, ind:ind+N] = self.encTickStateVec[side, -N:]
                    self.encThresholdRec[side, ind:ind+N] = [self.encThreshold[side]]*N
                    self.encRecInd[side] = ind+N
def countEncoderTicks(self, side):
    """Process the new encoder samples for one wheel (*side* is LEFT or
    RIGHT): threshold the raw ADC values into high/low tick states, count
    rising edges into self.encPos, estimate tick velocity into
    self.encVel, and update the running statistics used to adapt the
    threshold.

    Bug fix: the previous-sample check read ``if len(indTuple[0] > 0):``,
    which takes the length of a *boolean array* rather than comparing the
    index array's length -- it only worked because len(a > 0) == len(a).
    Rewritten as ``if len(indTuple[0]) > 0:``.
    """
    # Set variables
    t = self.encTimeWin[side]  # Time vector of data (non-consistent sampling time)
    tPrev = self.encTPrev[side]  # Previous read time
    pwm = self.encPWMWin[side]  # Vector of PWM data
    pwmPrev = pwm[-1]  # Last PWM value that was applied
    tickStatePrev = self.encTickState[side]  # Last state of tick (high (1), low (-1), or unsure (0))
    tickCnt = self.encPos[side]  # Current tick count
    tickVel = self.encVel[side]  # Current tick velocity
    encValWin = self.encValWin[side]  # Encoder raw value buffer window
    threshold = self.encThreshold[side]  # Encoder value threshold
    minPWMThreshold = self.minPWMThreshold[side]  # Minimum PWM to move wheel
    N = np.sum(t > tPrev)  # Number of new updates
    tickStateVec = np.roll(self.encTickStateVec[side], -N)
    # Determine wheel direction
    if tickVel != 0:
        wheelDir = np.sign(tickVel)
    else:
        wheelDir = np.sign(pwmPrev)
    # Count ticks and record tick state
    indTuple = np.where(t == tPrev)  # Index of previous sample in window
    if len(indTuple[0]) > 0:  # fixed: was len(indTuple[0] > 0)
        ind = indTuple[0][0]
        newInds = ind + np.arange(1, N+1)  # Indices of new samples
        for i in newInds:
            if encValWin[i] > threshold:  # High tick state
                tickState = 1
                self.encHighCnt[side] = self.encHighCnt[side] + 1
                self.encLowCnt[side] = 0
                if tickStatePrev == -1:  # Increment tick count on rising edge
                    tickCnt = tickCnt + wheelDir
            else:  # Low tick state
                tickState = -1
                self.encLowCnt[side] = self.encLowCnt[side] + 1
                self.encHighCnt[side] = 0
            tickStatePrev = tickState
            tickStateVec[i] = tickState
        # Measure tick speed
        diffTickStateVec = np.diff(tickStateVec)  # Tick state transition differences
        fallingTimes = t[np.hstack((False,diffTickStateVec == -2))]  # Times when tick state goes from high to low
        risingTimes = t[np.hstack((False,diffTickStateVec == 2))]  # Times when tick state goes from low to high
        fallingPeriods = np.diff(fallingTimes)  # Period times between falling edges
        risingPeriods = np.diff(risingTimes)  # Period times between rising edges
        tickPeriods = np.hstack((fallingPeriods, risingPeriods))  # All period times
        if len(tickPeriods) == 0:
            # NOTE(review): pwm may be negative, so a reversing wheel always
            # satisfies pwm < minPWMThreshold -- confirm whether abs() was intended.
            if all(pwm[newInds] < minPWMThreshold):  # If all inputs are less than min set velocity to 0
                tickVel = 0
        else:
            tickVel = wheelDir * 1/np.mean(tickPeriods)  # Average signed tick frequency
        # Estimate new mean values
        newEncRaw = encValWin[newInds]
        if pwmPrev == 0 and tickVel == 0:
            # Wheel idle: fold the new samples into the zero-state stats.
            x = newEncRaw
            l = self.encZeroCnt[side]
            mu = self.encZeroMean[side]
            sigma2 = self.encZeroVar[side]
            (muPlus, sigma2Plus, n) = recursiveMeanVar(x, l, mu, sigma2)
            self.encZeroMean[side] = muPlus
            self.encZeroVar[side] = sigma2Plus
            self.encZeroCnt[side] = n
        elif tickVel != 0:
            # Wheel moving: DEBUG-only moving/high/low statistics.
            if DEBUG:
                x = newEncRaw
                l = self.encNonZeroCnt[side]
                mu = self.encNonZeroMean[side]
                sigma2 = self.encNonZeroVar[side]
                (muPlus, sigma2Plus, n) = recursiveMeanVar(x, l, mu, sigma2)
                self.encNonZeroMean[side] = muPlus
                self.encNonZeroVar[side] = sigma2Plus
                self.encNonZeroCnt[side] = n
                NHigh = np.sum(tickStateVec[newInds] == 1)
                if NHigh != 0:
                    indHighTuple = np.where(tickStateVec[newInds] == 1)
                    x = newEncRaw[indHighTuple[0]]
                    l = self.encHighTotalCnt[side]
                    mu = self.encHighMean[side]
                    sigma2 = self.encHighVar[side]
                    (muPlus, sigma2Plus, n) = recursiveMeanVar(x, l, mu, sigma2)
                    self.encHighMean[side] = muPlus
                    self.encHighVar[side] = sigma2Plus
                    self.encHighTotalCnt[side] = n
                NLow = np.sum(tickStateVec[newInds] == -1)
                if NLow != 0:
                    indLowTuple = np.where(tickStateVec[newInds] == -1)
                    x = newEncRaw[indLowTuple[0]]
                    l = self.encLowTotalCnt[side]
                    mu = self.encLowMean[side]
                    sigma2 = self.encLowVar[side]
                    (muPlus, sigma2Plus, n) = recursiveMeanVar(x, l, mu, sigma2)
                    self.encLowMean[side] = muPlus
                    self.encLowVar[side] = sigma2Plus
                    self.encLowTotalCnt[side] = n
        # Set threshold value: 3 sigma below the idle mean.
        if self.encZeroCnt[side] > self.encZeroCntMin:
            self.encThreshold[side] = self.encZeroMean[side] - 3*np.sqrt(self.encZeroVar[side])
        # elif self.encNonZeroCnt[side] > self.encNonZeroCntMin:
        #     self.encThreshold[side] = self.encNonZeroMean[side]
        # elif self.encHighTotalCnt[side] > self.encHighLowCntMin and self.encLowTotalCnt > self.encHighLowCntMin:
        #     mu1 = self.encHighMean[side]
        #     sigma1 = self.encHighVar[side]
        #     mu2 = self.encLowMean[side]
        #     sigma2 = self.encLowVar[side]
        #     alpha = (sigma1 * np.log(sigma1)) / (sigma2 * np.log(sigma1))
        #     A = (1 - alpha)
        #     B = -2 * (mu1 - alpha*mu2)
        #     C = mu1**2 - alpha * mu2**2
        #     x1 = (-B + np.sqrt(B**2 - 4*A*C)) / (2*A)
        #     x2 = (-B - np.sqrt(B**2 - 4*A*C)) / (2*A)
        #     if x1 < mu1 and x1 > mu2:
        #         self.encThreshold[side] = x1
        #     else:
        #         self.encThreshold[side] = x2
    # Update variables
    self.encPos[side] = tickCnt  # New tick count
    self.encVel[side] = tickVel  # New tick velocity
    self.encTickStateVec[side] = tickStateVec  # New tick state vector
    self.encTPrev[side] = t[-1]  # New latest update time
def writeBuffersToFile(self):
    """Dump the DEBUG encoder record buffers for both wheels to
    'output.txt' as a tab-separated, column-aligned table (one row per
    recorded sample)."""
    matrix = map(list, zip(*[self.encTimeRec[LEFT], self.encValRec[LEFT], self.encPWMRec[LEFT], self.encNNewRec[LEFT], \
        self.encTickStateRec[LEFT], self.encPosRec[LEFT], self.encVelRec[LEFT], self.encThresholdRec[LEFT], \
        self.encTimeRec[RIGHT], self.encValRec[RIGHT], self.encPWMRec[RIGHT], self.encNNewRec[RIGHT], \
        self.encTickStateRec[RIGHT], self.encPosRec[RIGHT], self.encVelRec[RIGHT], self.encThresholdRec[RIGHT]]))
    s = [[str(e) for e in row] for row in matrix]
    lens = [len(max(col, key=len)) for col in zip(*s)]  # widest cell per column
    fmt = '\t'.join('{{:{}}}'.format(x) for x in lens)
    table = [fmt.format(*row) for row in s]
    f = open('output.txt', 'w')
    f.write('\n'.join(table))
    f.close()
    print "Wrote buffer to output.txt"
class encoderRead(threading.Thread):
    """The encoderRead Class

    Background thread that continuously samples both wheel-encoder ADC
    pins into the global circular buffers ENC_TIME / ENC_VAL, advancing
    the shared write index ENC_IND, until the global RUN_FLAG clears.
    """
    # === Class Properties ===
    # Parameters
    # === Class Methods ===
    # Constructor
    def __init__(self,encPin=('P9_39', 'P9_37')):
        # Initialize thread
        threading.Thread.__init__(self)
        # Set properties
        self.encPin = encPin  # ADC pins for the (left, right) encoders

    # Methods
    def run(self):
        global RUN_FLAG
        self.t0 = time.time()  # timestamps are recorded relative to thread start
        while RUN_FLAG:
            global ENC_IND
            global ENC_TIME
            global ENC_VAL
            for side in range(0, 2):
                ENC_TIME[side][ENC_IND[side]] = time.time() - self.t0
                ADC_LOCK.acquire()
                ENC_VAL[side][ENC_IND[side]] = ADC.read_raw(self.encPin[side])
                time.sleep(ADCTIME)  # required ADC settling delay
                ADC_LOCK.release()
                ENC_IND[side] = (ENC_IND[side] + 1) % ENC_BUF_SIZE
def recursiveMeanVar(x, l, mu, sigma2):
    """
    Recursively update a running mean and variance.

    Given the current mean ``mu`` and variance ``sigma2`` accumulated from
    ``l`` previous samples, fold in the new samples ``x``.

    Returns the tuple ``(muPlus, sigma2Plus, n)``: the updated mean, the
    updated variance and the new total sample count ``n = l + len(x)``.

    Bug fix: the original computed ``l / n * mu + m / n * np.mean(x)`` and
    ``1/(n-1) * ...``.  Under Python 2 integer division ``l / n``, ``m / n``
    and ``1/(n-1)`` truncate to 0 whenever the counts are ints, zeroing the
    updated statistics.  All divisions are now performed in floating point.
    """
    m = len(x)
    n = l + m
    muPlus = (l * mu + m * np.mean(x)) / float(n)
    if n > 1:
        sigma2Plus = ((l-1)*sigma2 + (m-1)*np.var(x)
                      + l*(mu - muPlus)**2
                      + m*(np.mean(x) - muPlus)**2) / float(n-1)
    else:
        sigma2Plus = 0
    return (muPlus, sigma2Plus, n)
def operatingPoint(uStar, uStarThreshold):
    """
    Map a PWM input to the steady-state wheel tick velocity.

    uStar          -- PWM input value (signed).
    uStarThreshold -- dead-band: inputs with |uStar| <= threshold give 0.
    returns: omegaStar, the steady-state tick velocity, signed like uStar.

    The linear model omega = beta[0]*|u| + beta[1] was fit offline in
    Matlab from timed 1 m ground runs (r = 0.0325 m wheel, least squares
    over PWM values 70-90); the commented air-test fit was
    beta = [0.0425, -0.9504].
    """
    beta = [0.0606, -3.1475]  # Ground Test Results
    magnitude = np.abs(uStar)
    if magnitude <= uStarThreshold:
        return 0.0
    speed = beta[0]*magnitude + beta[1]
    return speed if uStar > 0 else -1.0*speed
def kalman(x, P, Phi, H, W, V, z):
    """
    One scalar Kalman-filter step: predict, then correct with measurement z.

    x:   Estimate of state at time t-1.
    P:   Estimate of error covariance at time t-1.
    Phi: Discrete-time state transition coefficient at time t-1.
    H:   Observation model coefficient at time t.
    W:   Process noise covariance at time t-1.
    V:   Measurement noise covariance at time t.
    z:   Measurement at time t.
    returns: (x, P) -- the updated state estimate and error covariance.

    When the gain is undefined (NaN, e.g. V is NaN), the predicted
    covariance is kept unchanged.
    """
    # --- Predict ---
    xPred = Phi*x             # a-priori state estimate
    PPred = Phi*P*Phi + W     # a-priori error covariance
    # --- Correct ---
    S = H*PPred*H + V         # innovation (residual) variance
    SInv = 1/S
    K = PPred*H*SInv          # Kalman gain
    residual = z - H*xPred    # measurement residual
    xNew = xPred + K*residual # a-posteriori state estimate
    if np.isnan(K*V):
        PNew = PPred
    else:
        # Joseph form keeps the covariance update numerically symmetric.
        PNew = (1 - K*H)*PPred*(1 - K*H) + K*V*K
    return (xNew, PNew)
def tic():
    """Start the global tic/toc stopwatch (pairs with toc())."""
    global TICTOC_START
    TICTOC_START = time.time()
def toc(tictocName = 'toc', printFlag = True):
    """Read the stopwatch started by tic(), update the global running
    count/mean/max/min timing statistics, and optionally print the
    elapsed time.

    tictocName -- label used when printing
    printFlag  -- print the elapsed time when True
    """
    global TICTOC_START
    global TICTOC_COUNT
    global TICTOC_MEAN
    global TICTOC_MAX
    global TICTOC_MIN
    tictocTime = time.time() - TICTOC_START
    TICTOC_COUNT = TICTOC_COUNT + 1
    # Incremental running average keeps memory use constant.
    TICTOC_MEAN = tictocTime / TICTOC_COUNT + TICTOC_MEAN * (TICTOC_COUNT-1) / TICTOC_COUNT
    TICTOC_MAX = max(TICTOC_MAX,tictocTime)
    TICTOC_MIN = min(TICTOC_MIN,tictocTime)
    if printFlag:
        print tictocName + " time: " + str(tictocTime)
def tictocPrint():
    """Print the accumulated tic/toc timing statistics (count, mean,
    max, min)."""
    global TICTOC_COUNT
    global TICTOC_MEAN
    global TICTOC_MAX
    global TICTOC_MIN
    print "Tic Toc Stats:"
    print "Count = " + str(TICTOC_COUNT)
    print "Mean = " + str(TICTOC_MEAN)
    print "Max = " + str(TICTOC_MAX)
    print "Min = " + str(TICTOC_MIN)
| chenxinzzu/quickbot_bbb | QuickBot.py | Python | bsd-3-clause | 28,390 |
from requests.auth import HTTPBasicAuth
def apply_updates(doc, update_dict):
    """Copy items from update_dict onto doc as attributes.

    Only attributes whose current value differs are written (a missing
    attribute counts as None).  Returns True when at least one attribute
    was changed, i.e. whether the document needs saving.
    """
    changed = False
    for attr, new_value in update_dict.items():
        if getattr(doc, attr, None) != new_value:
            setattr(doc, attr, new_value)
            changed = True
    return changed
class EndpointMixin(object):
    """Mixin for HTTP endpoint clients.

    Expects the concrete class to accept (url, username, password) in its
    constructor and to expose ``username`` and ``password`` attributes.
    """
    @classmethod
    def from_config(cls, config):
        # config is expected to carry url/username/password attributes
        # (e.g. a stored endpoint settings document) -- confirm at callers.
        return cls(config.url, config.username, config.password)

    def _auth(self):
        # requests-compatible basic-auth object from the stored credentials.
        return HTTPBasicAuth(self.username, self.password)

    def _urlcombine(self, base, target):
        # Plain concatenation; no slash normalization is performed here.
        return '{base}{target}'.format(base=base, target=target)
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import time
import gzip
import tempfile
import ConfigParser
from cStringIO import StringIO
from datetime import datetime
from ellen.repo import Jagare
from ellen.utils import JagareError
from vilya.libs.permdir import get_tmpdir
from vilya.models.user import User
from vilya.models.ngit.commit import Commit
from vilya.models.ngit.diff import Diff
from vilya.models.ngit.blob import Blob
from vilya.models.ngit.submodule import Submodule
from vilya.models.ngit.tree import Tree
from vilya.models.ngit.blame import Blame
# Branch refs updated within the last 24 hours count as "recently active".
LATEST_UPDATE_REF_THRESHOLD = 60 * 60 * 24
MAX_DIFF_PATCHES = 2000  # NOTE(review): not referenced in this file -- confirm external use
REFS_HEADS_PREFIX_LENGTH = len('refs/heads/')  # used to strip 'refs/heads/' from ref names
class RepoMergeError(Exception):
    """Raised when merging a pull request branch fails (non-zero return
    code from the underlying git merge)."""
    pass
class RepoPushError(Exception):
    """Raised when pushing a merged pull request back to origin fails."""
    pass
class Repo(object):
    """Thin wrapper around an Ellen ``Jagare`` git repository.

    Provides read-mostly helpers (commits, diffs, blobs, trees,
    submodules) shared by the Project/Gist/Pull repo classes below.
    """
    provided_features = []

    def __init__(self, path):
        self.type = "repo"
        self.path = path
        self.repo = Jagare(self.path)

    def provide(self, name):
        '''Check whether this repo provides the named feature (interface).'''
        return name in self.provided_features

    @property
    def empty(self):
        return self.is_empty

    @property
    def is_empty(self):
        return self.repo.empty

    @property
    def default_branch(self):
        # Strip 'refs/heads/' from HEAD's symbolic name; empty string when
        # the repository has no HEAD (e.g. an empty repo).
        branch = ''
        head = self.repo.head
        if head:
            branch = head.name[REFS_HEADS_PREFIX_LENGTH:]
        return branch

    def update_default_branch(self, name):
        # Silently ignore names that are not existing branches.
        branches = self.repo.branches
        if name not in branches:
            return None
        self.repo.update_head(name)

    def clone(self, path, bare=None, branch=None,
              mirror=None, env=None, shared=None):
        self.repo.clone(path,
                        bare=bare, branch=branch,
                        mirror=mirror, env=env)
        # shared=shared) why?

    def archive(self, name, ref='master', ext='tar.gz'):
        """Return a tar archive of *ref*; gzip it in memory unless
        ext == 'tar'."""
        content = self.repo.archive(name, ref=ref)
        if ext == 'tar':
            return content
        outbuffer = StringIO()
        zipfile = gzip.GzipFile(mode='wb', compresslevel=6, fileobj=outbuffer)
        zipfile.write(content)
        zipfile.close()
        out = outbuffer.getvalue()
        return out

    def get_submodule(self, ref, path):
        """Return a Submodule for *path* at *ref*, or None when it is not
        listed in the repository's .gitmodules."""
        path = path.strip()
        gitmodules = self.repo.show("%s:%s" % (ref, '.gitmodules'))
        if not gitmodules:
            return None
        # Strip indentation so RawConfigParser accepts the INI-ish file.
        submodules_lines = gitmodules["data"].split('\n')
        modules_str = '\n'.join([line.strip() for line in submodules_lines])
        config = ConfigParser.RawConfigParser()
        config.readfp(StringIO(modules_str))
        for section in config.sections():
            if config.has_option(section, 'path') and config.get(
                    section, 'path') == path:
                url = config.get(section, 'url')
                return Submodule(url, path)
        return None

    def get_file(self, ref, path):
        """Return a Blob for *path* at *ref*, or None if missing or not a
        blob."""
        blob = self.repo.show("%s:%s" % (ref, path))
        if not blob:
            return None
        if blob['type'] != 'blob':
            return None
        # TODO: validate blob
        return Blob(self, blob)

    def get_file_by_lines(self, ref, path):
        """Return the file's text split into lines; None for binary or
        missing files, [] for an empty file."""
        blob = self.get_file(ref, path)
        # TODO: blob.size < xxx
        if not blob or blob.binary:
            return None
        if not blob.data:
            return []
        src = blob.data
        return src.splitlines()

    def get_file_n_lines(self, ref, path):
        # Line count; 0 for empty, binary or missing files.
        lines = self.get_file_by_lines(ref, path)
        if lines:
            return len(lines)
        return 0

    def get_commits(self, to_ref, from_ref=None, path=None, skip=0,
                    max_count=0, author=None, query=None, first_parent=None,
                    since=0, no_merges=None):
        """Wrap ``git rev-list`` and return a list of Commit objects."""
        commits = self.repo.rev_list(to_ref=to_ref, from_ref=from_ref,
                                     path=path, skip=skip,
                                     max_count=max_count, author=author,
                                     query=query, first_parent=first_parent,
                                     since=since, no_merges=no_merges)
        return [Commit(self, commit) for commit in commits]

    def get_raw_diff(self, ref, from_ref=None, paths=None, **kw):
        ''' get Jagare formated diff dict '''
        try:
            diff = self.repo.diff(ref, from_ref=from_ref, paths=paths, **kw)
        except KeyError:
            return None
        return diff

    def get_diff(self, ref=None, from_ref=None,
                 linecomments=[], raw_diff=None, paths=None, **kw):
        ''' get ngit wrapped diff object '''
        # Either reuse a pre-computed raw diff or compute one from ref.
        _raw_diff = None
        if raw_diff:
            _raw_diff = raw_diff
        elif ref:
            _raw_diff = self.get_raw_diff(ref, from_ref=from_ref,
                                          paths=paths, **kw)
        if _raw_diff:
            return Diff(self, _raw_diff, linecomments)
        else:
            return None

    def get_diff_length(self, ref, from_ref=None, **kw):
        # Number of changed files (patches) in the diff; 0 when no diff.
        _raw_diff = self.get_raw_diff(ref, from_ref=from_ref, **kw)
        return len(_raw_diff['patches']) if _raw_diff else 0

    def get_last_commit(self, ref, path=None, no_merges=False):
        """Most recent commit on *ref*, optionally restricted to commits
        touching *path*."""
        if not path:
            return self.get_commit(ref)
        commit = self.repo.rev_list(ref, path=path, max_count=1,
                                    no_merges=no_merges)
        if not commit:
            return None
        commit = commit[0]
        commit = Commit(self, commit)
        return commit

    def get_previours_commit(self, ref, path):
        """previours commit that touch the specified path"""
        commits = self.repo.rev_list(ref, path=path, max_count=2,
                                     no_merges=True)
        for commit in commits:
            if commit['sha'] != self.repo.sha(ref):
                return Commit(self, commit)
        return None

    def get_commit(self, ref):
        # Resolve any ref-ish (branch, tag, sha prefix) to a Commit.
        sha = self.repo.resolve_commit(ref)
        if not sha:
            return None
        commit = self.repo.show(sha)
        if not commit:
            return None
        # TODO: validate commit
        return Commit(self, commit)

    def delete_branch(self, name):
        self.repo.delete_branch(name)

    def get_path_by_ref(self, ref):
        ''' get blob or tree '''
        path = self.repo.show(ref)
        if not path:
            return None
        if path['type'] == 'tree':
            path = Tree(self, path['entries'])
        elif path['type'] == 'blob':
            path = Blob(self, path)
        else:
            path = None
        return path

    def get_path(self, ref, path):
        """Return the Tree or Blob at *path* under *ref*, or None."""
        _item = self.repo.show("%s:%s" % (ref, path))
        if not _item:
            return None
        if _item['type'] == 'tree':
            item = Tree(self, _item['entries'])
        elif _item['type'] == 'blob':
            item = Blob(self, _item)
        else:
            item = None
        return item

    def get_last_update_timestamp(self):
        # Author timestamp of the last HEAD commit; 0 for an empty repo.
        commit = self.get_last_commit('HEAD')
        if not commit:
            return 0
        return int(commit.author_timestamp)
class ProjectRepo(Repo):
    """Repo view of a code project: adds tree/blame/patch helpers, remote
    management, and source-object indexing on top of Repo."""
    provided_features = ['project', 'fulltext', 'moreline',
                         'side_by_side', 'patch_actions']

    def __init__(self, project, pull=None):
        self.type = "project"
        self.pull = pull
        self.project = project
        self.project_name = project.name
        self.name = project.name
        self.path = project.repo_path
        self.repo = Jagare(self.path)

    # TODO: url
    @property
    def api_url(self):
        return ''

    @property
    def context_url(self):
        return 'moreline'

    @property
    def fulltext_url(self):
        return 'fulltext'

    @property
    def branches(self):
        return self.repo.branches

    @property
    def tags(self):
        return self.repo.tags

    def get_tree(self, ref, path=None, recursive=False, with_commit=False,
                 recursive_with_tree_node=False):
        """Return the Tree listing at *path* under *ref*, or None."""
        tree = self.repo.ls_tree(
            ref, path=path, recursive=recursive,
            with_commit=with_commit)
        # recursive_with_tree_node=recursive_with_tree_node)
        if not tree:
            return None
        return Tree(self, tree)

    def get_file_by_ref(self, ref):
        # Raw blob content for a ref-ish like 'sha:path'; None if absent.
        blob = self.repo.show(ref)
        if not blob:
            return None
        return blob['data']

    def get_contexts(self, ref, path, line_start, line_end):
        """Return the file lines in [line_start, line_end) using 1-based,
        clamped line numbers; None when the file is missing/binary."""
        def fix_line_index(index, max_i, min_i=0):
            # Convert a 1-based line number to a clamped 0-based index.
            i = index - 1
            i = max(i, min_i)
            i = min(i, max_i)
            return i
        lines = self.get_file_by_lines(ref, path)
        if not lines:
            return None
        n = len(lines)
        start = fix_line_index(line_start, n)
        end = fix_line_index(line_end, n)
        return lines[start:end]

    def blame_file(self, *w, **kw):
        # Wrap git blame; None when blame produced nothing.
        blame = self.repo.blame(*w, **kw)
        if not blame:
            return None
        return Blame(self, blame)

    def get_renamed_files(self, ref, path=None):
        return self.repo.detect_renamed(ref)

    def commit_file(self, *w, **kw):
        return self.repo.commit_file(*w, **kw)

    def get_temp_branch(self):
        # Generate a (hopefully) unique scratch branch name.
        # NOTE(review): commit.sha[10] picks a single character --
        # commit.sha[:10] was probably intended; confirm before changing.
        commit = self.get_commit('HEAD')
        return 'patch_tmp' + time.strftime('%Y%m%d%H%M%S-') + commit.sha[10]

    def get_patch_file(self, ref, from_ref=None):
        # git format-patch output between the two refs.
        return self.repo.format_patch(ref, from_ref)

    def get_diff_file(self, ref, from_ref=None):
        # Unified diff text between the refs; '' when there is no diff.
        _raw_diff = self.get_raw_diff(ref, from_ref)
        if not _raw_diff:
            return ''
        patch = _raw_diff['diff'].patch
        if not patch:
            return ''
        return patch

    @classmethod
    def init(cls, path, work_path=None, bare=True):
        return Jagare.init(path, work_path=work_path, bare=bare)

    @classmethod
    def mirror(cls, url, path, env=None):
        Jagare.mirror(url, path, env=env)

    def add_remote(self, name, url):
        return self.repo.add_remote(name, url)

    def add_remote_hub(self, name, url):
        # Remotes for hub forks are namespaced under 'hub/'.
        self.add_remote('hub/%s' % name, url)

    def update_ref(self, ref, value):
        result = None
        try:
            result = self.repo.update_ref(ref, value)
        except JagareError:
            # FIXME: logging
            # FIXME: more meaningful error (JagareError)
            pass
        return result

    def sha(self, rev='HEAD'):
        return self.repo.sha(rev)

    def merge_base(self, to_sha, from_sha):
        return self.repo.merge_base(to_sha, from_sha)

    @property
    def remotes(self):
        return self.repo.remotes

    def fetch_all(self):
        self.repo.fetch_all()

    def fetch(self, name):
        self.repo.fetch(name)

    def fetch_(self, *w, **kw):
        return self.repo.fetch_(*w, **kw)

    def get_latest_update_branches(self):
        """Return [(commit_time, branch_name)] for branches whose tip
        commit is younger than LATEST_UPDATE_REF_THRESHOLD, newest first."""
        refs = self.repo.listall_references()
        refs = filter(lambda r: r.startswith('refs/heads'), refs)
        current_time = time.time()
        latest_branches = []
        for ref in refs:
            commit_time = self.repo.lookup_reference(ref).get_object().commit_time  # noqa
            delta = current_time - commit_time
            if delta < LATEST_UPDATE_REF_THRESHOLD:
                latest_branches.append((commit_time, ref.split('/')[-1]))
        return sorted(latest_branches, key=lambda r: r[0], reverse=True)

    def get_all_src_objects(self):
        """Walk every commit reachable from any branch and return a dict
        {blob_id: {path, commit, size, commit_time, committer}} of every
        distinct source blob, attributed to the newest commit that
        introduced it (trees already seen are pruned)."""
        refs = self.repo.listall_references()
        refs = filter(lambda r: r.startswith('refs/heads'), refs)
        # Collect all commits across branches, deduplicated by sha.
        commits_dict = {}
        for ref in refs:
            commits = self.repo.rev_list(ref)
            commits = {c['sha']: c for c in commits}
            commits_dict.update(commits)
        commits = sorted(commits_dict.values(),
                         key=lambda x: x['committer']['time'],
                         reverse=True)
        pruned_set = set()
        objects_dict = {}
        # Depth-first walk over (commit, tree, path) starting from each root tree.
        treenode_list = [(commit['sha'], commit['tree'], '')
                         for commit in commits]
        while treenode_list:
            commit_id, tree_id, path = treenode_list.pop()
            if tree_id in pruned_set:
                continue
            pruned_set.add(tree_id)
            objects = self.repo.ls_tree(tree_id, size=True)
            for obj in objects:
                obj_id = obj['id']
                obj_path = '%s/%s' % (path, obj['name'])
                if obj['type'] == 'tree':
                    treenode_list.append((commit_id, obj_id, obj_path))
                elif obj['type'] == 'blob':
                    if obj_id not in objects_dict:
                        commit = commits_dict[commit_id]
                        objects_dict[obj_id] = dict(
                            path=obj_path[1:],
                            commit=commit_id,
                            size=obj['size'],
                            commit_time=datetime.fromtimestamp(
                                commit['committer']['time']),
                            committer=commit['committer']['name']
                        )
        return objects_dict
class GistRepo(Repo):
    """Repo view of a gist: a flat collection of files committed in one go
    to the 'master' branch of a bare repository."""
    provided_features = []

    # TODO: move to utils
    # Auto-generated name prefix for unnamed gist files.
    PREFIX = 'gistfile'

    def __init__(self, gist):
        self.type = "gist"
        self.gist = gist
        self.name = gist.name
        self.path = gist.repo_path
        self.repo = Jagare(gist.repo_path)

    @classmethod
    def init(cls, gist):
        Jagare.init(gist.repo_path, bare=True)

    def clone(self, gist):
        # Clones this repo into the target gist's bare repository path.
        super(GistRepo, self).clone(gist.repo_path, bare=True)

    def get_files(self):
        """Return [[sha, name], ...] for the files at HEAD; [] when the
        repository is empty."""
        files = []
        if self.empty:
            return files
        tree = self.repo.ls_tree('HEAD')
        for f in tree:
            files.append([f['sha'], f['name']])
        return files

    # TODO: move to utils
    def check_filename(self, fn):
        # Backslash-escape shell metacharacters and drop path separators.
        for c in (' ', '<', '>', '|', ';', ':', '&', '`', "'"):
            fn = fn.replace(c, '\%s' % c)
        fn = fn.replace('/', '')
        return fn

    def commit_all_files(self, names, contents, oids, author):
        """Commit the given (names, contents) to master as the gist's new
        full file set; existing files not present in *names* are removed.
        Unnamed files get a generated '<PREFIX><index>' name."""
        data = []
        for i, (name, content, oid) in enumerate(zip(names, contents, oids),
                                                 start=1):
            if not name and not content:
                continue
            if not name:
                name = self.PREFIX + str(i)
            name = self.check_filename(name)
            data.append([name, content, 'insert'])
        files = self.get_files()
        for sha, name in files:
            if name in names:
                continue
            data.append([name, '', 'remove'])
        self.repo.commit_file(branch='master',
                              parent='master',
                              author_name=author.name,
                              author_email=author.email,
                              message=' ',
                              reflog=' ',
                              data=data)

    def is_commit(self, ref):
        # Truthy (True) when ref resolves to an object; None otherwise.
        commit = self.repo.show(ref)
        if commit:
            return True
class PullRepo(ProjectRepo):
provided_features = ProjectRepo.provided_features + ['show_inline_toggle']
def __init__(self, pull):
# TODO: When to_proj or from_proj not exist?
# TODO: catch exception if from_proj was deleted
super(PullRepo, self).__init__(pull.to_proj, pull)
self.type = "pull"
self.from_repo = None
try:
if pull.from_proj:
self.from_repo = ProjectRepo(pull.from_proj, pull)
except JagareError:
self.from_repo = None
self._temp_dir = None
# no use
#self.merge_repo = None
#self.test_repo = None
# TODO: 统一 url
@property
def api_url(self):
project_name = self.project.name
ticket_id = self.pull.ticket_id
# FIXME: pull/new,没有ticket
if not ticket_id:
return '/api/%s/diff/' % project_name
url = "/api/%s/pulls/%s/" % (project_name, ticket_id)
return url
@property
def context_url(self):
project_name = self.project.name
ticket_id = self.pull.ticket_id
if not ticket_id:
return '/api/%s/diff/moreline' % project_name
url = "/api/%s/pulls/%s/moreline" % (project_name, ticket_id)
return url
@property
def fulltext_url(self):
project_name = self.project.name
ticket_id = self.pull.ticket_id
# FIXME: pull/new,没有ticket
if not ticket_id:
return '/api/%s/diff/fulltext' % project_name
url = "/api/%s/pulls/%s/fulltext" % (project_name, ticket_id)
return url
@property
def temp_dir(self):
if self._temp_dir:
return self._temp_dir
# TODO: move to Jagare
pulltmp = os.path.join(get_tmpdir(), "pulltmp")
if not os.path.exists(pulltmp):
os.makedirs(pulltmp)
worktree = tempfile.mkdtemp(dir=pulltmp)
self._temp_dir = worktree
return worktree
def init(self):
import os
path = os.path.join(self.temp_dir, '.git')
work_path = self.temp_dir
return Jagare.init(path, work_path=work_path, bare=False)
@property
def from_local(self):
return self.pull.to_proj == self.pull.from_proj
@property
def from_sha(self):
sha = None
ticket_id = self.pull.ticket_id
if ticket_id:
from vilya.models.consts import PULL_REF_H
# FIXME: catch more exceptions
try:
sha = self.sha(PULL_REF_H % ticket_id)
except:
# 旧有的被close但又未merge的pr可能出错
pass
if not sha and self.from_repo:
sha = self.from_repo.sha(self.pull.from_ref)
return sha
@property
def to_sha(self):
sha = None
ticket_id = self.pull.ticket_id
if ticket_id:
from vilya.models.consts import PULL_REF_M
# FIXME: catch more exceptions
try:
sha = self.sha(PULL_REF_M % ticket_id)
except:
# 旧有的被close但又未merge的pr可能出错
pass
if not sha:
sha = self.sha(self.pull.to_ref)
return sha
    def merge(self, merger, message_header, message_body):
        """Merge the pull into its target branch and push the result.

        ``merger`` may be a username string or a User instance; the merge
        commit is attributed to that user.  Returns the merge commit sha on
        success, '' when the pull is already up to date, or None when the
        merge or push failed.  The scratch work tree is always removed.
        """
        import shutil
        from vilya.models.git import make_git_env
        # TODO: Use User only
        if merger and isinstance(merger, basestring):
            merger = User(merger)
        if not isinstance(merger, User):
            raise Exception("User is needed to merge pull")
        env = make_git_env(merger)
        worktree = self.temp_dir
        merge_commit_sha = None
        try:
            if self.pull.is_up_to_date():
                return ''
            from_sha = self.from_sha
            to_sha = self.to_sha
            repo = self.pull.pull_clone(worktree)
            ref = self.pull.pull_fetch(repo)
            # no_ff so a merge commit is always created.
            result = repo.merge(ref, message_header, message_body, no_ff=True,
                                _env=env)
            errcode = result['returncode']
            if errcode != 0:
                raise RepoMergeError()
            result = repo.push('origin', self.pull.to_ref,
                               _env=dict(CODE_REMOTE_USER=merger.name))
            errcode = result['returncode']
            if errcode != 0:
                raise RepoPushError
            merge_commit_sha = self.sha(self.pull.to_ref)
        except RepoMergeError:
            # FIXME: error msg
            pass
        except RepoPushError:
            # FIXME: error msg
            pass
        else:
            # Record the merge on the ticket only when everything succeeded.
            if merge_commit_sha and self.pull.ticket:
                self.pull._save_merged(merger.name,
                                       from_sha,
                                       to_sha,
                                       merge_commit_sha)
        finally:
            shutil.rmtree(worktree)
        return merge_commit_sha
    def can_merge(self):
        """Trial-merge the pull in a scratch clone.

        Returns True only when the trial merge explicitly reports no
        conflicts; any failure (including the KeyError fallback) counts
        as not mergeable.  The scratch work tree is always removed.
        """
        import os
        import shutil
        worktree = self.temp_dir
        try:
            self.clone(worktree, branch=self.pull.to_ref,
                       bare=False, shared=True)
            repo = ProjectRepo.init(
                os.path.join(worktree, '.git'), worktree, bare=False)
            ref = self.pull.pull_fetch(repo)
            result = repo.merge_commits(self.pull.to_ref, ref)
        except KeyError:  # dummy result
            result = {}
        finally:
            shutil.rmtree(worktree)
        # Only an explicit has_conflicts == False means "clean merge".
        if result.get('has_conflicts', None) is False:
            return True
        else:
            return False
def can_fastforward(self):
if not self.get_commits(self.to_sha, self.from_sha):
return True
def backport_project_name(name):
    """Map a project name to its backport form: '~' becomes '_'."""
    return '_'.join(name.split('~'))
| douban/code | vilya/models/ngit/repo.py | Python | bsd-3-clause | 21,073 |
# Test fixture: two no-op functions flagged via the ``is_setup_function``
# attribute, plus two plain module globals.  Presumably consumed by a
# fixture loader that scans modules for flagged setup functions — confirm
# against the consuming test code.
def file_1_function_a():
    pass


a_number = 7
a_string = 'bleh'


def file_1_function_b():
    pass


# Flag both functions as setup hooks.
file_1_function_a.is_setup_function = True
file_1_function_b.is_setup_function = True
| Pysellus/pysellus | spec/fixtures/multiple_files/file_1.py | Python | mit | 190 |
# coding: utf-8
# Maria Clara Dantas, UFCG
# Codeforces: "Xenia and Ringroad".
# Houses 1..n lie clockwise on a ring road.  Starting at house 1 and moving
# only clockwise (one time unit per house), accumulate the total travel time
# to visit each task's house in the given order.
n, m = map(int, raw_input().split())
atividades_casas = map(int, raw_input().split())
tempo = 0
posicao = 1
for i in range(len(atividades_casas)):
    if posicao < atividades_casas[i]:
        # Target lies ahead: walk straight to it.
        tempo += (atividades_casas[i] - posicao)
    elif posicao > atividades_casas[i]:
        # Target lies behind: wrap all the way around past house n.
        tempo += (n - posicao + atividades_casas[i])
    posicao = atividades_casas[i]
print tempo
| clarammdantas/Online-Jugde-Problems | online_judge_solutions/first-ones/xenia_and_ringroad.py | Python | mit | 425 |
import lxml
import requests
def requests_session():
    """Get a suitable requests session for use in SmartBot.

    In particular, this sets the `User-Agent` header to the value of
    'SmartBot'.
    """
    session = requests.Session()
    session.headers["User-Agent"] = "SmartBot"
    return session
def _check_content_type(response, content_type="text/html"):
return response.headers.get("Content-Type", "").startswith(content_type)
def get_title(url):
    """Get the title of a website.

    Returns the <title> text with CR/LF removed, "Timeout!" on a request
    timeout, "No title." when the page has no title element, or None for
    non-200 / non-HTML responses.
    """
    # A plain ``import lxml`` at module level does NOT load the ``lxml.html``
    # submodule; import it explicitly so ``lxml.html.fromstring`` is reliable.
    import lxml.html
    try:
        page = requests_session().get(url, timeout=5, stream=True)
        if page.status_code == 200 and _check_content_type(page):
            try:
                tree = lxml.html.fromstring(page.text)
            except ValueError:  # lxml seems to have issues with unicode
                tree = lxml.html.fromstring(page.content)
            title = tree.cssselect("title")[0].text_content()
            return title.strip().replace("\n", "").replace("\r", "")
    except requests.exceptions.Timeout:
        return "Timeout!"
    except IndexError:  # no title element
        return "No title."
def sprunge(data):
    """Upload the data to `sprunge.us` (a popular plain-text paste bin).

    Returns the response body, i.e. the URL of the created paste.
    """
    payload = {"sprunge": data}
    page = requests_session().post("http://sprunge.us", data=payload)
    return page.text
| thomasleese/smartbot-old | smartbot/utils/web.py | Python | mit | 1,353 |
from stormed.util import add_method, AmqpError, logger
from stormed.serialization import table2str
from stormed.heartbeat import HeartbeatMonitor
from stormed.frame import status
from stormed.method.codegen import id2class
from stormed.method.constant import id2constant
from stormed.method.codegen.connection import *
@add_method(Start)
def handle(self, conn):
    """Answer the broker's Start method with AMQPLAIN credentials."""
    if 'AMQPLAIN' not in self.mechanisms.split(' '):
        raise AmqpError("'AMQPLAIN' not in mechanisms")
    if 'en_US' not in self.locales.split(' '):
        raise AmqpError("'en_US' not in locales")
    # The AMQPLAIN response is an AMQP field table containing the
    # LOGIN and PASSWORD entries.
    response = table2str(dict(LOGIN = conn.username,
                              PASSWORD = conn.password))
    client_properties = {'client': 'stormed-amqp'}
    start_ok = StartOk(client_properties=client_properties,
                       mechanism='AMQPLAIN', response=response,
                       locale='en_US')
    conn.write_method(start_ok)
@add_method(Tune)
def handle(self, conn):
    """Negotiate connection limits (Tune/TuneOk) and then open the vhost."""
    # A frame_max of 0 from the broker means "no specific limit".
    conn.frame_max = self.frame_max or 2**16
    tune_ok = TuneOk(frame_max = conn.frame_max,
                     channel_max = self.channel_max,
                     heartbeat = conn.heartbeat)
    conn.write_method(tune_ok)
    _open = Open(virtual_host = conn.vhost,
                 capabilities = '',
                 insist = 0)
    conn.write_method(_open)
@add_method(OpenOk)
def handle(self, conn):
    """Connection fully open: start heartbeats and fire the on_connect callback."""
    conn.status = status.OPENED
    if conn.heartbeat:
        HeartbeatMonitor(conn).start()
    try:
        conn.on_connect()
    except Exception:
        # User callback errors must not kill the connection machinery.
        logger.error('ERROR in on_connect() callback', exc_info=True)
@add_method(CloseOk)
def handle(self, conn):
    """Broker confirmed our Close: mark closed, notify the waiter, reset state."""
    conn.status = status.CLOSED
    conn.invoke_callback()
    conn.reset()
class ConnectionError(object):
    """Details of a connection-level AMQP error, handed to on_error()."""

    def __init__(self, reply_code, reply_text, method):
        # Broker-supplied error code/text plus the method that caused it
        # (None when the class/method ids could not be resolved).
        self.reply_code, self.reply_text, self.method = (
            reply_code, reply_text, method)
@add_method(Close)
def handle(self, conn):
    """Handle a connection-level Close ("hard error") from the broker.

    Resolves the offending method when possible, resets the connection and
    reports the failure through the user's on_error callback.
    """
    try:
        mod = id2class[self.class_id]
        method = getattr(mod, 'id2method')[self.method_id]
    except Exception:
        # Unknown class/method ids simply yield method=None.
        # (Narrowed from a bare except so KeyboardInterrupt/SystemExit
        # are no longer swallowed.)
        method = None
    conn.reset()
    error_code = id2constant.get(self.reply_code, '')
    logger.warn('Connection Hard Error. code=%r. %s', error_code,
                self.reply_text)
    if conn.on_error:
        try:
            conn.on_error(ConnectionError(error_code, self.reply_text, method))
        except Exception:
            logger.error('ERROR in on_error() callback', exc_info=True)
| paolo-losi/stormed-amqp | stormed/method/connection.py | Python | mit | 2,532 |
#!/usr/bin/env python
""" cythonize.py
Cythonize pyx files into C++ files as needed.
Usage: cythonize.py [root]
Checks pyx files to see if they have been changed relative to their
corresponding C++ files. If they have, then runs cython on these files to
recreate the C++ files.
Additionally, checks pxd files and setup.py if they have been changed. If
they have, rebuilds everything.
Change detection based on file hashes stored in JSON format.
For now, this script should be run by developers when changing Cython files
and the resulting C++ files checked in, so that end-users (and Python-only
developers) do not get the Cython dependencies.
Based upon:
https://raw.github.com/dagss/private-scipy-refactor/cythonize/cythonize.py
https://raw.githubusercontent.com/numpy/numpy/master/tools/cythonize.py
Note: this script does not check any of the dependent C++ libraries.
"""
from __future__ import print_function
import os
import sys
import json
import hashlib
import subprocess
import argparse
HASH_FILE = 'cythonize.json'
def process_pyx(fromfile, tofile):
    """Run cython on *fromfile*, producing *tofile* (C++ when it ends in .cpp).

    Tries the ``cython`` executable first; if it is not on PATH, falls back
    to invoking the Cython compiler through the current interpreter.
    Raises OSError when Cython is not installed at all.
    """
    print('Processing %s' % fromfile)
    try:
        from Cython.Compiler.Version import version as cython_version
        from distutils.version import LooseVersion
        if LooseVersion(cython_version) < LooseVersion('0.19'):
            raise Exception('Require Cython >= 0.19')
    except ImportError:
        # Cython not importable in this interpreter; the subprocess below
        # may still find a standalone executable.
        pass
    flags = ['--fast-fail']
    if tofile.endswith('.cpp'):
        flags += ['--cplus']
    try:
        try:
            r = subprocess.call(['cython'] + flags + ['-o', tofile, fromfile])
            if r != 0:
                raise Exception('Cython failed')
        except OSError:
            # There are ways of installing Cython that don't result in a cython
            # executable on the path, see gh-2397.
            r = subprocess.call([sys.executable, '-c',
                                 'import sys; from Cython.Compiler.Main import '
                                 'setuptools_main as main; sys.exit(main())'] + flags +
                                ['-o', tofile, fromfile])
            if r != 0:
                raise Exception('Cython failed')
    except OSError:
        raise OSError('Cython needs to be installed')
def preserve_cwd(path, func, *args):
    """Invoke ``func(*args)`` with the working directory set to *path*.

    The previous working directory is restored afterwards, even when the
    callable raises.
    """
    saved = os.getcwd()
    try:
        os.chdir(path)
        func(*args)
    finally:
        os.chdir(saved)
def load_hashes(filename):
    """Load the JSON hash database; empty dict when missing or corrupt.

    Uses a context manager so the file handle is closed deterministically
    (the original leaked the handle from ``json.load(open(...))``).
    """
    try:
        with open(filename) as f:
            return json.load(f)
    except (ValueError, IOError):
        return {}
def save_hashes(hash_db, filename):
    """Persist the hash database to *filename* as JSON."""
    with open(filename, 'w') as f:
        json.dump(hash_db, f)
def get_hash(path):
    """Return the MD5 hex digest of the file at *path*.

    Reads via a context manager so the handle is closed deterministically
    (the original leaked it from ``open(path, 'rb').read()``).
    """
    with open(path, 'rb') as f:
        return hashlib.md5(f.read()).hexdigest()
def hash_changed(base, path, db):
    """True when the file's current hash differs from the stored one.

    A file with no stored hash always counts as changed.
    """
    full_path = os.path.normpath(os.path.join(base, path))
    # Direct != is clearer than the original ``not a == b``.
    return get_hash(full_path) != db.get(full_path)
def hash_add(base, path, db):
    """Record the current hash of base/path in the database."""
    key = os.path.normpath(os.path.join(base, path))
    db[key] = get_hash(key)
def process(base, filename, db):
    """Regenerate the .cpp for a .pyx when either file's hash is stale.

    Updates *db* with the fresh hashes after a successful regeneration.
    """
    root, ext = os.path.splitext(filename)
    if ext in ['.pyx', '.cpp']:
        # Rebuild when the tracked hash changed or the generated file is gone.
        if hash_changed(base, filename, db) or not os.path.isfile(os.path.join(base, root + '.cpp')):
            preserve_cwd(base, process_pyx, root + '.pyx', root + '.cpp')
            hash_add(base, root + '.cpp', db)
            hash_add(base, root + '.pyx', db)
def check_changes(root, db):
    """Return True when setup.py or any .pxd under *root* has changed.

    A change to a shared declaration file invalidates everything, so in
    that case the database is cleared and repopulated with just the fresh
    setup.py/.pxd hashes, forcing a full rebuild downstream.
    """
    res = False
    new_db = {}
    setup_filename = 'setup.py'
    hash_add('.', setup_filename, new_db)
    if hash_changed('.', setup_filename, db):
        res = True
    for base, _, files in os.walk(root):
        for filename in files:
            if filename.endswith('.pxd'):
                hash_add(base, filename, new_db)
                if hash_changed(base, filename, db):
                    res = True
    if res:
        db.clear()
        db.update(new_db)
    return res
def run(root):
    """Cythonize every stale .pyx under *root*.

    Hashes are persisted even when processing fails partway, so completed
    work is not redone on the next invocation.
    """
    db = load_hashes(HASH_FILE)
    try:
        check_changes(root, db)
        for base, _, files in os.walk(root):
            for filename in files:
                process(base, filename, db)
    finally:
        save_hashes(db, HASH_FILE)
if __name__ == '__main__':
    # CLI entry point: cythonize all stale .pyx files under the given root.
    parser = argparse.ArgumentParser(description='Cythonize pyx files into C++ files as needed')
    parser.add_argument('root', help='root directory')
    args = parser.parse_args()
    run(args.root)
| syllog1sm/cymem | bin/cythonize.py | Python | mit | 4,402 |
'''Fairly basic set of tools for real-time data augmentation on image data.
Can easily be extended to include new transformations,
new preprocessing methods, etc...
'''
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import re
from scipy import linalg
import scipy.ndimage as ndi
from six.moves import range
import os
import threading
from .. import backend as K
def random_rotation(x, rg, row_index=1, col_index=2, channel_index=0,
                    fill_mode='nearest', cval=0.):
    """Rotate a single image tensor by a random angle in [-rg, rg] degrees."""
    theta = np.pi / 180 * np.random.uniform(-rg, rg)
    rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
                                [np.sin(theta), np.cos(theta), 0],
                                [0, 0, 1]])

    h, w = x.shape[row_index], x.shape[col_index]
    # Re-centre so the rotation happens about the image midpoint.
    transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
    x = apply_transform(x, transform_matrix, channel_index, fill_mode, cval)
    return x
def random_shift(x, wrg, hrg, row_index=1, col_index=2, channel_index=0,
                 fill_mode='nearest', cval=0.):
    """Translate an image by random fractions of its height (hrg) and width (wrg)."""
    h, w = x.shape[row_index], x.shape[col_index]
    tx = np.random.uniform(-hrg, hrg) * h
    ty = np.random.uniform(-wrg, wrg) * w
    translation_matrix = np.array([[1, 0, tx],
                                   [0, 1, ty],
                                   [0, 0, 1]])

    transform_matrix = translation_matrix  # no need to do offset
    x = apply_transform(x, transform_matrix, channel_index, fill_mode, cval)
    return x
def random_shear(x, intensity, row_index=1, col_index=2, channel_index=0,
                 fill_mode='nearest', cval=0.):
    """Shear an image by a random angle (radians) in [-intensity, intensity]."""
    shear = np.random.uniform(-intensity, intensity)
    shear_matrix = np.array([[1, -np.sin(shear), 0],
                             [0, np.cos(shear), 0],
                             [0, 0, 1]])

    h, w = x.shape[row_index], x.shape[col_index]
    # Re-centre so the shear happens about the image midpoint.
    transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
    x = apply_transform(x, transform_matrix, channel_index, fill_mode, cval)
    return x
def random_zoom(x, zoom_range, row_index=1, col_index=2, channel_index=0,
                fill_mode='nearest', cval=0.):
    """Zoom an image by independent random x/y factors drawn from zoom_range."""
    if len(zoom_range) != 2:
        raise Exception('zoom_range should be a tuple or list of two floats. '
                        'Received arg: ', zoom_range)

    if zoom_range[0] == 1 and zoom_range[1] == 1:
        zx, zy = 1, 1  # degenerate range: nothing to sample
    else:
        zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
    zoom_matrix = np.array([[zx, 0, 0],
                            [0, zy, 0],
                            [0, 0, 1]])

    h, w = x.shape[row_index], x.shape[col_index]
    # Re-centre so the zoom happens about the image midpoint.
    transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
    x = apply_transform(x, transform_matrix, channel_index, fill_mode, cval)
    return x
def random_barrel_transform(x, intensity):
    """Placeholder for barrel/fisheye distortion augmentation (not implemented)."""
    # TODO
    pass
def random_channel_shift(x, intensity, channel_index=0):
    """Shift each channel by an independent random offset in
    [-intensity, intensity], clipping to the image's original value range."""
    x = np.rollaxis(x, channel_index, 0)
    lo, hi = np.min(x), np.max(x)
    shifted = [np.clip(channel + np.random.uniform(-intensity, intensity), lo, hi)
               for channel in x]
    x = np.stack(shifted, axis=0)
    return np.rollaxis(x, 0, channel_index + 1)
def transform_matrix_offset_center(matrix, x, y):
    """Conjugate a 3x3 affine *matrix* so it acts about the image centre
    of an x-by-y image instead of the origin."""
    o_x = float(x) / 2 + 0.5
    o_y = float(y) / 2 + 0.5
    offset = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
    reset = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
    return offset.dot(matrix).dot(reset)
def apply_transform(x, transform_matrix, channel_index=0, fill_mode='nearest', cval=0.):
    """Apply a 3x3 affine transform to every channel of a single image."""
    x = np.rollaxis(x, channel_index, 0)
    matrix = transform_matrix[:2, :2]
    offset = transform_matrix[:2, 2]
    warped = [ndi.interpolation.affine_transform(channel, matrix, offset,
                                                 order=0, mode=fill_mode,
                                                 cval=cval)
              for channel in x]
    x = np.stack(warped, axis=0)
    return np.rollaxis(x, 0, channel_index + 1)
def flip_axis(x, axis):
    """Reverse the entries of *x* along *axis*."""
    index = [slice(None)] * np.ndim(x)
    index[axis] = slice(None, None, -1)
    return np.asarray(x)[tuple(index)]
def array_to_img(x, dim_ordering='default', scale=True):
    """Convert a 3D tensor to a PIL Image.

    # Arguments
        x: image tensor, channels-first ('th') or channels-last ('tf').
        dim_ordering: 'th', 'tf' or 'default' (read from the Keras config).
        scale: whether to rescale values into the 0-255 display range.

    Works on a float copy, so the caller's array is never modified (the
    original mutated the input in place via ``+=`` / ``*=`` when scaling).
    """
    from PIL import Image
    if dim_ordering == 'default':
        dim_ordering = K.image_dim_ordering()
    # np.array always copies; also promotes ints so in-place division works.
    x = np.array(x, dtype='float32')
    if dim_ordering == 'th':
        x = x.transpose(1, 2, 0)
    if scale:
        x += max(-np.min(x), 0)
        x_max = np.max(x)
        if x_max != 0:
            x /= x_max
        x *= 255
    if x.shape[2] == 3:
        # RGB
        return Image.fromarray(x.astype('uint8'), 'RGB')
    elif x.shape[2] == 1:
        # grayscale
        return Image.fromarray(x[:, :, 0].astype('uint8'), 'L')
    else:
        raise Exception('Unsupported channel number: ', x.shape[2])
def img_to_array(img, dim_ordering='default'):
    """Convert a PIL Image (or array-like) to a 3D float32 numpy array.

    2D input gains a singleton channel axis; 3D input is transposed to
    channels-first when dim_ordering is 'th'.
    """
    if dim_ordering == 'default':
        dim_ordering = K.image_dim_ordering()
    if dim_ordering not in ['th', 'tf']:
        raise Exception('Unknown dim_ordering: ', dim_ordering)
    # image has dim_ordering (height, width, channel)
    x = np.asarray(img, dtype='float32')
    if x.ndim == 3:
        if dim_ordering == 'th':
            x = x.transpose(2, 0, 1)
    elif x.ndim == 2:
        if dim_ordering == 'th':
            x = x.reshape((1,) + x.shape)
        else:
            x = x.reshape(x.shape + (1,))
    else:
        raise Exception('Unsupported image shape: ', x.shape)
    return x
def load_img(path, grayscale=False, target_size=None):
    """Load an image file as a PIL Image.

    grayscale: convert to single-channel 'L' mode; otherwise force 'RGB'
    so a grayscale file still yields three channels.
    target_size: optional (height, width) to resize to.
    """
    from PIL import Image
    img = Image.open(path)
    if grayscale:
        img = img.convert('L')
    else:  # Ensure 3 channel even when loaded image is grayscale
        img = img.convert('RGB')
    if target_size:
        # PIL's resize takes (width, height), hence the swap.
        img = img.resize((target_size[1], target_size[0]))
    return img
def list_pictures(directory, ext='jpg|jpeg|bmp|png'):
    """List full paths of files in *directory* whose names look like images
    with one of the given (lowercase) extensions."""
    pattern = re.compile(r'([\w]+\.(?:' + ext + r'))')
    paths = []
    for f in os.listdir(directory):
        full = os.path.join(directory, f)
        if os.path.isfile(full) and pattern.match(f):
            paths.append(full)
    return paths
class ImageDataGenerator(object):
    '''Generate minibatches with
    real-time data augmentation.

    # Arguments
        featurewise_center: set input mean to 0 over the dataset.
        samplewise_center: set each sample mean to 0.
        featurewise_std_normalization: divide inputs by std of the dataset.
        samplewise_std_normalization: divide each input by its std.
        zca_whitening: apply ZCA whitening.
        rotation_range: degrees (0 to 180).
        width_shift_range: fraction of total width.
        height_shift_range: fraction of total height.
        shear_range: shear intensity (shear angle in radians).
        zoom_range: amount of zoom. if scalar z, zoom will be randomly picked
            in the range [1-z, 1+z]. A sequence of two can be passed instead
            to select this range.
        channel_shift_range: shift range for each channels.
        fill_mode: points outside the boundaries are filled according to the
            given mode ('constant', 'nearest', 'reflect' or 'wrap'). Default
            is 'nearest'.
        cval: value used for points outside the boundaries when fill_mode is
            'constant'. Default is 0.
        horizontal_flip: whether to randomly flip images horizontally.
        vertical_flip: whether to randomly flip images vertically.
        rescale: rescaling factor. If None or 0, no rescaling is applied,
            otherwise we multiply the data by the value provided (before applying
            any other transformation).
        dim_ordering: 'th' or 'tf'. In 'th' mode, the channels dimension
            (the depth) is at index 1, in 'tf' mode it is at index 3.
            It defaults to the `image_dim_ordering` value found in your
            Keras config file at `~/.keras/keras.json`.
            If you never set it, then it will be "th".
    '''
    def __init__(self,
                 featurewise_center=False,
                 samplewise_center=False,
                 featurewise_std_normalization=False,
                 samplewise_std_normalization=False,
                 zca_whitening=False,
                 rotation_range=0.,
                 width_shift_range=0.,
                 height_shift_range=0.,
                 shear_range=0.,
                 zoom_range=0.,
                 channel_shift_range=0.,
                 fill_mode='nearest',
                 cval=0.,
                 horizontal_flip=False,
                 vertical_flip=False,
                 rescale=None,
                 dim_ordering='default'):
        if dim_ordering == 'default':
            dim_ordering = K.image_dim_ordering()
        # Store every constructor argument as an attribute in one go.
        self.__dict__.update(locals())
        # Dataset statistics, populated by fit() when the featurewise or
        # whitening options are enabled.
        self.mean = None
        self.std = None
        self.principal_components = None
        self.rescale = rescale

        if dim_ordering not in {'tf', 'th'}:
            raise Exception('dim_ordering should be "tf" (channel after row and '
                            'column) or "th" (channel before row and column). '
                            'Received arg: ', dim_ordering)
        self.dim_ordering = dim_ordering
        # Pre-compute the axis index of channel/row/col for the 4D batch.
        if dim_ordering == 'th':
            self.channel_index = 1
            self.row_index = 2
            self.col_index = 3
        if dim_ordering == 'tf':
            self.channel_index = 3
            self.row_index = 1
            self.col_index = 2

        # Normalize zoom_range to a [low, high] pair.
        if np.isscalar(zoom_range):
            self.zoom_range = [1 - zoom_range, 1 + zoom_range]
        elif len(zoom_range) == 2:
            self.zoom_range = [zoom_range[0], zoom_range[1]]
        else:
            raise Exception('zoom_range should be a float or '
                            'a tuple or list of two floats. '
                            'Received arg: ', zoom_range)

    def flow(self, X, y=None, batch_size=32, shuffle=True, seed=None,
             save_to_dir=None, save_prefix='', save_format='jpeg'):
        """Iterate over in-memory arrays X (and optional labels y) in
        augmented batches."""
        return NumpyArrayIterator(
            X, y, self,
            batch_size=batch_size, shuffle=shuffle, seed=seed,
            dim_ordering=self.dim_ordering,
            save_to_dir=save_to_dir, save_prefix=save_prefix, save_format=save_format)

    def flow_from_directory(self, directory,
                            target_size=(256, 256), color_mode='rgb',
                            classes=None, class_mode='categorical',
                            batch_size=32, shuffle=True, seed=None,
                            save_to_dir=None, save_prefix='', save_format='jpeg'):
        """Iterate over images found in class subfolders of *directory*."""
        return DirectoryIterator(
            directory, self,
            target_size=target_size, color_mode=color_mode,
            classes=classes, class_mode=class_mode,
            dim_ordering=self.dim_ordering,
            batch_size=batch_size, shuffle=shuffle, seed=seed,
            save_to_dir=save_to_dir, save_prefix=save_prefix, save_format=save_format)

    def standardize(self, x):
        """Apply the configured normalizations to a single image tensor.

        Note: operates in place on ``x`` where possible.
        """
        if self.rescale:
            x *= self.rescale
        # x is a single image, so it doesn't have image number at index 0
        img_channel_index = self.channel_index - 1
        if self.samplewise_center:
            x -= np.mean(x, axis=img_channel_index, keepdims=True)
        if self.samplewise_std_normalization:
            x /= (np.std(x, axis=img_channel_index, keepdims=True) + 1e-7)

        # Featurewise statistics require a prior call to fit().
        if self.featurewise_center:
            x -= self.mean
        if self.featurewise_std_normalization:
            x /= (self.std + 1e-7)

        if self.zca_whitening:
            flatx = np.reshape(x, (x.size))
            whitex = np.dot(flatx, self.principal_components)
            x = np.reshape(whitex, (x.shape[0], x.shape[1], x.shape[2]))

        return x

    def random_transform(self, x):
        """Apply a random combination of the configured geometric
        transforms, channel shifts and flips to one image tensor."""
        # x is a single image, so it doesn't have image number at index 0
        img_row_index = self.row_index - 1
        img_col_index = self.col_index - 1
        img_channel_index = self.channel_index - 1

        # use composition of homographies to generate final transform that needs to be applied
        if self.rotation_range:
            theta = np.pi / 180 * np.random.uniform(-self.rotation_range, self.rotation_range)
        else:
            theta = 0
        rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
                                    [np.sin(theta), np.cos(theta), 0],
                                    [0, 0, 1]])
        if self.height_shift_range:
            tx = np.random.uniform(-self.height_shift_range, self.height_shift_range) * x.shape[img_row_index]
        else:
            tx = 0

        if self.width_shift_range:
            ty = np.random.uniform(-self.width_shift_range, self.width_shift_range) * x.shape[img_col_index]
        else:
            ty = 0

        translation_matrix = np.array([[1, 0, tx],
                                       [0, 1, ty],
                                       [0, 0, 1]])
        if self.shear_range:
            shear = np.random.uniform(-self.shear_range, self.shear_range)
        else:
            shear = 0
        shear_matrix = np.array([[1, -np.sin(shear), 0],
                                 [0, np.cos(shear), 0],
                                 [0, 0, 1]])

        if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
            zx, zy = 1, 1
        else:
            zx, zy = np.random.uniform(self.zoom_range[0], self.zoom_range[1], 2)
        zoom_matrix = np.array([[zx, 0, 0],
                                [0, zy, 0],
                                [0, 0, 1]])

        # Compose all homographies into one matrix, then apply it once.
        transform_matrix = np.dot(np.dot(np.dot(rotation_matrix, translation_matrix), shear_matrix), zoom_matrix)

        h, w = x.shape[img_row_index], x.shape[img_col_index]
        transform_matrix = transform_matrix_offset_center(transform_matrix, h, w)
        x = apply_transform(x, transform_matrix, img_channel_index,
                            fill_mode=self.fill_mode, cval=self.cval)
        if self.channel_shift_range != 0:
            x = random_channel_shift(x, self.channel_shift_range, img_channel_index)

        if self.horizontal_flip:
            if np.random.random() < 0.5:
                x = flip_axis(x, img_col_index)

        if self.vertical_flip:
            if np.random.random() < 0.5:
                x = flip_axis(x, img_row_index)

        # TODO:
        # channel-wise normalization
        # barrel/fisheye
        return x

    def fit(self, X,
            augment=False,
            rounds=1,
            seed=None):
        '''Required for featurewise_center, featurewise_std_normalization
        and zca_whitening.

        # Arguments
            X: Numpy array, the data to fit on.
            augment: whether to fit on randomly augmented samples
            rounds: if `augment`,
                how many augmentation passes to do over the data
            seed: random seed.
        '''
        X = np.copy(X)
        if augment:
            # Fit on `rounds` randomly-augmented copies of the dataset.
            aX = np.zeros(tuple([rounds * X.shape[0]] + list(X.shape)[1:]))
            for r in range(rounds):
                for i in range(X.shape[0]):
                    aX[i + r * X.shape[0]] = self.random_transform(X[i])
            X = aX

        if self.featurewise_center:
            self.mean = np.mean(X, axis=0)
            X -= self.mean

        if self.featurewise_std_normalization:
            self.std = np.std(X, axis=0)
            X /= (self.std + 1e-7)

        if self.zca_whitening:
            flatX = np.reshape(X, (X.shape[0], X.shape[1] * X.shape[2] * X.shape[3]))
            sigma = np.dot(flatX.T, flatX) / flatX.shape[1]
            U, S, V = linalg.svd(sigma)
            self.principal_components = np.dot(np.dot(U, np.diag(1. / np.sqrt(S + 10e-7))), U.T)
class Iterator(object):
    """Base class for batch iterators.

    Owns the thread-safe, infinite index generator; subclasses turn the
    yielded index arrays into actual data batches in their ``next``.
    """

    def __init__(self, N, batch_size, shuffle, seed):
        # N: total number of samples.  seed (optional) makes shuffling
        # reproducible: each epoch reseeds with seed + batches seen so far.
        self.N = N
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.batch_index = 0
        self.total_batches_seen = 0
        # Guards index generation when next() is called from several threads.
        self.lock = threading.Lock()
        self.index_generator = self._flow_index(N, batch_size, shuffle, seed)

    def reset(self):
        # Restart epoch bookkeeping.
        self.batch_index = 0

    def _flow_index(self, N, batch_size=32, shuffle=False, seed=None):
        """Yield (index_array, current_index, current_batch_size) forever."""
        # ensure self.batch_index is 0
        self.reset()
        while 1:
            if self.batch_index == 0:
                # New epoch: (re)build the index permutation.
                index_array = np.arange(N)
                if shuffle:
                    if seed is not None:
                        np.random.seed(seed + self.total_batches_seen)
                    index_array = np.random.permutation(N)

            current_index = (self.batch_index * batch_size) % N
            if N >= current_index + batch_size:
                current_batch_size = batch_size
                self.batch_index += 1
            else:
                # Final, possibly short, batch of the epoch.
                current_batch_size = N - current_index
                self.batch_index = 0
            self.total_batches_seen += 1
            yield (index_array[current_index: current_index + current_batch_size],
                   current_index, current_batch_size)

    def __iter__(self):
        # needed if we want to do something like:
        # for x, y in data_gen.flow(...):
        return self

    def __next__(self, *args, **kwargs):
        # Python 3 iteration protocol delegates to the subclass's next().
        return self.next(*args, **kwargs)
class NumpyArrayIterator(Iterator):
    """Iterator yielding augmented batches from in-memory numpy arrays."""

    def __init__(self, X, y, image_data_generator,
                 batch_size=32, shuffle=False, seed=None,
                 dim_ordering='default',
                 save_to_dir=None, save_prefix='', save_format='jpeg'):
        if y is not None and len(X) != len(y):
            raise Exception('X (images tensor) and y (labels) '
                            'should have the same length. '
                            'Found: X.shape = %s, y.shape = %s' % (np.asarray(X).shape, np.asarray(y).shape))
        if dim_ordering == 'default':
            dim_ordering = K.image_dim_ordering()
        self.X = X
        self.y = y
        self.image_data_generator = image_data_generator
        self.dim_ordering = dim_ordering
        # When save_to_dir is set, each augmented image is also written to
        # disk (for visual debugging of the augmentation settings).
        self.save_to_dir = save_to_dir
        self.save_prefix = save_prefix
        self.save_format = save_format
        super(NumpyArrayIterator, self).__init__(X.shape[0], batch_size, shuffle, seed)

    def next(self):
        """Return the next batch: (batch_x, batch_y), or batch_x alone when
        no labels were provided."""
        # for python 2.x.
        # Keeps under lock only the mechanism which advances
        # the indexing of each batch
        # see http://anandology.com/blog/using-iterators-and-generators/
        with self.lock:
            index_array, current_index, current_batch_size = next(self.index_generator)
        # The transformation of images is not under thread lock so it can be done in parallel
        batch_x = np.zeros(tuple([current_batch_size] + list(self.X.shape)[1:]))
        for i, j in enumerate(index_array):
            x = self.X[j]
            x = self.image_data_generator.random_transform(x.astype('float32'))
            x = self.image_data_generator.standardize(x)
            batch_x[i] = x
        if self.save_to_dir:
            for i in range(current_batch_size):
                img = array_to_img(batch_x[i], self.dim_ordering, scale=True)
                fname = '{prefix}_{index}_{hash}.{format}'.format(prefix=self.save_prefix,
                                                                  index=current_index + i,
                                                                  hash=np.random.randint(1e4),
                                                                  format=self.save_format)
                img.save(os.path.join(self.save_to_dir, fname))
        if self.y is None:
            return batch_x
        batch_y = self.y[index_array]
        return batch_x, batch_y
class DirectoryIterator(Iterator):
    """Iterator yielding augmented batches from images stored on disk.

    Expects one subdirectory per class under *directory*; class labels are
    inferred from (sorted) subdirectory names unless *classes* is given.
    """

    def __init__(self, directory, image_data_generator,
                 target_size=(256, 256), color_mode='rgb',
                 dim_ordering='default',
                 classes=None, class_mode='categorical',
                 batch_size=32, shuffle=True, seed=None,
                 save_to_dir=None, save_prefix='', save_format='jpeg'):
        if dim_ordering == 'default':
            dim_ordering = K.image_dim_ordering()
        self.directory = directory
        self.image_data_generator = image_data_generator
        self.target_size = tuple(target_size)
        if color_mode not in {'rgb', 'grayscale'}:
            raise ValueError('Invalid color mode:', color_mode,
                             '; expected "rgb" or "grayscale".')
        self.color_mode = color_mode
        self.dim_ordering = dim_ordering
        # Derive the per-image shape from colour mode and dim ordering.
        if self.color_mode == 'rgb':
            if self.dim_ordering == 'tf':
                self.image_shape = self.target_size + (3,)
            else:
                self.image_shape = (3,) + self.target_size
        else:
            if self.dim_ordering == 'tf':
                self.image_shape = self.target_size + (1,)
            else:
                self.image_shape = (1,) + self.target_size
        self.classes = classes
        if class_mode not in {'categorical', 'binary', 'sparse', None}:
            raise ValueError('Invalid class_mode:', class_mode,
                             '; expected one of "categorical", '
                             '"binary", "sparse", or None.')
        self.class_mode = class_mode
        # When save_to_dir is set, each augmented image is also written to
        # disk (for visual debugging of the augmentation settings).
        self.save_to_dir = save_to_dir
        self.save_prefix = save_prefix
        self.save_format = save_format

        white_list_formats = {'png', 'jpg', 'jpeg', 'bmp'}

        # first, count the number of samples and classes
        self.nb_sample = 0

        if not classes:
            # Infer class names from subdirectory names, sorted for stability.
            classes = []
            for subdir in sorted(os.listdir(directory)):
                if os.path.isdir(os.path.join(directory, subdir)):
                    classes.append(subdir)
        self.nb_class = len(classes)
        self.class_indices = dict(zip(classes, range(len(classes))))

        for subdir in classes:
            subpath = os.path.join(directory, subdir)
            for fname in os.listdir(subpath):
                is_valid = False
                for extension in white_list_formats:
                    if fname.lower().endswith('.' + extension):
                        is_valid = True
                        break
                if is_valid:
                    self.nb_sample += 1
        print('Found %d images belonging to %d classes.' % (self.nb_sample, self.nb_class))

        # second, build an index of the images in the different class subfolders
        self.filenames = []
        self.classes = np.zeros((self.nb_sample,), dtype='int32')
        i = 0
        for subdir in classes:
            subpath = os.path.join(directory, subdir)
            for fname in os.listdir(subpath):
                is_valid = False
                for extension in white_list_formats:
                    if fname.lower().endswith('.' + extension):
                        is_valid = True
                        break
                if is_valid:
                    self.classes[i] = self.class_indices[subdir]
                    self.filenames.append(os.path.join(subdir, fname))
                    i += 1
        super(DirectoryIterator, self).__init__(self.nb_sample, batch_size, shuffle, seed)

    def next(self):
        """Return the next batch: (batch_x, batch_y) per class_mode, or
        batch_x alone when class_mode is None."""
        with self.lock:
            index_array, current_index, current_batch_size = next(self.index_generator)
        # The transformation of images is not under thread lock so it can be done in parallel
        batch_x = np.zeros((current_batch_size,) + self.image_shape)
        grayscale = self.color_mode == 'grayscale'
        # build batch of image data
        for i, j in enumerate(index_array):
            fname = self.filenames[j]
            img = load_img(os.path.join(self.directory, fname), grayscale=grayscale, target_size=self.target_size)
            x = img_to_array(img, dim_ordering=self.dim_ordering)
            x = self.image_data_generator.random_transform(x)
            x = self.image_data_generator.standardize(x)
            batch_x[i] = x
        # optionally save augmented images to disk for debugging purposes
        if self.save_to_dir:
            for i in range(current_batch_size):
                img = array_to_img(batch_x[i], self.dim_ordering, scale=True)
                fname = '{prefix}_{index}_{hash}.{format}'.format(prefix=self.save_prefix,
                                                                  index=current_index + i,
                                                                  hash=np.random.randint(1e4),
                                                                  format=self.save_format)
                img.save(os.path.join(self.save_to_dir, fname))
        # build batch of labels
        if self.class_mode == 'sparse':
            batch_y = self.classes[index_array]
        elif self.class_mode == 'binary':
            batch_y = self.classes[index_array].astype('float32')
        elif self.class_mode == 'categorical':
            # One-hot encode the integer class indices.
            batch_y = np.zeros((len(batch_x), self.nb_class), dtype='float32')
            for i, label in enumerate(self.classes[index_array]):
                batch_y[i, label] = 1.
        else:
            return batch_x
        return batch_x, batch_y
| kemaswill/keras | keras/preprocessing/image.py | Python | mit | 25,315 |
import pytest
@pytest.mark.online
class TestMyEpisodes:
    """Uses test account at MyEpisodes, username and password are 'flexget'"""

    # FlexGet task config: a mocked entry pushed through the myepisodes
    # plugin.  NOTE(review): the YAML indentation below was reconstructed
    # (it does not survive this file's formatting) — verify nesting.
    config = """
        tasks:
          test:
            mock:
              - title: the.simpsons.S10E10.hdtv
            all_series: yes
            myepisodes:
              username: flexget
              password: flexget
    """

    @pytest.mark.skip(reason="Test myepisodes (DISABLED) -- account locked?")
    def test_myepisodes_id(self, execute_task):
        # Online test against the real MyEpisodes service.
        task = execute_task('test')
        entry = task.find_entry('accepted', title='the.simpsons.S10E10.hdtv')
        assert entry, 'entry not present'
        # It's tough to verify the marking worked properly, at least check that myepisodes_id is populated
        assert entry['myepisodes_id'] == '10', 'myepisodes_id should be 10 for The Simpsons'
| Flexget/Flexget | flexget/tests/test_myepisodes.py | Python | mit | 859 |
"""2005_叫地主接口"""
import clr, sys
from action import *
from lang import Lang
clr.AddReference('ZyGames.Framework.Game')
clr.AddReference('ZyGames.Doudizhu.Lang')
clr.AddReference('ZyGames.Doudizhu.Model')
clr.AddReference('ZyGames.Doudizhu.Bll')
from ZyGames.Framework.Game.Service import *
from ZyGames.Doudizhu.Lang import *
from ZyGames.Doudizhu.Model import *
from ZyGames.Doudizhu.Bll.Logic import *
class UrlParam(HttpParam):
    """Request parameters for action 2005 (call-landlord decision)."""

    def __init__(self):
        HttpParam.__init__(self)
        # op: 1 means "call landlord"; any other value means "pass"
        # (see takeAction, where isCall is urlParam.op == 1).
        self.op = 0
class ActionResult(DataResult):
    """Result wrapper for action 2005; adds no fields beyond DataResult."""

    def __init__(self):
        DataResult.__init__(self)
def getUrlElement(httpGet, parent):
    """Parse the 'op' query parameter; mark the request invalid when absent."""
    urlParam = UrlParam()
    if httpGet.Contains("op"):
        urlParam.op = httpGet.GetIntValue("op")
    else:
        urlParam.Result = False
    return urlParam
def takeAction(urlParam, parent):
    """Handle a player's call-landlord (op == 1) or pass decision.

    Validates that the user is seated at a table and that the calling
    phase is still open, clears AI takeover for the seat if active, then
    forwards the decision to the table logic and restarts the table timer.
    """
    actionResult = ActionResult()
    user = parent.Current.User
    table = GameRoom.Current.GetTableData(user)
    if not table or not user:
        parent.ErrorCode = Lang.getLang("ErrorCode")
        parent.ErrorInfo = Lang.getLang("LoadError")
        actionResult.Result = False
        return actionResult
    if table.IsCallEnd:
        # The calling phase has already finished.
        parent.ErrorCode = Lang.getLang("ErrorCode")
        parent.ErrorInfo = Lang.getLang("St2005_CalledIsEnd")
        actionResult.Result = False
        return actionResult
    position = GameTable.Current.GetUserPosition(user, table)
    if not position:
        parent.ErrorCode = Lang.getLang("ErrorCode")
        parent.ErrorInfo = Lang.getLang("LoadError")
        actionResult.Result = False
        return actionResult
    if position.IsAI:
        # Player acted in person: release the AI covering this seat.
        position.IsAI = False
        GameTable.Current.NotifyAutoAiUser(user.UserId, False)
    isCall = urlParam.op == 1 and True or False
    GameTable.Current.CallCard(user.Property.PositionId, table, isCall)
    GameTable.Current.ReStarTableTimer(table)
    return actionResult
def buildPacket(writer, urlParam, actionResult):
    """No extra response payload for this action; signal success."""
    return True
"""Pathname and path-related operations for the Macintosh."""
import os
from stat import *
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","islink","exists","isdir","isfile",
"walk","expanduser","expandvars","normpath","abspath"]
# Normalize the case of a pathname. Dummy in Posix, but <s>.lower() here.
def normcase(path):
    # Classic MacOS (HFS) path comparisons are case-insensitive.
    return path.lower()
def isabs(s):
    """Return true if a path is absolute.
    On the Mac, relative paths begin with a colon,
    but as a special case, paths with no colons at all are also relative.
    Anything else is absolute (the string up to the first colon is the
    volume name)."""
    if ':' not in s:
        # No colon at all: a bare name, which is relative.
        return False
    return s[0] != ':'
def join(s, *p):
    """Join path components with colons, Mac-style.

    An absolute component restarts the result; a leading colon on a
    relative component is dropped before appending.
    """
    path = s
    for t in p:
        # BUGFIX: test the accumulated `path`, not the original first
        # argument `s`, so join('', 'a', 'b') builds ':a:b' instead of
        # discarding earlier components.  This matches CPython's macpath.
        if (not path) or isabs(t):
            path = t
            continue
        if t[:1] == ':':
            t = t[1:]
        # Make the running path explicitly relative before appending.
        if ':' not in path:
            path = ':' + path
        if path[-1:] != ':':
            path = path + ':'
        path = path + t
    return path
def split(s):
    """Split a pathname into two parts: the directory leading up to the final
    bit, and the basename (the filename, without colons, in that directory).
    The result (s, t) is such that join(s, t) yields the original argument."""
    if ':' not in s:
        return '', s
    # Index one past the last colon in the string.
    colon = s.rfind(':') + 1
    path, file = s[:colon-1], s[colon:]
    # Keep a trailing colon on single-component directory parts so that
    # join(path, file) reconstructs the input.
    if path and ':' not in path:
        path = path + ':'
    return path, file
def splitext(p):
    """Split a path into root and extension.
    The extension is everything starting at the last dot in the last
    pathname component; the root is everything before that.
    It is always true that root + ext == p."""
    root = ''
    ext = ''
    for ch in p:
        if ch == ':':
            # A directory separator flushes any extension seen so far.
            root = root + ext + ch
            ext = ''
        elif ch == '.':
            if ext:
                # A later dot restarts the extension.
                root = root + ext
                ext = ch
            else:
                ext = ch
        elif ext:
            ext = ext + ch
        else:
            root = root + ch
    return root, ext
def splitdrive(p):
    """Split a pathname into a drive specification and the rest of the
    path.  Useful on DOS/Windows/NT; on the Mac there is no drive-letter
    concept (volume names deliberately are not treated as drives), so the
    drive part is always empty."""
    return '', p
# Short interfaces to split()
def dirname(s): return split(s)[0]   # directory part, up to the final colon
def basename(s): return split(s)[1]  # final, colon-free path component
def isdir(s):
    """Return true if the pathname refers to an existing directory."""
    try:
        mode = os.stat(s)[ST_MODE]
    except os.error:
        # Missing path (or any stat failure) is simply "not a directory".
        return 0
    return S_ISDIR(mode)
# Get size, mtime, atime of files.
def getsize(filename):
    """Return the size of a file, reported by os.stat()."""
    return os.stat(filename)[ST_SIZE]
def getmtime(filename):
    """Return the last modification time of a file, reported by os.stat()."""
    return os.stat(filename)[ST_MTIME]
def getatime(filename):
    """Return the last access time of a file, reported by os.stat()."""
    return os.stat(filename)[ST_ATIME]
def islink(s):
    """Return true if the pathname refers to a symbolic link.
    Always false on the Mac, until we understand Aliases.)"""
    # Classic MacOS aliases are not resolved here.
    return 0
def isfile(s):
    """Return true if the pathname refers to an existing regular file."""
    try:
        mode = os.stat(s)[ST_MODE]
    except os.error:
        return 0
    return S_ISREG(mode)
def exists(s):
    """Return true if the pathname refers to an existing file or directory."""
    try:
        os.stat(s)
    except os.error:
        # Any stat failure counts as "does not exist".
        return 0
    return 1
# Return the longest prefix of all list elements.
def commonprefix(m):
    "Given a list of pathnames, returns the longest common leading component"
    if not m: return ''
    # The lexicographically smallest and largest elements differ the most;
    # their shared prefix is therefore shared by every element in between.
    lo = min(m)
    hi = max(m)
    for i, ch in enumerate(lo):
        if ch != hi[i]:
            return lo[:i]
    return lo
def expandvars(path):
    """Dummy to retain interface-compatibility with other operating systems."""
    # Classic MacOS has no $VAR syntax in paths; return unchanged.
    return path
def expanduser(path):
    """Dummy to retain interface-compatibility with other operating systems."""
    # No ~user home-directory notation on classic MacOS either.
    return path
# Python 2 style string exception raised when '::' follows a volume name.
norm_error = 'macpath.norm_error: path cannot be normalized'
def normpath(s):
    """Normalize a pathname so that equivalent paths compare equal.

    Collapses '::' (parent-directory) components and strips a redundant
    trailing colon.  Raises norm_error for '::' right after the volume.
    """
    if ":" not in s:
        # A bare filename: make it explicitly relative.
        return ":"+s
    comps = s.split(":")
    i = 1
    while i < len(comps)-1:
        if comps[i] == "" and comps[i-1] != "":
            # An empty component means '::' -> drop the previous component.
            if i > 1:
                del comps[i-1:i+1]
                i = i - 1
            else:
                # best way to handle this is to raise an exception
                raise norm_error, 'Cannot use :: immedeately after volume name'
        else:
            i = i + 1
    s = ":".join(comps)
    # remove trailing ":" except for ":" and "Volume:"
    if s[-1] == ":" and len(comps) > 2 and s != ":"*len(s):
        s = s[:-1]
    return s
def walk(top, func, arg):
    """Directory tree walk.
    For each directory under top (including top itself),
    func(arg, dirname, filenames) is called, where
    dirname is the name of the directory and filenames is the list
    of files (and subdirectories etc.) in the directory.
    The func may modify the filenames list, to implement a filter,
    or to impose a different order of visiting."""
    try:
        names = os.listdir(top)
    except os.error:
        # Unreadable directory: silently skip the whole subtree.
        return
    func(arg, top, names)
    for name in names:
        name = join(top, name)
        # Recurse into subdirectories only (symlinks are not a concern here).
        if isdir(name):
            walk(name, func, arg)
def abspath(path):
    """Return an absolute path."""
    # Relative paths are anchored at the current working directory first;
    # either way the result is normalized.
    if isabs(path):
        return normpath(path)
    return normpath(join(os.getcwd(), path))
| ai-ku/langvis | jython-2.1/Lib/macpath.py | Python | mit | 6,265 |
from csv import DictReader
from datetime import datetime, timedelta
from collections import defaultdict
import cPickle as pickle
from math import exp, log, sqrt
import random, gc
from util import read_dump, write_dump, cache, read_tsv, convert_ts, data, next_row, get_category
import argparse, ast, re, json
def filter_row(row, data_type, sr):
    """Keep only context-ad rows (ObjectType 3); for training data
    (data_type == 0) downsample non-click rows, keeping each one with
    probability *sr*."""
    if int(row["ObjectType"]) != 3:
        return False
    clicked = int(row.get("IsClick", 0))
    if data_type == 0 and clicked == 0 and random.random() > sr:
        return False
    return True
def calc_ctr(x, y):
    """Smoothed click-through rate in (rounded) percent.

    Adds 10 virtual impressions at the global average rate (0.60281%)
    so low-traffic counts shrink toward the prior.
    """
    avg_ctr = 0.0060281
    smoothed = (x + avg_ctr * 10) * 100.0 / (y + 10)
    return int(round(smoothed))
def log_trans(x):
    """Bucket a non-negative count by rounding log(x + 1)."""
    return int(round(log(1 + x)))
def get_user_info():
    """Load data/UserInfo.tsv into {UserID: {feature name: int value}}."""
    profiles = {}
    for t, row in read_tsv("data/UserInfo.tsv"):
        # All columns are integer-coded in the TSV.
        converted = {}
        for field in row:
            converted[field] = int(row[field])
        uid = converted.pop("UserID")
        profiles[uid] = converted
    return profiles
def trans_ad_info(ad_info):
    """Normalize a raw AdsInfo row in place.

    Drops unused columns, parses Price to float, hashes each Params pair
    into a feature id (tuple, (-1,) when empty), decodes Title to unicode
    and integer-codes CategoryID.  Returns None for non-context ads.
    """
    if int(ad_info["IsContext"]) == 0:
        return None
    trans_keys = [
        "CategoryID",
        "Price",
        "Params",
        "Title",
    ]
    del_keys = ["AdID", "IsContext", "_id", "LocationID",]
    for k in del_keys:
        if k in ad_info:
            del ad_info[k]
    for key in trans_keys:
        val = ad_info[key]
        if key == "Price":
            # Empty price strings stay "" (handled downstream).
            if val == "":
                pass
            else:
                ad_info[key] = float(ad_info[key])
        elif key == "Params":
            # Params is a serialized dict literal; hash each (key, value)
            # pair with field id 0.
            params = ad_info[key]
            params = ast.literal_eval(params) if params else {}
            for par_key in params:
                params[par_key] = unicode(params[par_key], "utf-8")
            val = tuple([hash_val(0, (k, v)) for k, v in params.items()])
            if len(val) == 0:
                val = (-1,)
            ad_info[key] = val
        elif key == "Title":
            if not isinstance(ad_info[key], unicode):
                ad_info[key] = unicode(ad_info[key], "utf-8")
        else:
            # CategoryID: empty string becomes the sentinel -1.
            if val == "":
                val = -1
            ad_info[key] = int(val)
    return ad_info
# Lazily streamed, cumulatively cached AdsInfo rows (1-based ad ids).
ad_info_list = []
ad_info_iter = read_tsv("data/AdsInfo.tsv")
def get_ad_info(aid):
    """Return the normalized ad row for 1-based id *aid*.

    Streams AdsInfo.tsv forward and caches every row seen so far, so the
    file is read at most once; ids beyond the file raise IndexError.
    """
    while aid - 1 >= len(ad_info_list):
        t, row = next(ad_info_iter, (None, None))
        if row is None:
            break
        ad_info_list.append(trans_ad_info(row))
    return ad_info_list[aid - 1]
# Stream of hashed SearchParams, one buffered row at a time.
se_params_iter = read_tsv("data/search_params.csv", delimiter=",")
se_param_list = [None]
def get_se_param(sid):
    """Return hashed SearchParams for search id *sid*.

    The params file is assumed sorted by SearchID, so the stream is only
    advanced forward; searches without params yield [-1].
    NOTE(review): an exhausted iterator would make se_param None and
    crash on the subscript below -- presumably the file always covers the
    requested ids; verify against the data pipeline.
    """
    while se_param_list[0] is None or se_param_list[0]["SearchID"] < sid:
        t, se_param = next(se_params_iter, (None, None))
        se_param["SearchID"] = int(se_param["SearchID"])
        params = json.loads(se_param["SearchParams"])
        se_param["SearchParams"] = [hash_val(0, (int(k), v)) for (k, v) in params.items()]
        se_param_list[0] = se_param
    params = [-1,] if se_param_list[0]["SearchID"] != sid else se_param_list[0]["SearchParams"]
    return params
# Lazily streamed, cumulatively cached ad prices (1-based ad ids).
ad_price_list = []
ad_price_iter = read_tsv("data/ad_price.tsv", delimiter=" ")
def get_ad_price(aid):
    """Return the price (float, or "" when unknown) for 1-based ad id *aid*,
    streaming ad_price.tsv forward and caching all rows seen so far."""
    while aid - 1 >= len(ad_price_list):
        t, row = next(ad_price_iter, (None, None))
        if row is None:
            break
        price = row["Price"]
        price = float(price) if price else ""
        ad_price_list.append(price)
    return ad_price_list[aid - 1]
def get_features(sinfo, rows, test=False):
    """Assemble the raw feature groups for one search and its ad rows.

    Aligns the per-user count streams (sorted by SearchID) with this
    search, joins per-(user, ad) counts onto the rows in place, and
    returns a dict of feature-group name -> list of source records.
    """
    feature_map = defaultdict(list)
    sid = sinfo["SearchID"]
    sinfo["SearchParams"] = get_se_param(sid)
    # Skip forward in the user-count stream until we hit this search.
    user_cnt_row = next(user_cnt_iter, (None, None))[1]
    while int(user_cnt_row["SearchID"]) != sid:
        user_cnt_row = next(user_cnt_iter, (None, None))[1]
    user_aid_cnt_rows = next(user_aid_cnt_iter, (None, None))[1]
    while int(user_aid_cnt_rows[0]["SearchID"]) != sid:
        user_aid_cnt_rows = next(user_aid_cnt_iter, (None, None))[1]
    user_aid_cnt_dict = {}
    for row in user_aid_cnt_rows:
        aid = int(row["AdID"])
        user_aid_cnt_dict[aid] = row
    ad_infos = []
    for row in rows:
        aid = int(row["AdID"])
        # Merge the per-(user, ad) counters into the stream row itself.
        row.update(user_aid_cnt_dict[aid])
        ad_infos.append(get_ad_info(aid))
    uid = int(sinfo["UserID"])
    # Unknown users get empty-profile placeholders.
    user_info = user_info_map.get(uid, {"UserAgentID": "",
                                        "UserAgentOSID": "",
                                        "UserDeviceID": "",
                                        "UserAgentFamilyID": ""})
    feature_map["user_cnt"] = [user_cnt_row]
    feature_map["user_info"] = [user_info]
    feature_map["ad_info"] = ad_infos
    feature_map["stream_info"] = rows
    feature_map["sinfo"] = [sinfo]
    return feature_map
def extract_slot_feas(rows, sinfo):
    """Derive slot-layout and price features for one search page, in place.

    Per row: counts of highlighted (ObjectType 2) slots above/below,
    price rank and price-to-average ratio.  Per search (into sinfo):
    per-ObjectType counts, record count, and hashed position signatures.
    """
    data = map(lambda x: (int(x["Position"]), int(x["ObjectType"]), x), rows)
    data.sort()
    price_data = []
    ot_cnt = defaultdict(int)
    all_pos = []
    all_ot = []
    for i in range(len(data)):
        all_pos.append(data[i][0])
        all_ot.append(data[i][1])
        aid = int(data[i][2]["AdID"])
        price_data.append((get_ad_price(aid), i))
        i_obt = data[i][1]
        ot_cnt[i_obt] += 1
        # Count highlighted slots below (lcnt) and above (ucnt) this one.
        ucnt, lcnt = 0, 0
        for j in range(len(data)):
            if i == j:
                continue
            j_obt = data[j][1]
            if j_obt == 2:
                if i < j:
                    lcnt += 1
                else:
                    ucnt += 1
        data[i][2]["hl_lcnt"] = lcnt
        data[i][2]["hl_ucnt"] = ucnt
    for k in range(1, 4):
        v = ot_cnt[k]
        sinfo["ot%s_cnt"%k] = v
    sinfo["record_cnt"] = len(rows)
    # Hash the whole page layout into single categorical features.
    sinfo["pos_type"] = hash_val(0, tuple(all_pos))
    sinfo["pos_ot_type"] = hash_val(0, tuple(all_ot))
    price_data.sort()
    avg_price, avg_cnt = 0, 0
    for p, i in price_data:
        if p != "":
            avg_price += p
            avg_cnt += 1
            # Rank by price within the page; unknown prices get -1.
            data[i][2]["price_pos"] = i
        else:
            data[i][2]["price_pos"] = -1
    if avg_cnt == 0 or avg_price <= 0:
        pass
    else:
        avg_price /= avg_cnt
        for p, i in price_data:
            if not p:
                ratio = -1
            elif avg_price <= 0:
                ratio = -2
            else:
                ratio = int(round((p / avg_price) * 100))
            data[i][2]["price_ratio"] = ratio
def stream_info_func(vs, name=False):
    """Yield per-impression features from the stream rows in vs[0];
    with name=True, yield the feature-name list instead."""
    keys = ["AdID",
            "Position",
            "HistCTR",
            "hl_lcnt",
            "hl_ucnt",
            "clk_cnt",
            "show_cnt",
            "t_show_cnt",
            "price_pos",
            "price_ratio",
            ]
    for v in vs[0]:
        if name:
            yield keys
        else:
            x = {}
            for k in keys:
                if k == "HistCTR":
                    # Quantize the historical CTR into integer permille.
                    val = v[k]
                    if val != "":
                        val = int(round(float(val) * 1000))
                elif k in ("pos_show_cnt",):
                    # NOTE(review): "pos_show_cnt" is not in keys, so this
                    # branch looks unreachable -- likely a leftover.
                    val = log_trans(int(v[k]))
                else:
                    val = v[k]
                x[k] = val
            # Smoothed per-(user, ad) CTR from the joined counters.
            x["u_aid_ctr"] = calc_ctr(int(x["clk_cnt"]), int(x["show_cnt"]))
            # x["u_pos_ctr"] = calc_ctr(int(x["pos_clk_cnt"]), int(x["pos_show_cnt"]))
            yield x
def sinfo_func(vs, name=False):
    """Yield search-level features from the single sinfo record in vs[0];
    with name=True, yield the feature-name list instead."""
    keys = [
        "IPID",
        "UserID",
        "IsUserLoggedOn",
        "SearchQuery",
        "SearchParams",
        "ot1_cnt",
        "ot2_cnt",
        "ot3_cnt",
        "record_cnt",
        "pos_type",
        "pos_ot_type",
        "s_LocationID",
        "s_CategoryID",
    ]
    for v in vs[0]:
        if name:
            yield keys
        else:
            x = {}
            for k in keys:
                if k == "SearchQuery":
                    # Hash each query word; empty query becomes [-1].
                    query = unicode(v["SearchQuery"], "utf-8")
                    val = map(lambda x : hash_val(0, x), query.split())
                    if len(val) == 0:
                        val = [-1,]
                else:
                    val = v[k]
                x[k] = val
            # date_str = v["SearchDate"]
            # d = datetime.strptime(date_str, "%Y-%m-%d %H:%M:%S.0")
            # x["hour"] = d.hour
            # x["weekday"] = d.weekday()
            yield x
def user_info_func(vs, name=False):
    """Yield the user-profile record(s) in vs[0] unchanged; with
    name=True, yield each record's field names instead."""
    for record in vs[0]:
        if name:
            yield record.keys()
        else:
            # Shallow-copy so downstream mutation cannot touch the source.
            yield dict(record)
def ad_info_func(vs, name):
    """Yield per-ad features (category, log-bucketed price, hashed params)
    for each ad record in vs[0]; with name=True, yield the field names."""
    wanted = ["CategoryID",
              "Price",
              "Params",
              ]
    for ad in vs[0]:
        if name:
            yield wanted
            continue
        feats = {}
        for field in wanted:
            value = ad[field]
            if field == "Price" and value != "":
                # Compress the heavy-tailed price with a log transform.
                value = int(round(log(value + 1)))
            feats[field] = value
        yield feats
def ngram(query_word):
    """Return the list of adjacent-word bigrams of *query_word*."""
    return [u"%s %s" % pair for pair in zip(query_word, query_word[1:])]
def calc_sim(qw, tw):
    """Overlap statistics of query tokens *qw* against title tokens *tw*.

    Returns match count, earliest match position in tw (10000 when none),
    the matched share of qw in rounded percent, and the matched tokens.
    """
    matched = [w for w in qw if w in tw]
    earliest = 10000
    for w in matched:
        earliest = min(earliest, tw.index(w))
    share = int(round((len(matched) * 1.0 / len(qw)) * 100))
    return {"cnt": len(matched),
            "pos": earliest,
            "ratio": share,
            "match_w": matched}
def query_feas(query_word, ad_info, name=False):
    """Query-vs-title match features for one ad; name=True returns the
    advertised feature names (a subset of what is actually produced)."""
    if name:
        return ["qe_w_cnt", "qe_w_ratio", "qe_w_pos",
                "qe_ng_cnt", "qe_ng_ratio", "qe_ng_min_pos", "t_match"]
    x = {}
    title = ad_info["Title"].split()
    x["title_len"] = len(title)
    # Exact-phrase containment of the whole query in the title.
    if " ".join(query_word) in ad_info["Title"]:
        x["t_match"] = 1
    else:
        x["t_match"] = 0
    # NOTE(review): `title` is rebound from the token list to the raw
    # string here, so calc_sim below matches words by substring and
    # character offset -- looks unintentional; confirm before changing.
    title = ad_info["Title"]
    title_val = map(lambda x : hash_val(0, x), title.split())
    x["title"] = title_val if title_val else [-1,]
    if len(query_word) == 0:
        x["qe_w_cnt"] = -1
        x["qe_w_ratio"] = -1
        x["qe_w_pos"] = -1
    else:
        sim = calc_sim(query_word, title)
        x["qe_w_cnt"] = sim["cnt"]
        x["qe_w_ratio"] = sim["ratio"]
        x["qe_w_pos"] = sim["pos"]
        x["match_w"] = sim["match_w"]
    # Same statistics over word bigrams.
    qw_ngram = ngram(query_word)
    if len(qw_ngram) == 0:
        x["qe_ng_cnt"] = -1
        x["qe_ng_ratio"] = -1
        x["qe_ng_min_pos"] = -1
    else:
        title_ngram = ngram(title)
        sim = calc_sim(qw_ngram, title_ngram)
        x["qe_ng_cnt"] = sim["cnt"]
        x["qe_ng_ratio"] = sim["ratio"]
        x["qe_ng_min_pos"] = sim["pos"]
        x["match_ng"] = sim["match_w"]
    return x
# Type pairs already reported for mismatching param values (debug aid).
unmatch_set = set()
def param_feas(se_params, ad_info, name=False):
    """Count matching / mismatching / missing search-vs-ad parameters.

    NOTE(review): returns a list [-1, -1, -1] for empty params but a dict
    otherwise -- callers that .update() with the result would break on the
    list branch; this helper is currently commented out at its call site.
    """
    if name:
        return ["par_match", "par_nmatch", "par_miss"]
    if len(se_params) == 0:
        return [-1, -1, -1]
    ad_params = ad_info["Params"]
    x = {}
    par_match = 0
    par_miss = 0
    par_nmatch = 0
    for par_key, par_v in se_params.items():
        ad_v = ad_params.get(par_key)
        if par_v == ad_v:
            par_match += 1
        elif ad_v is not None:
            par_nmatch += 1
            # Log each new (type, type) mismatch combination once.
            key = (type(par_v), type(ad_v))
            if key not in unmatch_set:
                unmatch_set.add(key)
                print key
                print par_v.encode("utf-8"), u"----", ad_v.encode("utf-8")
        else:
            par_miss += 1
    x["par_match"] = par_match
    x["par_nmatch"] = par_nmatch
    x["par_miss"] = par_miss
    return x
def match_info_func(vs, name):
    """Yield search/ad agreement features (category match, query-title
    similarity) for each ad in vs[0], using the sinfo record in vs[1]."""
    ad_infos = vs[0]
    sinfo = vs[1][0]
    s_ca_id = int(sinfo.get("CategoryID"))
    # Parent category of the search's category.
    s_ca_pid = category_map[s_ca_id]
    # se_params = sinfo["Params"]
    query = unicode(sinfo["SearchQuery"], "utf-8")
    query = query.split() if query else []
    keys = [
        "ca_match",
        "ca_pid_match"
    ] + query_feas(None, None, True)
    # + param_feas(None, None, True)
    for ad_info in ad_infos:
        if name:
            yield keys
        else:
            x = {}
            """ ca_match """
            # Category match is encoded as the matching id (not a flag),
            # so the model can learn per-category effects; -1 = no match.
            ca_id = int(ad_info.get("CategoryID", -1))
            if ca_id == s_ca_id:
                x["ca_match"] = ca_id
            else:
                x["ca_match"] = -1
            ca_pid = category_map.get(ca_id, -1)
            if ca_pid == s_ca_pid:
                x["ca_pid_match"] = ca_pid
            else:
                x["ca_pid_match"] = -1
            x.update(query_feas(query, ad_info))
            # x.update(param_feas(se_params, ad_info))
            yield x
# Count fields that get log-bucketed before use as features.
log_cnt_keys = set(["t_cnt","bf_cnt","af_cnt"])
def user_cnt_func(vs, name):
    """Yield per-user activity-count features (totals log-bucketed) plus a
    smoothed before-click CTR; name=True yields the field names."""
    keys = ["t_cnt","bf_cnt","af_cnt",
            "bf_3h_cnt","af_3h_cnt",
            "bf_clk_cnt",]
    for v in vs[0]:
        if name:
            yield keys
        else:
            x = {}
            for k in keys:
                # Missing counters are simply omitted from the output.
                if k not in v:
                    continue
                val = v[k]
                if k in log_cnt_keys:
                    val = log_trans(int(val))
                x[k] = val
            x["bf_ctr"] = calc_ctr(int(v["bf_clk_cnt"]), int(v["bf_cnt"]))
            yield x
# NOTE(review): duplicate of the log_cnt_keys assignment above -- harmless
# re-binding to the same value, kept for fidelity.
log_cnt_keys = set(["t_cnt","bf_cnt","af_cnt"])
def new_user_cnt_func(vs, name):
    """Variant of user_cnt_func emitting "new_"-prefixed features and the
    extra "bag" field; currently disabled in extract_func."""
    keys = ["t_cnt","bf_cnt","af_cnt",
            "bf_3h_cnt","af_3h_cnt",
            "bf_clk_cnt", "bag"]
    for v in vs[0]:
        if name:
            yield keys
        else:
            x = {}
            for k in keys:
                if k not in v:
                    continue
                val = v[k]
                if k in log_cnt_keys:
                    val = log_trans(int(val))
                # Prefix to avoid clashing with user_cnt_func's features.
                x["new_" + k] = val
            x["new_bf_ctr"] = calc_ctr(int(v["bf_clk_cnt"]), int(v["bf_cnt"]))
            yield x
# (generator, [feature-group names]) pairs wiring feature groups to the
# extractor functions above.
extract_func = [
    (stream_info_func, ["stream_info"]),
    (sinfo_func, ["sinfo"]),
    (user_info_func, ["user_info"]),
    (ad_info_func, ["ad_info"]),
    # (new_user_cnt_func, ["new_user_cnt"]),
    (user_cnt_func, ["user_cnt"]),
    (match_info_func, ["ad_info", "sinfo"]),
]
def extract(feature_map, name=False):
    """Run every extractor over *feature_map* and merge the results into
    one dict per ad impression; name=True collects feature names."""
    ins_size = 0
    for k, v in feature_map.items():
        ins_size = max(ins_size, len(v))
    instances = [{} for _ in xrange(ins_size)]
    for func, in_keys in extract_func:
        vls = map(lambda k:feature_map[k], in_keys)
        msize = reduce(lambda x, y: max(x, len(y)), vls, 0)
        if msize == 1:
            # Search-level group: broadcast its single record to every ad.
            for x in func(vls, name):
                for ins in instances:
                    ins.update(x)
                break
        else:
            # Per-ad group: one yielded dict per instance, in order.
            for t, x in enumerate(func(vls, name)):
                instances[t].update(x)
    return instances
def hash_val(t, v, dtype=None, D=22):
    """Encode feature value *v* of field index *t*.

    For "xgb"-style output, keep a readable "field:value" token;
    otherwise pack the field index above D bits of the value's hash.
    """
    if dtype is not None and "xgb" in dtype:
        return u"%s:%s" % (t, v)
    return (t << D) | (hash(unicode(v)) & ((1 << D) - 1))
def main():
    """Stream the dataset, extract features and write hashed instances to
    per-split (train/cv/test) output files in args.type format."""
    random.seed(args.seed)
    # Whitelist of features kept when emitting xgboost-format output.
    xgb_set =set([
        "pos_type", "price_pos", "ot1_cnt", "pos_ot_type",
        "bf_cnt", "bf_clk_cnt", "u_aid_ctr", "record_cnt",
        "show_cnt", "clk_cnt", "t_cnt", "qe_w_pos",
        "HistCTR", "qe_ng_min_pos", "t_show_cnt", "bf_ctr",
        "ot2_cnt", "Price", "qe_ng_cnt", "title_len",
        "hl_ucnt", "price_ratio", "hl_lcnt", "t_match",
        "qe_w_ratio", "qe_ng_ratio", "ca_match", "Position",
        "bf_3h_cnt", "qe_w_cnt", "af_cnt", "ot3_cnt",
        "ca_pid_match", "af_3h_cnt",
    ])
    # One output handle per data_type: 0=train, 1=cv, 2=test.
    if args.test:
        fh_list = [ open("data/tr_%s.%s"%(args.test, args.type), "w"),
                    open("data/cv_%s.%s"%(args.test, args.type), "w"),
                    open("data/te_%s.%s"%(args.test, args.type), "w")]
    else:
        fh_list = [open("data/tr.%s"%(args.type), "w"),
                   open("data/cv.%s"%(args.type), "w"),
                   open("data/te.%s"%(args.type), "w")]
    data_iter = data(args.test, maxlines=args.maxl)
    print "sr: %s"%args.sr
    # Per-split [clicks, impressions] for the final CTR report.
    avg_ctr = defaultdict(lambda : [0, 0])
    for line_cnt, (data_type, rows, sinfo) in enumerate(data_iter):
        sinfo["s_LocationID"] = int(sinfo["LocationID"])
        sinfo["s_CategoryID"] = int(sinfo["CategoryID"])
        # Slot features must be computed on the FULL page before sampling.
        extract_slot_feas(rows, sinfo)
        rows = filter(lambda x: filter_row(x, data_type, sr=args.sr), rows)
        if not rows:
            continue
        feature_map = get_features(sinfo, rows, data_type > 0)
        instances = extract(feature_map)
        if line_cnt == 0:
            # First record: dump the schema and persist the feature names.
            for k, feas in feature_map.items():
                print "-" * 80
                print k
                print feas[0].keys()
            feas_name = sorted(instances[0].keys())
            print len(feas_name), feas_name
            if args.sz is not None:
                write_dump("feas_name.dump", feas_name)
            elif args.test:
                write_dump("feas_name%s.dump"%args.test, feas_name)
            else:
                write_dump("feas_name.dump", feas_name)
        fh = fh_list[data_type]
        for ins_map, row in zip(instances, rows):
            y = int(row.get("IsClick", 0))
            avg_ctr[data_type][0] += y
            avg_ctr[data_type][1] += 1
            ins = []
            for kt, k in enumerate(feas_name):
                if "xgb" in args.type and k not in xgb_set:
                    continue
                feas = ins_map[k]
                if line_cnt == 0:
                    print kt, k, type(feas), feas
                # List/tuple features expand to one hashed token per item.
                if isinstance(feas, list) or isinstance(feas, tuple):
                    for f in feas:
                        ins.append(hash_val(kt + 1, f, args.type))
                else:
                    ins.append(hash_val(kt + 1, feas, args.type))
            fh.write(unicode(y) + " " + " ".join(map(unicode, ins)) + "\n")
    for key, value in avg_ctr.items():
        print "%s, %s"%(key, value[0] * 1. / value[1])
    for fh in fh_list:
        fh.close()
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Process some integers.')
    parser.add_argument('--test', type=int, default=0)
    parser.add_argument('--mongo', type=int, default=0)
    parser.add_argument('--sz', type=int, default=None)
    parser.add_argument('--maxl', type=int, default=1e6)
    parser.add_argument('--type', type=str, default="ins")
    parser.add_argument('--sr', type=float, default=0.1)
    parser.add_argument('--seed', type=int, default=9)
    args = parser.parse_args()
    if args.mongo:
        # Optional MongoDB-backed ad lookup replaces the streaming
        # get_ad_info defined above (rebinding the module-level name).
        from pymongo import MongoClient
        import functools32 as functools
        client = MongoClient('localhost', 27017)
        db = client.test
        @functools.lru_cache(maxsize=1000000)
        def get_ad_info(aid):
            ad_info = db.ad_info.find_one({"AdID": aid})
            return trans_ad_info(ad_info)
    # Module-level state consumed by get_features()/main().
    user_info_map = get_user_info()
    category_map = get_category()
    user_cnt_iter = read_tsv("data/user_cnt.csv", delimiter=",")
    user_aid_cnt_iter = next_row(read_tsv("data/user_aid_cnt.csv", delimiter=","))
    main()
| Gzsiceberg/kaggle-avito | ins2/gen_data.py | Python | mit | 19,154 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PolicyAssignment(Model):
    """The policy assignment.

    :param display_name: The display name of the policy assignment.
    :type display_name: str
    :param policy_definition_id: The ID of the policy definition.
    :type policy_definition_id: str
    :param scope: The scope for the policy assignment.
    :type scope: str
    :param id: The ID of the policy assignment.
    :type id: str
    :param type: The type of the policy assignment.
    :type type: str
    :param name: The name of the policy assignment.
    :type name: str
    """

    # msrest serialization map: attribute -> wire key/type.
    _attribute_map = {
        'display_name': {'key': 'properties.displayName', 'type': 'str'},
        'policy_definition_id': {'key': 'properties.policyDefinitionId', 'type': 'str'},
        'scope': {'key': 'properties.scope', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
    }

    def __init__(self, display_name=None, policy_definition_id=None, scope=None, id=None, type=None, name=None):
        # Plain field capture; msrest handles (de)serialization elsewhere.
        fields = (('display_name', display_name),
                  ('policy_definition_id', policy_definition_id),
                  ('scope', scope),
                  ('id', id),
                  ('type', type),
                  ('name', name))
        for attr, value in fields:
            setattr(self, attr, value)
| SUSE/azure-sdk-for-python | azure-mgmt-resource/azure/mgmt/resource/policy/v2016_04_01/models/policy_assignment.py | Python | mit | 1,773 |
#!/usr/bin/env python
# 2015 Copyright (C) White
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from white.orm import Backend
from white.model import Pair
from flask.json import dumps
import re
class StorageService(object):
    """Domain service for site-wide settings stored as a JSON 'Pair' row."""

    def __init__(self):
        self.pair_repo = Backend('storage')

    def site_meta(self):
        """Return the Pair row holding the serialized site configuration."""
        return self.pair_repo.find('system')

    def update_site_meta(self, sitename, description, site_page,
                         posts_per_page, auto_published_comments, comment_moderation_keys):
        """Merge the given settings into the stored site configuration.

        Blank/None text fields and negative page numbers leave the stored
        value untouched.  Returns True on success, False on any error.
        """
        meta = self.site_meta()
        config = meta.json_value()
        try:
            # BUGFIX: was `sitename or sitename.strip()`, which never
            # stripped non-empty input and raised AttributeError on None
            # (silently turned into a False return by the except below).
            sitename = sitename and sitename.strip()
            if sitename:
                config['sitename'] = sitename
            description = description and description.strip()
            if description:
                config['description'] = description
            site_page = int(site_page)
            if site_page >= 0:
                config['site_page'] = site_page
            posts_per_page = int(posts_per_page)
            if posts_per_page:
                config['posts_per_page'] = posts_per_page
            config['auto_published_comments'] = bool(auto_published_comments)
            if comment_moderation_keys is not None:
                # Split on runs of spaces and drop empty tokens.
                keys = [key.strip() for key in re.split(' +', comment_moderation_keys) if key.strip()]
                config['comment_moderation_keys'] = keys
            meta.value = dumps(config)
            self.pair_repo.update(meta)
            return True
        except Exception:
            # Bad input (e.g. non-numeric page sizes) aborts the update.
            return False
| 7anshuai/white | white/domain/storage.py | Python | gpl-2.0 | 2,170 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
gateway tests - Testing the gateway.getObject() and searchObjects() methods
Copyright 2013 Glencoe Software, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
pytest fixtures used as defined in conftest.py:
- gatewaywrapper
- author_testimg_generated
- author_testimg_tiny
"""
import omero
import omero_ext.uuid as uuid
import time
class TestDeleteObject (object):
    """Tests for BlitzGateway.deleteObjects() cascade behaviour."""
    def testDeleteAnnotation(self, author_testimg_generated):
        """A tag linked to an image can be deleted by annotation id."""
        image = author_testimg_generated
        gateway = image._conn
        # create Tag on Image and try to delete Tag
        tag = omero.gateway.TagAnnotationWrapper(gateway)
        ns_tag = "omero.gateway.test.get_objects.test_delete_annotation_tag"
        tag.setNs(ns_tag)
        tag.setValue("Test Delete Tag")
        tag = image.linkAnnotation(tag)
        tagId = tag.getId()
        handle = gateway.deleteObjects("Annotation", [tagId])
        # deleteObjects is asynchronous; block until the command finishes.
        gateway._waitOnCmd(handle)
        assert None == gateway.getObject("Annotation", tagId)
    def testDeleteImage(self, gatewaywrapper, author_testimg_generated):
        """Deleting an image removes its comment but orphans its tag;
        deleting the project with deleteAnns/deleteChildren removes all."""
        image = author_testimg_generated
        imageId = image.getId()
        project = gatewaywrapper.getTestProject()
        projectId = project.getId()
        ns = "omero.gateway.test.get_objects.test_delete_image_comment"
        ns_tag = "omero.gateway.test.get_objects.test_delete_image_tag"
        # create Comment
        ann = omero.gateway.CommentAnnotationWrapper(gatewaywrapper.gateway)
        ann.setNs(ns)
        ann.setValue("Test Comment")
        ann = image.linkAnnotation(ann)
        # create Tag
        tag = omero.gateway.TagAnnotationWrapper(gatewaywrapper.gateway)
        tag.setNs(ns_tag)
        tag.setValue("Test Tag")
        tag = image.linkAnnotation(tag)
        # check the Comment
        assert gatewaywrapper.gateway.getObject("Annotation", ann.id) != None
        assert gatewaywrapper.gateway.getObject("Annotation", tag.id) != None
        # check Image, delete (wait) and check
        assert gatewaywrapper.gateway.getObject("Image", imageId) != None
        handle = gatewaywrapper.gateway.deleteObjects("Image", [imageId])
        gatewaywrapper.gateway._waitOnCmd(handle)
        assert gatewaywrapper.gateway.getObject("Image", imageId) == None
        # Comment should be deleted but not the Tag (becomes orphan)
        assert gatewaywrapper.gateway.getObject("Annotation", ann.id) == None
        assert gatewaywrapper.gateway.getObject("Annotation", tag.id) != None
        # Add the tag to project and delete (with Tags)
        assert gatewaywrapper.gateway.getObject("Project", projectId) != None
        project.linkAnnotation(tag)
        datasetIds = [d.getId() for d in project.listChildren()]
        assert len(datasetIds) > 0
        handle = gatewaywrapper.gateway.deleteObjects("Project", [projectId], deleteAnns=True, deleteChildren=True)
        gatewaywrapper.gateway._waitOnCmd(handle)
        assert gatewaywrapper.gateway.getObject("Project", projectId) == None
        assert gatewaywrapper.gateway.getObject("Annotation", tag.id) is None # Tag should be gone
        # check datasets gone too
        for dId in datasetIds:
            assert gatewaywrapper.gateway.getObject("Dataset", dId) is None
class TestFindObject (object):
    """Tests for getObject(s) lookups filtered by attribute dicts."""
    def testFindProject(self, gatewaywrapper):
        """Projects can be found by exact name via the attributes filter."""
        gatewaywrapper.loginAsAuthor()
        project = gatewaywrapper.getTestProject()
        pName = project.getName()
        findProjects = list (gatewaywrapper.gateway.getObjects("Project", None, attributes={"name":pName}) )
        assert len(findProjects) > 0, "Did not find Project by name"
        for p in findProjects:
            assert p.getName() == pName, "All projects should have queried name"
    def testFindExperimenter(self, gatewaywrapper, author_testimg_tiny):
        """Experimenters and groups can be found by omeName / group name."""
        omeName = author_testimg_tiny.getOwnerOmeName()
        group = author_testimg_tiny.getDetails().getGroup()
        groupName = group.getName()
        # Admin context so lookups are not limited to the author's group.
        gatewaywrapper.loginAsAdmin()
        # findObjects
        findAuthor = list (gatewaywrapper.gateway.getObjects("Experimenter", None, attributes={"omeName":omeName}) )
        assert len(findAuthor) == 1, "Did not find Experimenter by omeName"
        assert findAuthor[0].omeName == omeName
        # findObject
        author = gatewaywrapper.gateway.getObject("Experimenter", None, attributes={"omeName":omeName})
        assert author != None
        assert author.omeName == omeName
        # find group
        grp = gatewaywrapper.gateway.getObject("ExperimenterGroup", None, attributes={"name":groupName})
        assert grp != None
        assert grp.getName() == groupName
    def testFindAnnotation(self, gatewaywrapper):
        """Annotations of every type can be created, listed and found by
        value / namespace, then deleted again."""
        gatewaywrapper.loginAsAuthor()
        # start by deleting any tag created by this method that may have been left behind
        tag_value = "FindThisTag"
        find_ns = "omero.gateway.test.test_find_annotations"
        find_tag = gatewaywrapper.gateway.getObjects("Annotation", attributes={"textValue":tag_value,
                                                                               "ns":find_ns})
        for t in find_tag:
            gatewaywrapper.gateway.deleteObjectDirect(t._obj)
        # create Tag
        tag = omero.gateway.TagAnnotationWrapper(gatewaywrapper.gateway)
        tag.setNs(find_ns)
        tag.setValue(tag_value)
        tag.save()
        tagId = tag.getId()
        # findObject by name
        find_tag = gatewaywrapper.gateway.getObject("Annotation", attributes={"textValue":tag_value})
        assert find_tag != None
        assert find_tag.getValue() == tag_value
        # find by namespace
        find_tag = gatewaywrapper.gateway.getObject("Annotation", attributes={"ns":find_ns})
        assert find_tag != None
        assert find_tag.getNs() == find_ns
        # find by text value
        find_tag = gatewaywrapper.gateway.getObject("TagAnnotation", attributes={"textValue":tag_value})
        assert find_tag != None
        assert find_tag.getValue() == tag_value
        # create some other annotations... (not linked!)
        longAnn = omero.gateway.LongAnnotationWrapper(gatewaywrapper.gateway)
        longAnn.setValue(12345)
        longAnn.save()
        longId = longAnn.getId()
        boolAnn = omero.gateway.BooleanAnnotationWrapper(gatewaywrapper.gateway)
        boolAnn.setValue(True)
        boolAnn.save()
        boolId = boolAnn.getId()
        commAnn = omero.gateway.CommentAnnotationWrapper(gatewaywrapper.gateway)
        commAnn.setValue("This is a blitz gatewaytest Comment.")
        commAnn.save()
        commId = commAnn.getId()
        fileAnn = omero.gateway.FileAnnotationWrapper(gatewaywrapper.gateway)
        # An original file object needs to be linked to the annotation or it will
        # fail to be loaded on getObject(s).
        fileObj = omero.model.OriginalFileI(False)
        fileObj = omero.gateway.OriginalFileWrapper(gatewaywrapper.gateway, fileObj)
        fileObj.setName(omero.rtypes.rstring('a'))
        fileObj.setPath(omero.rtypes.rstring('a'))
        fileObj.setHash(omero.rtypes.rstring('a'))
        fileObj.setSize(omero.rtypes.rlong(0))
        fileObj.save()
        fileAnn.setFile(fileObj)
        fileAnn.save()
        fileId = fileAnn.getId()
        doubleAnn = omero.gateway.DoubleAnnotationWrapper(gatewaywrapper.gateway)
        doubleAnn.setValue(1.23456)
        doubleAnn.save()
        doubleId = doubleAnn.getId()
        termAnn = omero.gateway.TermAnnotationWrapper(gatewaywrapper.gateway)
        termAnn.setValue("Metaphase")
        termAnn.save()
        termId = termAnn.getId()
        timeAnn = omero.gateway.TimestampAnnotationWrapper(gatewaywrapper.gateway)
        timeAnn.setValue(1000)
        timeAnn.save()
        timeId = timeAnn.getId()
        # list annotations of various types - check they include ones from above
        tags = list( gatewaywrapper.gateway.getObjects("TagAnnotation") )
        for t in tags:
            assert t.OMERO_TYPE == tag.OMERO_TYPE
        assert tagId in [t.getId() for t in tags]
        longs = list( gatewaywrapper.gateway.getObjects("LongAnnotation") )
        for l in longs:
            assert l.OMERO_TYPE == longAnn.OMERO_TYPE
        assert longId in [l.getId() for l in longs]
        bools = list( gatewaywrapper.gateway.getObjects("BooleanAnnotation") )
        for b in bools:
            assert b.OMERO_TYPE == boolAnn.OMERO_TYPE
        assert boolId in [b.getId() for b in bools]
        comms = list( gatewaywrapper.gateway.getObjects("CommentAnnotation") )
        for c in comms:
            assert c.OMERO_TYPE == commAnn.OMERO_TYPE
        assert commId in [c.getId() for c in comms]
        files = list( gatewaywrapper.gateway.getObjects("FileAnnotation") )
        for f in files:
            assert f.OMERO_TYPE == fileAnn.OMERO_TYPE
        assert fileId in [f.getId() for f in files]
        doubles = list( gatewaywrapper.gateway.getObjects("DoubleAnnotation") )
        for d in doubles:
            assert d.OMERO_TYPE == doubleAnn.OMERO_TYPE
        assert doubleId in [d.getId() for d in doubles]
        terms = list( gatewaywrapper.gateway.getObjects("TermAnnotation") )
        for t in terms:
            assert t.OMERO_TYPE == termAnn.OMERO_TYPE
        assert termId in [t.getId() for t in terms]
        times = list( gatewaywrapper.gateway.getObjects("TimestampAnnotation") )
        for t in times:
            assert t.OMERO_TYPE == timeAnn.OMERO_TYPE
        assert timeId in [t.getId() for t in times]
        # delete what we created
        gatewaywrapper.gateway.deleteObjectDirect(longAnn._obj) # direct delete
        assert gatewaywrapper.gateway.getObject("Annotation", longId) == None
        gatewaywrapper.gateway.deleteObjectDirect(boolAnn._obj)
        assert gatewaywrapper.gateway.getObject("Annotation", boolId) == None
        gatewaywrapper.gateway.deleteObjectDirect(fileAnn._obj)
        assert gatewaywrapper.gateway.getObject("Annotation", fileId) == None
        gatewaywrapper.gateway.deleteObjectDirect(commAnn._obj)
        assert gatewaywrapper.gateway.getObject("Annotation", commId) == None
        gatewaywrapper.gateway.deleteObjectDirect(tag._obj)
        assert gatewaywrapper.gateway.getObject("Annotation", tagId) == None
class TestGetObject (object):
    """Integration tests for BlitzGateway getObject/getObjects and related
    query helpers (search, listing, annotation links, orphans).

    All tests run against a live OMERO server via the ``gatewaywrapper``
    fixture; ``author_testimg_tiny`` provides a small pre-imported image.
    """

    def testSearchObjects(self, gatewaywrapper):
        """searchObjects() should honour the requested types and default to P/D/I."""
        gatewaywrapper.loginAsAuthor()
        # search for Projects
        pros = list( gatewaywrapper.gateway.searchObjects(["Project"], "weblitz") )
        for p in pros:
            #assert p.getId() in projectIds
            assert p.OMERO_CLASS == "Project", "Should only return Projects"
        # P/D/I is default objects to search
        # pdis = list( gatewaywrapper.gateway.simpleSearch("weblitz") ) # method removed from blitz gateway
        #pdis.sort(key=lambda r: "%s%s"%(r.OMERO_CLASS, r.getId()) )
        pdiResult = list( gatewaywrapper.gateway.searchObjects(None, "weblitz") )
        pdiResult.sort(key=lambda r: "%s%s"%(r.OMERO_CLASS, r.getId()) )
        # can directly check that sorted lists are the same
        #for r1, r2 in zip(pdis, pdiResult):
        #    assert r1.OMERO_CLASS == r2.OMERO_CLASS
        #    assert r1.getId() == r2.getId()

    def testListProjects(self, gatewaywrapper):
        """getObjects("Project") filtered by owner should match listProjects()."""
        gatewaywrapper.loginAsAuthor()
        # params limit query by owner
        params = omero.sys.Parameters()
        params.theFilter = omero.sys.Filter()
        # should be no Projects owned by root (in the current group)
        params.theFilter.ownerId = omero.rtypes.rlong(0) # owned by 'root'
        pros = gatewaywrapper.gateway.getObjects("Project", None, params)
        assert len(list(pros)) == 0, "Should be no Projects owned by root"
        # filter by current user should get same as above.
        params.theFilter.ownerId = omero.rtypes.rlong(gatewaywrapper.gateway.getEventContext().userId) # owned by 'author'
        pros = list( gatewaywrapper.gateway.getObjects("Project", None, params) )
        projects = list( gatewaywrapper.gateway.listProjects() )
        assert len(pros) == len(projects) # check unordered lists are the same length & ids
        projectIds = [p.getId() for p in projects]
        for p in pros:
            assert p.getId() in projectIds

    def testListExperimentersAndGroups(self, gatewaywrapper):
        """Experimenters and groups come back with their group/experimenter maps loaded."""
        gatewaywrapper.loginAsAuthor()
        # experimenters
        # experimenters = list( gatewaywrapper.gateway.listExperimenters() ) # removed from blitz gateway
        exps = list( gatewaywrapper.gateway.getObjects("Experimenter") ) # all experimenters
        #self.assertEqual(len(exps), len(experimenters)) # check unordered lists are the same length & ids
        #eIds = [e.getId() for e in experimenters]
        for e in exps:
            #assert e.getId() in eIds
            for groupExpMap in e.copyGroupExperimenterMap(): # check iQuery has loaded groups
                assert e.id == groupExpMap.child.id.val
        # returns all experimenters except current user - now moved to webclient_gateway
        #allBarOne = list( gatewaywrapper.gateway.getExperimenters() )
        #assert len(allBarOne)+1 == len(exps)
        #for e in allBarOne:
        #    assert e.getId() in eIds
        # groups
        #groups = list( gatewaywrapper.gateway.listGroups() ) # now removed from blitz gateway.
        gps = list( gatewaywrapper.gateway.getObjects("ExperimenterGroup") )
        for grp in gps:
            # only checks the call succeeds; the map itself is not asserted on
            grpExpMap = grp.copyGroupExperimenterMap()
        #self.assertEqual(len(gps), len(groups)) # check unordered lists are the same length & ids
        #gIds = [g.getId() for g in gps]
        #for g in groups:
        #    assert g.getId() in gIds
        # uses gateway.getObjects("ExperimenterGroup") - check this doesn't throw
        colleagues = gatewaywrapper.gateway.listColleagues()
        for e in colleagues:
            cName = e.getOmeName()
        # check we can find some groups
        exp = gatewaywrapper.gateway.getObject("Experimenter", attributes={'omeName': gatewaywrapper.USER.name})
        for groupExpMap in exp.copyGroupExperimenterMap():
            gName = groupExpMap.parent.name.val
            gId = groupExpMap.parent.id.val
            findG = gatewaywrapper.gateway.getObject("ExperimenterGroup", attributes={'name': gName})
            assert gId == findG.id, "Check we found the same group"

    def testGetExperimenter(self, gatewaywrapper):
        """Lookup by omeName and by id must agree, with group maps loaded."""
        gatewaywrapper.loginAsAuthor()
        noExp = gatewaywrapper.gateway.getObject("Experimenter", attributes={'omeName': "Dummy Fake Name"})
        assert noExp == None, "Should not find any matching experimenter"
        findExp = gatewaywrapper.gateway.getObject("Experimenter", attributes={'omeName': gatewaywrapper.USER.name})
        exp = gatewaywrapper.gateway.getObject("Experimenter", findExp.id) # uses iQuery
        assert exp.omeName == findExp.omeName
        # check groupExperimenterMap loaded for exp
        groupIds = []
        for groupExpMap in exp.copyGroupExperimenterMap():
            assert findExp.id == groupExpMap.child.id.val
            groupIds.append(groupExpMap.parent.id.val)
        #for groupExpMap in experimenter.copyGroupExperimenterMap():
        #    assert findExp.id == groupExpMap.child.id.val
        groupGen = gatewaywrapper.gateway.getObjects("ExperimenterGroup", groupIds)
        #gGen = gatewaywrapper.gateway.getExperimenterGroups(groupIds) # removed from blitz gateway
        groups = list(groupGen)
        #gs = list(gGen)
        assert len(groups) == len(groupIds)
        for g in groups:
            assert g.getId() in groupIds
            for m in g.copyGroupExperimenterMap(): # check exps are loaded
                ex = m.child

    def testGetAnnotations(self, gatewaywrapper, author_testimg_tiny):
        """Create a Comment and Tag, exercise annotation retrieval and links, then clean up."""
        obj = author_testimg_tiny
        dataset = gatewaywrapper.getTestDataset()
        ns = "omero.gateway.test.get_objects.test_get_annotations_comment"
        ns_tag = "omero.gateway.test.get_objects.test_get_annotations_tag"
        # create Comment
        ann = omero.gateway.CommentAnnotationWrapper(gatewaywrapper.gateway)
        ann.setNs(ns)
        ann.setValue("Test Comment")
        ann = obj.linkAnnotation(ann)
        # create Tag
        tag = omero.gateway.TagAnnotationWrapper(gatewaywrapper.gateway)
        tag.setNs(ns_tag)
        tag.setValue("Test Tag")
        tag = obj.linkAnnotation(tag)
        dataset.linkAnnotation(tag)
        # get the Comment
        annotation = gatewaywrapper.gateway.getObject("CommentAnnotation", ann.id)
        assert "Test Comment" == annotation.textValue
        assert ann.OMERO_TYPE == annotation.OMERO_TYPE
        # test getObject throws exception if more than 1 returned
        threw = True
        try:
            gatewaywrapper.gateway.getObject("Annotation")
            threw = False
        except:
            threw = True
        assert threw, "getObject() didn't throw exception with >1 result"
        # get the Comment and Tag
        annGen = gatewaywrapper.gateway.getObjects("Annotation", [ann.id, tag.id])
        anns = list(annGen)
        assert len(anns) == 2
        assert anns[0].ns in [ns, ns_tag]
        assert anns[1].ns in [ns, ns_tag]
        assert anns[0].OMERO_TYPE != anns[1].OMERO_TYPE
        # get all available annotation links on the image
        annLinks = gatewaywrapper.gateway.getAnnotationLinks("Image")
        for al in annLinks:
            assert isinstance(al.getAnnotation(), omero.gateway.AnnotationWrapper)
            assert al.parent.__class__ == omero.model.ImageI
        # get selected links - On image only
        annLinks = gatewaywrapper.gateway.getAnnotationLinks("Image", parent_ids=[obj.getId()])
        for al in annLinks:
            assert obj.getId() == al.parent.id.val
            assert al.parent.__class__ == omero.model.ImageI
        # get selected links - On image only
        # NOTE(review): this stanza duplicates the previous one verbatim
        annLinks = gatewaywrapper.gateway.getAnnotationLinks("Image", parent_ids=[obj.getId()])
        for al in annLinks:
            assert obj.getId() == al.parent.id.val
            assert al.parent.__class__ == omero.model.ImageI
        # compare with getObjectsByAnnotations
        annImages = list( gatewaywrapper.gateway.getObjectsByAnnotations('Image', [tag.getId()]) )
        assert obj.getId() in [i.getId() for i in annImages]
        # params limit query by owner
        params = omero.sys.Parameters()
        params.theFilter = omero.sys.Filter()
        # should be no links owned by root (in the current group)
        params.theFilter.ownerId = omero.rtypes.rlong(0) # owned by 'root'
        annLinks = gatewaywrapper.gateway.getAnnotationLinks("Image", parent_ids=[obj.getId()], params=params)
        assert len( list(annLinks)) == 0, "No annotations on this image by root"
        # links owned by author
        eid = gatewaywrapper.gateway.getEventContext().userId
        params.theFilter.ownerId = omero.rtypes.rlong(eid) # owned by 'author'
        omeName = gatewaywrapper.gateway.getObject("Experimenter", eid).getName()
        annLinks = gatewaywrapper.gateway.getAnnotationLinks("Image", parent_ids=[obj.getId()], params=params)
        for al in annLinks:
            assert al.getOwnerOmeName() == omeName
        # all links on Image with specific ns
        annLinks = gatewaywrapper.gateway.getAnnotationLinks("Image", ns=ns)
        for al in annLinks:
            assert al.getAnnotation().ns == ns
        # get all uses of the Tag - have to check various types separately
        annList = list( gatewaywrapper.gateway.getAnnotationLinks("Image", ann_ids=[tag.id]) )
        assert len(annList) == 1
        for al in annList:
            assert al.getAnnotation().id == tag.id
        annList = list( gatewaywrapper.gateway.getAnnotationLinks("Dataset", ann_ids=[tag.id]) )
        assert len(annList) == 1
        for al in annList:
            assert al.getAnnotation().id == tag.id
        # remove annotations
        obj.removeAnnotations(ns)
        dataset.unlinkAnnotations(ns_tag) # unlink tag
        obj.removeAnnotations(ns_tag) # delete tag

    def testGetImage (self, gatewaywrapper, author_testimg_tiny):
        """Re-fetched image must agree with the fixture image on size/rendering defaults."""
        testImage = author_testimg_tiny
        # This should return image wrapper
        image = gatewaywrapper.gateway.getObject("Image", testImage.id)
        pr = image.getProject()
        ds = image.getDataset()
        # test a few methods that involve lazy loading, rendering etc.
        assert image.getSizeZ() == testImage.getSizeZ()
        assert image.getSizeY() == testImage.getSizeY()
        image.isGreyscaleRenderingModel() # loads rendering engine
        testImage.isGreyscaleRenderingModel()
        assert image._re.getDefaultZ() == testImage._re.getDefaultZ()
        assert image._re.getDefaultT() == testImage._re.getDefaultT()
        # NOTE(review): compares bound methods, not their results — likely
        # meant getOwnerOmeName() on both sides; confirm intent
        assert image.getOwnerOmeName == testImage.getOwnerOmeName

    def testGetProject (self, gatewaywrapper):
        """Re-fetched project must agree with the test project fixture."""
        gatewaywrapper.loginAsAuthor()
        testProj = gatewaywrapper.getTestProject()
        p = gatewaywrapper.gateway.getObject("Project", testProj.getId())
        assert testProj.getName() == p.getName()
        assert testProj.getDescription() == p.getDescription()
        assert testProj.getId() == p.getId()
        assert testProj.OMERO_CLASS == p.OMERO_CLASS
        assert testProj.countChildren_cached() == p.countChildren_cached()
        # NOTE(review): compares bound methods, not their results — likely
        # meant getOwnerOmeName() on both sides; confirm intent
        assert testProj.getOwnerOmeName == p.getOwnerOmeName

    def testTraversal (self, author_testimg_tiny):
        """Image -> Dataset -> Project parent traversal is consistent."""
        image = author_testimg_tiny
        # This should return image wrapper
        pr = image.getProject()
        ds = image.getDataset()
        assert ds == image.getParent()
        assert image.listParents()[0] == image.getParent()
        assert ds == image.getParent(withlinks=True)[0]
        assert image.getParent(withlinks=True) == image.listParents(withlinks=True)[0]
        assert ds.getParent() == pr
        assert pr.getParent() == None
        assert len(pr.listParents()) == 0

    def testListOrphans(self, gatewaywrapper):
        """listOrphans() finds created orphan images and honours paging; cleans up after."""
        gatewaywrapper.loginAsUser()
        eid = gatewaywrapper.gateway.getUserId()
        imageList = list()
        for i in range(0,5):
            imageList.append(gatewaywrapper.createTestImage(imageName=(str(uuid.uuid1()))).getName())
        findImages = list(gatewaywrapper.gateway.listOrphans("Image"))
        assert len(findImages) == 5, "Did not find orphaned images"
        for p in findImages:
            assert p.getName() in imageList, "All images should have queried name"
        params = omero.sys.ParametersI()
        params.page(1, 3)
        findImagesInPage = list(gatewaywrapper.gateway.listOrphans("Image", eid=eid, params=params))
        assert len(findImagesInPage) == 3, "Did not find orphaned images in page"
        # delete what we created
        for p in findImages:
            client = p._conn
            handle = client.deleteObjects('Image', [p.getId()], deleteAnns=True)
            try:
                client._waitOnCmd(handle)
            finally:
                handle.close()
#!/usr/bin/env python
"""
Copyright (c) 2006-2015 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import bdb
import inspect
import logging
import os
import re
import shutil
import sys
import time
import traceback
import warnings
warnings.filterwarnings(action="ignore", message=".*was already imported", category=UserWarning)
warnings.filterwarnings(action="ignore", category=DeprecationWarning)
from lib.utils import versioncheck # this has to be the first non-standard import
from lib.controller.controller import start
from lib.core.common import banner
from lib.core.common import createGithubIssue
from lib.core.common import dataToStdout
from lib.core.common import getSafeExString
from lib.core.common import getUnicode
from lib.core.common import maskSensitiveData
from lib.core.common import setPaths
from lib.core.common import weAreFrozen
from lib.core.data import cmdLineOptions
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.data import paths
from lib.core.common import unhandledExceptionMessage
from lib.core.exception import SqlmapBaseException
from lib.core.exception import SqlmapShellQuitException
from lib.core.exception import SqlmapSilentQuitException
from lib.core.exception import SqlmapUserQuitException
from lib.core.option import initOptions
from lib.core.option import init
from lib.core.profiling import profile
from lib.core.settings import LEGAL_DISCLAIMER
from lib.core.testing import smokeTest
from lib.core.testing import liveTest
from lib.parse.cmdline import cmdLineParser
from lib.utils.api import setRestAPILog
from lib.utils.api import StdDbOut
def modulePath():
    """
    Return the program's directory, working also when frozen with py2exe.
    """
    try:
        path_source = sys.executable if weAreFrozen() else __file__
    except NameError:
        # __file__ can be undefined in some exec/embedded contexts
        path_source = inspect.getsourcefile(modulePath)
    directory = os.path.dirname(os.path.realpath(path_source))
    return getUnicode(directory, encoding=sys.getfilesystemencoding())
def main():
    """
    Main function of sqlmap when running from command line.

    Sets up paths, parses options, runs the selected mode (profile,
    smoke/live test, or a normal attack run), and translates the various
    quit/error exceptions into exit behaviour.  Cleanup always happens in
    the ``finally`` block.
    """
    try:
        paths.SQLMAP_ROOT_PATH = modulePath()
        # Probe the root path to surface filesystem-encoding problems early
        try:
            os.path.isdir(paths.SQLMAP_ROOT_PATH)
        except UnicodeEncodeError:
            errMsg = "your system does not properly handle non-ASCII paths. "
            errMsg += "Please move the sqlmap's directory to the other location"
            logger.error(errMsg)
            exit()
        setPaths()
        # Store original command line options for possible later restoration
        cmdLineOptions.update(cmdLineParser().__dict__)
        initOptions(cmdLineOptions)
        if hasattr(conf, "api"):
            # Overwrite system standard output and standard error to write
            # to an IPC database
            sys.stdout = StdDbOut(conf.taskid, messagetype="stdout")
            sys.stderr = StdDbOut(conf.taskid, messagetype="stderr")
            setRestAPILog()
        conf.showTime = True
        dataToStdout("[!] legal disclaimer: %s\n\n" % LEGAL_DISCLAIMER, forceOutput=True)
        dataToStdout("[*] starting at %s\n\n" % time.strftime("%X"), forceOutput=True)
        init()
        # Dispatch on the requested run mode
        if conf.profile:
            profile()
        elif conf.smokeTest:
            smokeTest()
        elif conf.liveTest:
            liveTest()
        else:
            start()
    except SqlmapUserQuitException:
        errMsg = "user quit"
        logger.error(errMsg)
    except (SqlmapSilentQuitException, bdb.BdbQuit):
        pass
    except SqlmapShellQuitException:
        cmdLineOptions.sqlmapShell = False
    except SqlmapBaseException as ex:
        errMsg = getSafeExString(ex)
        logger.critical(errMsg)
        sys.exit(1)
    except KeyboardInterrupt:
        print
        errMsg = "user aborted"
        logger.error(errMsg)
    except EOFError:
        print
        errMsg = "exit"
        logger.error(errMsg)
    except SystemExit:
        pass
    except:
        # Unhandled exception: scrub the traceback and offer a GitHub issue
        print
        errMsg = unhandledExceptionMessage()
        excMsg = traceback.format_exc()
        # Relativize file paths inside the traceback before reporting
        for match in re.finditer(r'File "(.+?)", line', excMsg):
            file_ = match.group(1)
            file_ = os.path.relpath(file_, os.path.dirname(__file__))
            file_ = file_.replace("\\", '/')
            file_ = re.sub(r"\.\./", '/', file_).lstrip('/')
            excMsg = excMsg.replace(match.group(1), file_)
        errMsg = maskSensitiveData(errMsg)
        excMsg = maskSensitiveData(excMsg)
        logger.critical(errMsg)
        kb.stickyLevel = logging.CRITICAL
        dataToStdout(excMsg)
        createGithubIssue(errMsg, excMsg)
    finally:
        if conf.get("showTime"):
            dataToStdout("\n[*] shutting down at %s\n\n" % time.strftime("%X"), forceOutput=True)
        if kb.get("tempDir"):
            shutil.rmtree(kb.tempDir, ignore_errors=True)
        # Signal worker threads to stop
        kb.threadContinue = False
        kb.threadException = True
        if conf.get("hashDB"):
            try:
                conf.hashDB.flush(True)
            except KeyboardInterrupt:
                pass
        if cmdLineOptions.get("sqlmapShell"):
            # Interactive shell: reset state and re-enter main()
            cmdLineOptions.clear()
            conf.clear()
            kb.clear()
            main()
        if hasattr(conf, "api"):
            try:
                conf.database_cursor.disconnect()
            except KeyboardInterrupt:
                pass
        if conf.get("dumper"):
            conf.dumper.flush()
        # Reference: http://stackoverflow.com/questions/1635080/terminate-a-multi-thread-python-program
        if conf.get("threads", 0) > 1 or conf.get("dnsServer"):
            os._exit(0)
if __name__ == "__main__":
main()
| krintoxi/NoobSec-Toolkit | NoobSecToolkit /tools/sqli/sqlmap.py | Python | gpl-2.0 | 5,756 |
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from librehatti.catalog.models import PurchaseOrder
from librehatti.catalog.models import Category
from librehatti.catalog.models import PurchasedItem
from librehatti.catalog.models import ModeOfPayment
from librehatti.catalog.models import Product
from librehatti.catalog.models import HeaderFooter
from librehatti.catalog.models import Surcharge
from librehatti.catalog.request_change import request_notify
from librehatti.bills.models import QuotedTaxesApplied
from librehatti.bills.models import QuotedOrder
from librehatti.bills.models import QuotedBill
from librehatti.bills.models import QuotedItem
from librehatti.bills.models import QuotedOrderofSession
from librehatti.bills.models import QuotedOrderNote
from librehatti.bills.models import NoteLine
from librehatti.bills.forms import SelectNoteForm
from librehatti.bills.forms import ItemSelectForm
from librehatti.suspense.models import QuotedSuspenseOrder
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
import useraccounts
from django.db.models import Sum
from django.db.models import Max
import simplejson
from django.core.urlresolvers import reverse
from librehatti.voucher.models import FinancialSession
"""
This view calculate taxes on quoted order, bill data
and save those values in database.
"""
@login_required
def quoted_bill_cal(request):
    """Compute taxes and totals for the quoted order held in the session and
    persist a QuotedBill, then redirect to the note-selection step.

    Reads 'quoted_order_id' (and passes 'old_post' through) from the session.
    """
    old_post = request.session.get('old_post')
    quoted_order_id = request.session.get('quoted_order_id')
    quoted_order = QuotedOrder.objects.get(id=quoted_order_id)
    quoted_order_obj = QuotedOrder.objects.values('total_discount').\
        get(id=quoted_order_id)
    # Sum of all item prices on this quoted order
    quoted_item = QuotedItem.objects.\
        filter(quoted_order=quoted_order_id).aggregate(Sum('price'))
    total = quoted_item['price__sum']
    price_total = total - quoted_order_obj['total_discount']
    totalplusdelivery = price_total
    surcharge = Surcharge.objects.values('id', 'value', 'taxes_included')
    # Transportation surcharge is a per-distance rate
    delivery_rate = Surcharge.objects.values('value').\
        filter(tax_name='Transportation')
    distance = QuotedSuspenseOrder.objects.\
        filter(quoted_order=quoted_order_id).aggregate(Sum('distance_estimated'))
    if distance['distance_estimated__sum']:
        delivery_charges = int(distance['distance_estimated__sum'])*\
            delivery_rate[0]['value']
        totalplusdelivery = totalplusdelivery + delivery_charges
    else:
        delivery_charges = 0
    # Apply each percentage surcharge flagged as taxes_included and record it
    for value in surcharge:
        surcharge_id = value['id']
        surcharge_value = value['value']
        surcharge_tax = value['taxes_included']
        if surcharge_tax == 1:
            taxes = round((totalplusdelivery * surcharge_value)/100)
            surcharge_obj = Surcharge.objects.get(id=surcharge_id)
            taxes_applied = QuotedTaxesApplied(quoted_order=quoted_order,
                surcharge=surcharge_obj, tax=taxes)
            taxes_applied.save()
    taxes_applied_obj = QuotedTaxesApplied.objects.\
        filter(quoted_order=quoted_order_id).aggregate(Sum('tax'))
    tax_total = taxes_applied_obj['tax__sum']
    grand_total = price_total + tax_total + delivery_charges
    # NOTE(review): amount_received is assumed to equal grand_total here —
    # confirm that partial payments are handled elsewhere
    amount_received = grand_total
    bill = QuotedBill(quoted_order=quoted_order, total_cost=price_total,
        total_tax=tax_total, grand_total=grand_total,
        delivery_charges=delivery_charges, amount_received=amount_received,
        totalplusdelivery=totalplusdelivery)
    bill.save()
    request.session['old_post'] = old_post
    request.session['quoted_order_id'] = quoted_order_id
    return HttpResponseRedirect(reverse("librehatti.bills.views.select_note"))
@login_required
def quoted_order_added_success(request):
    """Render the confirmation page for the quoted order stored in the session."""
    quoted_order_id = request.session.get('quoted_order_id')
    details = QuotedOrder.objects.values(
        'buyer__first_name', 'buyer__last_name',
        'buyer__customer__address__street_address',
        'buyer__customer__title',
        'buyer__customer__address__city').filter(id=quoted_order_id)[0]
    context = {'details': details, 'quoted_order_id': quoted_order_id}
    return render(request, 'bills/quoted_success.html', context)
@login_required
def select_note(request):
    """Show the note-selection form for the quoted order held in the session."""
    quoted_order_id = request.session.get('quoted_order_id')
    note_form = SelectNoteForm(initial={'quoted_order': quoted_order_id})
    request_status = request_notify()
    context = {'form': note_form, 'request': request_status}
    return render(request, 'bills/select_note.html', context)
@login_required
def select_note_save(request):
    """Persist the note lines chosen for a quoted order, then redirect to the
    success page; non-POST access renders an error page."""
    if request.method != 'POST':
        # Reaching this step without a POST means the order flow was broken
        context = {'type': "404 Forbidden",
                   'message': "Please again place the order"}
        return render(request, 'error_page.html', context)
    form = SelectNoteForm(request.POST)
    if form.is_valid():
        formdata = form.cleaned_data
        order = QuotedOrder.objects.get(id=formdata['quoted_order'])
        for note in formdata['note_line']:
            QuotedOrderNote(quoted_order=order, note=note).save()
    # Invalid forms fall through to the same redirect, matching the old flow
    return HttpResponseRedirect(
        reverse("librehatti.bills.views.quoted_order_added_success"))
@login_required
def new_note_line(request):
    """AJAX helper: create a NoteLine from the 'note_line' query parameter."""
    # NOTE(review): state-changing action driven by GET — consider POST + CSRF
    NoteLine(note=request.GET['note_line']).save()
    return HttpResponse('')
@login_required
def delete_note(request):
    """AJAX helper: delete the NoteLine rows whose ids arrive as a
    comma-separated 'delete_note' query parameter.

    Uses a single bulk DELETE (``id__in``) instead of one query per id.
    """
    # NOTE(review): state-changing action driven by GET — consider POST + CSRF
    delete_note_ids = request.GET['delete_note'].split(',')
    NoteLine.objects.filter(id__in=delete_note_ids).delete()
    return HttpResponse('')
@login_required
def quoted_order_of_session(request):
    """Assign the session-scoped sequence number for the quoted order held in
    the request session, then redirect to the distance-entry step.

    The counter restarts at 1 whenever a new financial session begins.
    """
    old_post = request.session.get('old_post')
    quoted_order_id = request.session.get('quoted_order_id')
    quoted_order = QuotedOrder.objects.get(id=quoted_order_id)
    quoted_order_obj = QuotedOrder.objects.values('id', 'date_time').\
        get(id=quoted_order_id)
    quoted_order_date = quoted_order_obj['date_time']
    financialsession = FinancialSession.objects.\
        values('id', 'session_start_date', 'session_end_date')
    # Find the financial session whose date range contains the order date
    for value in financialsession:
        start_date = value['session_start_date']
        end_date = value['session_end_date']
        if start_date <= quoted_order_date <= end_date:
            session_id = value['id']
    # NOTE(review): if no session covers the order date, session_id is
    # unbound and the next line raises NameError — confirm sessions always
    # cover the calendar
    session = FinancialSession.objects.get(id=session_id)
    max_id = QuotedOrderofSession.objects.all().aggregate(Max('id'))
    if max_id['id__max'] == None:
        # very first quoted order ever recorded
        obj = QuotedOrderofSession(quoted_order=quoted_order,\
            session=session, quoted_order_session=1)
        obj.save()
    else:
        quoted_order_of_session = QuotedOrderofSession.objects.\
            values('quoted_order_session', 'session').get(id=max_id['id__max'])
        if quoted_order_of_session['session'] == session_id:
            # same session as the previous order: increment its counter
            obj = QuotedOrderofSession(quoted_order=quoted_order,\
                session=session, quoted_order_session=\
                quoted_order_of_session['quoted_order_session']+1)
            obj.save()
        else:
            # new financial session: restart numbering at 1
            obj = QuotedOrderofSession(quoted_order=quoted_order,\
                session=session, quoted_order_session=1)
            obj.save()
    request.session['old_post'] = old_post
    request.session['quoted_order_id'] = quoted_order_id
    return HttpResponseRedirect(\
        reverse("librehatti.suspense.views.quoted_add_distance"))
import collections
import ctypes
import hashlib
import os
import platform
import random
import re
import string
import sys
import traceback
from couchpotato.core.helpers.encoding import simplifyString, toSafeString, ss, sp
from couchpotato.core.logger import CPLog
import six
from six.moves import map, zip, filter
log = CPLog(__name__)
def fnEscape(pattern):
    """Escape fnmatch/glob bracket characters in *pattern* so they match literally."""
    escaped = pattern.replace('[', '[[')
    escaped = escaped.replace(']', '[]]')
    return escaped.replace('[[', '[[]')
def link(src, dst):
    """Create a hard link *dst* pointing at *src*.

    Windows uses kernel32.CreateHardLinkW (raises WinError on failure);
    POSIX uses os.link.
    """
    if os.name == 'nt':
        import ctypes
        if ctypes.windll.kernel32.CreateHardLinkW(six.text_type(dst), six.text_type(src), 0) == 0: raise ctypes.WinError()
    else:
        os.link(src, dst)
def symlink(src, dst):
    """Create a symbolic link *dst* pointing at *src*.

    Windows uses kernel32.CreateSymbolicLinkW with the directory flag set
    when *src* is a directory; result 1280 (ERROR_INVALID_FUNCTION family)
    is treated as failure alongside 0.  POSIX uses os.symlink.
    """
    if os.name == 'nt':
        import ctypes
        if ctypes.windll.kernel32.CreateSymbolicLinkW(six.text_type(dst), six.text_type(src), 1 if os.path.isdir(src) else 0) in [0, 1280]: raise ctypes.WinError()
    else:
        os.symlink(src, dst)
def getUserDir():
    """Return the current user's home directory (path-normalized via sp()).

    On POSIX, repairs an empty $HOME from the passwd database first so that
    expanduser('~') below resolves correctly; any failure (e.g. no pwd
    module on Windows) is deliberately ignored.
    """
    try:
        import pwd
        if not os.environ['HOME']:
            os.environ['HOME'] = sp(pwd.getpwuid(os.geteuid()).pw_dir)
    except:
        pass
    return sp(os.path.expanduser('~'))
def getDownloadDir():
    """Return the platform's default download folder (home/Downloads on
    OSX and Windows, plain home elsewhere)."""
    home = getUserDir()
    on_osx = 'darwin' in platform.platform().lower()
    if on_osx or os.name == 'nt':
        return os.path.join(home, 'Downloads')
    return home
def getDataDir():
    """Return the platform-specific CouchPotato data directory."""
    # Windows: roaming application data
    if os.name == 'nt':
        return os.path.join(os.environ['APPDATA'], 'CouchPotato')
    user_dir = getUserDir()
    # OSX: Application Support
    if 'darwin' in platform.platform().lower():
        return os.path.join(user_dir, 'Library', 'Application Support', 'CouchPotato')
    # FreeBSD: system-wide location
    if 'freebsd' in sys.platform:
        return os.path.join('/usr/local/', 'couchpotato', 'data')
    # Linux and everything else: dot-folder in the home directory
    return os.path.join(user_dir, '.couchpotato')
def isDict(obj):
    """Return True when *obj* is a dict (or a dict subclass)."""
    return isinstance(obj, dict)
def mergeDicts(a, b, prepend_list = False):
    """Recursively merge dict *b* into a (shallow) copy of dict *a*.

    Nested dicts are merged in place; lists are concatenated (src first when
    *prepend_list*) and de-duplicated preserving order; any other conflicting
    value is taken from *b*.
    """
    # BUG fix: the original `assert isDict(a), isDict(b)` used the second
    # check as the assertion *message*, so `b` was never validated.
    assert isDict(a) and isDict(b), 'mergeDicts() expects two dicts'
    dst = a.copy()
    stack = [(dst, b)]
    while stack:
        current_dst, current_src = stack.pop()
        for key in current_src:
            if key not in current_dst:
                current_dst[key] = current_src[key]
            else:
                if isDict(current_src[key]) and isDict(current_dst[key]):
                    # merge nested dicts on a later iteration
                    stack.append((current_dst[key], current_src[key]))
                elif isinstance(current_src[key], list) and isinstance(current_dst[key], list):
                    current_dst[key] = current_src[key] + current_dst[key] if prepend_list else current_dst[key] + current_src[key]
                    current_dst[key] = removeListDuplicates(current_dst[key])
                else:
                    # scalar conflict: b wins
                    current_dst[key] = current_src[key]
    return dst
def removeListDuplicates(seq):
    """Return *seq* without duplicates, keeping first-seen order.

    Uses list membership (not a set) so unhashable items like dicts work.
    """
    unique = []
    for item in seq:
        if item in unique:
            continue
        unique.append(item)
    return unique
def flattenList(l):
    """Recursively flatten nested lists into a single flat list.

    Non-list input is returned unchanged (matching the original contract).

    BUG fix: the original `sum(map(flattenList, l))` raised TypeError for
    every non-empty list, because `sum` starts from int 0 and leaf items
    are returned as scalars, so neither lists nor scalars could be summed.
    """
    if not isinstance(l, list):
        return l
    flat = []
    for item in l:
        flattened = flattenList(item)
        if isinstance(flattened, list):
            flat.extend(flattened)
        else:
            flat.append(flattened)
    return flat
def md5(text):
    """Return the hex MD5 digest of *text*; ss() coerces it to a byte string first."""
    return hashlib.md5(ss(text)).hexdigest()
def sha1(text):
    """Return the hex SHA-1 digest of *text* (must be bytes on Python 3)."""
    digest = hashlib.sha1(text)
    return digest.hexdigest()
def isLocalIP(ip):
    """Return True when *ip* (optionally a URL) points at the local machine
    or a private RFC 1918 network.

    BUG fixes:
    - the regex kept PHP-style '/.../' delimiters, which made the first
      (^127\.) and last (^::1) alternatives unmatchable;
    - lstrip('htps:/') stripped *characters*, eating leading h/t/p/s from
      hostnames and the colons of '::1'; the scheme is now removed with a
      proper prefix regex.
    """
    ip = re.sub(r'^[a-z]+://', '', ip)
    regex = r'(^127\.)|(^192\.168\.)|(^10\.)|(^172\.1[6-9]\.)|(^172\.2[0-9]\.)|(^172\.3[0-1]\.)|(^::1$)'
    return re.search(regex, ip) is not None or 'localhost' in ip or ip[:4] == '127.'
def getExt(filename):
    """Return the filename extension without the leading dot ('' when absent)."""
    _, ext = os.path.splitext(filename)
    return ext[1:]
def cleanHost(host, protocol = True, ssl = False, username = None, password = None):
    """Return a cleaned up host with given url options set

    Changes protocol to https if ssl is set to True and http if ssl is set to false.
    >>> cleanHost("localhost:80", ssl=True)
    'https://localhost:80/'
    >>> cleanHost("localhost:80", ssl=False)
    'http://localhost:80/'

    Username and password is managed with the username and password variables
    >>> cleanHost("localhost:80", username="user", password="passwd")
    'http://user:passwd@localhost:80/'

    Output without scheme (protocol) can be forced with protocol=False
    >>> cleanHost("localhost:80", protocol=False)
    'localhost:80'
    """
    has_scheme = '://' in host
    if protocol and not has_scheme:
        host = ('https://' if ssl else 'http://') + host
    if not protocol:
        # strip any scheme that was present
        host = host.split('://', 1)[-1]
    if protocol and username and password:
        try:
            existing_auth = re.findall('^(?:.+?//)(.+?):(.+?)@(?:.+)$', host)
            if existing_auth:
                log.error('Cleanhost error: auth already defined in url: %s, please remove BasicAuth from url.', host)
            else:
                host = host.replace('://', '://%s:%s@' % (username, password), 1)
        except:
            pass
    host = host.rstrip('/ ')
    if protocol:
        host += '/'
    return host
def getImdb(txt, check_inside = False, multiple = False):
    """Extract IMDB id(s) of the form tt0123456 from *txt*.

    With check_inside, *txt* may be a file path whose contents are scanned.
    Returns a single normalized id ('tt%07d'), a de-duplicated list when
    *multiple*, or False when nothing is found.
    """
    if not check_inside:
        txt = simplifyString(txt)
    else:
        txt = ss(txt)
    # when check_inside is set and txt names an existing file, scan the file
    if check_inside and os.path.isfile(txt):
        output = open(txt, 'r')
        txt = output.read()
        output.close()
    try:
        ids = re.findall('(tt\d{4,7})', txt)
        if multiple:
            # normalize each id to 7 digits and drop duplicates
            return removeDuplicate(['tt%07d' % tryInt(x[2:]) for x in ids]) if len(ids) > 0 else []
        return 'tt%07d' % tryInt(ids[0][2:])
    except IndexError:
        # no match at all: ids[0] raised
        pass
    return False
def tryInt(s, default = 0):
    """Coerce *s* to int, returning *default* on any failure."""
    try:
        return int(s)
    except:
        return default
def tryFloat(s):
    """Coerce *s* to a number: strings without a '.' go through tryInt,
    everything else through float(); 0 on any failure."""
    try:
        if isinstance(s, str) and '.' not in s:
            return tryInt(s)
        return float(s)
    except:
        return 0
def natsortKey(string_):
    """Natural-sort key: digit runs become ints.
    See http://www.codinghorror.com/blog/archives/001018.html"""
    parts = re.split(r'(\d+)', string_)
    return [int(part) if part.isdigit() else part for part in parts]
def toIterable(value):
    """Return *value* unchanged when it is iterable, else wrap it in a list.

    BUG fix: ``collections.Iterable`` was removed in Python 3.10; import
    from ``collections.abc`` with a fallback for Python 2.
    """
    try:
        from collections.abc import Iterable  # Python 3.3+
    except ImportError:
        from collections import Iterable  # Python 2
    if isinstance(value, Iterable):
        return value
    return [value]
def getIdentifier(media):
    """Return the media's identifier, falling back to its IMDB id."""
    identifier = media.get('identifier')
    if identifier:
        return identifier
    return media.get('identifiers', {}).get('imdb')
def getTitle(media_dict):
    """Best-effort title lookup across the known media-dict layouts;
    logs and returns None when nothing matches."""
    try:
        accessors = (
            lambda m: m['title'],
            lambda m: m['titles'][0],
            lambda m: m['info']['titles'][0],
            lambda m: m['media']['info']['titles'][0],
        )
        for accessor in accessors:
            try:
                return accessor(media_dict)
            except:
                continue
        log.error('Could not get title for %s', getIdentifier(media_dict))
        return None
    except:
        log.error('Could not get title for library item: %s', media_dict)
        return None
def possibleTitles(raw_title):
    """Generate unique lowercase/safe/simplified variants of *raw_title*,
    including an '&' -> 'and' substitution."""
    variants = [
        toSafeString(raw_title).lower(),
        raw_title.lower(),
        simplifyString(raw_title),
        simplifyString(raw_title.replace('&', 'and')),
    ]
    return removeDuplicate(variants)
def randomString(size = 8, chars = string.ascii_uppercase + string.digits):
    """Return a random string of *size* characters drawn from *chars*."""
    return ''.join(random.choice(chars) for _ in range(size))
def splitString(str, split_on = ',', clean = True):
    """Split *str* on *split_on* and strip each piece; with *clean*,
    empty pieces are dropped. Falsy input yields []."""
    parts = [piece.strip() for piece in str.split(split_on)] if str else []
    if not clean:
        return parts
    return removeEmpty(parts)
def removeEmpty(l):
    """Return *l* without its falsy entries ('', None, 0, ...)."""
    return [item for item in l if item]
def removeDuplicate(l):
    """Remove duplicates from *l* keeping first-seen order (items must be hashable)."""
    seen = set()
    unique = []
    for item in l:
        if item in seen:
            continue
        seen.add(item)
        unique.append(item)
    return unique
def dictIsSubset(a, b):
    """Return True when every key/value pair of *a* also appears in *b*."""
    for key, value in a.items():
        if key not in b or b[key] != value:
            return False
    return True
# Returns True if sub_folder is the same as or inside base_folder
def isSubFolder(sub_folder, base_folder):
    """True when *sub_folder* equals or lies inside *base_folder*.

    Both paths are resolved (realpath) and terminated with a separator so
    that '/a/bc' is not mistaken for a child of '/a/b'.
    """
    if base_folder and sub_folder:
        base = sp(os.path.realpath(base_folder)) + os.path.sep
        subfolder = sp(os.path.realpath(sub_folder)) + os.path.sep
        return os.path.commonprefix([subfolder, base]) == base
    return False
# From SABNZBD
re_password = [re.compile(r'(.+){{([^{}]+)}}$'), re.compile(r'(.+)\s+password\s*=\s*(.+)$', re.I)]

def scanForPassword(name):
    """Extract (clean_name, password) from release names like
    'Title{{secret}}' or 'Title password = secret'; None when absent."""
    for pattern in re_password:
        match = pattern.search(name)
        if match:
            return match.group(1).strip('. '), match.group(2).strip()
under_pat = re.compile(r'_([a-z])')

def underscoreToCamel(name):
    """Convert snake_case *name* to camelCase."""
    return under_pat.sub(lambda match: match.group(1).upper(), name)
def removePyc(folder, only_excess = True, show_logs = True):
    """Delete .pyc files under *folder* and prune directories left empty.

    With *only_excess* (default), only .pyc files whose matching .py source
    is gone are removed; otherwise all .pyc files are removed.
    """
    folder = sp(folder)
    for root, dirs, files in os.walk(folder):
        pyc_files = filter(lambda filename: filename.endswith('.pyc'), files)
        py_files = set(filter(lambda filename: filename.endswith('.py'), files))
        # a .pyc is "excess" when stripping the trailing 'c' finds no .py
        excess_pyc_files = filter(lambda pyc_filename: pyc_filename[:-1] not in py_files, pyc_files) if only_excess else pyc_files
        for excess_pyc_file in excess_pyc_files:
            full_path = os.path.join(root, excess_pyc_file)
            if show_logs: log.debug('Removing old PYC file: %s', full_path)
            try:
                os.remove(full_path)
            except:
                log.error('Couldn\'t remove %s: %s', (full_path, traceback.format_exc()))
        # drop now-empty subdirectories
        for dir_name in dirs:
            full_path = os.path.join(root, dir_name)
            if len(os.listdir(full_path)) == 0:
                try:
                    os.rmdir(full_path)
                except:
                    log.error('Couldn\'t remove empty directory %s: %s', (full_path, traceback.format_exc()))
def getFreeSpace(directories):
    """Return [total, free] space for a single folder, or a dict of
    folder -> [total, free] for a list of folders.

    POSIX values are in MB (statvfs blocks * fragment size / 1024^2).
    """
    single = not isinstance(directories, (tuple, list))
    if single:
        directories = [directories]
    free_space = {}
    for folder in directories:
        size = None
        if os.path.isdir(folder):
            if os.name == 'nt':
                _, total, free = ctypes.c_ulonglong(), ctypes.c_ulonglong(), \
                    ctypes.c_ulonglong()
                # NOTE(review): `unicode` is Python 2 only — this branch
                # raises NameError on Python 3 before the isinstance check
                if sys.version_info >= (3,) or isinstance(folder, unicode):
                    fun = ctypes.windll.kernel32.GetDiskFreeSpaceExW #@UndefinedVariable
                else:
                    fun = ctypes.windll.kernel32.GetDiskFreeSpaceExA #@UndefinedVariable
                ret = fun(folder, ctypes.byref(_), ctypes.byref(total), ctypes.byref(free))
                if ret == 0:
                    raise ctypes.WinError()
                # NOTE(review): returns raw bytes for the FIRST folder only,
                # ignoring the single/dict contract and the MB unit used by
                # the POSIX branch — confirm callers on Windows expect this
                return [total.value, free.value]
            else:
                s = os.statvfs(folder)
                size = [s.f_blocks * s.f_frsize / (1024 * 1024), (s.f_bavail * s.f_frsize) / (1024 * 1024)]
        if single: return size
        free_space[folder] = size
    return free_space
def getSize(paths):
    """Return the combined size, in MB, of the given file/directory path(s).

    BUG fix: the original reset ``total_size = 0`` inside the directory
    branch, discarding the sizes of all previously processed paths whenever
    a list of several directories was passed.
    """
    single = not isinstance(paths, (tuple, list))
    if single:
        paths = [paths]
    total_size = 0
    for path in paths:
        path = sp(path)
        if os.path.isdir(path):
            # walk the tree and add every file's size
            for dirpath, _, filenames in os.walk(path):
                for f in filenames:
                    total_size += os.path.getsize(sp(os.path.join(dirpath, f)))
        elif os.path.isfile(path):
            total_size += os.path.getsize(path)
    return total_size / 1048576  # MB
def find(func, iterable):
    """Return the first item of *iterable* for which func(item) is truthy, else None."""
    return next((item for item in iterable if func(item)), None)
def compareVersions(version1, version2):
    """Numerically compare two dotted version strings.

    Trailing '.0' components are ignored ('1.0' == '1').
    Returns -1, 0 or 1 like the classic cmp().

    BUG fix: the original used the ``cmp`` builtin, which was removed in
    Python 3; replaced with the standard (a > b) - (a < b) idiom.
    """
    def normalize(v):
        # strip trailing .0 groups, then compare component-wise as ints
        return [int(x) for x in re.sub(r'(\.0+)*$', '', v).split(".")]
    n1 = normalize(version1)
    n2 = normalize(version2)
    return (n1 > n2) - (n1 < n2)
| xombiemp/CouchPotatoServer | couchpotato/core/helpers/variable.py | Python | gpl-3.0 | 11,486 |
########################################################################
# $HeadURL$
# File : SSHGEComputingElement.py
# Author : A.T. V.H.
########################################################################
""" Grid Engine Computing Element with remote job submission via ssh/scp and using site
shared area for the job proxy placement
"""
__RCSID__ = "092c1d9 (2011-06-02 15:20:46 +0200) atsareg <atsareg@in2p3.fr>"
from DIRAC.Resources.Computing.SSHComputingElement import SSHComputingElement
from DIRAC.Core.Utilities.Pfn import pfnparse
from DIRAC import S_OK
# Registered type name of this computing element and the configuration
# parameters that every SSHGE CE definition must provide.
CE_NAME = 'SSHGE'
MANDATORY_PARAMETERS = [ 'Queue' ]
class SSHGEComputingElement( SSHComputingElement ):
  """ Sun/Oracle Grid Engine computing element accessed via ssh/scp.
  """

  #############################################################################
  def __init__( self, ceUniqueID ):
    """ Standard constructor: configure the generic SSH CE for Grid Engine.
    """
    SSHComputingElement.__init__( self, ceUniqueID )

    # Identify this CE flavour and the control script used on the remote host
    self.ceType = CE_NAME
    self.controlScript = 'sgece'
    self.mandatoryParameters = MANDATORY_PARAMETERS

  def _getJobOutputFiles( self, jobID ):
    """ Resolve the stdout/stderr file names for the given pilot reference.
    """
    parsed = pfnparse( jobID )
    if not parsed['OK']:
      return parsed
    stamp = parsed['Value']['FileName']
    host = parsed['Value']['Host']

    stdoutFile = '%s/DIRACPilot.o%s' % ( self.batchOutput, stamp )
    stderrFile = '%s/DIRACPilot.e%s' % ( self.batchError, stamp )

    return S_OK( ( stamp, host, stdoutFile, stderrFile ) )
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
| sposs/DIRAC | Resources/Computing/SSHGEComputingElement.py | Python | gpl-3.0 | 1,662 |
#!/usr/bin/env python
#
# backend for serial IO for POSIX compatible systems, like Linux, OSX
#
# This file is part of pySerial. https://github.com/pyserial/pyserial
# (C) 2001-2016 Chris Liechti <cliechti@gmx.net>
#
# SPDX-License-Identifier: BSD-3-Clause
#
# parts based on code from Grant B. Edwards <grante@visi.com>:
# ftp://ftp.visi.com/users/grante/python/PosixSerial.py
#
# references: http://www.easysw.com/~mike/serial/serial.html
# Collection of port names (was previously used by number_to_device which was
# removed.
# - Linux /dev/ttyS%d (confirmed)
# - cygwin/win32 /dev/com%d (confirmed)
# - openbsd (OpenBSD) /dev/cua%02d
# - bsd*, freebsd* /dev/cuad%d
# - darwin (OS X) /dev/cuad%d
# - netbsd /dev/dty%02d (NetBSD 1.6 testing by Erk)
# - irix (IRIX) /dev/ttyf%d (partially tested) names depending on flow control
# - hp (HP-UX) /dev/tty%dp0 (not tested)
# - sunos (Solaris/SunOS) /dev/tty%c (letters, 'a'..'z') (confirmed)
# - aix (AIX) /dev/tty%d
# pylint: disable=abstract-method
import errno
import fcntl
import os
import select
import struct
import sys
import termios
import time
import serial
from serial.serialutil import SerialBase, SerialException, to_bytes, portNotOpenError, writeTimeoutError
class PlatformSpecificBase(object):
    # Fallback mix-in for platforms without special support: no extra
    # baudrate constants, and non-standard baudrates / RS485 are rejected.
    BAUDRATE_CONSTANTS = {}

    def _set_special_baudrate(self, baudrate):
        # Overridden by platform subclasses that can set arbitrary speeds.
        raise NotImplementedError('non-standard baudrates are not supported on this platform')

    def _set_rs485_mode(self, rs485_settings):
        # Overridden on platforms (Linux) that expose RS485 ioctls.
        raise NotImplementedError('RS485 not supported on this platform')
# try to detect the OS so that a device can be selected...
# this code block should supply a device() and set_special_baudrate() function
# for the platform
plat = sys.platform.lower()

if plat[:5] == 'linux':    # Linux (confirmed)  # noqa
    import array

    # baudrate ioctls
    TCGETS2 = 0x802C542A
    TCSETS2 = 0x402C542B
    BOTHER = 0o010000

    # RS485 ioctls
    TIOCGRS485 = 0x542E
    TIOCSRS485 = 0x542F
    SER_RS485_ENABLED = 0b00000001
    SER_RS485_RTS_ON_SEND = 0b00000010
    SER_RS485_RTS_AFTER_SEND = 0b00000100
    SER_RS485_RX_DURING_TX = 0b00010000

    class PlatformSpecific(PlatformSpecificBase):
        # Mapping of baudrate -> termios Bxxx constant value (octal), used
        # when the running termios module lacks the named constant.
        BAUDRATE_CONSTANTS = {
            0: 0o000000,  # hang up
            50: 0o000001,
            75: 0o000002,
            110: 0o000003,
            134: 0o000004,
            150: 0o000005,
            200: 0o000006,
            300: 0o000007,
            600: 0o000010,
            1200: 0o000011,
            1800: 0o000012,
            2400: 0o000013,
            4800: 0o000014,
            9600: 0o000015,
            19200: 0o000016,
            38400: 0o000017,
            57600: 0o010001,
            115200: 0o010002,
            230400: 0o010003,
            460800: 0o010004,
            500000: 0o010005,
            576000: 0o010006,
            921600: 0o010007,
            1000000: 0o010010,
            1152000: 0o010011,
            1500000: 0o010012,
            2000000: 0o010013,
            2500000: 0o010014,
            3000000: 0o010015,
            3500000: 0o010016,
            4000000: 0o010017
        }

        def _set_special_baudrate(self, baudrate):
            # right size is 44 on x86_64, allow for some growth
            buf = array.array('i', [0] * 64)
            try:
                # get serial_struct
                fcntl.ioctl(self.fd, TCGETS2, buf)
                # set custom speed
                buf[2] &= ~termios.CBAUD
                buf[2] |= BOTHER
                # indices 9/10: c_ispeed/c_ospeed fields of struct termios2
                buf[9] = buf[10] = baudrate
                # set serial_struct
                fcntl.ioctl(self.fd, TCSETS2, buf)
            except IOError as e:
                raise ValueError('Failed to set custom baud rate ({}): {}'.format(baudrate, e))

        def _set_rs485_mode(self, rs485_settings):
            buf = array.array('i', [0] * 8)  # flags, delaytx, delayrx, padding
            try:
                fcntl.ioctl(self.fd, TIOCGRS485, buf)
                buf[0] |= SER_RS485_ENABLED
                if rs485_settings is not None:
                    if rs485_settings.loopback:
                        buf[0] |= SER_RS485_RX_DURING_TX
                    else:
                        buf[0] &= ~SER_RS485_RX_DURING_TX
                    if rs485_settings.rts_level_for_tx:
                        buf[0] |= SER_RS485_RTS_ON_SEND
                    else:
                        buf[0] &= ~SER_RS485_RTS_ON_SEND
                    if rs485_settings.rts_level_for_rx:
                        buf[0] |= SER_RS485_RTS_AFTER_SEND
                    else:
                        buf[0] &= ~SER_RS485_RTS_AFTER_SEND
                    # delays are specified in seconds; kernel expects ms
                    if rs485_settings.delay_before_tx is not None:
                        buf[1] = int(rs485_settings.delay_before_tx * 1000)
                    if rs485_settings.delay_before_rx is not None:
                        buf[2] = int(rs485_settings.delay_before_rx * 1000)
                else:
                    buf[0] = 0  # clear SER_RS485_ENABLED
                fcntl.ioctl(self.fd, TIOCSRS485, buf)
            except IOError as e:
                raise ValueError('Failed to set RS485 mode: {}'.format(e))

elif plat == 'cygwin':       # cygwin/win32 (confirmed)

    class PlatformSpecific(PlatformSpecificBase):
        BAUDRATE_CONSTANTS = {
            128000: 0x01003,
            256000: 0x01005,
            500000: 0x01007,
            576000: 0x01008,
            921600: 0x01009,
            1000000: 0x0100a,
            1152000: 0x0100b,
            1500000: 0x0100c,
            2000000: 0x0100d,
            2500000: 0x0100e,
            3000000: 0x0100f
        }

elif plat[:6] == 'darwin':   # OS X
    import array
    IOSSIOSPEED = 0x80045402  # _IOW('T', 2, speed_t)

    class PlatformSpecific(PlatformSpecificBase):
        osx_version = os.uname()[2].split('.')
        # Tiger or above can support arbitrary serial speeds
        if int(osx_version[0]) >= 8:
            def _set_special_baudrate(self, baudrate):
                # use IOKit-specific call to set up high speeds
                buf = array.array('i', [baudrate])
                fcntl.ioctl(self.fd, IOSSIOSPEED, buf, 1)

else:
    # unknown platform: fall back to the base class (standard baudrates only)
    class PlatformSpecific(PlatformSpecificBase):
        pass
# load some constants for later use.
# try to use values from termios, use defaults from linux otherwise
TIOCMGET = getattr(termios, 'TIOCMGET', 0x5415)
TIOCMBIS = getattr(termios, 'TIOCMBIS', 0x5416)
TIOCMBIC = getattr(termios, 'TIOCMBIC', 0x5417)
TIOCMSET = getattr(termios, 'TIOCMSET', 0x5418)

# TIOCM_LE = getattr(termios, 'TIOCM_LE', 0x001)
TIOCM_DTR = getattr(termios, 'TIOCM_DTR', 0x002)
TIOCM_RTS = getattr(termios, 'TIOCM_RTS', 0x004)
# TIOCM_ST = getattr(termios, 'TIOCM_ST', 0x008)
# TIOCM_SR = getattr(termios, 'TIOCM_SR', 0x010)

TIOCM_CTS = getattr(termios, 'TIOCM_CTS', 0x020)
TIOCM_CAR = getattr(termios, 'TIOCM_CAR', 0x040)
TIOCM_RNG = getattr(termios, 'TIOCM_RNG', 0x080)
TIOCM_DSR = getattr(termios, 'TIOCM_DSR', 0x100)
TIOCM_CD = getattr(termios, 'TIOCM_CD', TIOCM_CAR)
TIOCM_RI = getattr(termios, 'TIOCM_RI', TIOCM_RNG)
# TIOCM_OUT1 = getattr(termios, 'TIOCM_OUT1', 0x2000)
# TIOCM_OUT2 = getattr(termios, 'TIOCM_OUT2', 0x4000)
if hasattr(termios, 'TIOCINQ'):
    TIOCINQ = termios.TIOCINQ
else:
    TIOCINQ = getattr(termios, 'FIONREAD', 0x541B)
TIOCOUTQ = getattr(termios, 'TIOCOUTQ', 0x5411)

# pre-packed ioctl argument buffers for the modem-line bit masks
TIOCM_zero_str = struct.pack('I', 0)
TIOCM_RTS_str = struct.pack('I', TIOCM_RTS)
TIOCM_DTR_str = struct.pack('I', TIOCM_DTR)

TIOCSBRK = getattr(termios, 'TIOCSBRK', 0x5427)
TIOCCBRK = getattr(termios, 'TIOCCBRK', 0x5428)

CMSPAR = 0o10000000000  # Use "stick" (mark/space) parity
class Serial(SerialBase, PlatformSpecific):
    """\
    Serial port class POSIX implementation. Serial port configuration is
    done with termios and fcntl. Runs on Linux and many other Un*x like
    systems.
    """

    def open(self):
        """\
        Open port with current settings. This may throw a SerialException
        if the port cannot be opened."""
        if self._port is None:
            raise SerialException("Port must be configured before it can be used.")
        if self.is_open:
            raise SerialException("Port is already open.")
        self.fd = None
        # open
        try:
            self.fd = os.open(self.portstr, os.O_RDWR | os.O_NOCTTY | os.O_NONBLOCK)
        except OSError as msg:
            self.fd = None
            raise SerialException(msg.errno, "could not open port {}: {}".format(self._port, msg))
        #~ fcntl.fcntl(self.fd, fcntl.F_SETFL, 0)  # set blocking

        try:
            self._reconfigure_port(force_update=True)
        except:
            try:
                os.close(self.fd)
            except:
                # ignore any exception when closing the port
                # also to keep original exception that happened when setting up
                pass
            self.fd = None
            raise
        else:
            self.is_open = True
        try:
            if not self._dsrdtr:
                self._update_dtr_state()
            if not self._rtscts:
                self._update_rts_state()
        except IOError as e:
            if e.errno == 22:  # ignore Invalid argument
                pass
            else:
                raise
        self.reset_input_buffer()
        # self-pipes used by cancel_read()/cancel_write() to interrupt the
        # select() calls in read()/write() from another thread
        self.pipe_abort_read_r, self.pipe_abort_read_w = os.pipe()
        self.pipe_abort_write_r, self.pipe_abort_write_w = os.pipe()
        fcntl.fcntl(self.pipe_abort_read_r, fcntl.F_SETFL, os.O_NONBLOCK)
        fcntl.fcntl(self.pipe_abort_write_r, fcntl.F_SETFL, os.O_NONBLOCK)

    def _reconfigure_port(self, force_update=False):
        """Set communication parameters on opened port."""
        if self.fd is None:
            raise SerialException("Can only operate on a valid file descriptor")
        custom_baud = None

        vmin = vtime = 0  # timeout is done via select
        if self._inter_byte_timeout is not None:
            vmin = 1
            vtime = int(self._inter_byte_timeout * 10)
        try:
            orig_attr = termios.tcgetattr(self.fd)
            iflag, oflag, cflag, lflag, ispeed, ospeed, cc = orig_attr
        except termios.error as msg:  # if a port is nonexistent but has a /dev file, it'll fail here
            raise SerialException("Could not configure port: {}".format(msg))
        # set up raw mode / no echo / binary
        cflag |= (termios.CLOCAL | termios.CREAD)
        lflag &= ~(termios.ICANON | termios.ECHO | termios.ECHOE |
                   termios.ECHOK | termios.ECHONL |
                   termios.ISIG | termios.IEXTEN)  # |termios.ECHOPRT
        for flag in ('ECHOCTL', 'ECHOKE'):  # netbsd workaround for Erk
            if hasattr(termios, flag):
                lflag &= ~getattr(termios, flag)

        oflag &= ~(termios.OPOST | termios.ONLCR | termios.OCRNL)
        iflag &= ~(termios.INLCR | termios.IGNCR | termios.ICRNL | termios.IGNBRK)
        if hasattr(termios, 'IUCLC'):
            iflag &= ~termios.IUCLC
        if hasattr(termios, 'PARMRK'):
            iflag &= ~termios.PARMRK

        # setup baud rate
        try:
            ispeed = ospeed = getattr(termios, 'B{}'.format(self._baudrate))
        except AttributeError:
            try:
                ispeed = ospeed = self.BAUDRATE_CONSTANTS[self._baudrate]
            except KeyError:
                #~ raise ValueError('Invalid baud rate: %r' % self._baudrate)
                # may need custom baud rate, it isn't in our list.
                ispeed = ospeed = getattr(termios, 'B38400')
                try:
                    custom_baud = int(self._baudrate)  # store for later
                except ValueError:
                    raise ValueError('Invalid baud rate: {!r}'.format(self._baudrate))
                else:
                    if custom_baud < 0:
                        raise ValueError('Invalid baud rate: {!r}'.format(self._baudrate))

        # setup char len
        cflag &= ~termios.CSIZE
        if self._bytesize == 8:
            cflag |= termios.CS8
        elif self._bytesize == 7:
            cflag |= termios.CS7
        elif self._bytesize == 6:
            cflag |= termios.CS6
        elif self._bytesize == 5:
            cflag |= termios.CS5
        else:
            raise ValueError('Invalid char len: {!r}'.format(self._bytesize))
        # setup stop bits
        if self._stopbits == serial.STOPBITS_ONE:
            cflag &= ~(termios.CSTOPB)
        elif self._stopbits == serial.STOPBITS_ONE_POINT_FIVE:
            cflag |= (termios.CSTOPB)  # XXX same as TWO.. there is no POSIX support for 1.5
        elif self._stopbits == serial.STOPBITS_TWO:
            cflag |= (termios.CSTOPB)
        else:
            raise ValueError('Invalid stop bit specification: {!r}'.format(self._stopbits))
        # setup parity
        iflag &= ~(termios.INPCK | termios.ISTRIP)
        if self._parity == serial.PARITY_NONE:
            cflag &= ~(termios.PARENB | termios.PARODD)
        elif self._parity == serial.PARITY_EVEN:
            cflag &= ~(termios.PARODD)
            cflag |= (termios.PARENB)
        elif self._parity == serial.PARITY_ODD:
            cflag |= (termios.PARENB | termios.PARODD)
        elif self._parity == serial.PARITY_MARK and plat[:5] == 'linux':
            # mark/space parity needs the Linux-only CMSPAR ("stick") bit
            cflag |= (termios.PARENB | CMSPAR | termios.PARODD)
        elif self._parity == serial.PARITY_SPACE and plat[:5] == 'linux':
            cflag |= (termios.PARENB | CMSPAR)
            cflag &= ~(termios.PARODD)
        else:
            raise ValueError('Invalid parity: {!r}'.format(self._parity))
        # setup flow control
        # xonxoff
        if hasattr(termios, 'IXANY'):
            if self._xonxoff:
                iflag |= (termios.IXON | termios.IXOFF)  # |termios.IXANY)
            else:
                iflag &= ~(termios.IXON | termios.IXOFF | termios.IXANY)
        else:
            if self._xonxoff:
                iflag |= (termios.IXON | termios.IXOFF)
            else:
                iflag &= ~(termios.IXON | termios.IXOFF)
        # rtscts
        if hasattr(termios, 'CRTSCTS'):
            if self._rtscts:
                cflag |= (termios.CRTSCTS)
            else:
                cflag &= ~(termios.CRTSCTS)
        elif hasattr(termios, 'CNEW_RTSCTS'):  # try it with alternate constant name
            if self._rtscts:
                cflag |= (termios.CNEW_RTSCTS)
            else:
                cflag &= ~(termios.CNEW_RTSCTS)
        # XXX should there be a warning if setting up rtscts (and xonxoff etc) fails??

        # buffer
        # vmin "minimal number of characters to be read. 0 for non blocking"
        if vmin < 0 or vmin > 255:
            raise ValueError('Invalid vmin: {!r}'.format(vmin))
        cc[termios.VMIN] = vmin
        # vtime
        if vtime < 0 or vtime > 255:
            raise ValueError('Invalid vtime: {!r}'.format(vtime))
        cc[termios.VTIME] = vtime
        # activate settings
        if force_update or [iflag, oflag, cflag, lflag, ispeed, ospeed, cc] != orig_attr:
            termios.tcsetattr(
                self.fd,
                termios.TCSANOW,
                [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])

        # apply custom baud rate, if any
        if custom_baud is not None:
            self._set_special_baudrate(custom_baud)

        if self._rs485_mode is not None:
            self._set_rs485_mode(self._rs485_mode)

    def close(self):
        """Close port"""
        if self.is_open:
            if self.fd is not None:
                os.close(self.fd)
                self.fd = None
                # also close the cancel_read/cancel_write self-pipes
                os.close(self.pipe_abort_read_w)
                os.close(self.pipe_abort_read_r)
                os.close(self.pipe_abort_write_w)
                os.close(self.pipe_abort_write_r)
                self.pipe_abort_read_r, self.pipe_abort_read_w = None, None
                self.pipe_abort_write_r, self.pipe_abort_write_w = None, None
            self.is_open = False

    # - - - - - - - - - - - - - - - - - - - - - - - -

    @property
    def in_waiting(self):
        """Return the number of bytes currently in the input buffer."""
        #~ s = fcntl.ioctl(self.fd, termios.FIONREAD, TIOCM_zero_str)
        s = fcntl.ioctl(self.fd, TIOCINQ, TIOCM_zero_str)
        return struct.unpack('I', s)[0]

    # select based implementation, proved to work on many systems
    def read(self, size=1):
        """\
        Read size bytes from the serial port. If a timeout is set it may
        return less characters as requested. With no timeout it will block
        until the requested number of bytes is read.
        """
        if not self.is_open:
            raise portNotOpenError
        read = bytearray()
        timeout = self._timeout
        while len(read) < size:
            try:
                start_time = time.time()
                ready, _, _ = select.select([self.fd, self.pipe_abort_read_r], [], [], timeout)
                if self.pipe_abort_read_r in ready:
                    # drained: a cancel_read() request arrived
                    os.read(self.pipe_abort_read_r, 1000)
                    break
                # If select was used with a timeout, and the timeout occurs, it
                # returns with empty lists -> thus abort read operation.
                # For timeout == 0 (non-blocking operation) also abort when
                # there is nothing to read.
                if not ready:
                    break  # timeout
                buf = os.read(self.fd, size - len(read))
                # read should always return some data as select reported it was
                # ready to read when we get to this point.
                if not buf:
                    # Disconnected devices, at least on Linux, show the
                    # behavior that they are always ready to read immediately
                    # but reading returns nothing.
                    raise SerialException(
                        'device reports readiness to read but returned no data '
                        '(device disconnected or multiple access on port?)')
                read.extend(buf)
            except OSError as e:
                # this is for Python 3.x where select.error is a subclass of
                # OSError ignore EAGAIN errors. all other errors are shown
                if e.errno != errno.EAGAIN and e.errno != errno.EINTR:
                    raise SerialException('read failed: {}'.format(e))
            except select.error as e:
                # this is for Python 2.x
                # ignore EAGAIN errors. all other errors are shown
                # see also http://www.python.org/dev/peps/pep-3151/#select
                if e[0] != errno.EAGAIN:
                    raise SerialException('read failed: {}'.format(e))
            # deduct the time already spent from the remaining timeout budget
            if timeout is not None:
                timeout -= time.time() - start_time
                if timeout <= 0:
                    break
        return bytes(read)

    def cancel_read(self):
        """Abort a blocking read() call from another thread."""
        os.write(self.pipe_abort_read_w, b"x")

    def cancel_write(self):
        """Abort a blocking write() call from another thread."""
        os.write(self.pipe_abort_write_w, b"x")

    def write(self, data):
        """Output the given byte string over the serial port."""
        if not self.is_open:
            raise portNotOpenError
        d = to_bytes(data)
        tx_len = len(d)
        timeout = self._write_timeout
        if timeout and timeout > 0:  # Avoid comparing None with zero
            # convert the relative timeout into an absolute deadline
            timeout += time.time()
        while tx_len > 0:
            try:
                n = os.write(self.fd, d)
                if timeout == 0:
                    # Zero timeout indicates non-blocking - simply return the
                    # number of bytes of data actually written
                    return n
                elif timeout and timeout > 0:  # Avoid comparing None with zero
                    # when timeout is set, use select to wait for being ready
                    # with the time left as timeout
                    timeleft = timeout - time.time()
                    if timeleft < 0:
                        raise writeTimeoutError
                    abort, ready, _ = select.select([self.pipe_abort_write_r], [self.fd], [], timeleft)
                    if abort:
                        os.read(self.pipe_abort_write_r, 1000)
                        break
                    if not ready:
                        raise writeTimeoutError
                else:
                    assert timeout is None
                    # wait for write operation
                    abort, ready, _ = select.select([self.pipe_abort_write_r], [self.fd], [], None)
                    if abort:
                        os.read(self.pipe_abort_write_r, 1)
                        break
                    if not ready:
                        raise SerialException('write failed (select)')
                d = d[n:]
                tx_len -= n
            except SerialException:
                raise
            except OSError as v:
                if v.errno != errno.EAGAIN:
                    raise SerialException('write failed: {}'.format(v))
                # still calculate and check timeout
                if timeout and timeout - time.time() < 0:
                    raise writeTimeoutError
        return len(data)

    def flush(self):
        """\
        Flush of file like objects. In this case, wait until all data
        is written.
        """
        if not self.is_open:
            raise portNotOpenError
        termios.tcdrain(self.fd)

    def reset_input_buffer(self):
        """Clear input buffer, discarding all that is in the buffer."""
        if not self.is_open:
            raise portNotOpenError
        termios.tcflush(self.fd, termios.TCIFLUSH)

    def reset_output_buffer(self):
        """\
        Clear output buffer, aborting the current output and discarding all
        that is in the buffer.
        """
        if not self.is_open:
            raise portNotOpenError
        termios.tcflush(self.fd, termios.TCOFLUSH)

    def send_break(self, duration=0.25):
        """\
        Send break condition. Timed, returns to idle state after given
        duration.
        """
        if not self.is_open:
            raise portNotOpenError
        # duration is passed to tcsendbreak() scaled in multiples of the
        # 0.25 s default quantum
        termios.tcsendbreak(self.fd, int(duration / 0.25))

    def _update_break_state(self):
        """\
        Set break: Controls TXD. When active, no transmitting is possible.
        """
        if self._break_state:
            fcntl.ioctl(self.fd, TIOCSBRK)
        else:
            fcntl.ioctl(self.fd, TIOCCBRK)

    def _update_rts_state(self):
        """Set terminal status line: Request To Send"""
        if self._rts_state:
            fcntl.ioctl(self.fd, TIOCMBIS, TIOCM_RTS_str)
        else:
            fcntl.ioctl(self.fd, TIOCMBIC, TIOCM_RTS_str)

    def _update_dtr_state(self):
        """Set terminal status line: Data Terminal Ready"""
        if self._dtr_state:
            fcntl.ioctl(self.fd, TIOCMBIS, TIOCM_DTR_str)
        else:
            fcntl.ioctl(self.fd, TIOCMBIC, TIOCM_DTR_str)

    @property
    def cts(self):
        """Read terminal status line: Clear To Send"""
        if not self.is_open:
            raise portNotOpenError
        s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str)
        return struct.unpack('I', s)[0] & TIOCM_CTS != 0

    @property
    def dsr(self):
        """Read terminal status line: Data Set Ready"""
        if not self.is_open:
            raise portNotOpenError
        s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str)
        return struct.unpack('I', s)[0] & TIOCM_DSR != 0

    @property
    def ri(self):
        """Read terminal status line: Ring Indicator"""
        if not self.is_open:
            raise portNotOpenError
        s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str)
        return struct.unpack('I', s)[0] & TIOCM_RI != 0

    @property
    def cd(self):
        """Read terminal status line: Carrier Detect"""
        if not self.is_open:
            raise portNotOpenError
        s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str)
        return struct.unpack('I', s)[0] & TIOCM_CD != 0

    # - - platform specific - - - -

    @property
    def out_waiting(self):
        """Return the number of bytes currently in the output buffer."""
        #~ s = fcntl.ioctl(self.fd, termios.FIONREAD, TIOCM_zero_str)
        s = fcntl.ioctl(self.fd, TIOCOUTQ, TIOCM_zero_str)
        return struct.unpack('I', s)[0]

    def fileno(self):
        """\
        For easier use of the serial port instance with select.
        WARNING: this function is not portable to different platforms!
        """
        if not self.is_open:
            raise portNotOpenError
        return self.fd

    def set_input_flow_control(self, enable=True):
        """\
        Manually control flow - when software flow control is enabled.
        This will send XON (true) or XOFF (false) to the other device.
        WARNING: this function is not portable to different platforms!
        """
        if not self.is_open:
            raise portNotOpenError
        if enable:
            termios.tcflow(self.fd, termios.TCION)
        else:
            termios.tcflow(self.fd, termios.TCIOFF)

    def set_output_flow_control(self, enable=True):
        """\
        Manually control flow of outgoing data - when hardware or software flow
        control is enabled.
        WARNING: this function is not portable to different platforms!
        """
        if not self.is_open:
            raise portNotOpenError
        if enable:
            termios.tcflow(self.fd, termios.TCOON)
        else:
            termios.tcflow(self.fd, termios.TCOOFF)

    def nonblocking(self):
        """DEPRECATED - has no use"""
        import warnings
        warnings.warn("nonblocking() has no effect, already nonblocking", DeprecationWarning)
class PosixPollSerial(Serial):
    """\
    Poll based read implementation. Not all systems support poll properly.
    However this one has better handling of errors, such as a device
    disconnecting while it's in use (e.g. USB-serial unplugged).
    """

    def read(self, size=1):
        """\
        Read size bytes from the serial port. If a timeout is set it may
        return less characters as requested. With no timeout it will block
        until the requested number of bytes is read.
        """
        if not self.is_open:
            raise portNotOpenError
        read = bytearray()
        # poll.poll() expects milliseconds and blocks forever on None.
        # BUG FIX: guard against self._timeout being None — multiplying
        # None by 1000 raised TypeError and broke blocking (timeout=None)
        # reads.
        if self._timeout is None:
            poll_timeout = None
        else:
            poll_timeout = self._timeout * 1000
        poll = select.poll()
        poll.register(self.fd, select.POLLIN | select.POLLERR | select.POLLHUP | select.POLLNVAL)
        if size > 0:
            while len(read) < size:
                # print "\tread(): size",size, "have", len(read) #debug
                # wait until device becomes ready to read (or something fails)
                for fd, event in poll.poll(poll_timeout):
                    if event & (select.POLLERR | select.POLLHUP | select.POLLNVAL):
                        raise SerialException('device reports error (poll)')
                    # we don't care if it is select.POLLIN or timeout, that's
                    # handled below
                buf = os.read(self.fd, size - len(read))
                read.extend(buf)
                if ((self._timeout is not None and self._timeout >= 0) or
                        (self._inter_byte_timeout is not None and self._inter_byte_timeout > 0)) and not buf:
                    break  # early abort on timeout
        return bytes(read)
class VTIMESerial(Serial):
    """\
    Implement timeout using vtime of tty device instead of using select.
    This means that no inter character timeout can be specified and that
    the error handling is degraded.

    Overall timeout is disabled when inter-character timeout is used.
    """

    def _reconfigure_port(self, force_update=True):
        """Set communication parameters on opened port."""
        super(VTIMESerial, self)._reconfigure_port()
        fcntl.fcntl(self.fd, fcntl.F_SETFL, 0)  # clear O_NONBLOCK

        if self._inter_byte_timeout is not None:
            vmin = 1
            vtime = int(self._inter_byte_timeout * 10)
        else:
            vmin = 0
            # NOTE(review): raises TypeError when self._timeout is None; this
            # class appears to require a numeric timeout — confirm with callers.
            vtime = int(self._timeout * 10)
        try:
            orig_attr = termios.tcgetattr(self.fd)
            iflag, oflag, cflag, lflag, ispeed, ospeed, cc = orig_attr
        except termios.error as msg:  # if a port is nonexistent but has a /dev file, it'll fail here
            raise serial.SerialException("Could not configure port: {}".format(msg))

        # vtime is expressed in deciseconds and must fit an unsigned byte
        if vtime < 0 or vtime > 255:
            raise ValueError('Invalid vtime: {!r}'.format(vtime))
        cc[termios.VTIME] = vtime
        cc[termios.VMIN] = vmin

        termios.tcsetattr(
            self.fd,
            termios.TCSANOW,
            [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])

    def read(self, size=1):
        """\
        Read size bytes from the serial port. If a timeout is set it may
        return less characters as requested. With no timeout it will block
        until the requested number of bytes is read.
        """
        if not self.is_open:
            raise portNotOpenError
        read = bytearray()
        while len(read) < size:
            buf = os.read(self.fd, size - len(read))
            if not buf:
                # empty read: the tty-level VTIME timeout expired
                break
            read.extend(buf)
        return bytes(read)
| technologiescollege/Blockly-rduino-communication | scripts/Lib/site-packages/serial/serialposix.py | Python | gpl-3.0 | 29,639 |
from django import template
from manage_treemap.views.roles import options_for_permission
from treemap.audit import FieldPermission
from treemap.lib.object_caches import role_field_permissions
register = template.Library()
@register.filter
def photo_permission_level(role):
    """Return (level, label) for the role's weakest TreePhoto field permission.

    When the role has no TreePhoto field permissions at all, falls back to
    read-only.
    """
    perms = role_field_permissions(role, None, 'TreePhoto')
    if perms:
        level = min(p.permission_level for p in perms)
    else:
        level = FieldPermission.READ_ONLY
    return level, dict(FieldPermission.choices)[level]
register.filter(options_for_permission)
| maurizi/otm-core | opentreemap/manage_treemap/templatetags/roles.py | Python | agpl-3.0 | 594 |
"""Support for HomematicIP Cloud weather devices."""
import logging
from homematicip.aio.device import (
AsyncWeatherSensor, AsyncWeatherSensorPlus, AsyncWeatherSensorPro)
from homematicip.aio.home import AsyncHome
from homeassistant.components.weather import WeatherEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import TEMP_CELSIUS
from homeassistant.core import HomeAssistant
from . import DOMAIN as HMIPC_DOMAIN, HMIPC_HAPID, HomematicipGenericDevice
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(
        hass, config, async_add_entities, discovery_info=None):
    """Set up the HomematicIP Cloud weather sensor."""
    # Entities are created via config entries only; nothing to do here.
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry,
                            async_add_entities) -> None:
    """Set up the HomematicIP weather sensor from a config entry."""
    home = hass.data[HMIPC_DOMAIN][config_entry.data[HMIPC_HAPID]].home

    entities = []
    for device in home.devices:
        # Pro is checked first so pro devices get the pro entity class.
        if isinstance(device, AsyncWeatherSensorPro):
            entity_cls = HomematicipWeatherSensorPro
        elif isinstance(device, (AsyncWeatherSensor, AsyncWeatherSensorPlus)):
            entity_cls = HomematicipWeatherSensor
        else:
            continue
        entities.append(entity_cls(home, device))

    if entities:
        async_add_entities(entities)
class HomematicipWeatherSensor(HomematicipGenericDevice, WeatherEntity):
    """Representation of a HomematicIP Cloud weather sensor (plus & basic)."""

    def __init__(self, home: AsyncHome, device) -> None:
        """Initialize the weather sensor."""
        super().__init__(home, device)

    @property
    def name(self) -> str:
        """Return the name of the sensor."""
        return self._device.label

    @property
    def temperature(self) -> float:
        """Return the platform temperature."""
        return self._device.actualTemperature

    @property
    def temperature_unit(self) -> str:
        """Return the unit of measurement."""
        return TEMP_CELSIUS

    @property
    def humidity(self) -> int:
        """Return the humidity."""
        return self._device.humidity

    @property
    def wind_speed(self) -> float:
        """Return the wind speed."""
        return self._device.windSpeed

    @property
    def attribution(self) -> str:
        """Return the attribution."""
        return "Powered by Homematic IP"

    @property
    def condition(self) -> str:
        """Return the current condition."""
        # Precedence: rain, then storm, then sunshine. The `raining`
        # attribute is not present on every model, hence the hasattr guard.
        if hasattr(self._device, "raining") and self._device.raining:
            return 'rainy'
        if self._device.storm:
            return 'windy'
        if self._device.sunshine:
            return 'sunny'
        return ''
class HomematicipWeatherSensorPro(HomematicipWeatherSensor):
    """Representation of a HomematicIP weather sensor pro (adds wind bearing)."""

    @property
    def wind_bearing(self) -> float:
        """Return the wind bearing."""
        return self._device.windDirection
| jnewland/home-assistant | homeassistant/components/homematicip_cloud/weather.py | Python | apache-2.0 | 2,991 |
# Implements twisted-based UNIX domain socket transport
import sys
from twisted.internet import reactor
from pysnmp.carrier.twisted.dgram.base import DgramTwistedTransport
from pysnmp.carrier import error
domainName = snmpLocalDomain = (1, 3, 6, 1, 2, 1, 100, 1, 13)
class UnixTwistedTransport(DgramTwistedTransport):
    """Datagram transport over a UNIX domain socket, driven by the
    Twisted reactor."""

    # AbstractTwistedTransport API

    def openClientMode(self, iface=''):
        """Connect a datagram UNIX socket; returns self for chaining."""
        try:
            self._lport = reactor.connectUNIXDatagram(iface, self)
        except Exception:
            # re-raise as the carrier-level error type expected by callers
            raise error.CarrierError(sys.exc_info()[1])
        return self

    def openServerMode(self, iface=None):
        """Listen on a datagram UNIX socket; returns self for chaining."""
        try:
            self._lport = reactor.listenUNIXDatagram(iface, self)
        except Exception:
            raise error.CarrierError(sys.exc_info()[1])
        return self

    def closeTransport(self):
        """Stop listening and tear down the base transport."""
        deferred = self._lport.stopListening()
        if deferred:
            deferred.addCallback(lambda x: None)
        DgramTwistedTransport.closeTransport(self)
UnixTransport = UnixTwistedTransport
| BoundaryDev/boundary-plugin-mongodb-enterprise-dev | pysnmp/carrier/twisted/dgram/unix.py | Python | apache-2.0 | 1,019 |
# Test fixture: a disjunctive logic program (ASP syntax) fed verbatim to the
# solver under test; `:-` denotes rules/constraints, `|` disjunction in heads,
# `not` default negation. Content is consumed at runtime — do not edit.
input = """
p24|p8|not_p15:-p7,p2,not p25.
p11|not_p20|p8|not_p15:-p24,p2.
p3|p20|p18|p3:-p1,not p22.
not_p5|not_p5|not_p19|p22.
p1|p12|p2|not_p16:-not p23,not p3.
p3|not_p20|p18:-p13,not p22,not p14.
p17|p12|p8|not_p8.
p15|not_p22|p24|not_p12.
not_p21|p20|p18:-p1,not p22.
p24|p9|p24|not_p22:-p12,p23.
not_p2|not_p24|p9:-p1,p12,not p11.
p11|not_p20|not_p1|not_p4:-p24,p2.
p2|p24|p15:-not p23.
p24|p9|p24|p10:-p12,p23.
:-p19,not_p19.
p15|p4|p3|p1:-p12,p5.
p21|p5|p12|not_p20:-p14.
p12|not_p15|p16|p17:-p6,p1.
p1|p14|p2|p11:-not p5,p24.
not_p22|p24|p15:-not p23.
not_p21|p20|p18|p3:-p13,not p22.
p17|p12|p8|p25.
p15|p6:-p19,p5,not p3,not p7.
not_p25|p14|p9|not_p3.
:-p16,not_p16.
not_p23|p7|p21|p5:-p13,not p3.
p1|p15|p15|p11:-not p5,p24.
p6|not_p15|p7|p17:-p6,p1.
p3|p20|p18:-p1,not p22.
p19|p1|p12:-p19,not p23,not p11.
p24|not_p8|p6|not_p22:-p12,p23.
p5|p11|p25|not_p20:-p17,not p7.
p6|p9|p6|not_p22:-p19,p23.
not_p5:-not p5.
p1|p15|p15|not_p18:-not p5,p24.
p6|p7:-p19,not p23,not p3,not p11.
p6|p4|p7|p18:-p9,p1.
p24|p9|p24|not_p22:-p19,p23.
p11|p8|p4|p16.
p21|p22|p11|p25|not_p20:-p17.
not_p20|not_p1|not_p4:-p7,p2,not p25.
not_p21|p15|p22|not_p12.
not_p23|p7|not_p17|p5:-p13,not p3.
p21|p5|p12|not_p20:-p17.
p12|not_p15|p16|p18:-p9,p1.
p15|p11|p24|p3.
p21|p22|p12|p25|not_p20:-p14.
p3|p21|not_p20:-p6,p23,not p4.
p17|p6|not_p19|not_p8.
not_p4:-not p4.
"""
output = """
{p25, p18, p1, not_p5, p17, p15, not_p4, p21, p14, p16}
{p25, p20, p1, not_p5, p17, p15, not_p4, p21, p14, p16}
{p2, p25, not_p5, p17, p15, not_p4, p21, p14, p16}
{p25, p18, p1, not_p5, p17, p15, not_p4, p21, p16, not_p3}
{p25, p20, p1, not_p5, p17, p15, not_p4, p21, p16, not_p3}
{p2, p25, not_p5, p17, p15, not_p4, p21, p16, not_p3}
{p25, p18, p1, not_p5, p17, p15, not_p4, p21, p16, not_p25}
{p25, p20, p1, not_p5, p17, p15, not_p4, p21, p16, not_p25}
{p2, p25, not_p5, p17, p15, not_p4, p21, p16, not_p25}
{p7, p20, p1, not_p5, p17, p15, p9, not_p4, p21, p16}
{p11, p20, p1, not_p5, p17, p15, p9, not_p4, p21, p16, p6}
{p25, p20, p1, not_p5, p17, p15, p9, not_p4, p21, p16, p6}
{p25, p18, p1, not_p5, p17, p15, p9, not_p4, p21, p16}
{p2, p25, not_p5, p17, p15, p9, not_p4, p21, p16}
{not_p20, not_p5, p12, p17, p15, not_p4, p16, not_p3}
{not_p20, p18, p1, not_p5, p17, p15, not_p4, p16, not_p3}
{not_p20, p20, p1, not_p5, p17, p15, not_p4, p16, not_p3}
{p2, not_p20, not_p5, p17, p15, not_p4, p16, not_p3}
{not_p20, not_p5, p12, p17, p15, not_p4, p16, not_p25}
{not_p20, p18, p1, not_p5, p17, p15, not_p4, p16, not_p25}
{not_p20, p20, p1, not_p5, p17, p15, not_p4, p16, not_p25}
{p2, not_p20, not_p5, p17, p15, not_p4, p16, not_p25}
{not_p20, not_p5, p12, p17, p15, not_p4, p14, p16}
{not_p20, p18, p1, not_p5, p17, p15, not_p4, p14, p16}
{not_p20, p20, p1, not_p5, p17, p15, not_p4, p14, p16}
{p2, not_p20, not_p5, p17, p15, not_p4, p14, p16}
{not_p20, p18, p1, not_p5, p17, p15, p9, not_p4, p16}
{not_p20, p20, p1, not_p5, p17, p15, p9, not_p4, p16, p6}
{p7, not_p20, p20, p1, not_p5, p17, p15, p9, not_p4, p16}
{not_p20, not_p5, p12, p17, p15, p9, not_p4, p16}
{p2, not_p20, not_p5, p17, p15, p9, not_p4, p16}
{p25, not_p5, p12, p17, p15, not_p4, p16, not_p3}
{p25, not_p5, p12, p17, p15, not_p4, p16, not_p25}
{p25, not_p5, p12, p17, p15, not_p4, p14, p16}
{p25, not_p5, p12, p17, p15, p9, not_p4, p16}
{p2, p25, not_p5, not_p8, p15, not_p4, p16, not_p3}
{p25, p18, p1, not_p5, not_p8, p15, not_p4, p16, not_p3}
{p25, p20, p1, not_p5, not_p8, p15, not_p4, p16, not_p3}
{not_p5, p12, not_p8, p15, not_p4, p16, not_p3}
{p2, p25, not_p5, not_p8, p15, not_p4, p16, not_p25}
{p25, p18, p1, not_p5, not_p8, p15, not_p4, p16, not_p25}
{p25, p20, p1, not_p5, not_p8, p15, not_p4, p16, not_p25}
{not_p5, p12, not_p8, p15, not_p4, p16, not_p25}
{p25, p18, p1, not_p5, not_p8, p15, not_p4, p21, p14, p16}
{p25, p20, p1, not_p5, not_p8, p15, not_p4, p21, p14, p16}
{p2, p25, not_p5, not_p8, p15, not_p4, p21, p14, p16}
{p25, not_p20, p18, p1, not_p5, not_p8, p15, not_p4, p14, p16}
{p25, not_p20, p20, p1, not_p5, not_p8, p15, not_p4, p14, p16}
{p2, p25, not_p20, not_p5, not_p8, p15, not_p4, p14, p16}
{p25, p18, p1, not_p5, not_p8, p15, p9, not_p4, p16}
{p25, p20, p1, not_p5, not_p8, p15, p9, not_p4, p16, p6}
{p7, p25, p20, p1, not_p5, not_p8, p15, p9, not_p4, p16}
{p2, p25, not_p5, not_p8, p15, p9, not_p4, p16}
{p8, p7, p20, p1, not_p5, not_p8, p15, p9, not_p4, p16}
{not_p5, p12, not_p8, p15, not_p4, p14, p16}
{not_p5, p12, not_p8, p15, p9, not_p4, p16}
{p8, p18, p1, not_p5, p15, p9, not_p4, p16, p6}
{p8, p18, p1, not_p5, p15, not_p4, p16, p6, not_p3}
{p8, p18, p1, not_p5, p15, not_p4, p16, p6, not_p25}
{p8, p18, p1, not_p5, p15, not_p4, p21, p14, p16, p6}
{p8, not_p20, p18, p1, not_p5, p15, not_p4, p14, p16, p6}
{p8, p20, p1, not_p5, p15, p9, not_p4, p16, p6}
{p8, p20, p1, not_p5, p15, not_p4, p16, p6, not_p3}
{p8, p20, p1, not_p5, p15, not_p4, p16, p6, not_p25}
{p8, p20, p1, not_p5, p15, not_p4, p21, p14, p16, p6}
{p8, not_p20, p20, p1, not_p5, p15, not_p4, p14, p16, p6}
{p8, p7, p20, p1, not_p5, not_p19, p15, p9, not_p4, p16}
{not_p5, not_p19, p12, p15, not_p4, p16, not_p3}
{not_p5, p12, p15, not_p4, p16, p6, not_p3}
{not_p5, p12, p15, not_p4, p14, p16, p6}
{not_p5, p12, p15, not_p4, p16, p6, not_p25}
{not_p5, p12, p15, p9, not_p4, p16, p6}
{not_p5, not_p19, p12, p15, not_p4, p14, p16}
{not_p5, not_p19, p12, p15, not_p4, p16, not_p25}
{not_p5, not_p19, p12, p15, p9, not_p4, p16}
{p22, p12, p17, p15, not_p4, p5, p16, not_p3}
{p1, p22, p17, p15, not_p4, p5, p16, not_p3}
{p2, p22, p17, p15, not_p4, p5, p16, not_p3}
{p22, p12, p17, p15, not_p4, p5, p16, not_p25}
{p1, p22, p17, p15, not_p4, p5, p16, not_p25}
{p2, p22, p17, p15, not_p4, p5, p16, not_p25}
{p22, p12, p17, p15, not_p4, p5, p14, p16}
{p1, p22, p17, p15, not_p4, p5, p14, p16}
{p2, p22, p17, p15, not_p4, p5, p14, p16}
{p22, p12, p17, p15, p9, not_p4, p5, p16}
{p1, p22, p17, p15, p9, not_p4, p5, p16, p6}
{p18, p1, p22, p17, p15, p9, not_p4, p5, p16}
{p7, p1, p22, p17, p15, p9, not_p4, p5, p16}
{p2, p22, p17, p15, p9, not_p4, p5, p16}
{p18, p1, not_p19, p17, p15, not_p4, p5, p21, p16, not_p3}
{p20, p1, not_p19, p17, p15, not_p4, p5, p21, p16, not_p3}
{p2, not_p19, p17, p15, not_p4, p5, p21, p16, not_p3}
{not_p5, p12, p17, p15, not_p4, p5, p21, p16, not_p3}
{p18, p1, not_p5, p17, p15, not_p4, p5, p21, p16, not_p3}
{p20, p1, not_p5, p17, p15, not_p4, p5, p21, p16, not_p3}
{p2, not_p5, p17, p15, not_p4, p5, p21, p16, not_p3}
{p25, p18, p1, not_p19, p17, p15, not_p4, p5, p16, not_p3}
{p25, p20, p1, not_p19, p17, p15, not_p4, p5, p16, not_p3}
{p2, p25, not_p19, p17, p15, not_p4, p5, p16, not_p3}
{p25, p18, p1, not_p5, p17, p15, not_p4, p5, p16, not_p3}
{p25, p20, p1, not_p5, p17, p15, not_p4, p5, p16, not_p3}
{p2, p25, not_p5, p17, p15, not_p4, p5, p16, not_p3}
{p18, p1, not_p19, p17, p15, not_p4, p5, p21, p16, not_p25}
{p20, p1, not_p19, p17, p15, not_p4, p5, p21, p16, not_p25}
{p2, not_p19, p17, p15, not_p4, p5, p21, p16, not_p25}
{not_p5, p12, p17, p15, not_p4, p5, p21, p16, not_p25}
{p18, p1, not_p5, p17, p15, not_p4, p5, p21, p16, not_p25}
{p20, p1, not_p5, p17, p15, not_p4, p5, p21, p16, not_p25}
{p2, not_p5, p17, p15, not_p4, p5, p21, p16, not_p25}
{p25, p18, p1, not_p19, p17, p15, not_p4, p5, p16, not_p25}
{p25, p20, p1, not_p19, p17, p15, not_p4, p5, p16, not_p25}
{p2, p25, not_p19, p17, p15, not_p4, p5, p16, not_p25}
{p25, p18, p1, not_p5, p17, p15, not_p4, p5, p16, not_p25}
{p25, p20, p1, not_p5, p17, p15, not_p4, p5, p16, not_p25}
{p2, p25, not_p5, p17, p15, not_p4, p5, p16, not_p25}
{p18, p1, not_p19, p17, p15, not_p4, p5, p21, p14, p16}
{p20, p1, not_p19, p17, p15, not_p4, p5, p21, p14, p16}
{p2, not_p19, p17, p15, not_p4, p5, p21, p14, p16}
{not_p5, p12, p17, p15, not_p4, p5, p21, p14, p16}
{p18, p1, not_p5, p17, p15, not_p4, p5, p21, p14, p16}
{p20, p1, not_p5, p17, p15, not_p4, p5, p21, p14, p16}
{p2, not_p5, p17, p15, not_p4, p5, p21, p14, p16}
{p25, p18, p1, not_p19, p17, p15, not_p4, p5, p14, p16}
{p25, p20, p1, not_p19, p17, p15, not_p4, p5, p14, p16}
{p2, p25, not_p19, p17, p15, not_p4, p5, p14, p16}
{p25, p18, p1, not_p5, p17, p15, not_p4, p5, p14, p16}
{p25, p20, p1, not_p5, p17, p15, not_p4, p5, p14, p16}
{p2, p25, not_p5, p17, p15, not_p4, p5, p14, p16}
{p18, p1, not_p19, p17, p15, p9, not_p4, p5, p21, p16}
{p25, p18, p1, not_p19, p17, p15, p9, not_p4, p5, p16}
{p20, p1, not_p19, p17, p15, p9, not_p4, p5, p21, p16, p6}
{p11, p20, p1, not_p19, p17, p15, p9, not_p4, p5, p16, p6}
{p25, p20, p1, not_p19, p17, p15, p9, not_p4, p5, p16, p6}
{p7, p11, p20, p1, not_p19, p17, p15, p9, not_p4, p5, p16}
{p7, p25, p20, p1, not_p19, p17, p15, p9, not_p4, p5, p16}
{p2, not_p19, p17, p15, p9, not_p4, p5, p21, p16}
{p2, p25, not_p19, p17, p15, p9, not_p4, p5, p16}
{p18, p1, not_p5, p17, p15, p9, not_p4, p5, p21, p16}
{p25, p18, p1, not_p5, p17, p15, p9, not_p4, p5, p16}
{p20, p1, not_p5, p17, p15, p9, not_p4, p5, p21, p16, p6}
{p11, p20, p1, not_p5, p17, p15, p9, not_p4, p5, p16, p6}
{p25, p20, p1, not_p5, p17, p15, p9, not_p4, p5, p16, p6}
{p7, p11, p20, p1, not_p5, p17, p15, p9, not_p4, p5, p16}
{p7, p25, p20, p1, not_p5, p17, p15, p9, not_p4, p5, p16}
{not_p5, p12, p17, p15, p9, not_p4, p5, p21, p16}
{p2, not_p5, p17, p15, p9, not_p4, p5, p21, p16}
{p2, p25, not_p5, p17, p15, p9, not_p4, p5, p16}
{p25, p18, p1, not_p19, not_p8, p15, not_p4, p5, p14, p16}
{p25, p18, p1, not_p5, not_p8, p15, not_p4, p5, p14, p16}
{p25, p20, p1, not_p19, not_p8, p15, not_p4, p5, p14, p16}
{p25, p20, p1, not_p5, not_p8, p15, not_p4, p5, p14, p16}
{p2, p25, not_p19, not_p8, p15, not_p4, p5, p14, p16}
{p2, p25, not_p5, not_p8, p15, not_p4, p5, p14, p16}
{p25, p1, p22, not_p8, p15, not_p4, p5, p14, p16}
{p2, p25, p22, not_p8, p15, not_p4, p5, p14, p16}
{p8, p1, p22, p15, not_p4, p5, p14, p16, p6}
{p8, p25, p18, p1, not_p5, p15, not_p4, p5, p14, p16, p6}
{p8, p25, p20, p1, not_p5, p15, not_p4, p5, p14, p16, p6}
{p8, p20, p1, not_p5, not_p19, p15, p9, p4, p16}
{not_p20, p20, p1, not_p5, p17, p15, p9, p4, p16}
{p11, p20, p1, not_p5, p17, p15, p9, p4, p21, p16}
{p25, p20, p1, not_p5, p17, p15, p9, p4, p21, p16}
{p8, p20, p1, not_p5, not_p8, p15, p9, p4, p16}
{p25, p20, p1, not_p5, not_p8, p15, p9, p4, p16}
{p25, p20, p1, not_p19, p17, p15, p9, p4, p5, p16}
{p25, p20, p1, not_p5, p17, p15, p9, p4, p5, p16}
{p20, p1, not_p19, p17, p15, p9, p4, p5, p21, p16}
{p11, p20, p1, not_p19, p17, p15, p9, p4, p5, p16}
{p20, p1, not_p5, p17, p15, p9, p4, p5, p21, p16}
{p11, p20, p1, not_p5, p17, p15, p9, p4, p5, p16}
{p1, p22, p17, p15, p9, p4, p5, p16}
{not_p20, not_p5, not_p16, p17, p15, p4, p14}
{not_p20, not_p5, p12, p17, p15, p4, p14}
{not_p20, p18, p1, not_p5, p17, p15, p4, p14}
{not_p20, p20, p1, not_p5, p17, p15, p4, p14}
{p2, not_p20, not_p5, p17, p15, p4, p14}
{not_p20, not_p5, not_p16, p17, p15, p4, not_p3}
{not_p20, not_p5, p12, p17, p15, p4, not_p3}
{not_p20, p18, p1, not_p5, p17, p15, p4, not_p3}
{not_p20, p20, p1, not_p5, p17, p15, p4, not_p3}
{p2, not_p20, not_p5, p17, p15, p4, not_p3}
{not_p20, not_p5, not_p16, p17, p15, p4, not_p25}
{not_p20, not_p5, p12, p17, p15, p4, not_p25}
{not_p20, p18, p1, not_p5, p17, p15, p4, not_p25}
{not_p20, p20, p1, not_p5, p17, p15, p4, not_p25}
{p2, not_p20, not_p5, p17, p15, p4, not_p25}
{not_p20, not_p5, not_p16, p17, p15, p9, p4}
{not_p20, p18, p1, not_p5, p17, p15, p9, p4}
{not_p15, not_p20, p20, p1, not_p5, p17, p15, p9, p4}
{not_p20, not_p5, p12, p17, p15, p9, p4}
{p2, not_p20, not_p5, p17, p15, p9, p4}
{not_p15, p11, p20, p1, not_p5, p17, p15, p9, p4, p21}
{p25, not_p5, not_p16, p17, p15, p4, p21, p14}
{p25, p18, p1, not_p5, p17, p15, p4, p21, p14}
{p25, p20, p1, not_p5, p17, p15, p4, p21, p14}
{p2, p25, not_p5, p17, p15, p4, p21, p14}
{p25, not_p5, not_p16, p17, p15, p4, p21, not_p3}
{p25, p18, p1, not_p5, p17, p15, p4, p21, not_p3}
{p25, p20, p1, not_p5, p17, p15, p4, p21, not_p3}
{p2, p25, not_p5, p17, p15, p4, p21, not_p3}
{p25, not_p5, not_p16, p17, p15, p4, p21, not_p25}
{p25, p18, p1, not_p5, p17, p15, p4, p21, not_p25}
{p25, p20, p1, not_p5, p17, p15, p4, p21, not_p25}
{p2, p25, not_p5, p17, p15, p4, p21, not_p25}
{p25, not_p5, not_p16, p17, p15, p9, p4, p21}
{p25, p18, p1, not_p5, p17, p15, p9, p4, p21}
{not_p15, p25, p20, p1, not_p5, p17, p15, p9, p4, p21}
{p2, p25, not_p5, p17, p15, p9, p4, p21}
{p25, not_p5, p12, p17, p15, p4, p14}
{p25, not_p5, p12, p17, p15, p4, not_p3}
{p25, not_p5, p12, p17, p15, p4, not_p25}
{p25, not_p5, p12, p17, p15, p9, p4}
{p25, not_p5, not_p16, not_p8, p15, p4, not_p3}
{p25, p18, p1, not_p5, not_p8, p15, p4, not_p3}
{p25, p20, p1, not_p5, not_p8, p15, p4, not_p3}
{p2, p25, not_p5, not_p8, p15, p4, not_p3}
{not_p5, p12, not_p8, p15, p4, not_p3}
{p25, not_p5, not_p16, not_p8, p15, p4, not_p25}
{p25, p18, p1, not_p5, not_p8, p15, p4, not_p25}
{p25, p20, p1, not_p5, not_p8, p15, p4, not_p25}
{p2, p25, not_p5, not_p8, p15, p4, not_p25}
{not_p5, p12, not_p8, p15, p4, not_p25}
{p25, not_p5, not_p16, not_p8, p15, p4, p21, p14}
{p25, p18, p1, not_p5, not_p8, p15, p4, p21, p14}
{p25, p20, p1, not_p5, not_p8, p15, p4, p21, p14}
{p2, p25, not_p5, not_p8, p15, p4, p21, p14}
{p25, not_p20, not_p5, not_p16, not_p8, p15, p4, p14}
{p25, not_p20, p18, p1, not_p5, not_p8, p15, p4, p14}
{p25, not_p20, p20, p1, not_p5, not_p8, p15, p4, p14}
{p2, p25, not_p20, not_p5, not_p8, p15, p4, p14}
{p25, not_p5, not_p16, not_p8, p15, p9, p4}
{p25, p18, p1, not_p5, not_p8, p15, p9, p4}
{not_p15, p25, p20, p1, not_p5, not_p8, p15, p9, p4}
{p2, p25, not_p5, not_p8, p15, p9, p4}
{p8, not_p15, p20, p1, not_p5, not_p8, p15, p9, p4}
{not_p5, p12, not_p8, p15, p4, p14}
{not_p5, p12, not_p8, p15, p9, p4}
{p8, not_p15, p20, p1, not_p5, not_p19, p15, p9, p4}
{not_p5, not_p19, p12, p15, p4, not_p3}
{not_p5, p12, p15, p4, p6, not_p3}
{not_p5, not_p19, p12, p15, p4, p14}
{not_p5, not_p19, p12, p15, p4, not_p25}
{not_p5, not_p19, p12, p15, p9, p4}
{not_p5, p12, p15, p4, p14, p6}
{not_p5, p12, p15, p4, p6, not_p25}
{not_p5, p12, p15, p9, p4, p6}
{p22, not_p16, p17, p15, p4, p5, p14}
{p22, p12, p17, p15, p4, p5, p14}
{p1, p22, p17, p15, p4, p5, p14}
{p2, p22, p17, p15, p4, p5, p14}
{p22, not_p16, p17, p15, p4, p5, not_p3}
{p22, p12, p17, p15, p4, p5, not_p3}
{p1, p22, p17, p15, p4, p5, not_p3}
{p2, p22, p17, p15, p4, p5, not_p3}
{p22, not_p16, p17, p15, p4, p5, not_p25}
{p22, p12, p17, p15, p4, p5, not_p25}
{p1, p22, p17, p15, p4, p5, not_p25}
{p2, p22, p17, p15, p4, p5, not_p25}
{p22, not_p16, p17, p15, p9, p4, p5}
{p22, p12, p17, p15, p9, p4, p5}
{p18, p1, p22, p17, p15, p9, p4, p5}
{not_p15, p1, p22, p17, p15, p9, p4, p5}
{p2, p22, p17, p15, p9, p4, p5}
{not_p19, not_p16, p17, p15, p4, p5, p21, not_p3}
{p18, p1, not_p19, p17, p15, p4, p5, p21, not_p3}
{p20, p1, not_p19, p17, p15, p4, p5, p21, not_p3}
{p2, not_p19, p17, p15, p4, p5, p21, not_p3}
{not_p5, not_p16, p17, p15, p4, p5, p21, not_p3}
{not_p5, p12, p17, p15, p4, p5, p21, not_p3}
{p18, p1, not_p5, p17, p15, p4, p5, p21, not_p3}
{p20, p1, not_p5, p17, p15, p4, p5, p21, not_p3}
{p2, not_p5, p17, p15, p4, p5, p21, not_p3}
{p25, not_p19, not_p16, p17, p15, p4, p5, not_p3}
{p25, p18, p1, not_p19, p17, p15, p4, p5, not_p3}
{p25, p20, p1, not_p19, p17, p15, p4, p5, not_p3}
{p2, p25, not_p19, p17, p15, p4, p5, not_p3}
{p25, not_p5, not_p16, p17, p15, p4, p5, not_p3}
{p25, p18, p1, not_p5, p17, p15, p4, p5, not_p3}
{p25, p20, p1, not_p5, p17, p15, p4, p5, not_p3}
{p2, p25, not_p5, p17, p15, p4, p5, not_p3}
{not_p19, not_p16, p17, p15, p4, p5, p21, not_p25}
{p18, p1, not_p19, p17, p15, p4, p5, p21, not_p25}
{p20, p1, not_p19, p17, p15, p4, p5, p21, not_p25}
{p2, not_p19, p17, p15, p4, p5, p21, not_p25}
{not_p5, not_p16, p17, p15, p4, p5, p21, not_p25}
{not_p5, p12, p17, p15, p4, p5, p21, not_p25}
{p18, p1, not_p5, p17, p15, p4, p5, p21, not_p25}
{p20, p1, not_p5, p17, p15, p4, p5, p21, not_p25}
{p2, not_p5, p17, p15, p4, p5, p21, not_p25}
{p25, not_p19, not_p16, p17, p15, p4, p5, not_p25}
{p25, p18, p1, not_p19, p17, p15, p4, p5, not_p25}
{p25, p20, p1, not_p19, p17, p15, p4, p5, not_p25}
{p2, p25, not_p19, p17, p15, p4, p5, not_p25}
{p25, not_p5, not_p16, p17, p15, p4, p5, not_p25}
{p25, p18, p1, not_p5, p17, p15, p4, p5, not_p25}
{p25, p20, p1, not_p5, p17, p15, p4, p5, not_p25}
{p2, p25, not_p5, p17, p15, p4, p5, not_p25}
{not_p19, not_p16, p17, p15, p4, p5, p21, p14}
{p18, p1, not_p19, p17, p15, p4, p5, p21, p14}
{p20, p1, not_p19, p17, p15, p4, p5, p21, p14}
{p2, not_p19, p17, p15, p4, p5, p21, p14}
{not_p5, not_p16, p17, p15, p4, p5, p21, p14}
{not_p5, p12, p17, p15, p4, p5, p21, p14}
{p18, p1, not_p5, p17, p15, p4, p5, p21, p14}
{p20, p1, not_p5, p17, p15, p4, p5, p21, p14}
{p2, not_p5, p17, p15, p4, p5, p21, p14}
{p25, not_p19, not_p16, p17, p15, p4, p5, p14}
{p25, p18, p1, not_p19, p17, p15, p4, p5, p14}
{p25, p20, p1, not_p19, p17, p15, p4, p5, p14}
{p2, p25, not_p19, p17, p15, p4, p5, p14}
{p25, not_p5, not_p16, p17, p15, p4, p5, p14}
{p25, p18, p1, not_p5, p17, p15, p4, p5, p14}
{p25, p20, p1, not_p5, p17, p15, p4, p5, p14}
{p2, p25, not_p5, p17, p15, p4, p5, p14}
{not_p19, not_p16, p17, p15, p9, p4, p5, p21}
{not_p5, not_p16, p17, p15, p9, p4, p5, p21}
{p25, not_p19, not_p16, p17, p15, p9, p4, p5}
{p25, not_p5, not_p16, p17, p15, p9, p4, p5}
{p18, p1, not_p19, p17, p15, p9, p4, p5, p21}
{p25, p18, p1, not_p19, p17, p15, p9, p4, p5}
{not_p15, p20, p1, not_p19, p17, p15, p9, p4, p5, p21}
{not_p15, p11, p20, p1, not_p19, p17, p15, p9, p4, p5}
{not_p15, p25, p20, p1, not_p19, p17, p15, p9, p4, p5}
{p2, not_p19, p17, p15, p9, p4, p5, p21}
{p2, p25, not_p19, p17, p15, p9, p4, p5}
{not_p5, p12, p17, p15, p9, p4, p5, p21}
{p18, p1, not_p5, p17, p15, p9, p4, p5, p21}
{p25, p18, p1, not_p5, p17, p15, p9, p4, p5}
{not_p15, p20, p1, not_p5, p17, p15, p9, p4, p5, p21}
{not_p15, p11, p20, p1, not_p5, p17, p15, p9, p4, p5}
{not_p15, p25, p20, p1, not_p5, p17, p15, p9, p4, p5}
{p2, not_p5, p17, p15, p9, p4, p5, p21}
{p2, p25, not_p5, p17, p15, p9, p4, p5}
{p25, not_p19, not_p16, not_p8, p15, p4, p5, p14}
{p25, not_p5, not_p16, not_p8, p15, p4, p5, p14}
{p25, p22, not_p16, not_p8, p15, p4, p5, p14}
{p25, p1, p22, not_p8, p15, p4, p5, p14}
{p25, p18, p1, not_p19, not_p8, p15, p4, p5, p14}
{p25, p20, p1, not_p19, not_p8, p15, p4, p5, p14}
{p25, p18, p1, not_p5, not_p8, p15, p4, p5, p14}
{p25, p20, p1, not_p5, not_p8, p15, p4, p5, p14}
{p2, p25, not_p19, not_p8, p15, p4, p5, p14}
{p2, p25, not_p5, not_p8, p15, p4, p5, p14}
{p2, p25, p22, not_p8, p15, p4, p5, p14}
{p8, not_p5, not_p16, not_p8, p15, not_p4, not_p3}
{p8, not_p5, p12, not_p8, p15, not_p4, not_p3}
{p8, p18, p1, not_p5, not_p8, p15, not_p4, not_p3}
{p8, p20, p1, not_p5, not_p8, p15, not_p4, not_p3}
{p8, p2, not_p5, not_p8, p15, not_p4, not_p3}
{p8, not_p5, not_p19, not_p16, p15, not_p4, not_p3}
{p8, not_p5, not_p16, p15, not_p4, p6, not_p3}
{p8, not_p20, not_p5, not_p16, p17, p15, not_p4, not_p3}
{p8, p25, not_p5, not_p16, p17, p15, not_p4, p21, not_p3}
{p8, not_p5, not_p19, p12, p15, not_p4, not_p3}
{p8, p18, p1, not_p5, not_p19, p15, not_p4, not_p3}
{p8, p20, p1, not_p5, not_p19, p15, not_p4, not_p3}
{p8, p2, not_p5, not_p19, p15, not_p4, not_p3}
{p8, not_p15, p18, p1, not_p5, p15, not_p4, p6, not_p3}
{p8, not_p15, p20, p1, not_p5, p15, not_p4, p6, not_p3}
{p8, not_p5, p12, p15, not_p4, p6, not_p3}
{p8, p2, not_p5, p15, not_p4, p6, not_p3}
{p8, not_p20, not_p5, p12, p17, p15, not_p4, not_p3}
{p8, not_p20, p18, p1, not_p5, p17, p15, not_p4, not_p3}
{p8, not_p20, p20, p1, not_p5, p17, p15, not_p4, not_p3}
{p8, p2, not_p20, not_p5, p17, p15, not_p4, not_p3}
{p8, p25, p18, p1, not_p5, p17, p15, not_p4, p21, not_p3}
{p8, p25, p20, p1, not_p5, p17, p15, not_p4, p21, not_p3}
{p8, p2, p25, not_p5, p17, p15, not_p4, p21, not_p3}
{p8, p25, not_p5, p12, p17, p15, not_p4, not_p3}
{p8, not_p5, not_p16, not_p8, p15, not_p4, not_p25}
{p8, not_p5, p12, not_p8, p15, not_p4, not_p25}
{p8, p18, p1, not_p5, not_p8, p15, not_p4, not_p25}
{p8, p20, p1, not_p5, not_p8, p15, not_p4, not_p25}
{p8, p2, not_p5, not_p8, p15, not_p4, not_p25}
{p8, not_p5, not_p19, not_p16, p15, not_p4, not_p25}
{p8, not_p5, not_p16, p15, not_p4, p6, not_p25}
{p8, not_p20, not_p5, not_p16, p17, p15, not_p4, not_p25}
{p8, p25, not_p5, not_p16, p17, p15, not_p4, p21, not_p25}
{p8, not_p5, not_p19, p12, p15, not_p4, not_p25}
{p8, p18, p1, not_p5, not_p19, p15, not_p4, not_p25}
{p8, p20, p1, not_p5, not_p19, p15, not_p4, not_p25}
{p8, p2, not_p5, not_p19, p15, not_p4, not_p25}
{p8, not_p15, p18, p1, not_p5, p15, not_p4, p6, not_p25}
{p8, not_p15, p20, p1, not_p5, p15, not_p4, p6, not_p25}
{p8, not_p5, p12, p15, not_p4, p6, not_p25}
{p8, p2, not_p5, p15, not_p4, p6, not_p25}
{p8, not_p20, not_p5, p12, p17, p15, not_p4, not_p25}
{p8, not_p20, p18, p1, not_p5, p17, p15, not_p4, not_p25}
{p8, not_p20, p20, p1, not_p5, p17, p15, not_p4, not_p25}
{p8, p2, not_p20, not_p5, p17, p15, not_p4, not_p25}
{p8, p25, p18, p1, not_p5, p17, p15, not_p4, p21, not_p25}
{p8, p25, p20, p1, not_p5, p17, p15, not_p4, p21, not_p25}
{p8, p2, p25, not_p5, p17, p15, not_p4, p21, not_p25}
{p8, p25, not_p5, p12, p17, p15, not_p4, not_p25}
{p8, not_p5, p12, not_p8, p15, not_p4, p14}
{p8, not_p5, not_p16, not_p8, p15, not_p4, p21, p14}
{p8, p18, p1, not_p5, not_p8, p15, not_p4, p21, p14}
{p8, p20, p1, not_p5, not_p8, p15, not_p4, p21, p14}
{p8, p2, not_p5, not_p8, p15, not_p4, p21, p14}
{p8, not_p20, not_p5, not_p16, not_p8, p15, not_p4, p14}
{p8, not_p20, p18, p1, not_p5, not_p8, p15, not_p4, p14}
{p8, not_p20, p20, p1, not_p5, not_p8, p15, not_p4, p14}
{p8, p2, not_p20, not_p5, not_p8, p15, not_p4, p14}
{p8, not_p5, not_p16, not_p8, p15, p9, not_p4}
{p8, not_p5, p12, not_p8, p15, p9, not_p4}
{p8, p18, p1, not_p5, not_p8, p15, p9, not_p4}
{p8, not_p15, p7, p20, p1, not_p5, not_p8, p15, p9, not_p4}
{p8, p2, not_p5, not_p8, p15, p9, not_p4}
{p8, p25, not_p5, not_p16, p17, p15, not_p4, p21, p14}
{p8, not_p5, not_p19, not_p16, p15, not_p4, p21, p14}
{p8, not_p5, not_p16, p15, not_p4, p21, p14, p6}
{p8, not_p20, not_p5, not_p19, not_p16, p15, not_p4, p14}
{p8, not_p20, not_p5, not_p16, p17, p15, not_p4, p14}
{p8, not_p20, not_p5, not_p16, p15, not_p4, p14, p6}
{p8, not_p5, not_p19, not_p16, p15, p9, not_p4}
{p8, not_p5, not_p16, p15, p9, not_p4, p6}
{p8, not_p20, not_p5, not_p16, p17, p15, p9, not_p4}
{p8, p25, not_p5, not_p16, p17, p15, p9, not_p4, p21}
{p8, not_p5, not_p19, p12, p15, not_p4, p14}
{p8, p18, p1, not_p5, not_p19, p15, not_p4, p21, p14}
{p8, p20, p1, not_p5, not_p19, p15, not_p4, p21, p14}
{p8, p2, not_p5, not_p19, p15, not_p4, p21, p14}
{p8, not_p20, p18, p1, not_p5, not_p19, p15, not_p4, p14}
{p8, not_p20, p20, p1, not_p5, not_p19, p15, not_p4, p14}
{p8, p2, not_p20, not_p5, not_p19, p15, not_p4, p14}
{p8, not_p5, not_p19, p12, p15, p9, not_p4}
{p8, p18, p1, not_p5, not_p19, p15, p9, not_p4}
{p8, not_p15, p7, p20, p1, not_p5, not_p19, p15, p9, not_p4}
{p8, p2, not_p5, not_p19, p15, p9, not_p4}
{p8, not_p5, p12, p15, not_p4, p14, p6}
{p8, not_p15, p18, p1, not_p5, p15, not_p4, p21, p14, p6}
{p8, not_p15, p20, p1, not_p5, p15, not_p4, p21, p14, p6}
{p8, p2, not_p5, p15, not_p4, p21, p14, p6}
{p8, not_p15, not_p20, p18, p1, not_p5, p15, not_p4, p14, p6}
{p8, not_p15, not_p20, p20, p1, not_p5, p15, not_p4, p14, p6}
{p8, p2, not_p20, not_p5, p15, not_p4, p14, p6}
{p8, not_p15, p18, p1, not_p5, p15, p9, not_p4, p6}
{p8, not_p15, p20, p1, not_p5, p15, p9, not_p4, p6}
{p8, not_p5, p12, p15, p9, not_p4, p6}
{p8, p2, not_p5, p15, p9, not_p4, p6}
{p8, not_p20, not_p5, p12, p17, p15, not_p4, p14}
{p8, not_p20, p18, p1, not_p5, p17, p15, not_p4, p14}
{p8, not_p20, p20, p1, not_p5, p17, p15, not_p4, p14}
{p8, p2, not_p20, not_p5, p17, p15, not_p4, p14}
{p8, p25, p18, p1, not_p5, p17, p15, not_p4, p21, p14}
{p8, p25, p20, p1, not_p5, p17, p15, not_p4, p21, p14}
{p8, p2, p25, not_p5, p17, p15, not_p4, p21, p14}
{p8, p25, not_p5, p12, p17, p15, not_p4, p14}
{p8, not_p20, not_p5, p12, p17, p15, p9, not_p4}
{p8, p25, not_p5, p12, p17, p15, p9, not_p4}
{p8, not_p15, p7, p20, p1, not_p5, p17, p15, p9, not_p4, p21}
{p8, p25, p18, p1, not_p5, p17, p15, p9, not_p4, p21}
{p8, p2, p25, not_p5, p17, p15, p9, not_p4, p21}
{p8, not_p20, p18, p1, not_p5, p17, p15, p9, not_p4}
{p8, not_p15, p7, not_p20, p20, p1, not_p5, p17, p15, p9, not_p4}
{p8, p2, not_p20, not_p5, p17, p15, p9, not_p4}
{p11, not_p5, p12, p17, p15, not_p4, p14}
{p11, not_p5, p12, p17, p15, not_p4, not_p3}
{p11, not_p5, p12, p17, p15, not_p4, not_p25}
{p11, not_p5, p12, p17, p15, p9, not_p4}
{p11, not_p5, not_p16, p17, p15, not_p4, p21, not_p3}
{p11, p18, p1, not_p5, p17, p15, not_p4, p21, not_p3}
{p11, p20, p1, not_p5, p17, p15, not_p4, p21, not_p3}
{p2, p11, not_p5, p17, p15, not_p4, p21, not_p3}
{p11, not_p20, not_p5, not_p16, p17, p15, not_p4, not_p3}
{p11, not_p20, p18, p1, not_p5, p17, p15, not_p4, not_p3}
{p11, not_p20, p20, p1, not_p5, p17, p15, not_p4, not_p3}
{p2, p11, not_p20, not_p5, p17, p15, not_p4, not_p3}
{p11, not_p5, not_p16, p17, p15, not_p4, p21, not_p25}
{p11, p18, p1, not_p5, p17, p15, not_p4, p21, not_p25}
{p11, p20, p1, not_p5, p17, p15, not_p4, p21, not_p25}
{p2, p11, not_p5, p17, p15, not_p4, p21, not_p25}
{p11, not_p20, not_p5, not_p16, p17, p15, not_p4, not_p25}
{p11, not_p20, p18, p1, not_p5, p17, p15, not_p4, not_p25}
{p11, not_p20, p20, p1, not_p5, p17, p15, not_p4, not_p25}
{p2, p11, not_p20, not_p5, p17, p15, not_p4, not_p25}
{p11, not_p5, not_p16, p17, p15, not_p4, p21, p14}
{p11, not_p5, not_p16, p17, p15, p9, not_p4, p21}
{p11, not_p20, not_p5, not_p16, p17, p15, not_p4, p14}
{p11, not_p20, not_p5, not_p16, p17, p15, p9, not_p4}
{p2, p11, not_p5, p17, p15, p9, not_p4, p21}
{p11, p18, p1, not_p5, p17, p15, p9, not_p4, p21}
{not_p15, p11, p20, p1, not_p5, p17, p15, p9, not_p4, p21, p6}
{not_p15, p7, p11, p20, p1, not_p5, p17, p15, p9, not_p4, p21}
{p2, p11, not_p20, not_p5, p17, p15, p9, not_p4}
{p11, not_p20, p18, p1, not_p5, p17, p15, p9, not_p4}
{not_p15, p11, not_p20, p20, p1, not_p5, p17, p15, p9, not_p4, p6}
{not_p15, p7, p11, not_p20, p20, p1, not_p5, p17, p15, p9, not_p4}
{p11, p18, p1, not_p5, p17, p15, not_p4, p21, p14}
{p11, p20, p1, not_p5, p17, p15, not_p4, p21, p14}
{p2, p11, not_p5, p17, p15, not_p4, p21, p14}
{p11, not_p20, p18, p1, not_p5, p17, p15, not_p4, p14}
{p11, not_p20, p20, p1, not_p5, p17, p15, not_p4, p14}
{p2, p11, not_p20, not_p5, p17, p15, not_p4, p14}
{p11, not_p5, p12, not_p8, p15, not_p4, p14}
{p11, not_p5, p12, not_p8, p15, not_p4, not_p3}
{p11, not_p5, p12, not_p8, p15, not_p4, not_p25}
{p11, not_p5, p12, not_p8, p15, p9, not_p4}
{p25, p11, not_p5, not_p16, not_p8, p15, not_p4, not_p3}
{p25, p11, not_p5, not_p16, not_p8, p15, not_p4, not_p25}
{p25, p11, not_p5, not_p16, not_p8, p15, not_p4, p21, p14}
{p25, p11, not_p20, not_p5, not_p16, not_p8, p15, not_p4, p14}
{p25, p11, not_p5, not_p16, not_p8, p15, p9, not_p4}
{p25, p11, p18, p1, not_p5, not_p8, p15, p9, not_p4}
{p25, p11, p18, p1, not_p5, not_p8, p15, not_p4, not_p3}
{p25, p11, p18, p1, not_p5, not_p8, p15, not_p4, not_p25}
{p25, p11, p18, p1, not_p5, not_p8, p15, not_p4, p21, p14}
{p25, p11, not_p20, p18, p1, not_p5, not_p8, p15, not_p4, p14}
{p25, p11, p20, p1, not_p5, not_p8, p15, not_p4, not_p3}
{p25, p11, p20, p1, not_p5, not_p8, p15, not_p4, not_p25}
{p25, p11, p20, p1, not_p5, not_p8, p15, not_p4, p21, p14}
{p25, p11, not_p20, p20, p1, not_p5, not_p8, p15, not_p4, p14}
{not_p15, p25, p11, p20, p1, not_p5, not_p8, p15, p9, not_p4, p6}
{not_p15, p7, p25, p11, p20, p1, not_p5, not_p8, p15, p9, not_p4}
{p2, p25, p11, not_p5, not_p8, p15, not_p4, not_p3}
{p2, p25, p11, not_p5, not_p8, p15, not_p4, not_p25}
{p2, p25, p11, not_p5, not_p8, p15, not_p4, p21, p14}
{p2, p25, p11, not_p20, not_p5, not_p8, p15, not_p4, p14}
{p2, p25, p11, not_p5, not_p8, p15, p9, not_p4}
{p11, not_p5, not_p19, p12, p15, not_p4, p14}
{p11, not_p5, not_p19, p12, p15, not_p4, not_p3}
{p11, not_p5, not_p19, p12, p15, not_p4, not_p25}
{p11, not_p5, not_p19, p12, p15, p9, not_p4}
{p11, not_p5, p12, p15, not_p4, p14, p6}
{p11, not_p5, p12, p15, not_p4, p6, not_p3}
{p11, not_p5, p12, p15, not_p4, p6, not_p25}
{p11, not_p5, p12, p15, p9, not_p4, p6}
{p11, not_p19, not_p16, p17, p15, not_p4, p5, not_p3}
{p11, not_p5, not_p16, p17, p15, not_p4, p5, not_p3}
{p11, p22, not_p16, p17, p15, not_p4, p5, not_p3}
{p11, p1, p22, p17, p15, not_p4, p5, not_p3}
{p11, p18, p1, not_p19, p17, p15, not_p4, p5, not_p3}
{p11, p20, p1, not_p19, p17, p15, not_p4, p5, not_p3}
{p11, p18, p1, not_p5, p17, p15, not_p4, p5, not_p3}
{p11, p20, p1, not_p5, p17, p15, not_p4, p5, not_p3}
{p2, p11, not_p19, p17, p15, not_p4, p5, not_p3}
{p2, p11, not_p5, p17, p15, not_p4, p5, not_p3}
{p2, p11, p22, p17, p15, not_p4, p5, not_p3}
{p11, not_p19, not_p16, p17, p15, not_p4, p5, not_p25}
{p11, not_p5, not_p16, p17, p15, not_p4, p5, not_p25}
{p11, p22, not_p16, p17, p15, not_p4, p5, not_p25}
{p11, p1, p22, p17, p15, not_p4, p5, not_p25}
{p11, p18, p1, not_p19, p17, p15, not_p4, p5, not_p25}
{p11, p20, p1, not_p19, p17, p15, not_p4, p5, not_p25}
{p11, p18, p1, not_p5, p17, p15, not_p4, p5, not_p25}
{p11, p20, p1, not_p5, p17, p15, not_p4, p5, not_p25}
{p2, p11, not_p19, p17, p15, not_p4, p5, not_p25}
{p2, p11, not_p5, p17, p15, not_p4, p5, not_p25}
{p2, p11, p22, p17, p15, not_p4, p5, not_p25}
{p11, p22, not_p16, p17, p15, not_p4, p5, p14}
{p25, p11, not_p19, not_p16, p17, p15, not_p4, p5, p14}
{p25, p11, not_p5, not_p16, p17, p15, not_p4, p5, p14}
{p11, not_p19, not_p16, p17, p15, p9, not_p4, p5}
{p11, not_p5, not_p16, p17, p15, p9, not_p4, p5}
{p11, p22, not_p16, p17, p15, p9, not_p4, p5}
{p11, p1, p22, p17, p15, not_p4, p5, p14}
{p2, p11, p22, p17, p15, not_p4, p5, p14}
{p25, p11, p18, p1, not_p19, p17, p15, not_p4, p5, p14}
{p25, p11, p18, p1, not_p5, p17, p15, not_p4, p5, p14}
{p25, p11, p20, p1, not_p19, p17, p15, not_p4, p5, p14}
{p25, p11, p20, p1, not_p5, p17, p15, not_p4, p5, p14}
{p2, p25, p11, not_p19, p17, p15, not_p4, p5, p14}
{p2, p25, p11, not_p5, p17, p15, not_p4, p5, p14}
{p2, p11, not_p19, p17, p15, p9, not_p4, p5}
{p2, p11, not_p5, p17, p15, p9, not_p4, p5}
{p2, p11, p22, p17, p15, p9, not_p4, p5}
{p11, p18, p1, not_p19, p17, p15, p9, not_p4, p5}
{not_p15, p11, p20, p1, not_p19, p17, p15, p9, not_p4, p5, p6}
{not_p15, p7, p11, p20, p1, not_p19, p17, p15, p9, not_p4, p5}
{p11, p18, p1, not_p5, p17, p15, p9, not_p4, p5}
{not_p15, p11, p20, p1, not_p5, p17, p15, p9, not_p4, p5, p6}
{not_p15, p7, p11, p20, p1, not_p5, p17, p15, p9, not_p4, p5}
{p11, p18, p1, p22, p17, p15, p9, not_p4, p5}
{not_p15, p11, p1, p22, p17, p15, p9, not_p4, p5, p6}
{not_p15, p7, p11, p1, p22, p17, p15, p9, not_p4, p5}
{p8, p22, not_p16, p17, p15, not_p4, p5, not_p3}
{p8, p22, p12, p17, p15, not_p4, p5, not_p3}
{p8, p1, p22, p17, p15, not_p4, p5, not_p3}
{p8, p2, p22, p17, p15, not_p4, p5, not_p3}
{p8, p22, not_p16, p17, p15, not_p4, p5, not_p25}
{p8, p22, p12, p17, p15, not_p4, p5, not_p25}
{p8, p1, p22, p17, p15, not_p4, p5, not_p25}
{p8, p2, p22, p17, p15, not_p4, p5, not_p25}
{p8, p22, not_p16, p17, p15, not_p4, p5, p14}
{p8, p22, p12, p17, p15, not_p4, p5, p14}
{p8, p1, p22, p17, p15, not_p4, p5, p14}
{p8, p2, p22, p17, p15, not_p4, p5, p14}
{p8, p22, not_p16, p17, p15, p9, not_p4, p5}
{p8, p22, p12, p17, p15, p9, not_p4, p5}
{p8, p18, p1, p22, p17, p15, p9, not_p4, p5}
{p8, not_p15, p7, p1, p22, p17, p15, p9, not_p4, p5}
{p8, p2, p22, p17, p15, p9, not_p4, p5}
{p8, not_p5, not_p16, p17, p15, not_p4, p5, p21, not_p3}
{p8, not_p5, p12, p17, p15, not_p4, p5, p21, not_p3}
{p8, p18, p1, not_p5, p17, p15, not_p4, p5, p21, not_p3}
{p8, p20, p1, not_p5, p17, p15, not_p4, p5, p21, not_p3}
{p8, p2, not_p5, p17, p15, not_p4, p5, p21, not_p3}
{p8, p25, not_p5, not_p16, p17, p15, not_p4, p5, not_p3}
{p8, p25, p18, p1, not_p5, p17, p15, not_p4, p5, not_p3}
{p8, p25, p20, p1, not_p5, p17, p15, not_p4, p5, not_p3}
{p8, p2, p25, not_p5, p17, p15, not_p4, p5, not_p3}
{p8, not_p5, not_p16, p17, p15, not_p4, p5, p21, not_p25}
{p8, not_p5, p12, p17, p15, not_p4, p5, p21, not_p25}
{p8, p18, p1, not_p5, p17, p15, not_p4, p5, p21, not_p25}
{p8, p20, p1, not_p5, p17, p15, not_p4, p5, p21, not_p25}
{p8, p2, not_p5, p17, p15, not_p4, p5, p21, not_p25}
{p8, p25, not_p5, not_p16, p17, p15, not_p4, p5, not_p25}
{p8, p25, p18, p1, not_p5, p17, p15, not_p4, p5, not_p25}
{p8, p25, p20, p1, not_p5, p17, p15, not_p4, p5, not_p25}
{p8, p2, p25, not_p5, p17, p15, not_p4, p5, not_p25}
{p8, not_p5, not_p16, p17, p15, not_p4, p5, p21, p14}
{p8, not_p5, p12, p17, p15, not_p4, p5, p21, p14}
{p8, p18, p1, not_p5, p17, p15, not_p4, p5, p21, p14}
{p8, p20, p1, not_p5, p17, p15, not_p4, p5, p21, p14}
{p8, p2, not_p5, p17, p15, not_p4, p5, p21, p14}
{p8, p25, not_p5, not_p16, p17, p15, not_p4, p5, p14}
{p8, p25, p18, p1, not_p5, p17, p15, not_p4, p5, p14}
{p8, p25, p20, p1, not_p5, p17, p15, not_p4, p5, p14}
{p8, p2, p25, not_p5, p17, p15, not_p4, p5, p14}
{p8, not_p5, not_p16, p17, p15, p9, not_p4, p5, p21}
{p8, not_p5, p12, p17, p15, p9, not_p4, p5, p21}
{p8, p18, p1, not_p5, p17, p15, p9, not_p4, p5, p21}
{p8, p2, not_p5, p17, p15, p9, not_p4, p5, p21}
{p8, p25, not_p5, not_p16, p17, p15, p9, not_p4, p5}
{p8, p25, p18, p1, not_p5, p17, p15, p9, not_p4, p5}
{p8, not_p15, p7, p25, p20, p1, not_p5, p17, p15, p9, not_p4, p5}
{p8, p2, p25, not_p5, p17, p15, p9, not_p4, p5}
{p8, p22, not_p16, not_p8, p15, not_p4, p5, p14}
{p8, p1, p22, not_p8, p15, not_p4, p5, p14}
{p8, p2, p22, not_p8, p15, not_p4, p5, p14}
{p25, p11, not_p19, not_p16, not_p8, p15, not_p4, p5, p14}
{p25, p11, not_p5, not_p16, not_p8, p15, not_p4, p5, p14}
{p25, p11, p22, not_p16, not_p8, p15, not_p4, p5, p14}
{p25, p11, p1, p22, not_p8, p15, not_p4, p5, p14}
{p25, p11, p18, p1, not_p19, not_p8, p15, not_p4, p5, p14}
{p25, p11, p20, p1, not_p19, not_p8, p15, not_p4, p5, p14}
{p25, p11, p18, p1, not_p5, not_p8, p15, not_p4, p5, p14}
{p25, p11, p20, p1, not_p5, not_p8, p15, not_p4, p5, p14}
{p2, p25, p11, not_p19, not_p8, p15, not_p4, p5, p14}
{p2, p25, p11, not_p5, not_p8, p15, not_p4, p5, p14}
{p2, p25, p11, p22, not_p8, p15, not_p4, p5, p14}
{p8, p25, not_p5, not_p16, not_p8, p15, not_p4, p5, p14}
{p8, p25, p18, p1, not_p5, not_p8, p15, not_p4, p5, p14}
{p8, p25, p20, p1, not_p5, not_p8, p15, not_p4, p5, p14}
{p8, p2, p25, not_p5, not_p8, p15, not_p4, p5, p14}
{p8, p22, not_p19, not_p16, p15, not_p4, p5, p14}
{p8, p1, p22, not_p19, p15, not_p4, p5, p14}
{p8, p2, p22, not_p19, p15, not_p4, p5, p14}
{p8, p25, not_p19, not_p16, p15, not_p4, p5, p14}
{p8, p25, p18, p1, not_p19, p15, not_p4, p5, p14}
{p8, p25, p20, p1, not_p19, p15, not_p4, p5, p14}
{p8, p2, p25, not_p19, p15, not_p4, p5, p14}
{p8, not_p15, p1, p22, p15, not_p4, p5, p14, p6}
{p8, not_p15, p25, p18, p1, not_p5, p15, not_p4, p5, p14, p6}
{p8, not_p15, p25, p20, p1, not_p5, p15, not_p4, p5, p14, p6}
{p8, p25, not_p5, not_p16, p15, not_p4, p5, p14, p6}
{p8, p2, p25, not_p5, p15, not_p4, p5, p14, p6}
{p8, p22, not_p16, p15, not_p4, p5, p14, p6}
{p8, p2, p22, p15, not_p4, p5, p14, p6}
{p24, p8, p20, p1, not_p5, not_p19, not_p12, p9, p4, p16}
{p24, not_p20, p20, p1, not_p5, p17, not_p12, p9, p4, p16}
{p24, p11, p20, p1, not_p5, p17, not_p12, p9, p4, p21, p16}
{p24, p25, p20, p1, not_p5, p17, not_p12, p9, p4, p21, p16}
{p24, p8, p20, p1, not_p5, not_p8, not_p12, p9, p4, p16}
{p24, p25, p20, p1, not_p5, not_p8, not_p12, p9, p4, p16}
{p24, not_p20, p1, p22, not_p5, p17, p9, p4, p16}
{p24, p11, p1, p22, not_p5, p17, p9, p4, p21, p16}
{p24, p25, p1, p22, not_p5, p17, p9, p4, p21, p16}
{p24, not_p20, p20, p1, not_p5, p17, not_p21, p9, p4, p16}
{p24, not_p20, p3, p1, not_p5, p17, not_p21, p9, p4, p16}
{p24, p11, p20, p1, not_p5, p17, not_p21, p9, p4, p21, p16}
{p24, p25, p20, p1, not_p5, p17, not_p21, p9, p4, p21, p16}
{p24, p11, p3, p1, not_p5, p17, not_p21, p9, p4, p21, p16}
{p24, p25, p3, p1, not_p5, p17, not_p21, p9, p4, p21, p16}
{p24, p25, p20, p1, not_p5, not_p8, not_p21, p9, p4, p16}
{p24, p8, p20, p1, not_p5, not_p8, not_p21, p9, p4, p16}
{p24, p25, p3, p1, not_p5, not_p8, not_p21, p9, p4, p16}
{p24, p8, p3, p1, not_p5, not_p8, not_p21, p9, p4, p16}
{p24, p25, p1, p22, not_p5, not_p8, p9, p4, p16}
{p24, p8, p1, p22, not_p5, not_p8, p9, p4, p16}
{p24, p8, p1, p22, not_p5, not_p19, p9, p4, p16}
{p24, p8, p20, p1, not_p5, not_p19, not_p21, p9, p4, p16}
{p24, p8, p3, p1, not_p5, not_p19, not_p21, p9, p4, p16}
{p24, p7, p20, p1, not_p5, p17, not_p12, p9, not_p4, p21, p16}
{p24, p7, p25, p20, p1, not_p5, p12, p17, not_p12, p9, not_p4, p16}
{p24, p7, not_p20, p20, p1, not_p5, p17, not_p12, p9, not_p4, p16}
{p24, p7, p1, p22, not_p5, p17, p9, not_p4, p21, p16}
{p24, p7, p1, p22, not_p5, p12, p17, p9, not_p4, p16}
{p24, p7, not_p20, p1, p22, not_p5, p17, p9, not_p4, p16}
{p24, p7, p20, p1, not_p5, p17, not_p21, p9, not_p4, p21, p16}
{p24, p7, p3, p1, not_p5, p17, not_p21, p9, not_p4, p21, p16}
{p24, p7, not_p20, p20, p1, not_p5, p17, not_p21, p9, not_p4, p16}
{p24, p7, not_p20, p3, p1, not_p5, p17, not_p21, p9, not_p4, p16}
{p24, p7, p25, p20, p1, not_p5, p12, p17, not_p21, p9, not_p4, p16}
{p24, p7, p25, p3, p1, not_p5, p12, p17, not_p21, p9, not_p4, p16}
{p24, not_p20, p1, p22, not_p5, p17, not_p4, p14, p16}
{p24, not_p20, p1, p22, not_p5, p17, not_p4, p16, not_p3}
{p24, not_p20, p1, p22, not_p5, p17, not_p4, p16, not_p25}
{p24, not_p20, p1, p22, not_p5, p17, p9, not_p4, p16, p6}
{p24, not_p20, p18, p1, p22, not_p5, p17, p9, not_p4, p16}
{p24, p25, p1, p22, not_p5, p17, not_p4, p21, p16, not_p3}
{p24, p25, p1, p22, not_p5, p12, p17, not_p24, not_p4, p16, not_p3}
{p24, p25, p1, p22, not_p5, p12, p17, not_p2, not_p4, p16, not_p3}
{p24, p25, p1, p22, not_p5, p17, not_p4, p21, p16, not_p25}
{p24, p25, p1, p22, not_p5, p12, p17, not_p24, not_p4, p16, not_p25}
{p24, p25, p1, p22, not_p5, p12, p17, not_p2, not_p4, p16, not_p25}
{p24, p25, p1, p22, not_p5, p17, not_p4, p21, p14, p16}
{p24, p25, p1, p22, not_p5, p12, p17, not_p24, not_p4, p14, p16}
{p24, p25, p1, p22, not_p5, p12, p17, not_p2, not_p4, p14, p16}
{p24, p11, p1, p22, not_p5, p17, p9, not_p4, p21, p16, p6}
{p24, p25, p1, p22, not_p5, p17, p9, not_p4, p21, p16, p6}
{p24, p25, p18, p1, p22, not_p5, p17, p9, not_p4, p21, p16}
{p24, p25, p18, p1, p22, not_p5, p12, p17, p9, not_p4, p16}
{p24, p25, p18, p1, not_p5, p17, not_p21, not_p4, p21, p14, p16}
{p24, p25, p18, p1, not_p5, p17, not_p21, p9, not_p4, p21, p16}
{p24, p25, p18, p1, not_p5, p17, not_p21, not_p4, p21, p16, not_p3}
{p24, p25, p18, p1, not_p5, p17, not_p21, not_p4, p21, p16, not_p25}
{p24, p25, p18, p1, not_p5, p17, not_p12, not_p4, p21, p14, p16}
{p24, p25, p18, p1, not_p5, p17, not_p12, p9, not_p4, p21, p16}
{p24, p25, p18, p1, not_p5, p17, not_p12, not_p4, p21, p16, not_p3}
{p24, p25, p18, p1, not_p5, p17, not_p12, not_p4, p21, p16, not_p25}
{p24, p11, p20, p1, not_p5, p17, not_p21, p9, not_p4, p21, p16, p6}
{p24, p11, p20, p1, not_p5, p17, not_p12, p9, not_p4, p21, p16, p6}
{p24, p25, p20, p1, not_p5, p17, not_p21, p9, not_p4, p21, p16, p6}
{p24, p25, p20, p1, not_p5, p17, not_p12, p9, not_p4, p21, p16, p6}
{p24, p25, p20, p1, not_p5, p17, not_p21, not_p4, p21, p14, p16}
{p24, p25, p20, p1, not_p5, p17, not_p21, not_p4, p21, p16, not_p3}
{p24, p25, p20, p1, not_p5, p17, not_p21, not_p4, p21, p16, not_p25}
{p24, p25, p20, p1, not_p5, p17, not_p12, not_p4, p21, p14, p16}
{p24, p25, p20, p1, not_p5, p17, not_p12, not_p4, p21, p16, not_p3}
{p24, p25, p20, p1, not_p5, p17, not_p12, not_p4, p21, p16, not_p25}
{p24, p11, p3, p1, not_p5, p17, not_p21, p9, not_p4, p21, p16, p6}
{p24, p25, p3, p1, not_p5, p17, not_p21, p9, not_p4, p21, p16, p6}
{p24, p25, p3, p1, not_p5, p17, not_p21, not_p4, p21, p14, p16}
{p24, p25, p3, p1, not_p5, p17, not_p21, not_p4, p21, p16, not_p3}
{p24, p25, p3, p1, not_p5, p17, not_p21, not_p4, p21, p16, not_p25}
{p24, not_p20, p18, p1, not_p5, p17, not_p21, not_p4, p14, p16}
{p24, not_p20, p18, p1, not_p5, p17, not_p21, p9, not_p4, p16}
{p24, not_p20, p18, p1, not_p5, p17, not_p21, not_p4, p16, not_p3}
{p24, not_p20, p18, p1, not_p5, p17, not_p21, not_p4, p16, not_p25}
{p24, not_p20, p18, p1, not_p5, p17, not_p12, not_p4, p14, p16}
{p24, not_p20, p18, p1, not_p5, p17, not_p12, p9, not_p4, p16}
{p24, not_p20, p18, p1, not_p5, p17, not_p12, not_p4, p16, not_p3}
{p24, not_p20, p18, p1, not_p5, p17, not_p12, not_p4, p16, not_p25}
{p24, not_p20, p20, p1, not_p5, p17, not_p21, p9, not_p4, p16, p6}
{p24, not_p20, p20, p1, not_p5, p17, not_p12, p9, not_p4, p16, p6}
{p24, not_p20, p20, p1, not_p5, p17, not_p21, not_p4, p14, p16}
{p24, not_p20, p20, p1, not_p5, p17, not_p21, not_p4, p16, not_p3}
{p24, not_p20, p20, p1, not_p5, p17, not_p21, not_p4, p16, not_p25}
{p24, not_p20, p20, p1, not_p5, p17, not_p12, not_p4, p14, p16}
{p24, not_p20, p20, p1, not_p5, p17, not_p12, not_p4, p16, not_p3}
{p24, not_p20, p20, p1, not_p5, p17, not_p12, not_p4, p16, not_p25}
{p24, not_p20, p3, p1, not_p5, p17, not_p21, p9, not_p4, p16, p6}
{p24, not_p20, p3, p1, not_p5, p17, not_p21, not_p4, p14, p16}
{p24, not_p20, p3, p1, not_p5, p17, not_p21, not_p4, p16, not_p3}
{p24, not_p20, p3, p1, not_p5, p17, not_p21, not_p4, p16, not_p25}
{p24, p25, p18, p1, not_p5, p12, p17, not_p21, p9, not_p4, p16}
{p24, p25, p18, p1, not_p5, p12, p17, not_p12, p9, not_p4, p16}
{p24, p25, p18, p1, not_p5, p12, p17, not_p21, not_p24, not_p4, p14, p16}
{p24, p25, p18, p1, not_p5, p12, p17, not_p21, not_p24, not_p4, p16, not_p3}
{p24, p25, p18, p1, not_p5, p12, p17, not_p21, not_p24, not_p4, p16, not_p25}
{p24, p25, p18, p1, not_p5, p12, p17, not_p21, not_p2, not_p4, p14, p16}
{p24, p25, p18, p1, not_p5, p12, p17, not_p21, not_p2, not_p4, p16, not_p3}
{p24, p25, p18, p1, not_p5, p12, p17, not_p21, not_p2, not_p4, p16, not_p25}
{p24, p25, p18, p1, not_p5, p12, p17, not_p12, not_p24, not_p4, p14, p16}
{p24, p25, p18, p1, not_p5, p12, p17, not_p12, not_p2, not_p4, p14, p16}
{p24, p25, p18, p1, not_p5, p12, p17, not_p12, not_p24, not_p4, p16, not_p3}
{p24, p25, p18, p1, not_p5, p12, p17, not_p12, not_p2, not_p4, p16, not_p3}
{p24, p25, p18, p1, not_p5, p12, p17, not_p12, not_p24, not_p4, p16, not_p25}
{p24, p25, p18, p1, not_p5, p12, p17, not_p12, not_p2, not_p4, p16, not_p25}
{p24, p25, p20, p1, not_p5, p12, p17, not_p21, not_p24, not_p4, p14, p16}
{p24, p25, p20, p1, not_p5, p12, p17, not_p21, not_p24, not_p4, p16, not_p3}
{p24, p25, p20, p1, not_p5, p12, p17, not_p21, not_p24, not_p4, p16, not_p25}
{p24, p25, p20, p1, not_p5, p12, p17, not_p12, not_p24, not_p4, p14, p16}
{p24, p25, p20, p1, not_p5, p12, p17, not_p12, not_p24, not_p4, p16, not_p3}
{p24, p25, p20, p1, not_p5, p12, p17, not_p12, not_p24, not_p4, p16, not_p25}
{p24, p25, p20, p1, not_p5, p12, p17, not_p21, not_p2, not_p4, p14, p16}
{p24, p25, p20, p1, not_p5, p12, p17, not_p21, not_p2, not_p4, p16, not_p3}
{p24, p25, p20, p1, not_p5, p12, p17, not_p21, not_p2, not_p4, p16, not_p25}
{p24, p25, p20, p1, not_p5, p12, p17, not_p12, not_p2, not_p4, p14, p16}
{p24, p25, p20, p1, not_p5, p12, p17, not_p12, not_p2, not_p4, p16, not_p3}
{p24, p25, p20, p1, not_p5, p12, p17, not_p12, not_p2, not_p4, p16, not_p25}
{p24, p25, p3, p1, not_p5, p12, p17, not_p21, not_p24, not_p4, p14, p16}
{p24, p25, p3, p1, not_p5, p12, p17, not_p21, not_p24, not_p4, p16, not_p3}
{p24, p25, p3, p1, not_p5, p12, p17, not_p21, not_p24, not_p4, p16, not_p25}
{p24, p25, p3, p1, not_p5, p12, p17, not_p21, not_p2, not_p4, p14, p16}
{p24, p25, p3, p1, not_p5, p12, p17, not_p21, not_p2, not_p4, p16, not_p3}
{p24, p25, p3, p1, not_p5, p12, p17, not_p21, not_p2, not_p4, p16, not_p25}
{p24, p25, p1, p22, not_p5, not_p8, not_p4, p16, not_p3}
{p24, p1, p22, not_p5, p12, not_p8, not_p24, not_p4, p16, not_p3}
{p24, p1, p22, not_p5, p12, not_p8, not_p2, not_p4, p16, not_p3}
{p24, p25, p1, p22, not_p5, not_p8, not_p4, p16, not_p25}
{p24, p1, p22, not_p5, p12, not_p8, not_p24, not_p4, p16, not_p25}
{p24, p1, p22, not_p5, p12, not_p8, not_p2, not_p4, p16, not_p25}
{p24, p1, p22, not_p5, p12, not_p8, not_p24, not_p4, p14, p16}
{p24, p1, p22, not_p5, p12, not_p8, not_p2, not_p4, p14, p16}
{p24, p25, p1, p22, not_p5, not_p8, not_p4, p21, p14, p16}
{p24, p25, not_p20, p1, p22, not_p5, not_p8, not_p4, p14, p16}
{p24, p25, p1, p22, not_p5, not_p8, p9, not_p4, p16, p6}
{p24, p25, p18, p1, p22, not_p5, not_p8, p9, not_p4, p16}
{p24, p7, p25, p1, p22, not_p5, not_p8, p9, not_p4, p16}
{p24, p18, p1, p22, not_p5, p12, not_p8, p9, not_p4, p16}
{p24, p7, p1, p22, not_p5, p12, not_p8, p9, not_p4, p16}
{p24, p8, p7, p1, p22, not_p5, not_p8, p9, not_p4, p16}
{p24, p25, p18, p1, not_p5, not_p8, not_p21, not_p4, p16, not_p3}
{p24, p18, p1, not_p5, p12, not_p8, not_p21, not_p24, not_p4, p16, not_p3}
{p24, p18, p1, not_p5, p12, not_p8, not_p21, not_p2, not_p4, p16, not_p3}
{p24, p25, p18, p1, not_p5, not_p8, not_p12, not_p4, p16, not_p3}
{p24, p18, p1, not_p5, p12, not_p8, not_p12, not_p24, not_p4, p16, not_p3}
{p24, p18, p1, not_p5, p12, not_p8, not_p12, not_p2, not_p4, p16, not_p3}
{p24, p25, p20, p1, not_p5, not_p8, not_p21, not_p4, p16, not_p3}
{p24, p20, p1, not_p5, p12, not_p8, not_p21, not_p24, not_p4, p16, not_p3}
{p24, p20, p1, not_p5, p12, not_p8, not_p21, not_p2, not_p4, p16, not_p3}
{p24, p25, p20, p1, not_p5, not_p8, not_p12, not_p4, p16, not_p3}
{p24, p20, p1, not_p5, p12, not_p8, not_p12, not_p24, not_p4, p16, not_p3}
{p24, p20, p1, not_p5, p12, not_p8, not_p12, not_p2, not_p4, p16, not_p3}
{p24, p25, p3, p1, not_p5, not_p8, not_p21, not_p4, p16, not_p3}
{p24, p3, p1, not_p5, p12, not_p8, not_p21, not_p24, not_p4, p16, not_p3}
{p24, p3, p1, not_p5, p12, not_p8, not_p21, not_p2, not_p4, p16, not_p3}
{p24, p25, p18, p1, not_p5, not_p8, not_p21, not_p4, p16, not_p25}
{p24, p18, p1, not_p5, p12, not_p8, not_p21, not_p24, not_p4, p16, not_p25}
{p24, p18, p1, not_p5, p12, not_p8, not_p21, not_p2, not_p4, p16, not_p25}
{p24, p25, p18, p1, not_p5, not_p8, not_p12, not_p4, p16, not_p25}
{p24, p18, p1, not_p5, p12, not_p8, not_p12, not_p24, not_p4, p16, not_p25}
{p24, p18, p1, not_p5, p12, not_p8, not_p12, not_p2, not_p4, p16, not_p25}
{p24, p25, p20, p1, not_p5, not_p8, not_p21, not_p4, p16, not_p25}
{p24, p20, p1, not_p5, p12, not_p8, not_p21, not_p24, not_p4, p16, not_p25}
{p24, p20, p1, not_p5, p12, not_p8, not_p21, not_p2, not_p4, p16, not_p25}
{p24, p25, p20, p1, not_p5, not_p8, not_p12, not_p4, p16, not_p25}
{p24, p20, p1, not_p5, p12, not_p8, not_p12, not_p24, not_p4, p16, not_p25}
{p24, p20, p1, not_p5, p12, not_p8, not_p12, not_p2, not_p4, p16, not_p25}
{p24, p25, p3, p1, not_p5, not_p8, not_p21, not_p4, p16, not_p25}
{p24, p3, p1, not_p5, p12, not_p8, not_p21, not_p24, not_p4, p16, not_p25}
{p24, p3, p1, not_p5, p12, not_p8, not_p21, not_p2, not_p4, p16, not_p25}
{p24, p18, p1, not_p5, p12, not_p8, not_p21, p9, not_p4, p16}
{p24, p25, p18, p1, not_p5, not_p8, not_p21, p9, not_p4, p16}
{p24, p18, p1, not_p5, p12, not_p8, not_p12, p9, not_p4, p16}
{p24, p25, p18, p1, not_p5, not_p8, not_p12, p9, not_p4, p16}
{p24, p25, p20, p1, not_p5, not_p8, not_p21, p9, not_p4, p16, p6}
{p24, p25, p20, p1, not_p5, not_p8, not_p12, p9, not_p4, p16, p6}
{p24, p7, p20, p1, not_p5, p12, not_p8, not_p21, p9, not_p4, p16}
{p24, p7, p25, p20, p1, not_p5, not_p8, not_p21, p9, not_p4, p16}
{p24, p8, p7, p20, p1, not_p5, not_p8, not_p21, p9, not_p4, p16}
{p24, p7, p20, p1, not_p5, p12, not_p8, not_p12, p9, not_p4, p16}
{p24, p7, p25, p20, p1, not_p5, not_p8, not_p12, p9, not_p4, p16}
{p24, p8, p7, p20, p1, not_p5, not_p8, not_p12, p9, not_p4, p16}
{p24, p25, p3, p1, not_p5, not_p8, not_p21, p9, not_p4, p16, p6}
{p24, p7, p3, p1, not_p5, p12, not_p8, not_p21, p9, not_p4, p16}
{p24, p7, p25, p3, p1, not_p5, not_p8, not_p21, p9, not_p4, p16}
{p24, p8, p7, p3, p1, not_p5, not_p8, not_p21, p9, not_p4, p16}
{p24, p18, p1, not_p5, p12, not_p8, not_p21, not_p24, not_p4, p14, p16}
{p24, p18, p1, not_p5, p12, not_p8, not_p12, not_p24, not_p4, p14, p16}
{p24, p18, p1, not_p5, p12, not_p8, not_p21, not_p2, not_p4, p14, p16}
{p24, p18, p1, not_p5, p12, not_p8, not_p12, not_p2, not_p4, p14, p16}
{p24, p20, p1, not_p5, p12, not_p8, not_p21, not_p24, not_p4, p14, p16}
{p24, p20, p1, not_p5, p12, not_p8, not_p12, not_p24, not_p4, p14, p16}
{p24, p20, p1, not_p5, p12, not_p8, not_p21, not_p2, not_p4, p14, p16}
{p24, p20, p1, not_p5, p12, not_p8, not_p12, not_p2, not_p4, p14, p16}
{p24, p3, p1, not_p5, p12, not_p8, not_p21, not_p24, not_p4, p14, p16}
{p24, p3, p1, not_p5, p12, not_p8, not_p21, not_p2, not_p4, p14, p16}
{p24, p25, p18, p1, not_p5, not_p8, not_p21, not_p4, p21, p14, p16}
{p24, p25, p18, p1, not_p5, not_p8, not_p12, not_p4, p21, p14, p16}
{p24, p25, not_p20, p18, p1, not_p5, not_p8, not_p21, not_p4, p14, p16}
{p24, p25, not_p20, p18, p1, not_p5, not_p8, not_p12, not_p4, p14, p16}
{p24, p25, p20, p1, not_p5, not_p8, not_p21, not_p4, p21, p14, p16}
{p24, p25, p20, p1, not_p5, not_p8, not_p12, not_p4, p21, p14, p16}
{p24, p25, not_p20, p20, p1, not_p5, not_p8, not_p21, not_p4, p14, p16}
{p24, p25, not_p20, p20, p1, not_p5, not_p8, not_p12, not_p4, p14, p16}
{p24, p25, p3, p1, not_p5, not_p8, not_p21, not_p4, p21, p14, p16}
{p24, p25, not_p20, p3, p1, not_p5, not_p8, not_p21, not_p4, p14, p16}
{p24, p8, p1, p22, not_p5, p9, not_p4, p16, p6}
{p24, p8, p18, p1, not_p5, not_p21, p9, not_p4, p16, p6}
{p24, p8, p20, p1, not_p5, not_p21, p9, not_p4, p16, p6}
{p24, p8, p3, p1, not_p5, not_p21, p9, not_p4, p16, p6}
{p24, p8, p18, p1, not_p5, not_p12, p9, not_p4, p16, p6}
{p24, p8, p20, p1, not_p5, not_p12, p9, not_p4, p16, p6}
{p24, p8, p1, p22, not_p5, not_p4, p16, p6, not_p3}
{p24, p8, p1, p22, not_p5, not_p4, p16, p6, not_p25}
{p24, p8, p1, p22, not_p5, not_p4, p21, p14, p16, p6}
{p24, p8, not_p20, p1, p22, not_p5, not_p4, p14, p16, p6}
{p24, p8, p18, p1, not_p5, not_p21, not_p4, p16, p6, not_p3}
{p24, p8, p18, p1, not_p5, not_p12, not_p4, p16, p6, not_p3}
{p24, p8, p20, p1, not_p5, not_p21, not_p4, p16, p6, not_p3}
{p24, p8, p20, p1, not_p5, not_p12, not_p4, p16, p6, not_p3}
{p24, p8, p3, p1, not_p5, not_p21, not_p4, p16, p6, not_p3}
{p24, p8, p18, p1, not_p5, not_p21, not_p4, p16, p6, not_p25}
{p24, p8, p18, p1, not_p5, not_p12, not_p4, p16, p6, not_p25}
{p24, p8, p20, p1, not_p5, not_p21, not_p4, p16, p6, not_p25}
{p24, p8, p20, p1, not_p5, not_p12, not_p4, p16, p6, not_p25}
{p24, p8, p3, p1, not_p5, not_p21, not_p4, p16, p6, not_p25}
{p24, p8, p18, p1, not_p5, not_p21, not_p4, p21, p14, p16, p6}
{p24, p8, p18, p1, not_p5, not_p12, not_p4, p21, p14, p16, p6}
{p24, p8, p20, p1, not_p5, not_p21, not_p4, p21, p14, p16, p6}
{p24, p8, p20, p1, not_p5, not_p12, not_p4, p21, p14, p16, p6}
{p24, p8, p3, p1, not_p5, not_p21, not_p4, p21, p14, p16, p6}
{p24, p8, not_p20, p18, p1, not_p5, not_p21, not_p4, p14, p16, p6}
{p24, p8, not_p20, p18, p1, not_p5, not_p12, not_p4, p14, p16, p6}
{p24, p8, not_p20, p20, p1, not_p5, not_p21, not_p4, p14, p16, p6}
{p24, p8, not_p20, p20, p1, not_p5, not_p12, not_p4, p14, p16, p6}
{p24, p8, not_p20, p3, p1, not_p5, not_p21, not_p4, p14, p16, p6}
{p24, p8, p7, p1, p22, not_p5, not_p19, p9, not_p4, p16}
{p24, p8, p7, p20, p1, not_p5, not_p19, not_p12, p9, not_p4, p16}
{p24, p8, p7, p20, p1, not_p5, not_p19, not_p21, p9, not_p4, p16}
{p24, p8, p7, p3, p1, not_p5, not_p19, not_p21, p9, not_p4, p16}
{p24, p1, p22, not_p5, p12, p9, not_p4, p16, p6}
{p24, p18, p1, not_p5, p12, not_p21, p9, not_p4, p16, p6}
{p24, p20, p1, not_p5, p12, not_p21, p9, not_p4, p16, p6}
{p24, p3, p1, not_p5, p12, not_p21, p9, not_p4, p16, p6}
{p24, p18, p1, not_p5, p12, not_p12, p9, not_p4, p16, p6}
{p24, p20, p1, not_p5, p12, not_p12, p9, not_p4, p16, p6}
{p24, p1, p22, not_p5, p12, not_p24, not_p4, p14, p16, p6}
{p24, p18, p1, not_p5, p12, not_p21, not_p24, not_p4, p14, p16, p6}
{p24, p20, p1, not_p5, p12, not_p21, not_p24, not_p4, p14, p16, p6}
{p24, p3, p1, not_p5, p12, not_p21, not_p24, not_p4, p14, p16, p6}
{p24, p18, p1, not_p5, p12, not_p12, not_p24, not_p4, p14, p16, p6}
{p24, p20, p1, not_p5, p12, not_p12, not_p24, not_p4, p14, p16, p6}
{p24, p1, p22, not_p5, p12, not_p2, not_p4, p14, p16, p6}
{p24, p18, p1, not_p5, p12, not_p21, not_p2, not_p4, p14, p16, p6}
{p24, p20, p1, not_p5, p12, not_p21, not_p2, not_p4, p14, p16, p6}
{p24, p3, p1, not_p5, p12, not_p21, not_p2, not_p4, p14, p16, p6}
{p24, p18, p1, not_p5, p12, not_p12, not_p2, not_p4, p14, p16, p6}
{p24, p20, p1, not_p5, p12, not_p12, not_p2, not_p4, p14, p16, p6}
{p24, p1, p22, not_p5, p12, not_p24, not_p4, p16, p6, not_p3}
{p24, p1, p22, not_p5, p12, not_p2, not_p4, p16, p6, not_p3}
{p24, p1, p22, not_p5, p12, not_p24, not_p4, p16, p6, not_p25}
{p24, p1, p22, not_p5, p12, not_p2, not_p4, p16, p6, not_p25}
{p24, p18, p1, not_p5, p12, not_p21, not_p24, not_p4, p16, p6, not_p3}
{p24, p18, p1, not_p5, p12, not_p12, not_p24, not_p4, p16, p6, not_p3}
{p24, p18, p1, not_p5, p12, not_p21, not_p2, not_p4, p16, p6, not_p3}
{p24, p18, p1, not_p5, p12, not_p12, not_p2, not_p4, p16, p6, not_p3}
{p24, p20, p1, not_p5, p12, not_p21, not_p24, not_p4, p16, p6, not_p3}
{p24, p20, p1, not_p5, p12, not_p12, not_p24, not_p4, p16, p6, not_p3}
{p24, p20, p1, not_p5, p12, not_p21, not_p2, not_p4, p16, p6, not_p3}
{p24, p20, p1, not_p5, p12, not_p12, not_p2, not_p4, p16, p6, not_p3}
{p24, p3, p1, not_p5, p12, not_p21, not_p24, not_p4, p16, p6, not_p3}
{p24, p3, p1, not_p5, p12, not_p21, not_p2, not_p4, p16, p6, not_p3}
{p24, p18, p1, not_p5, p12, not_p21, not_p24, not_p4, p16, p6, not_p25}
{p24, p18, p1, not_p5, p12, not_p12, not_p24, not_p4, p16, p6, not_p25}
{p24, p18, p1, not_p5, p12, not_p21, not_p2, not_p4, p16, p6, not_p25}
{p24, p18, p1, not_p5, p12, not_p12, not_p2, not_p4, p16, p6, not_p25}
{p24, p20, p1, not_p5, p12, not_p21, not_p24, not_p4, p16, p6, not_p25}
{p24, p20, p1, not_p5, p12, not_p12, not_p24, not_p4, p16, p6, not_p25}
{p24, p20, p1, not_p5, p12, not_p21, not_p2, not_p4, p16, p6, not_p25}
{p24, p20, p1, not_p5, p12, not_p12, not_p2, not_p4, p16, p6, not_p25}
{p24, p3, p1, not_p5, p12, not_p21, not_p24, not_p4, p16, p6, not_p25}
{p24, p3, p1, not_p5, p12, not_p21, not_p2, not_p4, p16, p6, not_p25}
{p24, p18, p1, not_p5, not_p19, p12, not_p21, p9, not_p4, p16}
{p24, p18, p1, not_p5, not_p19, p12, not_p12, p9, not_p4, p16}
{p24, p18, p1, p22, not_p5, not_p19, p12, p9, not_p4, p16}
{p24, p7, p1, p22, not_p5, not_p19, p12, p9, not_p4, p16}
{p24, p7, p20, p1, not_p5, not_p19, p12, not_p21, p9, not_p4, p16}
{p24, p7, p3, p1, not_p5, not_p19, p12, not_p21, p9, not_p4, p16}
{p24, p7, p20, p1, not_p5, not_p19, p12, not_p12, p9, not_p4, p16}
{p24, p1, p22, not_p5, not_p19, p12, not_p24, not_p4, p14, p16}
{p24, p18, p1, not_p5, not_p19, p12, not_p21, not_p24, not_p4, p14, p16}
{p24, p20, p1, not_p5, not_p19, p12, not_p21, not_p24, not_p4, p14, p16}
{p24, p3, p1, not_p5, not_p19, p12, not_p21, not_p24, not_p4, p14, p16}
{p24, p18, p1, not_p5, not_p19, p12, not_p12, not_p24, not_p4, p14, p16}
{p24, p20, p1, not_p5, not_p19, p12, not_p12, not_p24, not_p4, p14, p16}
{p24, p1, p22, not_p5, not_p19, p12, not_p2, not_p4, p14, p16}
{p24, p18, p1, not_p5, not_p19, p12, not_p21, not_p2, not_p4, p14, p16}
{p24, p20, p1, not_p5, not_p19, p12, not_p21, not_p2, not_p4, p14, p16}
{p24, p3, p1, not_p5, not_p19, p12, not_p21, not_p2, not_p4, p14, p16}
{p24, p18, p1, not_p5, not_p19, p12, not_p12, not_p2, not_p4, p14, p16}
{p24, p20, p1, not_p5, not_p19, p12, not_p12, not_p2, not_p4, p14, p16}
{p24, p1, p22, not_p5, not_p19, p12, not_p24, not_p4, p16, not_p3}
{p24, p1, p22, not_p5, not_p19, p12, not_p2, not_p4, p16, not_p3}
{p24, p1, p22, not_p5, not_p19, p12, not_p24, not_p4, p16, not_p25}
{p24, p1, p22, not_p5, not_p19, p12, not_p2, not_p4, p16, not_p25}
{p24, p18, p1, not_p5, not_p19, p12, not_p21, not_p24, not_p4, p16, not_p3}
{p24, p18, p1, not_p5, not_p19, p12, not_p12, not_p24, not_p4, p16, not_p3}
{p24, p18, p1, not_p5, not_p19, p12, not_p21, not_p2, not_p4, p16, not_p3}
{p24, p18, p1, not_p5, not_p19, p12, not_p12, not_p2, not_p4, p16, not_p3}
{p24, p18, p1, not_p5, not_p19, p12, not_p21, not_p24, not_p4, p16, not_p25}
{p24, p18, p1, not_p5, not_p19, p12, not_p12, not_p24, not_p4, p16, not_p25}
{p24, p18, p1, not_p5, not_p19, p12, not_p21, not_p2, not_p4, p16, not_p25}
{p24, p18, p1, not_p5, not_p19, p12, not_p12, not_p2, not_p4, p16, not_p25}
{p24, p20, p1, not_p5, not_p19, p12, not_p21, not_p24, not_p4, p16, not_p3}
{p24, p20, p1, not_p5, not_p19, p12, not_p12, not_p24, not_p4, p16, not_p3}
{p24, p20, p1, not_p5, not_p19, p12, not_p21, not_p2, not_p4, p16, not_p3}
{p24, p20, p1, not_p5, not_p19, p12, not_p12, not_p2, not_p4, p16, not_p3}
{p24, p20, p1, not_p5, not_p19, p12, not_p21, not_p24, not_p4, p16, not_p25}
{p24, p20, p1, not_p5, not_p19, p12, not_p12, not_p24, not_p4, p16, not_p25}
{p24, p20, p1, not_p5, not_p19, p12, not_p21, not_p2, not_p4, p16, not_p25}
{p24, p20, p1, not_p5, not_p19, p12, not_p12, not_p2, not_p4, p16, not_p25}
{p24, p3, p1, not_p5, not_p19, p12, not_p21, not_p24, not_p4, p16, not_p3}
{p24, p3, p1, not_p5, not_p19, p12, not_p21, not_p2, not_p4, p16, not_p3}
{p24, p3, p1, not_p5, not_p19, p12, not_p21, not_p24, not_p4, p16, not_p25}
{p24, p3, p1, not_p5, not_p19, p12, not_p21, not_p2, not_p4, p16, not_p25}
{p24, not_p15, p2, p22, p17, not_p4, p5, p14, p16}
{p24, not_p15, p2, p22, p17, not_p4, p5, p16, not_p3}
{p24, not_p15, p2, p22, p17, not_p4, p5, p16, not_p25}
{p24, not_p15, p2, p22, p17, p9, not_p4, p5, p16}
{p24, p1, p22, p17, not_p4, p5, p16, not_p3}
{p24, p1, p22, p17, not_p4, p5, p16, not_p25}
{p24, p1, p22, p17, not_p4, p5, p14, p16}
{p24, p1, p22, p17, p9, not_p4, p5, p16, p6}
{p24, p18, p1, p22, p17, p9, not_p4, p5, p16}
{p24, p7, p1, p22, p17, p9, not_p4, p5, p16}
{p24, not_p15, p2, not_p19, p17, not_p21, not_p4, p5, p21, p16, not_p3}
{p24, not_p15, p2, p25, not_p19, p17, not_p21, not_p4, p5, p16, not_p3}
{p24, not_p15, p2, not_p5, p17, not_p21, not_p4, p5, p21, p16, not_p3}
{p24, not_p15, p2, p25, not_p5, p17, not_p21, not_p4, p5, p16, not_p3}
{p24, not_p15, p2, not_p19, p17, not_p12, not_p4, p5, p21, p16, not_p3}
{p24, not_p15, p2, p25, not_p19, p17, not_p12, not_p4, p5, p16, not_p3}
{p24, not_p15, p2, not_p5, p17, not_p12, not_p4, p5, p21, p16, not_p3}
{p24, not_p15, p2, p25, not_p5, p17, not_p12, not_p4, p5, p16, not_p3}
{p24, p18, p1, not_p19, p17, not_p21, not_p4, p5, p21, p16, not_p3}
{p24, p18, p1, not_p5, p17, not_p21, not_p4, p5, p21, p16, not_p3}
{p24, p18, p1, not_p19, p17, not_p12, not_p4, p5, p21, p16, not_p3}
{p24, p18, p1, not_p5, p17, not_p12, not_p4, p5, p21, p16, not_p3}
{p24, p20, p1, not_p19, p17, not_p21, not_p4, p5, p21, p16, not_p3}
{p24, p20, p1, not_p5, p17, not_p21, not_p4, p5, p21, p16, not_p3}
{p24, p20, p1, not_p19, p17, not_p12, not_p4, p5, p21, p16, not_p3}
{p24, p20, p1, not_p5, p17, not_p12, not_p4, p5, p21, p16, not_p3}
{p24, p25, p18, p1, not_p19, p17, not_p21, not_p4, p5, p16, not_p3}
{p24, p25, p18, p1, not_p5, p17, not_p21, not_p4, p5, p16, not_p3}
{p24, p25, p20, p1, not_p19, p17, not_p21, not_p4, p5, p16, not_p3}
{p24, p25, p20, p1, not_p5, p17, not_p21, not_p4, p5, p16, not_p3}
{p24, p25, p18, p1, not_p19, p17, not_p12, not_p4, p5, p16, not_p3}
{p24, p25, p20, p1, not_p19, p17, not_p12, not_p4, p5, p16, not_p3}
{p24, p25, p18, p1, not_p5, p17, not_p12, not_p4, p5, p16, not_p3}
{p24, p25, p20, p1, not_p5, p17, not_p12, not_p4, p5, p16, not_p3}
{p24, not_p15, p2, not_p19, p17, not_p21, not_p4, p5, p21, p16, not_p25}
{p24, not_p15, p2, p25, not_p19, p17, not_p21, not_p4, p5, p16, not_p25}
{p24, not_p15, p2, not_p5, p17, not_p21, not_p4, p5, p21, p16, not_p25}
{p24, not_p15, p2, p25, not_p5, p17, not_p21, not_p4, p5, p16, not_p25}
{p24, not_p15, p2, not_p19, p17, not_p12, not_p4, p5, p21, p16, not_p25}
{p24, not_p15, p2, p25, not_p19, p17, not_p12, not_p4, p5, p16, not_p25}
{p24, not_p15, p2, not_p5, p17, not_p12, not_p4, p5, p21, p16, not_p25}
{p24, not_p15, p2, p25, not_p5, p17, not_p12, not_p4, p5, p16, not_p25}
{p24, p18, p1, not_p19, p17, not_p21, not_p4, p5, p21, p16, not_p25}
{p24, p18, p1, not_p5, p17, not_p21, not_p4, p5, p21, p16, not_p25}
{p24, p18, p1, not_p19, p17, not_p12, not_p4, p5, p21, p16, not_p25}
{p24, p18, p1, not_p5, p17, not_p12, not_p4, p5, p21, p16, not_p25}
{p24, p20, p1, not_p19, p17, not_p21, not_p4, p5, p21, p16, not_p25}
{p24, p20, p1, not_p5, p17, not_p21, not_p4, p5, p21, p16, not_p25}
{p24, p20, p1, not_p19, p17, not_p12, not_p4, p5, p21, p16, not_p25}
{p24, p20, p1, not_p5, p17, not_p12, not_p4, p5, p21, p16, not_p25}
{p24, p25, p18, p1, not_p19, p17, not_p21, not_p4, p5, p16, not_p25}
{p24, p25, p18, p1, not_p5, p17, not_p21, not_p4, p5, p16, not_p25}
{p24, p25, p20, p1, not_p19, p17, not_p21, not_p4, p5, p16, not_p25}
{p24, p25, p20, p1, not_p5, p17, not_p21, not_p4, p5, p16, not_p25}
{p24, p25, p18, p1, not_p19, p17, not_p12, not_p4, p5, p16, not_p25}
{p24, p25, p20, p1, not_p19, p17, not_p12, not_p4, p5, p16, not_p25}
{p24, p25, p18, p1, not_p5, p17, not_p12, not_p4, p5, p16, not_p25}
{p24, p25, p20, p1, not_p5, p17, not_p12, not_p4, p5, p16, not_p25}
{p24, not_p15, p2, not_p19, p17, not_p21, not_p4, p5, p21, p14, p16}
{p24, not_p15, p2, not_p5, p17, not_p21, not_p4, p5, p21, p14, p16}
{p24, not_p15, p2, not_p19, p17, not_p12, not_p4, p5, p21, p14, p16}
{p24, not_p15, p2, not_p5, p17, not_p12, not_p4, p5, p21, p14, p16}
{p24, p18, p1, not_p19, p17, not_p21, not_p4, p5, p21, p14, p16}
{p24, p18, p1, not_p5, p17, not_p21, not_p4, p5, p21, p14, p16}
{p24, p20, p1, not_p19, p17, not_p21, not_p4, p5, p21, p14, p16}
{p24, p20, p1, not_p5, p17, not_p21, not_p4, p5, p21, p14, p16}
{p24, p18, p1, not_p19, p17, not_p12, not_p4, p5, p21, p14, p16}
{p24, p20, p1, not_p19, p17, not_p12, not_p4, p5, p21, p14, p16}
{p24, p18, p1, not_p5, p17, not_p12, not_p4, p5, p21, p14, p16}
{p24, p20, p1, not_p5, p17, not_p12, not_p4, p5, p21, p14, p16}
{p24, not_p15, p2, p25, not_p19, p17, not_p21, not_p4, p5, p14, p16}
{p24, not_p15, p2, p25, not_p5, p17, not_p21, not_p4, p5, p14, p16}
{p24, p25, p18, p1, not_p19, p17, not_p21, not_p4, p5, p14, p16}
{p24, p25, p20, p1, not_p19, p17, not_p21, not_p4, p5, p14, p16}
{p24, p25, p18, p1, not_p5, p17, not_p21, not_p4, p5, p14, p16}
{p24, p25, p20, p1, not_p5, p17, not_p21, not_p4, p5, p14, p16}
{p24, not_p15, p2, p25, not_p19, p17, not_p12, not_p4, p5, p14, p16}
{p24, p25, p18, p1, not_p19, p17, not_p12, not_p4, p5, p14, p16}
{p24, p25, p20, p1, not_p19, p17, not_p12, not_p4, p5, p14, p16}
{p24, not_p15, p2, p25, not_p5, p17, not_p12, not_p4, p5, p14, p16}
{p24, p25, p18, p1, not_p5, p17, not_p12, not_p4, p5, p14, p16}
{p24, p25, p20, p1, not_p5, p17, not_p12, not_p4, p5, p14, p16}
{p24, not_p15, p2, not_p19, p17, not_p21, p9, not_p4, p5, p21, p16}
{p24, not_p15, p2, p25, not_p19, p17, not_p21, p9, not_p4, p5, p16}
{p24, not_p15, p2, not_p5, p17, not_p21, p9, not_p4, p5, p21, p16}
{p24, not_p15, p2, p25, not_p5, p17, not_p21, p9, not_p4, p5, p16}
{p24, not_p15, p2, not_p19, p17, not_p12, p9, not_p4, p5, p21, p16}
{p24, not_p15, p2, p25, not_p19, p17, not_p12, p9, not_p4, p5, p16}
{p24, not_p15, p2, not_p5, p17, not_p12, p9, not_p4, p5, p21, p16}
{p24, not_p15, p2, p25, not_p5, p17, not_p12, p9, not_p4, p5, p16}
{p24, p18, p1, not_p19, p17, not_p21, p9, not_p4, p5, p21, p16}
{p24, p18, p1, not_p5, p17, not_p21, p9, not_p4, p5, p21, p16}
{p24, p25, p18, p1, not_p19, p17, not_p21, p9, not_p4, p5, p16}
{p24, p25, p18, p1, not_p5, p17, not_p21, p9, not_p4, p5, p16}
{p24, p20, p1, not_p19, p17, not_p21, p9, not_p4, p5, p21, p16, p6}
{p24, p11, p20, p1, not_p19, p17, not_p21, p9, not_p4, p5, p16, p6}
{p24, p25, p20, p1, not_p19, p17, not_p21, p9, not_p4, p5, p16, p6}
{p24, p20, p1, not_p5, p17, not_p21, p9, not_p4, p5, p21, p16, p6}
{p24, p11, p20, p1, not_p5, p17, not_p21, p9, not_p4, p5, p16, p6}
{p24, p25, p20, p1, not_p5, p17, not_p21, p9, not_p4, p5, p16, p6}
{p24, p7, p11, p20, p1, not_p19, p17, not_p21, p9, not_p4, p5, p16}
{p24, p7, p25, p20, p1, not_p19, p17, not_p21, p9, not_p4, p5, p16}
{p24, p7, p11, p20, p1, not_p5, p17, not_p21, p9, not_p4, p5, p16}
{p24, p7, p25, p20, p1, not_p5, p17, not_p21, p9, not_p4, p5, p16}
{p24, p18, p1, not_p19, p17, not_p12, p9, not_p4, p5, p21, p16}
{p24, p25, p18, p1, not_p19, p17, not_p12, p9, not_p4, p5, p16}
{p24, p20, p1, not_p19, p17, not_p12, p9, not_p4, p5, p21, p16, p6}
{p24, p11, p20, p1, not_p19, p17, not_p12, p9, not_p4, p5, p16, p6}
{p24, p25, p20, p1, not_p19, p17, not_p12, p9, not_p4, p5, p16, p6}
{p24, p7, p11, p20, p1, not_p19, p17, not_p12, p9, not_p4, p5, p16}
{p24, p7, p25, p20, p1, not_p19, p17, not_p12, p9, not_p4, p5, p16}
{p24, p18, p1, not_p5, p17, not_p12, p9, not_p4, p5, p21, p16}
{p24, p25, p18, p1, not_p5, p17, not_p12, p9, not_p4, p5, p16}
{p24, p20, p1, not_p5, p17, not_p12, p9, not_p4, p5, p21, p16, p6}
{p24, p11, p20, p1, not_p5, p17, not_p12, p9, not_p4, p5, p16, p6}
{p24, p25, p20, p1, not_p5, p17, not_p12, p9, not_p4, p5, p16, p6}
{p24, p7, p11, p20, p1, not_p5, p17, not_p12, p9, not_p4, p5, p16}
{p24, p7, p25, p20, p1, not_p5, p17, not_p12, p9, not_p4, p5, p16}
{p24, p25, p1, p22, not_p8, not_p4, p5, p14, p16}
{p24, not_p15, p2, p25, p22, not_p8, not_p4, p5, p14, p16}
{p24, p25, p18, p1, not_p19, not_p8, not_p12, not_p4, p5, p14, p16}
{p24, p25, p18, p1, not_p5, not_p8, not_p12, not_p4, p5, p14, p16}
{p24, p25, p20, p1, not_p19, not_p8, not_p12, not_p4, p5, p14, p16}
{p24, p25, p20, p1, not_p5, not_p8, not_p12, not_p4, p5, p14, p16}
{p24, not_p15, p2, p25, not_p19, not_p8, not_p12, not_p4, p5, p14, p16}
{p24, not_p15, p2, p25, not_p5, not_p8, not_p12, not_p4, p5, p14, p16}
{p24, p25, p18, p1, not_p19, not_p8, not_p21, not_p4, p5, p14, p16}
{p24, p25, p20, p1, not_p19, not_p8, not_p21, not_p4, p5, p14, p16}
{p24, not_p15, p2, p25, not_p19, not_p8, not_p21, not_p4, p5, p14, p16}
{p24, p25, p18, p1, not_p5, not_p8, not_p21, not_p4, p5, p14, p16}
{p24, p25, p20, p1, not_p5, not_p8, not_p21, not_p4, p5, p14, p16}
{p24, not_p15, p2, p25, not_p5, not_p8, not_p21, not_p4, p5, p14, p16}
{p24, p8, p1, p22, not_p4, p5, p14, p16, p6}
{p24, p8, p25, p18, p1, not_p5, not_p21, not_p4, p5, p14, p16, p6}
{p24, p8, p25, p18, p1, not_p5, not_p12, not_p4, p5, p14, p16, p6}
{p24, p8, p25, p20, p1, not_p5, not_p21, not_p4, p5, p14, p16, p6}
{p24, p8, p25, p20, p1, not_p5, not_p12, not_p4, p5, p14, p16, p6}
{p24, p20, p1, not_p19, p17, not_p21, p9, p4, p5, p21, p16}
{p24, p20, p1, not_p5, p17, not_p21, p9, p4, p5, p21, p16}
{p24, p20, p1, not_p19, p17, not_p12, p9, p4, p5, p21, p16}
{p24, p20, p1, not_p5, p17, not_p12, p9, p4, p5, p21, p16}
{p24, p11, p20, p1, not_p19, p17, not_p12, p9, p4, p5, p16}
{p24, p25, p20, p1, not_p19, p17, not_p12, p9, p4, p5, p16}
{p24, p11, p20, p1, not_p5, p17, not_p12, p9, p4, p5, p16}
{p24, p25, p20, p1, not_p5, p17, not_p12, p9, p4, p5, p16}
{p24, p11, p20, p1, not_p19, p17, not_p21, p9, p4, p5, p16}
{p24, p25, p20, p1, not_p19, p17, not_p21, p9, p4, p5, p16}
{p24, p1, p22, p17, p9, p4, p5, p16}
{p24, p11, p20, p1, not_p5, p17, not_p21, p9, p4, p5, p16}
{p24, p25, p20, p1, not_p5, p17, not_p21, p9, p4, p5, p16}
{p24, not_p20, p1, p22, not_p5, p17, p4, p14}
{p24, not_p20, p18, p1, not_p5, p17, not_p21, p4, p14}
{p24, not_p20, p20, p1, not_p5, p17, not_p21, p4, p14}
{p24, not_p20, p3, p1, not_p5, p17, not_p21, p4, p14}
{p24, not_p20, p18, p1, not_p5, p17, not_p12, p4, p14}
{p24, not_p20, p20, p1, not_p5, p17, not_p12, p4, p14}
{p24, not_p20, p1, p22, not_p5, p17, p4, not_p3}
{p24, not_p20, p1, p22, not_p5, p17, p4, not_p25}
{p24, not_p20, p1, p22, not_p5, p12, p17, p9, p4}
{p24, not_p20, p18, p1, p22, not_p5, p17, p9, p4}
{p24, not_p15, not_p20, p1, p22, not_p5, p17, p9, p4}
{p24, not_p20, p18, p1, not_p5, p17, not_p21, p4, not_p3}
{p24, not_p20, p18, p1, not_p5, p17, not_p12, p4, not_p3}
{p24, not_p20, p20, p1, not_p5, p17, not_p21, p4, not_p3}
{p24, not_p20, p20, p1, not_p5, p17, not_p12, p4, not_p3}
{p24, not_p20, p3, p1, not_p5, p17, not_p21, p4, not_p3}
{p24, not_p20, p18, p1, not_p5, p17, not_p21, p4, not_p25}
{p24, not_p20, p18, p1, not_p5, p17, not_p12, p4, not_p25}
{p24, not_p20, p20, p1, not_p5, p17, not_p21, p4, not_p25}
{p24, not_p20, p20, p1, not_p5, p17, not_p12, p4, not_p25}
{p24, not_p20, p3, p1, not_p5, p17, not_p21, p4, not_p25}
{p24, not_p20, p18, p1, not_p5, p17, not_p21, p9, p4}
{p24, not_p20, p18, p1, not_p5, p17, not_p12, p9, p4}
{p24, not_p20, p20, p1, not_p5, p12, p17, not_p21, p9, p4}
{p24, not_p20, p20, p1, not_p5, p12, p17, not_p12, p9, p4}
{p24, not_p15, not_p20, p20, p1, not_p5, p17, not_p21, p9, p4}
{p24, not_p15, not_p20, p20, p1, not_p5, p17, not_p12, p9, p4}
{p24, not_p20, p3, p1, not_p5, p12, p17, not_p21, p9, p4}
{p24, not_p15, not_p20, p3, p1, not_p5, p17, not_p21, p9, p4}
{p24, p11, p1, p22, not_p5, p12, p17, p9, p4}
{p24, not_p15, p11, p1, p22, not_p5, p17, p9, p4, p21}
{p24, p25, p1, p22, not_p5, p17, p4, p21, p14}
{p24, p25, p1, p22, not_p5, p17, p4, p21, not_p3}
{p24, p25, p1, p22, not_p5, p17, p4, p21, not_p25}
{p24, p25, p18, p1, p22, not_p5, p17, p9, p4, p21}
{p24, not_p15, p25, p1, p22, not_p5, p17, p9, p4, p21}
{p24, p25, p1, p22, not_p5, p12, p17, not_p24, p4, not_p3}
{p24, p25, p1, p22, not_p5, p12, p17, not_p2, p4, not_p3}
{p24, p25, p1, p22, not_p5, p12, p17, p9, p4}
{p24, p25, p1, p22, not_p5, p12, p17, not_p24, p4, p14}
{p24, p25, p1, p22, not_p5, p12, p17, not_p2, p4, p14}
{p24, p25, p1, p22, not_p5, p12, p17, not_p24, p4, not_p25}
{p24, p25, p1, p22, not_p5, p12, p17, not_p2, p4, not_p25}
{p24, p25, p18, p1, not_p5, p17, not_p21, p4, p21, p14}
{p24, p25, p18, p1, not_p5, p17, not_p21, p9, p4, p21}
{p24, p25, p18, p1, not_p5, p17, not_p21, p4, p21, not_p3}
{p24, p25, p18, p1, not_p5, p17, not_p21, p4, p21, not_p25}
{p24, p25, p18, p1, not_p5, p17, not_p12, p4, p21, p14}
{p24, p25, p18, p1, not_p5, p17, not_p12, p9, p4, p21}
{p24, p25, p18, p1, not_p5, p17, not_p12, p4, p21, not_p3}
{p24, p25, p18, p1, not_p5, p17, not_p12, p4, p21, not_p25}
{p24, not_p15, p11, p20, p1, not_p5, p17, not_p21, p9, p4, p21}
{p24, not_p15, p11, p20, p1, not_p5, p17, not_p12, p9, p4, p21}
{p24, p25, p20, p1, not_p5, p17, not_p21, p4, p21, p14}
{p24, p25, p20, p1, not_p5, p17, not_p21, p4, p21, not_p3}
{p24, p25, p20, p1, not_p5, p17, not_p21, p4, p21, not_p25}
{p24, not_p15, p25, p20, p1, not_p5, p17, not_p21, p9, p4, p21}
{p24, p25, p20, p1, not_p5, p17, not_p12, p4, p21, p14}
{p24, p25, p20, p1, not_p5, p17, not_p12, p4, p21, not_p3}
{p24, p25, p20, p1, not_p5, p17, not_p12, p4, p21, not_p25}
{p24, not_p15, p25, p20, p1, not_p5, p17, not_p12, p9, p4, p21}
{p24, not_p15, p11, p3, p1, not_p5, p17, not_p21, p9, p4, p21}
{p24, p25, p3, p1, not_p5, p17, not_p21, p4, p21, p14}
{p24, p25, p3, p1, not_p5, p17, not_p21, p4, p21, not_p3}
{p24, p25, p3, p1, not_p5, p17, not_p21, p4, p21, not_p25}
{p24, not_p15, p25, p3, p1, not_p5, p17, not_p21, p9, p4, p21}
{p24, p11, p20, p1, not_p5, p12, p17, not_p12, p9, p4}
{p24, p11, p20, p1, not_p5, p12, p17, not_p21, p9, p4}
{p24, p11, p3, p1, not_p5, p12, p17, not_p21, p9, p4}
{p24, p25, p18, p1, not_p5, p12, p17, not_p21, p9, p4}
{p24, p25, p18, p1, not_p5, p12, p17, not_p12, p9, p4}
{p24, p25, p18, p1, not_p5, p12, p17, not_p21, not_p24, p4, p14}
{p24, p25, p18, p1, not_p5, p12, p17, not_p21, not_p24, p4, not_p3}
{p24, p25, p18, p1, not_p5, p12, p17, not_p21, not_p24, p4, not_p25}
{p24, p25, p18, p1, not_p5, p12, p17, not_p21, not_p2, p4, p14}
{p24, p25, p18, p1, not_p5, p12, p17, not_p21, not_p2, p4, not_p3}
{p24, p25, p18, p1, not_p5, p12, p17, not_p21, not_p2, p4, not_p25}
{p24, p25, p18, p1, not_p5, p12, p17, not_p12, not_p24, p4, p14}
{p24, p25, p18, p1, not_p5, p12, p17, not_p12, not_p2, p4, p14}
{p24, p25, p18, p1, not_p5, p12, p17, not_p12, not_p24, p4, not_p3}
{p24, p25, p18, p1, not_p5, p12, p17, not_p12, not_p2, p4, not_p3}
{p24, p25, p18, p1, not_p5, p12, p17, not_p12, not_p24, p4, not_p25}
{p24, p25, p18, p1, not_p5, p12, p17, not_p12, not_p2, p4, not_p25}
{p24, p25, p20, p1, not_p5, p12, p17, not_p21, p9, p4}
{p24, p25, p20, p1, not_p5, p12, p17, not_p12, p9, p4}
{p24, p25, p20, p1, not_p5, p12, p17, not_p21, not_p24, p4, p14}
{p24, p25, p20, p1, not_p5, p12, p17, not_p21, not_p24, p4, not_p3}
{p24, p25, p20, p1, not_p5, p12, p17, not_p21, not_p24, p4, not_p25}
{p24, p25, p20, p1, not_p5, p12, p17, not_p21, not_p2, p4, p14}
{p24, p25, p20, p1, not_p5, p12, p17, not_p21, not_p2, p4, not_p3}
{p24, p25, p20, p1, not_p5, p12, p17, not_p21, not_p2, p4, not_p25}
{p24, p25, p20, p1, not_p5, p12, p17, not_p12, not_p24, p4, p14}
{p24, p25, p20, p1, not_p5, p12, p17, not_p12, not_p2, p4, p14}
{p24, p25, p20, p1, not_p5, p12, p17, not_p12, not_p24, p4, not_p3}
{p24, p25, p20, p1, not_p5, p12, p17, not_p12, not_p2, p4, not_p3}
{p24, p25, p20, p1, not_p5, p12, p17, not_p12, not_p24, p4, not_p25}
{p24, p25, p20, p1, not_p5, p12, p17, not_p12, not_p2, p4, not_p25}
{p24, p25, p3, p1, not_p5, p12, p17, not_p21, p9, p4}
{p24, p25, p3, p1, not_p5, p12, p17, not_p21, not_p24, p4, p14}
{p24, p25, p3, p1, not_p5, p12, p17, not_p21, not_p24, p4, not_p3}
{p24, p25, p3, p1, not_p5, p12, p17, not_p21, not_p24, p4, not_p25}
{p24, p25, p3, p1, not_p5, p12, p17, not_p21, not_p2, p4, p14}
{p24, p25, p3, p1, not_p5, p12, p17, not_p21, not_p2, p4, not_p3}
{p24, p25, p3, p1, not_p5, p12, p17, not_p21, not_p2, p4, not_p25}
{p24, p25, p1, p22, not_p5, not_p8, p4, not_p3}
{p24, p1, p22, not_p5, p12, not_p8, not_p24, p4, not_p3}
{p24, p1, p22, not_p5, p12, not_p8, not_p2, p4, not_p3}
{p24, p25, p1, p22, not_p5, not_p8, p4, not_p25}
{p24, p1, p22, not_p5, p12, not_p8, not_p24, p4, not_p25}
{p24, p1, p22, not_p5, p12, not_p8, not_p2, p4, not_p25}
{p24, p1, p22, not_p5, p12, not_p8, not_p24, p4, p14}
{p24, p1, p22, not_p5, p12, not_p8, not_p2, p4, p14}
{p24, p25, p1, p22, not_p5, not_p8, p4, p21, p14}
{p24, p25, not_p20, p1, p22, not_p5, not_p8, p4, p14}
{p24, p25, p18, p1, p22, not_p5, not_p8, p9, p4}
{p24, not_p15, p25, p1, p22, not_p5, not_p8, p9, p4}
{p24, p1, p22, not_p5, p12, not_p8, p9, p4}
{p24, p8, not_p15, p1, p22, not_p5, not_p8, p9, p4}
{p24, p25, p18, p1, not_p5, not_p8, not_p21, p4, not_p3}
{p24, p25, p18, p1, not_p5, not_p8, not_p12, p4, not_p3}
{p24, p18, p1, not_p5, p12, not_p8, not_p21, not_p24, p4, not_p3}
{p24, p18, p1, not_p5, p12, not_p8, not_p21, not_p2, p4, not_p3}
{p24, p18, p1, not_p5, p12, not_p8, not_p12, not_p24, p4, not_p3}
{p24, p18, p1, not_p5, p12, not_p8, not_p12, not_p2, p4, not_p3}
{p24, p25, p20, p1, not_p5, not_p8, not_p21, p4, not_p3}
{p24, p25, p20, p1, not_p5, not_p8, not_p12, p4, not_p3}
{p24, p20, p1, not_p5, p12, not_p8, not_p21, not_p24, p4, not_p3}
{p24, p20, p1, not_p5, p12, not_p8, not_p21, not_p2, p4, not_p3}
{p24, p20, p1, not_p5, p12, not_p8, not_p12, not_p24, p4, not_p3}
{p24, p20, p1, not_p5, p12, not_p8, not_p12, not_p2, p4, not_p3}
{p24, p25, p3, p1, not_p5, not_p8, not_p21, p4, not_p3}
{p24, p3, p1, not_p5, p12, not_p8, not_p21, not_p24, p4, not_p3}
{p24, p3, p1, not_p5, p12, not_p8, not_p21, not_p2, p4, not_p3}
{p24, p25, p18, p1, not_p5, not_p8, not_p21, p4, not_p25}
{p24, p25, p18, p1, not_p5, not_p8, not_p12, p4, not_p25}
{p24, p18, p1, not_p5, p12, not_p8, not_p21, not_p24, p4, not_p25}
{p24, p18, p1, not_p5, p12, not_p8, not_p21, not_p2, p4, not_p25}
{p24, p18, p1, not_p5, p12, not_p8, not_p12, not_p24, p4, not_p25}
{p24, p18, p1, not_p5, p12, not_p8, not_p12, not_p2, p4, not_p25}
{p24, p25, p20, p1, not_p5, not_p8, not_p21, p4, not_p25}
{p24, p25, p20, p1, not_p5, not_p8, not_p12, p4, not_p25}
{p24, p20, p1, not_p5, p12, not_p8, not_p21, not_p24, p4, not_p25}
{p24, p20, p1, not_p5, p12, not_p8, not_p21, not_p2, p4, not_p25}
{p24, p20, p1, not_p5, p12, not_p8, not_p12, not_p24, p4, not_p25}
{p24, p20, p1, not_p5, p12, not_p8, not_p12, not_p2, p4, not_p25}
{p24, p25, p3, p1, not_p5, not_p8, not_p21, p4, not_p25}
{p24, p3, p1, not_p5, p12, not_p8, not_p21, not_p24, p4, not_p25}
{p24, p3, p1, not_p5, p12, not_p8, not_p21, not_p2, p4, not_p25}
{p24, p18, p1, not_p5, p12, not_p8, not_p21, p9, p4}
{p24, p18, p1, not_p5, p12, not_p8, not_p12, p9, p4}
{p24, p25, p18, p1, not_p5, not_p8, not_p21, p9, p4}
{p24, p25, p18, p1, not_p5, not_p8, not_p12, p9, p4}
{p24, p20, p1, not_p5, p12, not_p8, not_p21, p9, p4}
{p24, p20, p1, not_p5, p12, not_p8, not_p12, p9, p4}
{p24, not_p15, p25, p20, p1, not_p5, not_p8, not_p21, p9, p4}
{p24, not_p15, p25, p20, p1, not_p5, not_p8, not_p12, p9, p4}
{p24, p8, not_p15, p20, p1, not_p5, not_p8, not_p21, p9, p4}
{p24, p8, not_p15, p20, p1, not_p5, not_p8, not_p12, p9, p4}
{p24, p3, p1, not_p5, p12, not_p8, not_p21, p9, p4}
{p24, not_p15, p25, p3, p1, not_p5, not_p8, not_p21, p9, p4}
{p24, p8, not_p15, p3, p1, not_p5, not_p8, not_p21, p9, p4}
{p24, p18, p1, not_p5, p12, not_p8, not_p21, not_p24, p4, p14}
{p24, p18, p1, not_p5, p12, not_p8, not_p12, not_p24, p4, p14}
{p24, p18, p1, not_p5, p12, not_p8, not_p21, not_p2, p4, p14}
{p24, p18, p1, not_p5, p12, not_p8, not_p12, not_p2, p4, p14}
{p24, p20, p1, not_p5, p12, not_p8, not_p21, not_p24, p4, p14}
{p24, p20, p1, not_p5, p12, not_p8, not_p12, not_p24, p4, p14}
{p24, p20, p1, not_p5, p12, not_p8, not_p21, not_p2, p4, p14}
{p24, p20, p1, not_p5, p12, not_p8, not_p12, not_p2, p4, p14}
{p24, p3, p1, not_p5, p12, not_p8, not_p21, not_p24, p4, p14}
{p24, p3, p1, not_p5, p12, not_p8, not_p21, not_p2, p4, p14}
{p24, p25, p18, p1, not_p5, not_p8, not_p21, p4, p21, p14}
{p24, p25, p18, p1, not_p5, not_p8, not_p12, p4, p21, p14}
{p24, p25, not_p20, p18, p1, not_p5, not_p8, not_p21, p4, p14}
{p24, p25, not_p20, p18, p1, not_p5, not_p8, not_p12, p4, p14}
{p24, p25, p20, p1, not_p5, not_p8, not_p21, p4, p21, p14}
{p24, p25, p20, p1, not_p5, not_p8, not_p12, p4, p21, p14}
{p24, p25, not_p20, p20, p1, not_p5, not_p8, not_p21, p4, p14}
{p24, p25, not_p20, p20, p1, not_p5, not_p8, not_p12, p4, p14}
{p24, p25, p3, p1, not_p5, not_p8, not_p21, p4, p21, p14}
{p24, p25, not_p20, p3, p1, not_p5, not_p8, not_p21, p4, p14}
{p24, p8, not_p15, p1, p22, not_p5, not_p19, p9, p4}
{p24, p8, not_p15, p20, p1, not_p5, not_p19, not_p21, p9, p4}
{p24, p8, not_p15, p3, p1, not_p5, not_p19, not_p21, p9, p4}
{p24, p8, not_p15, p20, p1, not_p5, not_p19, not_p12, p9, p4}
{p24, p1, p22, not_p5, p12, p9, p4, p6}
{p24, p18, p1, not_p5, p12, not_p21, p9, p4, p6}
{p24, p20, p1, not_p5, p12, not_p21, p9, p4, p6}
{p24, p3, p1, not_p5, p12, not_p21, p9, p4, p6}
{p24, p18, p1, not_p5, p12, not_p12, p9, p4, p6}
{p24, p20, p1, not_p5, p12, not_p12, p9, p4, p6}
{p24, p1, p22, not_p5, p12, not_p24, p4, p14, p6}
{p24, p18, p1, not_p5, p12, not_p21, not_p24, p4, p14, p6}
{p24, p20, p1, not_p5, p12, not_p21, not_p24, p4, p14, p6}
{p24, p3, p1, not_p5, p12, not_p21, not_p24, p4, p14, p6}
{p24, p18, p1, not_p5, p12, not_p12, not_p24, p4, p14, p6}
{p24, p20, p1, not_p5, p12, not_p12, not_p24, p4, p14, p6}
{p24, p1, p22, not_p5, p12, not_p2, p4, p14, p6}
{p24, p18, p1, not_p5, p12, not_p21, not_p2, p4, p14, p6}
{p24, p20, p1, not_p5, p12, not_p21, not_p2, p4, p14, p6}
{p24, p3, p1, not_p5, p12, not_p21, not_p2, p4, p14, p6}
{p24, p18, p1, not_p5, p12, not_p12, not_p2, p4, p14, p6}
{p24, p20, p1, not_p5, p12, not_p12, not_p2, p4, p14, p6}
{p24, p1, p22, not_p5, p12, not_p24, p4, p6, not_p3}
{p24, p1, p22, not_p5, p12, not_p2, p4, p6, not_p3}
{p24, p1, p22, not_p5, p12, not_p24, p4, p6, not_p25}
{p24, p1, p22, not_p5, p12, not_p2, p4, p6, not_p25}
{p24, p18, p1, not_p5, p12, not_p21, not_p24, p4, p6, not_p3}
{p24, p18, p1, not_p5, p12, not_p12, not_p24, p4, p6, not_p3}
{p24, p18, p1, not_p5, p12, not_p21, not_p2, p4, p6, not_p3}
{p24, p18, p1, not_p5, p12, not_p12, not_p2, p4, p6, not_p3}
{p24, p20, p1, not_p5, p12, not_p21, not_p24, p4, p6, not_p3}
{p24, p20, p1, not_p5, p12, not_p12, not_p24, p4, p6, not_p3}
{p24, p20, p1, not_p5, p12, not_p21, not_p2, p4, p6, not_p3}
{p24, p20, p1, not_p5, p12, not_p12, not_p2, p4, p6, not_p3}
{p24, p3, p1, not_p5, p12, not_p21, not_p24, p4, p6, not_p3}
{p24, p3, p1, not_p5, p12, not_p21, not_p2, p4, p6, not_p3}
{p24, p18, p1, not_p5, p12, not_p21, not_p24, p4, p6, not_p25}
{p24, p18, p1, not_p5, p12, not_p12, not_p24, p4, p6, not_p25}
{p24, p18, p1, not_p5, p12, not_p21, not_p2, p4, p6, not_p25}
{p24, p18, p1, not_p5, p12, not_p12, not_p2, p4, p6, not_p25}
{p24, p20, p1, not_p5, p12, not_p21, not_p24, p4, p6, not_p25}
{p24, p20, p1, not_p5, p12, not_p12, not_p24, p4, p6, not_p25}
{p24, p20, p1, not_p5, p12, not_p21, not_p2, p4, p6, not_p25}
{p24, p20, p1, not_p5, p12, not_p12, not_p2, p4, p6, not_p25}
{p24, p3, p1, not_p5, p12, not_p21, not_p24, p4, p6, not_p25}
{p24, p3, p1, not_p5, p12, not_p21, not_p2, p4, p6, not_p25}
{p24, p1, p22, not_p5, not_p19, p12, p9, p4}
{p24, p18, p1, not_p5, not_p19, p12, not_p21, p9, p4}
{p24, p20, p1, not_p5, not_p19, p12, not_p21, p9, p4}
{p24, p3, p1, not_p5, not_p19, p12, not_p21, p9, p4}
{p24, p18, p1, not_p5, not_p19, p12, not_p12, p9, p4}
{p24, p20, p1, not_p5, not_p19, p12, not_p12, p9, p4}
{p24, p1, p22, not_p5, not_p19, p12, not_p24, p4, p14}
{p24, p18, p1, not_p5, not_p19, p12, not_p21, not_p24, p4, p14}
{p24, p20, p1, not_p5, not_p19, p12, not_p21, not_p24, p4, p14}
{p24, p3, p1, not_p5, not_p19, p12, not_p21, not_p24, p4, p14}
{p24, p18, p1, not_p5, not_p19, p12, not_p12, not_p24, p4, p14}
{p24, p20, p1, not_p5, not_p19, p12, not_p12, not_p24, p4, p14}
{p24, p1, p22, not_p5, not_p19, p12, not_p2, p4, p14}
{p24, p18, p1, not_p5, not_p19, p12, not_p21, not_p2, p4, p14}
{p24, p20, p1, not_p5, not_p19, p12, not_p21, not_p2, p4, p14}
{p24, p3, p1, not_p5, not_p19, p12, not_p21, not_p2, p4, p14}
{p24, p18, p1, not_p5, not_p19, p12, not_p12, not_p2, p4, p14}
{p24, p20, p1, not_p5, not_p19, p12, not_p12, not_p2, p4, p14}
{p24, p1, p22, not_p5, not_p19, p12, not_p24, p4, not_p3}
{p24, p1, p22, not_p5, not_p19, p12, not_p2, p4, not_p3}
{p24, p1, p22, not_p5, not_p19, p12, not_p24, p4, not_p25}
{p24, p1, p22, not_p5, not_p19, p12, not_p2, p4, not_p25}
{p24, p18, p1, not_p5, not_p19, p12, not_p21, not_p24, p4, not_p3}
{p24, p18, p1, not_p5, not_p19, p12, not_p12, not_p24, p4, not_p3}
{p24, p18, p1, not_p5, not_p19, p12, not_p21, not_p2, p4, not_p3}
{p24, p18, p1, not_p5, not_p19, p12, not_p12, not_p2, p4, not_p3}
{p24, p18, p1, not_p5, not_p19, p12, not_p21, not_p24, p4, not_p25}
{p24, p18, p1, not_p5, not_p19, p12, not_p12, not_p24, p4, not_p25}
{p24, p18, p1, not_p5, not_p19, p12, not_p21, not_p2, p4, not_p25}
{p24, p18, p1, not_p5, not_p19, p12, not_p12, not_p2, p4, not_p25}
{p24, p20, p1, not_p5, not_p19, p12, not_p21, not_p24, p4, not_p3}
{p24, p20, p1, not_p5, not_p19, p12, not_p12, not_p24, p4, not_p3}
{p24, p20, p1, not_p5, not_p19, p12, not_p21, not_p2, p4, not_p3}
{p24, p20, p1, not_p5, not_p19, p12, not_p12, not_p2, p4, not_p3}
{p24, p20, p1, not_p5, not_p19, p12, not_p21, not_p24, p4, not_p25}
{p24, p20, p1, not_p5, not_p19, p12, not_p12, not_p24, p4, not_p25}
{p24, p20, p1, not_p5, not_p19, p12, not_p21, not_p2, p4, not_p25}
{p24, p20, p1, not_p5, not_p19, p12, not_p12, not_p2, p4, not_p25}
{p24, p3, p1, not_p5, not_p19, p12, not_p21, not_p24, p4, not_p3}
{p24, p3, p1, not_p5, not_p19, p12, not_p21, not_p2, p4, not_p3}
{p24, p3, p1, not_p5, not_p19, p12, not_p21, not_p24, p4, not_p25}
{p24, p3, p1, not_p5, not_p19, p12, not_p21, not_p2, p4, not_p25}
{p24, p22, not_p16, p17, p4, p5, p14}
{p24, p22, p12, p17, p4, p5, p14}
{p24, p1, p22, p17, p4, p5, p14}
{p24, not_p15, p2, p22, p17, not_p4, p4, p5, p14}
{p24, not_p15, p2, p22, p17, not_p1, p4, p5, p14}
{p24, p22, not_p16, p17, p4, p5, not_p3}
{p24, p22, p12, p17, p4, p5, not_p3}
{p24, p1, p22, p17, p4, p5, not_p3}
{p24, not_p15, p2, p22, p17, not_p4, p4, p5, not_p3}
{p24, not_p15, p2, p22, p17, not_p1, p4, p5, not_p3}
{p24, p22, not_p16, p17, p4, p5, not_p25}
{p24, p22, p12, p17, p4, p5, not_p25}
{p24, p1, p22, p17, p4, p5, not_p25}
{p24, not_p15, p2, p22, p17, not_p4, p4, p5, not_p25}
{p24, not_p15, p2, p22, p17, not_p1, p4, p5, not_p25}
{p24, p22, not_p16, p17, p9, p4, p5}
{p24, p22, p12, p17, p9, p4, p5}
{p24, p18, p1, p22, p17, p9, p4, p5}
{p24, not_p15, p1, p22, p17, p9, p4, p5}
{p24, not_p15, p2, p22, p17, p9, not_p4, p4, p5}
{p24, not_p15, p2, p22, p17, p9, not_p1, p4, p5}
{p24, not_p19, not_p16, p17, not_p21, p4, p5, p21, not_p3}
{p24, p25, not_p19, not_p16, p17, not_p21, p4, p5, not_p3}
{p24, p18, p1, not_p19, p17, not_p21, p4, p5, p21, not_p3}
{p24, p25, p18, p1, not_p19, p17, not_p21, p4, p5, not_p3}
{p24, p20, p1, not_p19, p17, not_p21, p4, p5, p21, not_p3}
{p24, p25, p20, p1, not_p19, p17, not_p21, p4, p5, not_p3}
{p24, not_p15, p2, not_p19, p17, not_p21, not_p4, p4, p5, p21, not_p3}
{p24, not_p15, p2, not_p19, p17, not_p21, not_p1, p4, p5, p21, not_p3}
{p24, not_p15, p2, p25, not_p19, p17, not_p21, not_p4, p4, p5, not_p3}
{p24, not_p15, p2, p25, not_p19, p17, not_p21, not_p1, p4, p5, not_p3}
{p24, not_p5, not_p16, p17, not_p21, p4, p5, p21, not_p3}
{p24, p25, not_p5, not_p16, p17, not_p21, p4, p5, not_p3}
{p24, not_p5, p12, p17, not_p21, p4, p5, p21, not_p3}
{p24, p18, p1, not_p5, p17, not_p21, p4, p5, p21, not_p3}
{p24, p25, p18, p1, not_p5, p17, not_p21, p4, p5, not_p3}
{p24, p20, p1, not_p5, p17, not_p21, p4, p5, p21, not_p3}
{p24, p25, p20, p1, not_p5, p17, not_p21, p4, p5, not_p3}
{p24, not_p15, p2, not_p5, p17, not_p21, not_p4, p4, p5, p21, not_p3}
{p24, not_p15, p2, p25, not_p5, p17, not_p21, not_p4, p4, p5, not_p3}
{p24, not_p15, p2, not_p5, p17, not_p21, not_p1, p4, p5, p21, not_p3}
{p24, not_p15, p2, p25, not_p5, p17, not_p21, not_p1, p4, p5, not_p3}
{p24, not_p19, not_p16, p17, not_p12, p4, p5, p21, not_p3}
{p24, p25, not_p19, not_p16, p17, not_p12, p4, p5, not_p3}
{p24, p18, p1, not_p19, p17, not_p12, p4, p5, p21, not_p3}
{p24, p25, p18, p1, not_p19, p17, not_p12, p4, p5, not_p3}
{p24, p20, p1, not_p19, p17, not_p12, p4, p5, p21, not_p3}
{p24, p25, p20, p1, not_p19, p17, not_p12, p4, p5, not_p3}
{p24, not_p15, p2, not_p19, p17, not_p12, not_p4, p4, p5, p21, not_p3}
{p24, not_p15, p2, not_p19, p17, not_p12, not_p1, p4, p5, p21, not_p3}
{p24, not_p15, p2, p25, not_p19, p17, not_p12, not_p4, p4, p5, not_p3}
{p24, not_p15, p2, p25, not_p19, p17, not_p12, not_p1, p4, p5, not_p3}
{p24, not_p5, not_p16, p17, not_p12, p4, p5, p21, not_p3}
{p24, p25, not_p5, not_p16, p17, not_p12, p4, p5, not_p3}
{p24, not_p5, p12, p17, not_p12, p4, p5, p21, not_p3}
{p24, p18, p1, not_p5, p17, not_p12, p4, p5, p21, not_p3}
{p24, p25, p18, p1, not_p5, p17, not_p12, p4, p5, not_p3}
{p24, p20, p1, not_p5, p17, not_p12, p4, p5, p21, not_p3}
{p24, p25, p20, p1, not_p5, p17, not_p12, p4, p5, not_p3}
{p24, not_p15, p2, not_p5, p17, not_p12, not_p4, p4, p5, p21, not_p3}
{p24, not_p15, p2, not_p5, p17, not_p12, not_p1, p4, p5, p21, not_p3}
{p24, not_p15, p2, p25, not_p5, p17, not_p12, not_p4, p4, p5, not_p3}
{p24, not_p15, p2, p25, not_p5, p17, not_p12, not_p1, p4, p5, not_p3}
{p24, not_p19, not_p16, p17, not_p21, p4, p5, p21, not_p25}
{p24, p25, not_p19, not_p16, p17, not_p21, p4, p5, not_p25}
{p24, p18, p1, not_p19, p17, not_p21, p4, p5, p21, not_p25}
{p24, p25, p18, p1, not_p19, p17, not_p21, p4, p5, not_p25}
{p24, p20, p1, not_p19, p17, not_p21, p4, p5, p21, not_p25}
{p24, p25, p20, p1, not_p19, p17, not_p21, p4, p5, not_p25}
{p24, not_p15, p2, not_p19, p17, not_p21, not_p4, p4, p5, p21, not_p25}
{p24, not_p15, p2, not_p19, p17, not_p21, not_p1, p4, p5, p21, not_p25}
{p24, not_p15, p2, p25, not_p19, p17, not_p21, not_p4, p4, p5, not_p25}
{p24, not_p15, p2, p25, not_p19, p17, not_p21, not_p1, p4, p5, not_p25}
{p24, not_p5, not_p16, p17, not_p21, p4, p5, p21, not_p25}
{p24, p25, not_p5, not_p16, p17, not_p21, p4, p5, not_p25}
{p24, not_p5, p12, p17, not_p21, p4, p5, p21, not_p25}
{p24, p18, p1, not_p5, p17, not_p21, p4, p5, p21, not_p25}
{p24, p25, p18, p1, not_p5, p17, not_p21, p4, p5, not_p25}
{p24, p20, p1, not_p5, p17, not_p21, p4, p5, p21, not_p25}
{p24, p25, p20, p1, not_p5, p17, not_p21, p4, p5, not_p25}
{p24, not_p15, p2, not_p5, p17, not_p21, not_p4, p4, p5, p21, not_p25}
{p24, not_p15, p2, p25, not_p5, p17, not_p21, not_p4, p4, p5, not_p25}
{p24, not_p15, p2, not_p5, p17, not_p21, not_p1, p4, p5, p21, not_p25}
{p24, not_p15, p2, p25, not_p5, p17, not_p21, not_p1, p4, p5, not_p25}
{p24, not_p19, not_p16, p17, not_p12, p4, p5, p21, not_p25}
{p24, p25, not_p19, not_p16, p17, not_p12, p4, p5, not_p25}
{p24, p18, p1, not_p19, p17, not_p12, p4, p5, p21, not_p25}
{p24, p25, p18, p1, not_p19, p17, not_p12, p4, p5, not_p25}
{p24, p20, p1, not_p19, p17, not_p12, p4, p5, p21, not_p25}
{p24, p25, p20, p1, not_p19, p17, not_p12, p4, p5, not_p25}
{p24, not_p15, p2, not_p19, p17, not_p12, not_p4, p4, p5, p21, not_p25}
{p24, not_p15, p2, not_p19, p17, not_p12, not_p1, p4, p5, p21, not_p25}
{p24, not_p15, p2, p25, not_p19, p17, not_p12, not_p4, p4, p5, not_p25}
{p24, not_p15, p2, p25, not_p19, p17, not_p12, not_p1, p4, p5, not_p25}
{p24, not_p5, not_p16, p17, not_p12, p4, p5, p21, not_p25}
{p24, p25, not_p5, not_p16, p17, not_p12, p4, p5, not_p25}
{p24, not_p5, p12, p17, not_p12, p4, p5, p21, not_p25}
{p24, p18, p1, not_p5, p17, not_p12, p4, p5, p21, not_p25}
{p24, p25, p18, p1, not_p5, p17, not_p12, p4, p5, not_p25}
{p24, p20, p1, not_p5, p17, not_p12, p4, p5, p21, not_p25}
{p24, p25, p20, p1, not_p5, p17, not_p12, p4, p5, not_p25}
{p24, not_p15, p2, not_p5, p17, not_p12, not_p4, p4, p5, p21, not_p25}
{p24, not_p15, p2, not_p5, p17, not_p12, not_p1, p4, p5, p21, not_p25}
{p24, not_p15, p2, p25, not_p5, p17, not_p12, not_p4, p4, p5, not_p25}
{p24, not_p15, p2, p25, not_p5, p17, not_p12, not_p1, p4, p5, not_p25}
{p24, not_p5, p12, p17, not_p21, p4, p5, p21, p14}
{p24, not_p5, p12, p17, not_p12, p4, p5, p21, p14}
{p24, not_p19, not_p16, p17, not_p21, p4, p5, p21, p14}
{p24, p18, p1, not_p19, p17, not_p21, p4, p5, p21, p14}
{p24, p20, p1, not_p19, p17, not_p21, p4, p5, p21, p14}
{p24, not_p15, p2, not_p19, p17, not_p21, not_p4, p4, p5, p21, p14}
{p24, not_p15, p2, not_p19, p17, not_p21, not_p1, p4, p5, p21, p14}
{p24, not_p5, not_p16, p17, not_p21, p4, p5, p21, p14}
{p24, p18, p1, not_p5, p17, not_p21, p4, p5, p21, p14}
{p24, p20, p1, not_p5, p17, not_p21, p4, p5, p21, p14}
{p24, not_p15, p2, not_p5, p17, not_p21, not_p4, p4, p5, p21, p14}
{p24, not_p15, p2, not_p5, p17, not_p21, not_p1, p4, p5, p21, p14}
{p24, not_p19, not_p16, p17, not_p12, p4, p5, p21, p14}
{p24, p18, p1, not_p19, p17, not_p12, p4, p5, p21, p14}
{p24, p20, p1, not_p19, p17, not_p12, p4, p5, p21, p14}
{p24, not_p15, p2, not_p19, p17, not_p12, not_p4, p4, p5, p21, p14}
{p24, not_p15, p2, not_p19, p17, not_p12, not_p1, p4, p5, p21, p14}
{p24, not_p5, not_p16, p17, not_p12, p4, p5, p21, p14}
{p24, p18, p1, not_p5, p17, not_p12, p4, p5, p21, p14}
{p24, p20, p1, not_p5, p17, not_p12, p4, p5, p21, p14}
{p24, not_p15, p2, not_p5, p17, not_p12, not_p4, p4, p5, p21, p14}
{p24, not_p15, p2, not_p5, p17, not_p12, not_p1, p4, p5, p21, p14}
{p24, p25, not_p19, not_p16, p17, not_p21, p4, p5, p14}
{p24, p25, p18, p1, not_p19, p17, not_p21, p4, p5, p14}
{p24, p25, p20, p1, not_p19, p17, not_p21, p4, p5, p14}
{p24, not_p15, p2, p25, not_p19, p17, not_p21, not_p4, p4, p5, p14}
{p24, not_p15, p2, p25, not_p19, p17, not_p21, not_p1, p4, p5, p14}
{p24, p25, not_p5, not_p16, p17, not_p21, p4, p5, p14}
{p24, p25, p18, p1, not_p5, p17, not_p21, p4, p5, p14}
{p24, p25, p20, p1, not_p5, p17, not_p21, p4, p5, p14}
{p24, not_p15, p2, p25, not_p5, p17, not_p21, not_p4, p4, p5, p14}
{p24, not_p15, p2, p25, not_p5, p17, not_p21, not_p1, p4, p5, p14}
{p24, p25, not_p19, not_p16, p17, not_p12, p4, p5, p14}
{p24, p25, p18, p1, not_p19, p17, not_p12, p4, p5, p14}
{p24, p25, p20, p1, not_p19, p17, not_p12, p4, p5, p14}
{p24, not_p15, p2, p25, not_p19, p17, not_p12, not_p4, p4, p5, p14}
{p24, not_p15, p2, p25, not_p19, p17, not_p12, not_p1, p4, p5, p14}
{p24, p25, not_p5, not_p16, p17, not_p12, p4, p5, p14}
{p24, p25, p18, p1, not_p5, p17, not_p12, p4, p5, p14}
{p24, p25, p20, p1, not_p5, p17, not_p12, p4, p5, p14}
{p24, not_p15, p2, p25, not_p5, p17, not_p12, not_p4, p4, p5, p14}
{p24, not_p15, p2, p25, not_p5, p17, not_p12, not_p1, p4, p5, p14}
{p24, not_p19, not_p16, p17, not_p21, p9, p4, p5, p21}
{p24, not_p5, not_p16, p17, not_p21, p9, p4, p5, p21}
{p24, not_p19, not_p16, p17, not_p12, p9, p4, p5, p21}
{p24, not_p5, not_p16, p17, not_p12, p9, p4, p5, p21}
{p24, p25, not_p19, not_p16, p17, not_p21, p9, p4, p5}
{p24, p25, not_p5, not_p16, p17, not_p21, p9, p4, p5}
{p24, p25, not_p19, not_p16, p17, not_p12, p9, p4, p5}
{p24, p25, not_p5, not_p16, p17, not_p12, p9, p4, p5}
{p24, p18, p1, not_p19, p17, not_p21, p9, p4, p5, p21}
{p24, p25, p18, p1, not_p19, p17, not_p21, p9, p4, p5}
{p24, not_p15, p20, p1, not_p19, p17, not_p21, p9, p4, p5, p21}
{p24, not_p15, p11, p20, p1, not_p19, p17, not_p21, p9, p4, p5}
{p24, not_p15, p25, p20, p1, not_p19, p17, not_p21, p9, p4, p5}
{p24, not_p15, p2, not_p19, p17, not_p21, p9, not_p4, p4, p5, p21}
{p24, not_p15, p2, not_p19, p17, not_p21, p9, not_p1, p4, p5, p21}
{p24, not_p15, p2, p25, not_p19, p17, not_p21, p9, not_p4, p4, p5}
{p24, not_p15, p2, p25, not_p19, p17, not_p21, p9, not_p1, p4, p5}
{p24, not_p5, p12, p17, not_p21, p9, p4, p5, p21}
{p24, p18, p1, not_p5, p17, not_p21, p9, p4, p5, p21}
{p24, p25, p18, p1, not_p5, p17, not_p21, p9, p4, p5}
{p24, not_p15, p20, p1, not_p5, p17, not_p21, p9, p4, p5, p21}
{p24, not_p15, p11, p20, p1, not_p5, p17, not_p21, p9, p4, p5}
{p24, not_p15, p25, p20, p1, not_p5, p17, not_p21, p9, p4, p5}
{p24, not_p15, p2, not_p5, p17, not_p21, p9, not_p4, p4, p5, p21}
{p24, not_p15, p2, p25, not_p5, p17, not_p21, p9, not_p4, p4, p5}
{p24, not_p15, p2, not_p5, p17, not_p21, p9, not_p1, p4, p5, p21}
{p24, not_p15, p2, p25, not_p5, p17, not_p21, p9, not_p1, p4, p5}
{p24, p18, p1, not_p19, p17, not_p12, p9, p4, p5, p21}
{p24, p25, p18, p1, not_p19, p17, not_p12, p9, p4, p5}
{p24, not_p15, p20, p1, not_p19, p17, not_p12, p9, p4, p5, p21}
{p24, not_p15, p11, p20, p1, not_p19, p17, not_p12, p9, p4, p5}
{p24, not_p15, p25, p20, p1, not_p19, p17, not_p12, p9, p4, p5}
{p24, not_p15, p2, not_p19, p17, not_p12, p9, not_p4, p4, p5, p21}
{p24, not_p15, p2, not_p19, p17, not_p12, p9, not_p1, p4, p5, p21}
{p24, not_p15, p2, p25, not_p19, p17, not_p12, p9, not_p4, p4, p5}
{p24, not_p15, p2, p25, not_p19, p17, not_p12, p9, not_p1, p4, p5}
{p24, not_p5, p12, p17, not_p12, p9, p4, p5, p21}
{p24, p18, p1, not_p5, p17, not_p12, p9, p4, p5, p21}
{p24, p25, p18, p1, not_p5, p17, not_p12, p9, p4, p5}
{p24, not_p15, p20, p1, not_p5, p17, not_p12, p9, p4, p5, p21}
{p24, not_p15, p11, p20, p1, not_p5, p17, not_p12, p9, p4, p5}
{p24, not_p15, p25, p20, p1, not_p5, p17, not_p12, p9, p4, p5}
{p24, not_p15, p2, not_p5, p17, not_p12, p9, not_p4, p4, p5, p21}
{p24, not_p15, p2, not_p5, p17, not_p12, p9, not_p1, p4, p5, p21}
{p24, not_p15, p2, p25, not_p5, p17, not_p12, p9, not_p4, p4, p5}
{p24, not_p15, p2, p25, not_p5, p17, not_p12, p9, not_p1, p4, p5}
{p24, p25, p22, not_p16, not_p8, p4, p5, p14}
{p24, p25, p1, p22, not_p8, p4, p5, p14}
{p24, not_p15, p2, p25, p22, not_p8, not_p4, p4, p5, p14}
{p24, not_p15, p2, p25, p22, not_p8, not_p1, p4, p5, p14}
{p24, p25, not_p19, not_p16, not_p8, not_p21, p4, p5, p14}
{p24, p25, not_p5, not_p16, not_p8, not_p21, p4, p5, p14}
{p24, p25, not_p19, not_p16, not_p8, not_p12, p4, p5, p14}
{p24, p25, not_p5, not_p16, not_p8, not_p12, p4, p5, p14}
{p24, p25, p18, p1, not_p19, not_p8, not_p21, p4, p5, p14}
{p24, p25, p18, p1, not_p5, not_p8, not_p21, p4, p5, p14}
{p24, p25, p18, p1, not_p19, not_p8, not_p12, p4, p5, p14}
{p24, p25, p18, p1, not_p5, not_p8, not_p12, p4, p5, p14}
{p24, p25, p20, p1, not_p19, not_p8, not_p21, p4, p5, p14}
{p24, p25, p20, p1, not_p5, not_p8, not_p21, p4, p5, p14}
{p24, p25, p20, p1, not_p19, not_p8, not_p12, p4, p5, p14}
{p24, p25, p20, p1, not_p5, not_p8, not_p12, p4, p5, p14}
{p24, not_p15, p2, p25, not_p19, not_p8, not_p21, not_p4, p4, p5, p14}
{p24, not_p15, p2, p25, not_p5, not_p8, not_p21, not_p4, p4, p5, p14}
{p24, not_p15, p2, p25, not_p19, not_p8, not_p21, not_p1, p4, p5, p14}
{p24, not_p15, p2, p25, not_p5, not_p8, not_p21, not_p1, p4, p5, p14}
{p24, not_p15, p2, p25, not_p19, not_p8, not_p12, not_p4, p4, p5, p14}
{p24, not_p15, p2, p25, not_p5, not_p8, not_p12, not_p4, p4, p5, p14}
{p24, not_p15, p2, p25, not_p19, not_p8, not_p12, not_p1, p4, p5, p14}
{p24, not_p15, p2, p25, not_p5, not_p8, not_p12, not_p1, p4, p5, p14}
{p24, p8, not_p15, p1, p22, not_p5, not_p4, p6, not_p3}
{p24, p8, p1, p22, not_p5, p12, not_p24, not_p4, p6, not_p3}
{p24, p8, p1, p22, not_p5, p12, not_p2, not_p4, p6, not_p3}
{p24, p8, p1, p22, not_p5, not_p8, not_p4, not_p3}
{p24, p8, p1, p22, not_p5, not_p19, not_p4, not_p3}
{p24, p8, not_p20, p1, p22, not_p5, p17, not_p4, not_p3}
{p24, p8, p25, p1, p22, not_p5, p17, not_p4, p21, not_p3}
{p24, p8, p25, p1, p22, not_p5, p12, p17, not_p24, not_p4, not_p3}
{p24, p8, p25, p1, p22, not_p5, p12, p17, not_p2, not_p4, not_p3}
{p24, p8, not_p15, p1, p22, not_p5, not_p4, p6, not_p25}
{p24, p8, p1, p22, not_p5, p12, not_p24, not_p4, p6, not_p25}
{p24, p8, p1, p22, not_p5, p12, not_p2, not_p4, p6, not_p25}
{p24, p8, p1, p22, not_p5, not_p8, not_p4, not_p25}
{p24, p8, p1, p22, not_p5, not_p19, not_p4, not_p25}
{p24, p8, not_p20, p1, p22, not_p5, p17, not_p4, not_p25}
{p24, p8, p25, p1, p22, not_p5, p17, not_p4, p21, not_p25}
{p24, p8, p25, p1, p22, not_p5, p12, p17, not_p24, not_p4, not_p25}
{p24, p8, p25, p1, p22, not_p5, p12, p17, not_p2, not_p4, not_p25}
{p24, p8, not_p15, p1, p22, not_p5, not_p4, p21, p14, p6}
{p24, p8, p1, p22, not_p5, p12, not_p24, not_p4, p14, p6}
{p24, p8, p1, p22, not_p5, p12, not_p2, not_p4, p14, p6}
{p24, p8, not_p15, not_p20, p1, p22, not_p5, not_p4, p14, p6}
{p24, p8, not_p15, p1, p22, not_p5, p9, not_p4, p6}
{p24, p8, p1, p22, not_p5, p12, p9, not_p4, p6}
{p24, p8, p1, p22, not_p5, not_p8, not_p4, p21, p14}
{p24, p8, p25, p1, p22, not_p5, p17, not_p4, p21, p14}
{p24, p8, p1, p22, not_p5, not_p19, not_p4, p21, p14}
{p24, p8, not_p20, p1, p22, not_p5, not_p8, not_p4, p14}
{p24, p8, p1, p22, not_p5, p12, not_p8, not_p24, not_p4, p14}
{p24, p8, p1, p22, not_p5, p12, not_p8, not_p2, not_p4, p14}
{p24, p8, not_p20, p1, p22, not_p5, not_p19, not_p4, p14}
{p24, p8, p1, p22, not_p5, not_p19, p12, not_p24, not_p4, p14}
{p24, p8, p1, p22, not_p5, not_p19, p12, not_p2, not_p4, p14}
{p24, p8, not_p20, p1, p22, not_p5, p17, not_p4, p14}
{p24, p8, p25, p1, p22, not_p5, p12, p17, not_p24, not_p4, p14}
{p24, p8, p25, p1, p22, not_p5, p12, p17, not_p2, not_p4, p14}
{p24, p8, p18, p1, p22, not_p5, not_p8, p9, not_p4}
{p24, p8, p7, p1, p22, not_p5, p12, not_p8, p9, not_p4}
{p24, p8, not_p15, p7, p1, p22, not_p5, not_p8, p9, not_p4}
{p24, p8, p18, p1, p22, not_p5, not_p19, p9, not_p4}
{p24, p8, not_p20, p18, p1, p22, not_p5, p17, p9, not_p4}
{p24, p8, p25, p18, p1, p22, not_p5, p17, p9, not_p4, p21}
{p24, p8, p25, p18, p1, p22, not_p5, p12, p17, p9, not_p4}
{p24, p8, p7, p1, p22, not_p5, not_p19, p12, p9, not_p4}
{p24, p8, not_p15, p7, p1, p22, not_p5, not_p19, p9, not_p4}
{p24, p8, not_p15, p7, p1, p22, not_p5, p17, p9, not_p4, p21}
{p24, p8, not_p15, p7, not_p20, p1, p22, not_p5, p17, p9, not_p4}
{p24, p8, p7, p1, p22, not_p5, p12, p17, p9, not_p4}
{p24, p8, not_p15, p18, p1, not_p5, not_p21, not_p4, p6, not_p3}
{p24, p8, p18, p1, not_p5, p12, not_p21, not_p24, not_p4, p6, not_p3}
{p24, p8, p18, p1, not_p5, p12, not_p21, not_p2, not_p4, p6, not_p3}
{p24, p8, p18, p1, not_p5, not_p8, not_p21, not_p4, not_p3}
{p24, p8, p18, p1, not_p5, not_p19, not_p21, not_p4, not_p3}
{p24, p8, not_p20, p18, p1, not_p5, p17, not_p21, not_p4, not_p3}
{p24, p8, p25, p18, p1, not_p5, p12, p17, not_p21, not_p24, not_p4, not_p3}
{p24, p8, p25, p18, p1, not_p5, p12, p17, not_p21, not_p2, not_p4, not_p3}
{p24, p8, p25, p18, p1, not_p5, p17, not_p21, not_p4, p21, not_p3}
{p24, p8, not_p15, p18, p1, not_p5, not_p12, not_p4, p6, not_p3}
{p24, p8, p18, p1, not_p5, p12, not_p12, not_p24, not_p4, p6, not_p3}
{p24, p8, p18, p1, not_p5, p12, not_p12, not_p2, not_p4, p6, not_p3}
{p24, p8, p18, p1, not_p5, not_p8, not_p12, not_p4, not_p3}
{p24, p8, p18, p1, not_p5, not_p19, not_p12, not_p4, not_p3}
{p24, p8, not_p20, p18, p1, not_p5, p17, not_p12, not_p4, not_p3}
{p24, p8, p25, p18, p1, not_p5, p12, p17, not_p12, not_p24, not_p4, not_p3}
{p24, p8, p25, p18, p1, not_p5, p12, p17, not_p12, not_p2, not_p4, not_p3}
{p24, p8, p25, p18, p1, not_p5, p17, not_p12, not_p4, p21, not_p3}
{p24, p8, not_p15, p20, p1, not_p5, not_p21, not_p4, p6, not_p3}
{p24, p8, p20, p1, not_p5, p12, not_p21, not_p24, not_p4, p6, not_p3}
{p24, p8, p20, p1, not_p5, p12, not_p21, not_p2, not_p4, p6, not_p3}
{p24, p8, p20, p1, not_p5, not_p8, not_p21, not_p4, not_p3}
{p24, p8, p20, p1, not_p5, not_p19, not_p21, not_p4, not_p3}
{p24, p8, not_p20, p20, p1, not_p5, p17, not_p21, not_p4, not_p3}
{p24, p8, p25, p20, p1, not_p5, p12, p17, not_p21, not_p24, not_p4, not_p3}
{p24, p8, p25, p20, p1, not_p5, p12, p17, not_p21, not_p2, not_p4, not_p3}
{p24, p8, p25, p20, p1, not_p5, p17, not_p21, not_p4, p21, not_p3}
{p24, p8, not_p15, p20, p1, not_p5, not_p12, not_p4, p6, not_p3}
{p24, p8, p20, p1, not_p5, p12, not_p12, not_p24, not_p4, p6, not_p3}
{p24, p8, p20, p1, not_p5, p12, not_p12, not_p2, not_p4, p6, not_p3}
{p24, p8, p20, p1, not_p5, not_p8, not_p12, not_p4, not_p3}
{p24, p8, p20, p1, not_p5, not_p19, not_p12, not_p4, not_p3}
{p24, p8, not_p20, p20, p1, not_p5, p17, not_p12, not_p4, not_p3}
{p24, p8, p25, p20, p1, not_p5, p12, p17, not_p12, not_p24, not_p4, not_p3}
{p24, p8, p25, p20, p1, not_p5, p12, p17, not_p12, not_p2, not_p4, not_p3}
{p24, p8, p25, p20, p1, not_p5, p17, not_p12, not_p4, p21, not_p3}
{p24, p8, not_p15, p3, p1, not_p5, not_p21, not_p4, p6, not_p3}
{p24, p8, p3, p1, not_p5, p12, not_p21, not_p24, not_p4, p6, not_p3}
{p24, p8, p3, p1, not_p5, p12, not_p21, not_p2, not_p4, p6, not_p3}
{p24, p8, p3, p1, not_p5, not_p8, not_p21, not_p4, not_p3}
{p24, p8, p3, p1, not_p5, not_p19, not_p21, not_p4, not_p3}
{p24, p8, not_p20, p3, p1, not_p5, p17, not_p21, not_p4, not_p3}
{p24, p8, p25, p3, p1, not_p5, p12, p17, not_p21, not_p24, not_p4, not_p3}
{p24, p8, p25, p3, p1, not_p5, p12, p17, not_p21, not_p2, not_p4, not_p3}
{p24, p8, p25, p3, p1, not_p5, p17, not_p21, not_p4, p21, not_p3}
{p24, p8, not_p15, p18, p1, not_p5, not_p21, not_p4, p6, not_p25}
{p24, p8, p18, p1, not_p5, p12, not_p21, not_p24, not_p4, p6, not_p25}
{p24, p8, p18, p1, not_p5, p12, not_p21, not_p2, not_p4, p6, not_p25}
{p24, p8, p18, p1, not_p5, not_p8, not_p21, not_p4, not_p25}
{p24, p8, p18, p1, not_p5, not_p19, not_p21, not_p4, not_p25}
{p24, p8, not_p20, p18, p1, not_p5, p17, not_p21, not_p4, not_p25}
{p24, p8, p25, p18, p1, not_p5, p12, p17, not_p21, not_p24, not_p4, not_p25}
{p24, p8, p25, p18, p1, not_p5, p12, p17, not_p21, not_p2, not_p4, not_p25}
{p24, p8, p25, p18, p1, not_p5, p17, not_p21, not_p4, p21, not_p25}
{p24, p8, not_p15, p18, p1, not_p5, not_p12, not_p4, p6, not_p25}
{p24, p8, p18, p1, not_p5, p12, not_p12, not_p24, not_p4, p6, not_p25}
{p24, p8, p18, p1, not_p5, p12, not_p12, not_p2, not_p4, p6, not_p25}
{p24, p8, p18, p1, not_p5, not_p8, not_p12, not_p4, not_p25}
{p24, p8, p18, p1, not_p5, not_p19, not_p12, not_p4, not_p25}
{p24, p8, not_p20, p18, p1, not_p5, p17, not_p12, not_p4, not_p25}
{p24, p8, p25, p18, p1, not_p5, p12, p17, not_p12, not_p24, not_p4, not_p25}
{p24, p8, p25, p18, p1, not_p5, p12, p17, not_p12, not_p2, not_p4, not_p25}
{p24, p8, p25, p18, p1, not_p5, p17, not_p12, not_p4, p21, not_p25}
{p24, p8, not_p15, p20, p1, not_p5, not_p21, not_p4, p6, not_p25}
{p24, p8, p20, p1, not_p5, p12, not_p21, not_p24, not_p4, p6, not_p25}
{p24, p8, p20, p1, not_p5, p12, not_p21, not_p2, not_p4, p6, not_p25}
{p24, p8, p20, p1, not_p5, not_p8, not_p21, not_p4, not_p25}
{p24, p8, p20, p1, not_p5, not_p19, not_p21, not_p4, not_p25}
{p24, p8, not_p20, p20, p1, not_p5, p17, not_p21, not_p4, not_p25}
{p24, p8, p25, p20, p1, not_p5, p12, p17, not_p21, not_p24, not_p4, not_p25}
{p24, p8, p25, p20, p1, not_p5, p12, p17, not_p21, not_p2, not_p4, not_p25}
{p24, p8, p25, p20, p1, not_p5, p17, not_p21, not_p4, p21, not_p25}
{p24, p8, not_p15, p20, p1, not_p5, not_p12, not_p4, p6, not_p25}
{p24, p8, p20, p1, not_p5, p12, not_p12, not_p24, not_p4, p6, not_p25}
{p24, p8, p20, p1, not_p5, p12, not_p12, not_p2, not_p4, p6, not_p25}
{p24, p8, p20, p1, not_p5, not_p8, not_p12, not_p4, not_p25}
{p24, p8, p20, p1, not_p5, not_p19, not_p12, not_p4, not_p25}
{p24, p8, not_p20, p20, p1, not_p5, p17, not_p12, not_p4, not_p25}
{p24, p8, p25, p20, p1, not_p5, p12, p17, not_p12, not_p24, not_p4, not_p25}
{p24, p8, p25, p20, p1, not_p5, p12, p17, not_p12, not_p2, not_p4, not_p25}
{p24, p8, p25, p20, p1, not_p5, p17, not_p12, not_p4, p21, not_p25}
{p24, p8, not_p15, p3, p1, not_p5, not_p21, not_p4, p6, not_p25}
{p24, p8, p3, p1, not_p5, p12, not_p21, not_p24, not_p4, p6, not_p25}
{p24, p8, p3, p1, not_p5, p12, not_p21, not_p2, not_p4, p6, not_p25}
{p24, p8, p3, p1, not_p5, not_p8, not_p21, not_p4, not_p25}
{p24, p8, p3, p1, not_p5, not_p19, not_p21, not_p4, not_p25}
{p24, p8, not_p20, p3, p1, not_p5, p17, not_p21, not_p4, not_p25}
{p24, p8, p25, p3, p1, not_p5, p12, p17, not_p21, not_p24, not_p4, not_p25}
{p24, p8, p25, p3, p1, not_p5, p12, p17, not_p21, not_p2, not_p4, not_p25}
{p24, p8, p25, p3, p1, not_p5, p17, not_p21, not_p4, p21, not_p25}
{p24, p8, p18, p1, not_p5, p12, not_p21, p9, not_p4, p6}
{p24, p8, p18, p1, not_p5, p12, not_p12, p9, not_p4, p6}
{p24, p8, not_p15, p18, p1, not_p5, not_p21, p9, not_p4, p6}
{p24, p8, not_p15, p18, p1, not_p5, not_p12, p9, not_p4, p6}
{p24, p8, p18, p1, not_p5, p12, not_p21, not_p24, not_p4, p14, p6}
{p24, p8, p18, p1, not_p5, p12, not_p21, not_p2, not_p4, p14, p6}
{p24, p8, not_p15, p18, p1, not_p5, not_p21, not_p4, p21, p14, p6}
{p24, p8, not_p15, not_p20, p18, p1, not_p5, not_p21, not_p4, p14, p6}
{p24, p8, not_p15, p18, p1, not_p5, not_p12, not_p4, p21, p14, p6}
{p24, p8, not_p15, not_p20, p18, p1, not_p5, not_p12, not_p4, p14, p6}
{p24, p8, p18, p1, not_p5, p12, not_p12, not_p24, not_p4, p14, p6}
{p24, p8, p18, p1, not_p5, p12, not_p12, not_p2, not_p4, p14, p6}
{p24, p8, not_p15, p20, p1, not_p5, not_p21, p9, not_p4, p6}
{p24, p8, not_p15, p20, p1, not_p5, not_p12, p9, not_p4, p6}
{p24, p8, p20, p1, not_p5, p12, not_p21, p9, not_p4, p6}
{p24, p8, p20, p1, not_p5, p12, not_p12, p9, not_p4, p6}
{p24, p8, p20, p1, not_p5, p12, not_p21, not_p24, not_p4, p14, p6}
{p24, p8, p20, p1, not_p5, p12, not_p21, not_p2, not_p4, p14, p6}
{p24, p8, not_p15, p20, p1, not_p5, not_p21, not_p4, p21, p14, p6}
{p24, p8, not_p15, not_p20, p20, p1, not_p5, not_p21, not_p4, p14, p6}
{p24, p8, not_p15, p20, p1, not_p5, not_p12, not_p4, p21, p14, p6}
{p24, p8, not_p15, not_p20, p20, p1, not_p5, not_p12, not_p4, p14, p6}
{p24, p8, p20, p1, not_p5, p12, not_p12, not_p24, not_p4, p14, p6}
{p24, p8, p20, p1, not_p5, p12, not_p12, not_p2, not_p4, p14, p6}
{p24, p8, not_p15, p3, p1, not_p5, not_p21, p9, not_p4, p6}
{p24, p8, p3, p1, not_p5, p12, not_p21, p9, not_p4, p6}
{p24, p8, p3, p1, not_p5, p12, not_p21, not_p24, not_p4, p14, p6}
{p24, p8, p3, p1, not_p5, p12, not_p21, not_p2, not_p4, p14, p6}
{p24, p8, not_p15, p3, p1, not_p5, not_p21, not_p4, p21, p14, p6}
{p24, p8, not_p15, not_p20, p3, p1, not_p5, not_p21, not_p4, p14, p6}
{p24, p8, p18, p1, not_p5, not_p8, not_p21, p9, not_p4}
{p24, p8, p18, p1, not_p5, not_p8, not_p12, p9, not_p4}
{p24, p8, p18, p1, not_p5, not_p8, not_p21, not_p4, p21, p14}
{p24, p8, not_p20, p18, p1, not_p5, not_p8, not_p21, not_p4, p14}
{p24, p8, p18, p1, not_p5, p12, not_p8, not_p21, not_p24, not_p4, p14}
{p24, p8, p18, p1, not_p5, p12, not_p8, not_p21, not_p2, not_p4, p14}
{p24, p8, p18, p1, not_p5, not_p8, not_p12, not_p4, p21, p14}
{p24, p8, not_p20, p18, p1, not_p5, not_p8, not_p12, not_p4, p14}
{p24, p8, p18, p1, not_p5, p12, not_p8, not_p12, not_p24, not_p4, p14}
{p24, p8, p18, p1, not_p5, p12, not_p8, not_p12, not_p2, not_p4, p14}
{p24, p8, p7, p20, p1, not_p5, p12, not_p8, not_p21, p9, not_p4}
{p24, p8, p7, p20, p1, not_p5, p12, not_p8, not_p12, p9, not_p4}
{p24, p8, not_p15, p7, p20, p1, not_p5, not_p8, not_p21, p9, not_p4}
{p24, p8, not_p15, p7, p20, p1, not_p5, not_p8, not_p12, p9, not_p4}
{p24, p8, p20, p1, not_p5, not_p8, not_p21, not_p4, p21, p14}
{p24, p8, p20, p1, not_p5, not_p8, not_p12, not_p4, p21, p14}
{p24, p8, not_p20, p20, p1, not_p5, not_p8, not_p21, not_p4, p14}
{p24, p8, not_p20, p20, p1, not_p5, not_p8, not_p12, not_p4, p14}
{p24, p8, p20, p1, not_p5, p12, not_p8, not_p21, not_p24, not_p4, p14}
{p24, p8, p20, p1, not_p5, p12, not_p8, not_p21, not_p2, not_p4, p14}
{p24, p8, p20, p1, not_p5, p12, not_p8, not_p12, not_p24, not_p4, p14}
{p24, p8, p20, p1, not_p5, p12, not_p8, not_p12, not_p2, not_p4, p14}
{p24, p8, p3, p1, not_p5, not_p8, not_p21, not_p4, p21, p14}
{p24, p8, not_p20, p3, p1, not_p5, not_p8, not_p21, not_p4, p14}
{p24, p8, p3, p1, not_p5, p12, not_p8, not_p21, not_p24, not_p4, p14}
{p24, p8, p3, p1, not_p5, p12, not_p8, not_p21, not_p2, not_p4, p14}
{p24, p8, p7, p3, p1, not_p5, p12, not_p8, not_p21, p9, not_p4}
{p24, p8, not_p15, p7, p3, p1, not_p5, not_p8, not_p21, p9, not_p4}
{p24, p8, p18, p1, not_p5, not_p19, not_p21, p9, not_p4}
{p24, p8, p18, p1, not_p5, not_p19, not_p12, p9, not_p4}
{p24, p8, not_p20, p18, p1, not_p5, p17, not_p21, p9, not_p4}
{p24, p8, p25, p18, p1, not_p5, p12, p17, not_p21, p9, not_p4}
{p24, p8, p25, p18, p1, not_p5, p17, not_p21, p9, not_p4, p21}
{p24, p8, not_p20, p18, p1, not_p5, p17, not_p12, p9, not_p4}
{p24, p8, p25, p18, p1, not_p5, p12, p17, not_p12, p9, not_p4}
{p24, p8, p25, p18, p1, not_p5, p17, not_p12, p9, not_p4, p21}
{p24, p8, p18, p1, not_p5, not_p19, not_p21, not_p4, p21, p14}
{p24, p8, p18, p1, not_p5, not_p19, not_p12, not_p4, p21, p14}
{p24, p8, not_p20, p18, p1, not_p5, not_p19, not_p21, not_p4, p14}
{p24, p8, not_p20, p18, p1, not_p5, not_p19, not_p12, not_p4, p14}
{p24, p8, p18, p1, not_p5, not_p19, p12, not_p21, not_p24, not_p4, p14}
{p24, p8, p18, p1, not_p5, not_p19, p12, not_p21, not_p2, not_p4, p14}
{p24, p8, p18, p1, not_p5, not_p19, p12, not_p12, not_p24, not_p4, p14}
{p24, p8, p18, p1, not_p5, not_p19, p12, not_p12, not_p2, not_p4, p14}
{p24, p8, not_p20, p18, p1, not_p5, p17, not_p21, not_p4, p14}
{p24, p8, p25, p18, p1, not_p5, p17, not_p21, not_p4, p21, p14}
{p24, p8, p25, p18, p1, not_p5, p12, p17, not_p21, not_p24, not_p4, p14}
{p24, p8, p25, p18, p1, not_p5, p12, p17, not_p21, not_p2, not_p4, p14}
{p24, p8, not_p20, p18, p1, not_p5, p17, not_p12, not_p4, p14}
{p24, p8, p25, p18, p1, not_p5, p17, not_p12, not_p4, p21, p14}
{p24, p8, p25, p18, p1, not_p5, p12, p17, not_p12, not_p24, not_p4, p14}
{p24, p8, p25, p18, p1, not_p5, p12, p17, not_p12, not_p2, not_p4, p14}
{p24, p8, p7, p20, p1, not_p5, p12, p17, not_p21, p9, not_p4, p21}
{p24, p8, p7, not_p20, p20, p1, not_p5, p12, p17, not_p21, p9, not_p4}
{p24, p8, p7, p25, p20, p1, not_p5, p12, p17, not_p21, p9, not_p4}
{p24, p8, p7, p20, p1, not_p5, p12, p17, not_p12, p9, not_p4, p21}
{p24, p8, p7, not_p20, p20, p1, not_p5, p12, p17, not_p12, p9, not_p4}
{p24, p8, p7, p25, p20, p1, not_p5, p12, p17, not_p12, p9, not_p4}
{p24, p8, p7, p20, p1, not_p5, not_p19, p12, not_p21, p9, not_p4}
{p24, p8, p7, p20, p1, not_p5, not_p19, p12, not_p12, p9, not_p4}
{p24, p8, not_p15, p7, p20, p1, not_p5, not_p19, not_p21, p9, not_p4}
{p24, p8, not_p15, p7, p20, p1, not_p5, not_p19, not_p12, p9, not_p4}
{p24, p8, not_p15, p7, p20, p1, not_p5, p17, not_p21, p9, not_p4, p21}
{p24, p8, not_p15, p7, not_p20, p20, p1, not_p5, p17, not_p21, p9, not_p4}
{p24, p8, not_p15, p7, p20, p1, not_p5, p17, not_p12, p9, not_p4, p21}
{p24, p8, not_p15, p7, not_p20, p20, p1, not_p5, p17, not_p12, p9, not_p4}
{p24, p8, p25, p20, p1, not_p5, p17, not_p21, not_p4, p21, p14}
{p24, p8, p25, p20, p1, not_p5, p17, not_p12, not_p4, p21, p14}
{p24, p8, p20, p1, not_p5, not_p19, not_p21, not_p4, p21, p14}
{p24, p8, p20, p1, not_p5, not_p19, not_p12, not_p4, p21, p14}
{p24, p8, not_p20, p20, p1, not_p5, p17, not_p21, not_p4, p14}
{p24, p8, not_p20, p20, p1, not_p5, p17, not_p12, not_p4, p14}
{p24, p8, not_p20, p20, p1, not_p5, not_p19, not_p21, not_p4, p14}
{p24, p8, not_p20, p20, p1, not_p5, not_p19, not_p12, not_p4, p14}
{p24, p8, p20, p1, not_p5, not_p19, p12, not_p21, not_p24, not_p4, p14}
{p24, p8, p20, p1, not_p5, not_p19, p12, not_p12, not_p24, not_p4, p14}
{p24, p8, p20, p1, not_p5, not_p19, p12, not_p21, not_p2, not_p4, p14}
{p24, p8, p20, p1, not_p5, not_p19, p12, not_p12, not_p2, not_p4, p14}
{p24, p8, p25, p20, p1, not_p5, p12, p17, not_p21, not_p24, not_p4, p14}
{p24, p8, p25, p20, p1, not_p5, p12, p17, not_p12, not_p24, not_p4, p14}
{p24, p8, p25, p20, p1, not_p5, p12, p17, not_p21, not_p2, not_p4, p14}
{p24, p8, p25, p20, p1, not_p5, p12, p17, not_p12, not_p2, not_p4, p14}
{p24, p8, p7, p3, p1, not_p5, not_p19, p12, not_p21, p9, not_p4}
{p24, p8, not_p15, p7, p3, p1, not_p5, not_p19, not_p21, p9, not_p4}
{p24, p8, p3, p1, not_p5, not_p19, not_p21, not_p4, p21, p14}
{p24, p8, not_p20, p3, p1, not_p5, not_p19, not_p21, not_p4, p14}
{p24, p8, p3, p1, not_p5, not_p19, p12, not_p21, not_p24, not_p4, p14}
{p24, p8, p3, p1, not_p5, not_p19, p12, not_p21, not_p2, not_p4, p14}
{p24, p8, p7, p3, p1, not_p5, p12, p17, not_p21, p9, not_p4, p21}
{p24, p8, p7, not_p20, p3, p1, not_p5, p12, p17, not_p21, p9, not_p4}
{p24, p8, p7, p25, p3, p1, not_p5, p12, p17, not_p21, p9, not_p4}
{p24, p8, not_p15, p7, p3, p1, not_p5, p17, not_p21, p9, not_p4, p21}
{p24, p8, not_p15, p7, not_p20, p3, p1, not_p5, p17, not_p21, p9, not_p4}
{p24, p8, not_p20, p3, p1, not_p5, p17, not_p21, not_p4, p14}
{p24, p8, p25, p3, p1, not_p5, p17, not_p21, not_p4, p21, p14}
{p24, p8, p25, p3, p1, not_p5, p12, p17, not_p21, not_p24, not_p4, p14}
{p24, p8, p25, p3, p1, not_p5, p12, p17, not_p21, not_p2, not_p4, p14}
{p24, p11, not_p5, p12, p17, not_p21, not_p4, p14, not_p18}
{p24, p11, not_p5, p12, p17, not_p12, not_p4, p14, not_p18}
{p24, p11, p22, not_p5, p12, p17, not_p4, p14, not_p18}
{p24, p11, not_p5, p12, p17, not_p21, not_p4, not_p3, not_p18}
{p24, p11, not_p5, p12, p17, not_p12, not_p4, not_p3, not_p18}
{p24, p11, p22, not_p5, p12, p17, not_p4, not_p3, not_p18}
{p24, p11, not_p5, p12, p17, not_p21, not_p4, not_p25, not_p18}
{p24, p11, not_p5, p12, p17, not_p12, not_p4, not_p25, not_p18}
{p24, p11, p22, not_p5, p12, p17, not_p4, not_p25, not_p18}
{p24, p11, not_p5, p12, p17, not_p21, p9, not_p4, not_p18}
{p24, p11, not_p5, p12, p17, not_p12, p9, not_p4, not_p18}
{p24, p11, p22, not_p5, p12, p17, p9, not_p4, not_p18}
{p24, p11, not_p5, not_p16, p17, not_p21, not_p4, p21, not_p3, not_p18}
{p24, p11, not_p5, not_p16, p17, not_p12, not_p4, p21, not_p3, not_p18}
{p24, p11, p22, not_p5, not_p16, p17, not_p4, p21, not_p3, not_p18}
{p24, p2, p11, not_p5, p17, not_p21, not_p4, p21, not_p3, not_p18}
{p24, p2, p11, not_p5, p17, not_p12, not_p4, p21, not_p3, not_p18}
{p24, p2, p11, p22, not_p5, p17, not_p4, p21, not_p3, not_p18}
{p24, p11, not_p20, not_p5, not_p16, p17, not_p21, not_p4, not_p3, not_p18}
{p24, p11, not_p20, not_p5, not_p16, p17, not_p12, not_p4, not_p3, not_p18}
{p24, p11, not_p20, p22, not_p5, not_p16, p17, not_p4, not_p3, not_p18}
{p24, p2, p11, not_p20, not_p5, p17, not_p21, not_p4, not_p3, not_p18}
{p24, p2, p11, not_p20, not_p5, p17, not_p12, not_p4, not_p3, not_p18}
{p24, p2, p11, not_p20, p22, not_p5, p17, not_p4, not_p3, not_p18}
{p24, p11, not_p5, not_p16, p17, not_p21, not_p4, p21, not_p25, not_p18}
{p24, p11, not_p5, not_p16, p17, not_p12, not_p4, p21, not_p25, not_p18}
{p24, p11, p22, not_p5, not_p16, p17, not_p4, p21, not_p25, not_p18}
{p24, p2, p11, not_p5, p17, not_p21, not_p4, p21, not_p25, not_p18}
{p24, p2, p11, not_p5, p17, not_p12, not_p4, p21, not_p25, not_p18}
{p24, p2, p11, p22, not_p5, p17, not_p4, p21, not_p25, not_p18}
{p24, p11, not_p20, not_p5, not_p16, p17, not_p21, not_p4, not_p25, not_p18}
{p24, p11, not_p20, not_p5, not_p16, p17, not_p12, not_p4, not_p25, not_p18}
{p24, p11, not_p20, p22, not_p5, not_p16, p17, not_p4, not_p25, not_p18}
{p24, p2, p11, not_p20, not_p5, p17, not_p21, not_p4, not_p25, not_p18}
{p24, p2, p11, not_p20, not_p5, p17, not_p12, not_p4, not_p25, not_p18}
{p24, p2, p11, not_p20, p22, not_p5, p17, not_p4, not_p25, not_p18}
{p24, p11, not_p5, not_p16, p17, not_p21, not_p4, p21, p14, not_p18}
{p24, p11, not_p5, not_p16, p17, not_p12, not_p4, p21, p14, not_p18}
{p24, p11, p22, not_p5, not_p16, p17, not_p4, p21, p14, not_p18}
{p24, p2, p11, not_p5, p17, not_p21, not_p4, p21, p14, not_p18}
{p24, p2, p11, not_p5, p17, not_p12, not_p4, p21, p14, not_p18}
{p24, p2, p11, p22, not_p5, p17, not_p4, p21, p14, not_p18}
{p24, p11, not_p5, not_p16, p17, not_p21, p9, not_p4, p21, not_p18}
{p24, p11, not_p5, not_p16, p17, not_p12, p9, not_p4, p21, not_p18}
{p24, p11, p22, not_p5, not_p16, p17, p9, not_p4, p21, not_p18}
{p24, p2, p11, not_p5, p17, not_p21, p9, not_p4, p21, not_p18}
{p24, p2, p11, not_p5, p17, not_p12, p9, not_p4, p21, not_p18}
{p24, p2, p11, p22, not_p5, p17, p9, not_p4, p21, not_p18}
{p24, p11, not_p20, not_p5, not_p16, p17, not_p21, not_p4, p14, not_p18}
{p24, p11, not_p20, not_p5, not_p16, p17, not_p12, not_p4, p14, not_p18}
{p24, p11, not_p20, p22, not_p5, not_p16, p17, not_p4, p14, not_p18}
{p24, p11, not_p20, not_p5, not_p16, p17, not_p21, p9, not_p4, not_p18}
{p24, p11, not_p20, not_p5, not_p16, p17, not_p12, p9, not_p4, not_p18}
{p24, p11, not_p20, p22, not_p5, not_p16, p17, p9, not_p4, not_p18}
{p24, p2, p11, not_p20, not_p5, p17, not_p21, not_p4, p14, not_p18}
{p24, p2, p11, not_p20, not_p5, p17, not_p12, not_p4, p14, not_p18}
{p24, p2, p11, not_p20, p22, not_p5, p17, not_p4, p14, not_p18}
{p24, p2, p11, not_p20, not_p5, p17, not_p21, p9, not_p4, not_p18}
{p24, p2, p11, not_p20, not_p5, p17, not_p12, p9, not_p4, not_p18}
{p24, p2, p11, not_p20, p22, not_p5, p17, p9, not_p4, not_p18}
{p24, p11, not_p5, p12, not_p8, not_p21, not_p4, p14, not_p18}
{p24, p11, not_p5, p12, not_p8, not_p12, not_p4, p14, not_p18}
{p24, p11, p22, not_p5, p12, not_p8, not_p4, p14, not_p18}
{p24, p11, not_p5, not_p19, p12, not_p21, not_p4, p14, not_p18}
{p24, p11, not_p5, not_p19, p12, not_p12, not_p4, p14, not_p18}
{p24, p11, p22, not_p5, not_p19, p12, not_p4, p14, not_p18}
{p24, p11, not_p5, p12, not_p21, not_p4, p14, p6, not_p18}
{p24, p11, not_p5, p12, not_p12, not_p4, p14, p6, not_p18}
{p24, p11, p22, not_p5, p12, not_p4, p14, p6, not_p18}
{p24, p11, not_p5, p12, not_p8, not_p21, not_p4, not_p3, not_p18}
{p24, p11, not_p5, p12, not_p8, not_p12, not_p4, not_p3, not_p18}
{p24, p11, p22, not_p5, p12, not_p8, not_p4, not_p3, not_p18}
{p24, p11, not_p5, not_p19, p12, not_p21, not_p4, not_p3, not_p18}
{p24, p11, not_p5, not_p19, p12, not_p12, not_p4, not_p3, not_p18}
{p24, p11, p22, not_p5, not_p19, p12, not_p4, not_p3, not_p18}
{p24, p11, not_p5, p12, not_p21, not_p4, p6, not_p3, not_p18}
{p24, p11, not_p5, p12, not_p12, not_p4, p6, not_p3, not_p18}
{p24, p11, p22, not_p5, p12, not_p4, p6, not_p3, not_p18}
{p24, p11, not_p5, p12, not_p8, not_p21, not_p4, not_p25, not_p18}
{p24, p11, not_p5, p12, not_p8, not_p12, not_p4, not_p25, not_p18}
{p24, p11, p22, not_p5, p12, not_p8, not_p4, not_p25, not_p18}
{p24, p11, not_p5, not_p19, p12, not_p21, not_p4, not_p25, not_p18}
{p24, p11, not_p5, not_p19, p12, not_p12, not_p4, not_p25, not_p18}
{p24, p11, p22, not_p5, not_p19, p12, not_p4, not_p25, not_p18}
{p24, p11, not_p5, p12, not_p21, not_p4, p6, not_p25, not_p18}
{p24, p11, not_p5, p12, not_p12, not_p4, p6, not_p25, not_p18}
{p24, p11, p22, not_p5, p12, not_p4, p6, not_p25, not_p18}
{p24, p11, not_p5, p12, not_p8, not_p21, p9, not_p4, not_p18}
{p24, p11, not_p5, p12, not_p8, not_p12, p9, not_p4, not_p18}
{p24, p11, p22, not_p5, p12, not_p8, p9, not_p4, not_p18}
{p24, p11, not_p5, not_p19, p12, not_p21, p9, not_p4, not_p18}
{p24, p11, not_p5, not_p19, p12, not_p12, p9, not_p4, not_p18}
{p24, p11, p22, not_p5, not_p19, p12, p9, not_p4, not_p18}
{p24, p11, not_p5, p12, not_p21, p9, not_p4, p6, not_p18}
{p24, p11, not_p5, p12, not_p12, p9, not_p4, p6, not_p18}
{p24, p11, p22, not_p5, p12, p9, not_p4, p6, not_p18}
{p24, p25, p11, not_p5, not_p16, not_p8, not_p21, not_p4, not_p3, not_p18}
{p24, p25, p11, not_p5, not_p16, not_p8, not_p12, not_p4, not_p3, not_p18}
{p24, p25, p11, p22, not_p5, not_p16, not_p8, not_p4, not_p3, not_p18}
{p24, p25, p11, not_p5, not_p16, not_p8, not_p21, not_p4, not_p25, not_p18}
{p24, p25, p11, not_p5, not_p16, not_p8, not_p12, not_p4, not_p25, not_p18}
{p24, p25, p11, p22, not_p5, not_p16, not_p8, not_p4, not_p25, not_p18}
{p24, p25, p11, not_p5, not_p16, not_p8, not_p21, not_p4, p21, p14, not_p18}
{p24, p25, p11, not_p5, not_p16, not_p8, not_p12, not_p4, p21, p14, not_p18}
{p24, p25, p11, p22, not_p5, not_p16, not_p8, not_p4, p21, p14, not_p18}
{p24, p25, p11, not_p20, not_p5, not_p16, not_p8, not_p21, not_p4, p14, not_p18}
{p24, p25, p11, not_p20, not_p5, not_p16, not_p8, not_p12, not_p4, p14, not_p18}
{p24, p25, p11, not_p20, p22, not_p5, not_p16, not_p8, not_p4, p14, not_p18}
{p24, p25, p11, not_p5, not_p16, not_p8, not_p21, p9, not_p4, not_p18}
{p24, p25, p11, not_p5, not_p16, not_p8, not_p12, p9, not_p4, not_p18}
{p24, p25, p11, p22, not_p5, not_p16, not_p8, p9, not_p4, not_p18}
{p24, p2, p25, p11, not_p5, not_p8, not_p21, not_p4, not_p3, not_p18}
{p24, p2, p25, p11, not_p5, not_p8, not_p12, not_p4, not_p3, not_p18}
{p24, p2, p25, p11, p22, not_p5, not_p8, not_p4, not_p3, not_p18}
{p24, p2, p25, p11, not_p5, not_p8, not_p21, not_p4, not_p25, not_p18}
{p24, p2, p25, p11, not_p5, not_p8, not_p12, not_p4, not_p25, not_p18}
{p24, p2, p25, p11, p22, not_p5, not_p8, not_p4, not_p25, not_p18}
{p24, p2, p25, p11, not_p5, not_p8, not_p21, not_p4, p21, p14, not_p18}
{p24, p2, p25, p11, not_p5, not_p8, not_p12, not_p4, p21, p14, not_p18}
{p24, p2, p25, p11, p22, not_p5, not_p8, not_p4, p21, p14, not_p18}
{p24, p2, p25, p11, not_p20, not_p5, not_p8, not_p21, not_p4, p14, not_p18}
{p24, p2, p25, p11, not_p20, not_p5, not_p8, not_p12, not_p4, p14, not_p18}
{p24, p2, p25, p11, not_p20, p22, not_p5, not_p8, not_p4, p14, not_p18}
{p24, p2, p25, p11, not_p5, not_p8, not_p21, p9, not_p4, not_p18}
{p24, p2, p25, p11, not_p5, not_p8, not_p12, p9, not_p4, not_p18}
{p24, p2, p25, p11, p22, not_p5, not_p8, p9, not_p4, not_p18}
{p24, p8, p11, not_p5, not_p16, not_p8, not_p21, not_p4, not_p3, not_p18}
{p24, p8, p11, not_p5, not_p16, not_p8, not_p12, not_p4, not_p3, not_p18}
{p24, p8, p11, p22, not_p5, not_p16, not_p8, not_p4, not_p3, not_p18}
{p24, p8, p11, not_p5, not_p16, not_p8, not_p21, not_p4, not_p25, not_p18}
{p24, p8, p11, not_p5, not_p16, not_p8, not_p12, not_p4, not_p25, not_p18}
{p24, p8, p11, p22, not_p5, not_p16, not_p8, not_p4, not_p25, not_p18}
{p24, p8, p11, not_p5, not_p16, not_p8, not_p21, not_p4, p21, p14, not_p18}
{p24, p8, p11, not_p5, not_p16, not_p8, not_p12, not_p4, p21, p14, not_p18}
{p24, p8, p11, p22, not_p5, not_p16, not_p8, not_p4, p21, p14, not_p18}
{p24, p8, p11, not_p20, p22, not_p5, not_p16, not_p8, not_p4, p14, not_p18}
{p24, p8, p11, not_p20, not_p5, not_p16, not_p8, not_p21, not_p4, p14, not_p18}
{p24, p8, p11, not_p20, not_p5, not_p16, not_p8, not_p12, not_p4, p14, not_p18}
{p24, p8, p11, not_p5, not_p16, not_p8, not_p21, p9, not_p4, not_p18}
{p24, p8, p11, not_p5, not_p16, not_p8, not_p12, p9, not_p4, not_p18}
{p24, p8, p11, p22, not_p5, not_p16, not_p8, p9, not_p4, not_p18}
{p24, p8, p2, p11, not_p5, not_p8, not_p21, not_p4, not_p3, not_p18}
{p24, p8, p2, p11, not_p5, not_p8, not_p12, not_p4, not_p3, not_p18}
{p24, p8, p2, p11, p22, not_p5, not_p8, not_p4, not_p3, not_p18}
{p24, p8, p2, p11, not_p5, not_p8, not_p21, not_p4, not_p25, not_p18}
{p24, p8, p2, p11, not_p5, not_p8, not_p12, not_p4, not_p25, not_p18}
{p24, p8, p2, p11, p22, not_p5, not_p8, not_p4, not_p25, not_p18}
{p24, p8, p2, p11, not_p5, not_p8, not_p21, not_p4, p21, p14, not_p18}
{p24, p8, p2, p11, not_p5, not_p8, not_p12, not_p4, p21, p14, not_p18}
{p24, p8, p2, p11, p22, not_p5, not_p8, not_p4, p21, p14, not_p18}
{p24, p8, p2, p11, not_p20, p22, not_p5, not_p8, not_p4, p14, not_p18}
{p24, p8, p2, p11, not_p20, not_p5, not_p8, not_p21, not_p4, p14, not_p18}
{p24, p8, p2, p11, not_p20, not_p5, not_p8, not_p12, not_p4, p14, not_p18}
{p24, p8, p2, p11, not_p5, not_p8, not_p21, p9, not_p4, not_p18}
{p24, p8, p2, p11, not_p5, not_p8, not_p12, p9, not_p4, not_p18}
{p24, p8, p2, p11, p22, not_p5, not_p8, p9, not_p4, not_p18}
{p24, p8, p11, not_p5, not_p19, not_p16, not_p21, not_p4, not_p3, not_p18}
{p24, p8, p11, not_p5, not_p19, not_p16, not_p12, not_p4, not_p3, not_p18}
{p24, p8, p11, p22, not_p5, not_p19, not_p16, not_p4, not_p3, not_p18}
{p24, p8, p2, p11, not_p5, not_p19, not_p21, not_p4, not_p3, not_p18}
{p24, p8, p2, p11, not_p5, not_p19, not_p12, not_p4, not_p3, not_p18}
{p24, p8, p2, p11, p22, not_p5, not_p19, not_p4, not_p3, not_p18}
{p24, p8, p11, not_p5, not_p16, not_p21, not_p4, p6, not_p3, not_p18}
{p24, p8, p11, not_p5, not_p16, not_p12, not_p4, p6, not_p3, not_p18}
{p24, p8, p11, p22, not_p5, not_p16, not_p4, p6, not_p3, not_p18}
{p24, p8, p2, p11, not_p5, not_p21, not_p4, p6, not_p3, not_p18}
{p24, p8, p2, p11, not_p5, not_p12, not_p4, p6, not_p3, not_p18}
{p24, p8, p2, p11, p22, not_p5, not_p4, p6, not_p3, not_p18}
{p24, p8, p11, not_p5, not_p19, not_p16, not_p21, not_p4, not_p25, not_p18}
{p24, p8, p11, not_p5, not_p19, not_p16, not_p12, not_p4, not_p25, not_p18}
{p24, p8, p11, p22, not_p5, not_p19, not_p16, not_p4, not_p25, not_p18}
{p24, p8, p11, not_p5, not_p19, not_p16, not_p21, not_p4, p21, p14, not_p18}
{p24, p8, p11, not_p5, not_p19, not_p16, not_p12, not_p4, p21, p14, not_p18}
{p24, p8, p11, p22, not_p5, not_p19, not_p16, not_p4, p21, p14, not_p18}
{p24, p8, p11, not_p20, p22, not_p5, not_p19, not_p16, not_p4, p14, not_p18}
{p24, p8, p11, not_p20, not_p5, not_p19, not_p16, not_p21, not_p4, p14, not_p18}
{p24, p8, p11, not_p20, not_p5, not_p19, not_p16, not_p12, not_p4, p14, not_p18}
{p24, p8, p11, not_p5, not_p19, not_p16, not_p21, p9, not_p4, not_p18}
{p24, p8, p11, not_p5, not_p19, not_p16, not_p12, p9, not_p4, not_p18}
{p24, p8, p11, p22, not_p5, not_p19, not_p16, p9, not_p4, not_p18}
{p24, p8, p2, p11, not_p5, not_p19, not_p21, not_p4, not_p25, not_p18}
{p24, p8, p2, p11, not_p5, not_p19, not_p12, not_p4, not_p25, not_p18}
{p24, p8, p2, p11, p22, not_p5, not_p19, not_p4, not_p25, not_p18}
{p24, p8, p2, p11, not_p5, not_p19, not_p21, not_p4, p21, p14, not_p18}
{p24, p8, p2, p11, not_p5, not_p19, not_p12, not_p4, p21, p14, not_p18}
{p24, p8, p2, p11, p22, not_p5, not_p19, not_p4, p21, p14, not_p18}
{p24, p8, p2, p11, not_p20, p22, not_p5, not_p19, not_p4, p14, not_p18}
{p24, p8, p2, p11, not_p20, not_p5, not_p19, not_p21, not_p4, p14, not_p18}
{p24, p8, p2, p11, not_p20, not_p5, not_p19, not_p12, not_p4, p14, not_p18}
{p24, p8, p2, p11, not_p5, not_p19, not_p21, p9, not_p4, not_p18}
{p24, p8, p2, p11, not_p5, not_p19, not_p12, p9, not_p4, not_p18}
{p24, p8, p2, p11, p22, not_p5, not_p19, p9, not_p4, not_p18}
{p24, p8, p11, not_p5, not_p16, not_p21, not_p4, p6, not_p25, not_p18}
{p24, p8, p11, not_p5, not_p16, not_p12, not_p4, p6, not_p25, not_p18}
{p24, p8, p11, p22, not_p5, not_p16, not_p4, p6, not_p25, not_p18}
{p24, p8, p11, not_p5, not_p16, not_p21, not_p4, p21, p14, p6, not_p18}
{p24, p8, p11, not_p5, not_p16, not_p12, not_p4, p21, p14, p6, not_p18}
{p24, p8, p11, p22, not_p5, not_p16, not_p4, p21, p14, p6, not_p18}
{p24, p8, p11, not_p20, p22, not_p5, not_p16, not_p4, p14, p6, not_p18}
{p24, p8, p11, not_p20, not_p5, not_p16, not_p21, not_p4, p14, p6, not_p18}
{p24, p8, p11, not_p20, not_p5, not_p16, not_p12, not_p4, p14, p6, not_p18}
{p24, p8, p11, not_p5, not_p16, not_p21, p9, not_p4, p6, not_p18}
{p24, p8, p11, not_p5, not_p16, not_p12, p9, not_p4, p6, not_p18}
{p24, p8, p11, p22, not_p5, not_p16, p9, not_p4, p6, not_p18}
{p24, p8, p2, p11, not_p5, not_p21, not_p4, p6, not_p25, not_p18}
{p24, p8, p2, p11, not_p5, not_p12, not_p4, p6, not_p25, not_p18}
{p24, p8, p2, p11, p22, not_p5, not_p4, p6, not_p25, not_p18}
{p24, p8, p2, p11, not_p5, not_p21, not_p4, p21, p14, p6, not_p18}
{p24, p8, p2, p11, not_p5, not_p12, not_p4, p21, p14, p6, not_p18}
{p24, p8, p2, p11, p22, not_p5, not_p4, p21, p14, p6, not_p18}
{p24, p8, p2, p11, not_p20, p22, not_p5, not_p4, p14, p6, not_p18}
{p24, p8, p2, p11, not_p20, not_p5, not_p21, not_p4, p14, p6, not_p18}
{p24, p8, p2, p11, not_p20, not_p5, not_p12, not_p4, p14, p6, not_p18}
{p24, p8, p2, p11, not_p5, not_p21, p9, not_p4, p6, not_p18}
{p24, p8, p2, p11, not_p5, not_p12, p9, not_p4, p6, not_p18}
{p24, p8, p2, p11, p22, not_p5, p9, not_p4, p6, not_p18}
{p24, p11, p1, p22, not_p5, p17, not_p4, p21, not_p3}
{p24, p11, not_p20, p1, p22, not_p5, p17, not_p4, not_p3}
{p24, p11, p1, p22, not_p5, p12, p17, not_p4, not_p3}
{p24, p11, p18, p1, not_p5, p17, not_p21, not_p4, p21, not_p3}
{p24, p11, p18, p1, not_p5, p17, not_p12, not_p4, p21, not_p3}
{p24, p11, p20, p1, not_p5, p17, not_p21, not_p4, p21, not_p3}
{p24, p11, p20, p1, not_p5, p17, not_p12, not_p4, p21, not_p3}
{p24, p11, p3, p1, not_p5, p17, not_p21, not_p4, p21, not_p3}
{p24, p11, not_p20, p18, p1, not_p5, p17, not_p21, not_p4, not_p3}
{p24, p11, not_p20, p18, p1, not_p5, p17, not_p12, not_p4, not_p3}
{p24, p11, not_p20, p20, p1, not_p5, p17, not_p21, not_p4, not_p3}
{p24, p11, not_p20, p20, p1, not_p5, p17, not_p12, not_p4, not_p3}
{p24, p11, not_p20, p3, p1, not_p5, p17, not_p21, not_p4, not_p3}
{p24, p11, p18, p1, not_p5, p12, p17, not_p21, not_p4, not_p3}
{p24, p11, p18, p1, not_p5, p12, p17, not_p12, not_p4, not_p3}
{p24, p11, p20, p1, not_p5, p12, p17, not_p21, not_p4, not_p3}
{p24, p11, p20, p1, not_p5, p12, p17, not_p12, not_p4, not_p3}
{p24, p11, p3, p1, not_p5, p12, p17, not_p21, not_p4, not_p3}
{p24, p11, p1, p22, not_p5, p17, not_p4, p21, not_p25}
{p24, p11, not_p20, p1, p22, not_p5, p17, not_p4, not_p25}
{p24, p11, p1, p22, not_p5, p12, p17, not_p4, not_p25}
{p24, p11, p18, p1, not_p5, p17, not_p21, not_p4, p21, not_p25}
{p24, p11, p18, p1, not_p5, p17, not_p12, not_p4, p21, not_p25}
{p24, p11, p20, p1, not_p5, p17, not_p21, not_p4, p21, not_p25}
{p24, p11, p20, p1, not_p5, p17, not_p12, not_p4, p21, not_p25}
{p24, p11, p3, p1, not_p5, p17, not_p21, not_p4, p21, not_p25}
{p24, p11, not_p20, p18, p1, not_p5, p17, not_p21, not_p4, not_p25}
{p24, p11, not_p20, p18, p1, not_p5, p17, not_p12, not_p4, not_p25}
{p24, p11, not_p20, p20, p1, not_p5, p17, not_p21, not_p4, not_p25}
{p24, p11, not_p20, p20, p1, not_p5, p17, not_p12, not_p4, not_p25}
{p24, p11, not_p20, p3, p1, not_p5, p17, not_p21, not_p4, not_p25}
{p24, p11, p18, p1, not_p5, p12, p17, not_p21, not_p4, not_p25}
{p24, p11, p18, p1, not_p5, p12, p17, not_p12, not_p4, not_p25}
{p24, p11, p20, p1, not_p5, p12, p17, not_p21, not_p4, not_p25}
{p24, p11, p20, p1, not_p5, p12, p17, not_p12, not_p4, not_p25}
{p24, p11, p3, p1, not_p5, p12, p17, not_p21, not_p4, not_p25}
{p24, p11, p18, p1, not_p5, p17, not_p21, p9, not_p4, p21}
{p24, p11, p18, p1, not_p5, p17, not_p12, p9, not_p4, p21}
{p24, p11, p18, p1, p22, not_p5, p17, p9, not_p4, p21}
{p24, p11, not_p20, p18, p1, not_p5, p17, not_p21, p9, not_p4}
{p24, p11, not_p20, p18, p1, not_p5, p17, not_p12, p9, not_p4}
{p24, p11, not_p20, p18, p1, p22, not_p5, p17, p9, not_p4}
{p24, p11, p18, p1, not_p5, p12, p17, not_p21, p9, not_p4}
{p24, p11, p18, p1, not_p5, p12, p17, not_p12, p9, not_p4}
{p24, p11, p18, p1, p22, not_p5, p12, p17, p9, not_p4}
{p24, p7, p11, p20, p1, not_p5, p12, p17, not_p12, p9, not_p4}
{p24, p7, p11, p1, p22, not_p5, p12, p17, p9, not_p4}
{p24, p7, p11, p20, p1, not_p5, p12, p17, not_p21, p9, not_p4}
{p24, p7, p11, p3, p1, not_p5, p12, p17, not_p21, p9, not_p4}
{p24, not_p15, p11, p20, p1, not_p5, p17, not_p12, p9, not_p4, p21, p6}
{p24, not_p15, p11, not_p20, p20, p1, not_p5, p17, not_p12, p9, not_p4, p6}
{p24, not_p15, p11, p20, p1, not_p5, p17, not_p21, p9, not_p4, p21, p6}
{p24, not_p15, p11, p3, p1, not_p5, p17, not_p21, p9, not_p4, p21, p6}
{p24, not_p15, p11, p1, p22, not_p5, p17, p9, not_p4, p21, p6}
{p24, not_p15, p11, not_p20, p1, p22, not_p5, p17, p9, not_p4, p6}
{p24, not_p15, p11, not_p20, p20, p1, not_p5, p17, not_p21, p9, not_p4, p6}
{p24, not_p15, p11, not_p20, p3, p1, not_p5, p17, not_p21, p9, not_p4, p6}
{p24, not_p15, p7, p11, p1, p22, not_p5, p17, p9, not_p4, p21}
{p24, not_p15, p7, p11, not_p20, p1, p22, not_p5, p17, p9, not_p4}
{p24, not_p15, p7, p11, p3, p1, not_p5, p17, not_p21, p9, not_p4, p21}
{p24, not_p15, p7, p11, not_p20, p3, p1, not_p5, p17, not_p21, p9, not_p4}
{p24, not_p15, p7, p11, p20, p1, not_p5, p17, not_p12, p9, not_p4, p21}
{p24, not_p15, p7, p11, not_p20, p20, p1, not_p5, p17, not_p12, p9, not_p4}
{p24, not_p15, p7, p11, p20, p1, not_p5, p17, not_p21, p9, not_p4, p21}
{p24, not_p15, p7, p11, not_p20, p20, p1, not_p5, p17, not_p21, p9, not_p4}
{p24, p11, p1, p22, not_p5, p17, not_p4, p21, p14}
{p24, p11, not_p20, p1, p22, not_p5, p17, not_p4, p14}
{p24, p11, p1, p22, not_p5, p12, p17, not_p4, p14}
{p24, p11, p18, p1, not_p5, p17, not_p21, not_p4, p21, p14}
{p24, p11, p18, p1, not_p5, p17, not_p12, not_p4, p21, p14}
{p24, p11, p20, p1, not_p5, p17, not_p21, not_p4, p21, p14}
{p24, p11, p20, p1, not_p5, p17, not_p12, not_p4, p21, p14}
{p24, p11, p3, p1, not_p5, p17, not_p21, not_p4, p21, p14}
{p24, p11, not_p20, p18, p1, not_p5, p17, not_p21, not_p4, p14}
{p24, p11, not_p20, p18, p1, not_p5, p17, not_p12, not_p4, p14}
{p24, p11, not_p20, p20, p1, not_p5, p17, not_p21, not_p4, p14}
{p24, p11, not_p20, p20, p1, not_p5, p17, not_p12, not_p4, p14}
{p24, p11, not_p20, p3, p1, not_p5, p17, not_p21, not_p4, p14}
{p24, p11, p18, p1, not_p5, p12, p17, not_p21, not_p4, p14}
{p24, p11, p18, p1, not_p5, p12, p17, not_p12, not_p4, p14}
{p24, p11, p20, p1, not_p5, p12, p17, not_p21, not_p4, p14}
{p24, p11, p20, p1, not_p5, p12, p17, not_p12, not_p4, p14}
{p24, p11, p3, p1, not_p5, p12, p17, not_p21, not_p4, p14}
{p24, p25, p11, p1, p22, not_p5, not_p8, not_p4, not_p3}
{p24, p25, p11, p1, p22, not_p5, not_p8, not_p4, not_p25}
{p24, p25, p11, p1, p22, not_p5, not_p8, not_p4, p21, p14}
{p24, p25, p11, not_p20, p1, p22, not_p5, not_p8, not_p4, p14}
{p24, not_p15, p25, p11, p1, p22, not_p5, not_p8, p9, not_p4, p6}
{p24, p25, p11, p18, p1, p22, not_p5, not_p8, p9, not_p4}
{p24, not_p15, p7, p25, p11, p1, p22, not_p5, not_p8, p9, not_p4}
{p24, p25, p11, p18, p1, not_p5, not_p8, not_p21, not_p4, not_p3}
{p24, p25, p11, p18, p1, not_p5, not_p8, not_p12, not_p4, not_p3}
{p24, p25, p11, p20, p1, not_p5, not_p8, not_p21, not_p4, not_p3}
{p24, p25, p11, p20, p1, not_p5, not_p8, not_p12, not_p4, not_p3}
{p24, p25, p11, p3, p1, not_p5, not_p8, not_p21, not_p4, not_p3}
{p24, p25, p11, p18, p1, not_p5, not_p8, not_p21, not_p4, not_p25}
{p24, p25, p11, p18, p1, not_p5, not_p8, not_p12, not_p4, not_p25}
{p24, p25, p11, p20, p1, not_p5, not_p8, not_p21, not_p4, not_p25}
{p24, p25, p11, p20, p1, not_p5, not_p8, not_p12, not_p4, not_p25}
{p24, p25, p11, p3, p1, not_p5, not_p8, not_p21, not_p4, not_p25}
{p24, p25, p11, p18, p1, not_p5, not_p8, not_p21, not_p4, p21, p14}
{p24, p25, p11, p18, p1, not_p5, not_p8, not_p12, not_p4, p21, p14}
{p24, p25, p11, not_p20, p18, p1, not_p5, not_p8, not_p21, not_p4, p14}
{p24, p25, p11, not_p20, p18, p1, not_p5, not_p8, not_p12, not_p4, p14}
{p24, p25, p11, p20, p1, not_p5, not_p8, not_p21, not_p4, p21, p14}
{p24, p25, p11, p20, p1, not_p5, not_p8, not_p12, not_p4, p21, p14}
{p24, p25, p11, not_p20, p20, p1, not_p5, not_p8, not_p21, not_p4, p14}
{p24, p25, p11, not_p20, p20, p1, not_p5, not_p8, not_p12, not_p4, p14}
{p24, p25, p11, p3, p1, not_p5, not_p8, not_p21, not_p4, p21, p14}
{p24, p25, p11, not_p20, p3, p1, not_p5, not_p8, not_p21, not_p4, p14}
{p24, not_p15, p25, p11, p20, p1, not_p5, not_p8, not_p12, p9, not_p4, p6}
{p24, not_p15, p25, p11, p20, p1, not_p5, not_p8, not_p21, p9, not_p4, p6}
{p24, not_p15, p25, p11, p3, p1, not_p5, not_p8, not_p21, p9, not_p4, p6}
{p24, p25, p11, p18, p1, not_p5, not_p8, not_p21, p9, not_p4}
{p24, p25, p11, p18, p1, not_p5, not_p8, not_p12, p9, not_p4}
{p24, not_p15, p7, p25, p11, p20, p1, not_p5, not_p8, not_p21, p9, not_p4}
{p24, not_p15, p7, p25, p11, p20, p1, not_p5, not_p8, not_p12, p9, not_p4}
{p24, not_p15, p7, p25, p11, p3, p1, not_p5, not_p8, not_p21, p9, not_p4}
{p24, p11, p1, p22, not_p5, p12, not_p8, not_p4, p14}
{p24, p11, p18, p1, not_p5, p12, not_p8, not_p21, not_p4, p14}
{p24, p11, p20, p1, not_p5, p12, not_p8, not_p21, not_p4, p14}
{p24, p11, p3, p1, not_p5, p12, not_p8, not_p21, not_p4, p14}
{p24, p11, p18, p1, not_p5, p12, not_p8, not_p12, not_p4, p14}
{p24, p11, p20, p1, not_p5, p12, not_p8, not_p12, not_p4, p14}
{p24, p11, p1, p22, not_p5, p12, not_p8, not_p4, not_p3}
{p24, p11, p1, p22, not_p5, p12, not_p8, not_p4, not_p25}
{p24, p11, p18, p1, p22, not_p5, p12, not_p8, p9, not_p4}
{p24, p7, p11, p1, p22, not_p5, p12, not_p8, p9, not_p4}
{p24, p11, p18, p1, not_p5, p12, not_p8, not_p21, not_p4, not_p3}
{p24, p11, p18, p1, not_p5, p12, not_p8, not_p12, not_p4, not_p3}
{p24, p11, p20, p1, not_p5, p12, not_p8, not_p21, not_p4, not_p3}
{p24, p11, p20, p1, not_p5, p12, not_p8, not_p12, not_p4, not_p3}
{p24, p11, p3, p1, not_p5, p12, not_p8, not_p21, not_p4, not_p3}
{p24, p11, p18, p1, not_p5, p12, not_p8, not_p21, not_p4, not_p25}
{p24, p11, p18, p1, not_p5, p12, not_p8, not_p12, not_p4, not_p25}
{p24, p11, p20, p1, not_p5, p12, not_p8, not_p21, not_p4, not_p25}
{p24, p11, p20, p1, not_p5, p12, not_p8, not_p12, not_p4, not_p25}
{p24, p11, p3, p1, not_p5, p12, not_p8, not_p21, not_p4, not_p25}
{p24, p11, p18, p1, not_p5, p12, not_p8, not_p21, p9, not_p4}
{p24, p11, p18, p1, not_p5, p12, not_p8, not_p12, p9, not_p4}
{p24, p7, p11, p20, p1, not_p5, p12, not_p8, not_p21, p9, not_p4}
{p24, p7, p11, p20, p1, not_p5, p12, not_p8, not_p12, p9, not_p4}
{p24, p7, p11, p3, p1, not_p5, p12, not_p8, not_p21, p9, not_p4}
{p24, p11, p1, p22, not_p5, not_p19, p12, not_p4, p14}
{p24, p11, p18, p1, not_p5, not_p19, p12, not_p21, not_p4, p14}
{p24, p11, p20, p1, not_p5, not_p19, p12, not_p21, not_p4, p14}
{p24, p11, p3, p1, not_p5, not_p19, p12, not_p21, not_p4, p14}
{p24, p11, p18, p1, not_p5, not_p19, p12, not_p12, not_p4, p14}
{p24, p11, p20, p1, not_p5, not_p19, p12, not_p12, not_p4, p14}
{p24, p11, p1, p22, not_p5, not_p19, p12, not_p4, not_p3}
{p24, p11, p1, p22, not_p5, not_p19, p12, not_p4, not_p25}
{p24, p11, p18, p1, p22, not_p5, not_p19, p12, p9, not_p4}
{p24, p7, p11, p1, p22, not_p5, not_p19, p12, p9, not_p4}
{p24, p11, p18, p1, not_p5, not_p19, p12, not_p21, not_p4, not_p3}
{p24, p11, p18, p1, not_p5, not_p19, p12, not_p12, not_p4, not_p3}
{p24, p11, p20, p1, not_p5, not_p19, p12, not_p21, not_p4, not_p3}
{p24, p11, p20, p1, not_p5, not_p19, p12, not_p12, not_p4, not_p3}
{p24, p11, p3, p1, not_p5, not_p19, p12, not_p21, not_p4, not_p3}
{p24, p11, p18, p1, not_p5, not_p19, p12, not_p21, not_p4, not_p25}
{p24, p11, p18, p1, not_p5, not_p19, p12, not_p12, not_p4, not_p25}
{p24, p11, p20, p1, not_p5, not_p19, p12, not_p21, not_p4, not_p25}
{p24, p11, p20, p1, not_p5, not_p19, p12, not_p12, not_p4, not_p25}
{p24, p11, p3, p1, not_p5, not_p19, p12, not_p21, not_p4, not_p25}
{p24, p11, p18, p1, not_p5, not_p19, p12, not_p21, p9, not_p4}
{p24, p11, p18, p1, not_p5, not_p19, p12, not_p12, p9, not_p4}
{p24, p7, p11, p20, p1, not_p5, not_p19, p12, not_p21, p9, not_p4}
{p24, p7, p11, p20, p1, not_p5, not_p19, p12, not_p12, p9, not_p4}
{p24, p7, p11, p3, p1, not_p5, not_p19, p12, not_p21, p9, not_p4}
{p24, p11, p1, p22, not_p5, p12, not_p4, p14, p6}
{p24, p11, p1, p22, not_p5, p12, p9, not_p4, p6}
{p24, p11, p1, p22, not_p5, p12, not_p4, p6, not_p3}
{p24, p11, p1, p22, not_p5, p12, not_p4, p6, not_p25}
{p24, p11, p18, p1, not_p5, p12, not_p21, not_p4, p14, p6}
{p24, p11, p18, p1, not_p5, p12, not_p21, p9, not_p4, p6}
{p24, p11, p18, p1, not_p5, p12, not_p21, not_p4, p6, not_p3}
{p24, p11, p18, p1, not_p5, p12, not_p21, not_p4, p6, not_p25}
{p24, p11, p18, p1, not_p5, p12, not_p12, not_p4, p14, p6}
{p24, p11, p18, p1, not_p5, p12, not_p12, p9, not_p4, p6}
{p24, p11, p18, p1, not_p5, p12, not_p12, not_p4, p6, not_p3}
{p24, p11, p18, p1, not_p5, p12, not_p12, not_p4, p6, not_p25}
{p24, p11, p20, p1, not_p5, p12, not_p21, not_p4, p14, p6}
{p24, p11, p20, p1, not_p5, p12, not_p21, p9, not_p4, p6}
{p24, p11, p20, p1, not_p5, p12, not_p21, not_p4, p6, not_p3}
{p24, p11, p20, p1, not_p5, p12, not_p21, not_p4, p6, not_p25}
{p24, p11, p20, p1, not_p5, p12, not_p12, not_p4, p14, p6}
{p24, p11, p20, p1, not_p5, p12, not_p12, p9, not_p4, p6}
{p24, p11, p20, p1, not_p5, p12, not_p12, not_p4, p6, not_p3}
{p24, p11, p20, p1, not_p5, p12, not_p12, not_p4, p6, not_p25}
{p24, p11, p3, p1, not_p5, p12, not_p21, not_p4, p14, p6}
{p24, p11, p3, p1, not_p5, p12, not_p21, p9, not_p4, p6}
{p24, p11, p3, p1, not_p5, p12, not_p21, not_p4, p6, not_p3}
{p24, p11, p3, p1, not_p5, p12, not_p21, not_p4, p6, not_p25}
{p24, p11, p22, not_p16, p17, not_p4, p5, p14}
{p24, p11, p22, not_p16, p17, not_p4, p5, not_p3}
{p24, p11, p22, not_p16, p17, not_p4, p5, not_p25}
{p24, p11, p22, not_p16, p17, p9, not_p4, p5}
{p24, p8, p22, not_p16, p17, not_p4, p5, p14}
{p24, p8, p22, not_p16, p17, not_p4, p5, not_p3}
{p24, p8, p22, not_p16, p17, not_p4, p5, not_p25}
{p24, p8, p22, not_p16, p17, p9, not_p4, p5}
{p24, p2, p11, p22, p17, not_p4, p5, p14}
{p24, p2, p11, p22, p17, not_p4, p5, not_p3}
{p24, p2, p11, p22, p17, not_p4, p5, not_p25}
{p24, p2, p11, p22, p17, p9, not_p4, p5}
{p24, p8, p2, p22, p17, not_p4, p5, p14}
{p24, p8, p2, p22, p17, not_p4, p5, not_p3}
{p24, p8, p2, p22, p17, not_p4, p5, not_p25}
{p24, p8, p2, p22, p17, p9, not_p4, p5}
{p24, p11, p1, p22, p17, not_p4, p5, not_p3}
{p24, p11, p1, p22, p17, not_p4, p5, not_p25}
{p24, p11, p1, p22, p17, not_p4, p5, p14}
{p24, p11, p18, p1, p22, p17, p9, not_p4, p5}
{p24, not_p15, p11, p1, p22, p17, p9, not_p4, p5, p6}
{p24, not_p15, p7, p11, p1, p22, p17, p9, not_p4, p5}
{p24, p8, p1, p22, p17, not_p4, p5, not_p3}
{p24, p8, p1, p22, p17, not_p4, p5, not_p25}
{p24, p8, p1, p22, p17, not_p4, p5, p14}
{p24, p8, p18, p1, p22, p17, p9, not_p4, p5}
{p24, p8, not_p15, p7, p1, p22, p17, p9, not_p4, p5}
{p24, p11, p18, p1, not_p19, p17, not_p21, not_p4, p5, not_p3}
{p24, p11, p20, p1, not_p19, p17, not_p21, not_p4, p5, not_p3}
{p24, p11, not_p19, not_p16, p17, not_p21, not_p4, p5, not_p3}
{p24, p2, p11, not_p19, p17, not_p21, not_p4, p5, not_p3}
{p24, p11, p18, p1, not_p19, p17, not_p21, not_p4, p5, not_p25}
{p24, p11, p20, p1, not_p19, p17, not_p21, not_p4, p5, not_p25}
{p24, p11, not_p19, not_p16, p17, not_p21, not_p4, p5, not_p25}
{p24, p2, p11, not_p19, p17, not_p21, not_p4, p5, not_p25}
{p24, p25, p11, not_p19, not_p16, p17, not_p21, not_p4, p5, p14}
{p24, p25, p11, p18, p1, not_p19, p17, not_p21, not_p4, p5, p14}
{p24, p25, p11, p20, p1, not_p19, p17, not_p21, not_p4, p5, p14}
{p24, p2, p25, p11, not_p19, p17, not_p21, not_p4, p5, p14}
{p24, p11, not_p19, not_p16, p17, not_p21, p9, not_p4, p5}
{p24, p11, p18, p1, not_p19, p17, not_p21, p9, not_p4, p5}
{p24, not_p15, p11, p20, p1, not_p19, p17, not_p21, p9, not_p4, p5, p6}
{p24, not_p15, p7, p11, p20, p1, not_p19, p17, not_p21, p9, not_p4, p5}
{p24, p2, p11, not_p19, p17, not_p21, p9, not_p4, p5}
{p24, p11, p18, p1, not_p5, p17, not_p21, not_p4, p5, not_p3}
{p24, p11, p20, p1, not_p5, p17, not_p21, not_p4, p5, not_p3}
{p24, p11, not_p5, not_p16, p17, not_p21, not_p4, p5, not_p3}
{p24, p2, p11, not_p5, p17, not_p21, not_p4, p5, not_p3}
{p24, p11, p18, p1, not_p5, p17, not_p21, not_p4, p5, not_p25}
{p24, p11, p20, p1, not_p5, p17, not_p21, not_p4, p5, not_p25}
{p24, p11, not_p5, not_p16, p17, not_p21, not_p4, p5, not_p25}
{p24, p2, p11, not_p5, p17, not_p21, not_p4, p5, not_p25}
{p24, p25, p11, not_p5, not_p16, p17, not_p21, not_p4, p5, p14}
{p24, p25, p11, p18, p1, not_p5, p17, not_p21, not_p4, p5, p14}
{p24, p25, p11, p20, p1, not_p5, p17, not_p21, not_p4, p5, p14}
{p24, p2, p25, p11, not_p5, p17, not_p21, not_p4, p5, p14}
{p24, p11, not_p5, not_p16, p17, not_p21, p9, not_p4, p5}
{p24, p11, p18, p1, not_p5, p17, not_p21, p9, not_p4, p5}
{p24, not_p15, p11, p20, p1, not_p5, p17, not_p21, p9, not_p4, p5, p6}
{p24, not_p15, p7, p11, p20, p1, not_p5, p17, not_p21, p9, not_p4, p5}
{p24, p2, p11, not_p5, p17, not_p21, p9, not_p4, p5}
{p24, p11, p18, p1, not_p19, p17, not_p12, not_p4, p5, not_p3}
{p24, p11, p20, p1, not_p19, p17, not_p12, not_p4, p5, not_p3}
{p24, p11, not_p19, not_p16, p17, not_p12, not_p4, p5, not_p3}
{p24, p2, p11, not_p19, p17, not_p12, not_p4, p5, not_p3}
{p24, p11, p18, p1, not_p19, p17, not_p12, not_p4, p5, not_p25}
{p24, p11, p20, p1, not_p19, p17, not_p12, not_p4, p5, not_p25}
{p24, p11, not_p19, not_p16, p17, not_p12, not_p4, p5, not_p25}
{p24, p2, p11, not_p19, p17, not_p12, not_p4, p5, not_p25}
{p24, p25, p11, not_p19, not_p16, p17, not_p12, not_p4, p5, p14}
{p24, p25, p11, p18, p1, not_p19, p17, not_p12, not_p4, p5, p14}
{p24, p25, p11, p20, p1, not_p19, p17, not_p12, not_p4, p5, p14}
{p24, p2, p25, p11, not_p19, p17, not_p12, not_p4, p5, p14}
{p24, p11, not_p19, not_p16, p17, not_p12, p9, not_p4, p5}
{p24, p11, p18, p1, not_p19, p17, not_p12, p9, not_p4, p5}
{p24, not_p15, p11, p20, p1, not_p19, p17, not_p12, p9, not_p4, p5, p6}
{p24, not_p15, p7, p11, p20, p1, not_p19, p17, not_p12, p9, not_p4, p5}
{p24, p2, p11, not_p19, p17, not_p12, p9, not_p4, p5}
{p24, p11, p18, p1, not_p5, p17, not_p12, not_p4, p5, not_p3}
{p24, p11, p20, p1, not_p5, p17, not_p12, not_p4, p5, not_p3}
{p24, p11, not_p5, not_p16, p17, not_p12, not_p4, p5, not_p3}
{p24, p2, p11, not_p5, p17, not_p12, not_p4, p5, not_p3}
{p24, p11, p18, p1, not_p5, p17, not_p12, not_p4, p5, not_p25}
{p24, p11, p20, p1, not_p5, p17, not_p12, not_p4, p5, not_p25}
{p24, p11, not_p5, not_p16, p17, not_p12, not_p4, p5, not_p25}
{p24, p2, p11, not_p5, p17, not_p12, not_p4, p5, not_p25}
{p24, p25, p11, not_p5, not_p16, p17, not_p12, not_p4, p5, p14}
{p24, p25, p11, p18, p1, not_p5, p17, not_p12, not_p4, p5, p14}
{p24, p25, p11, p20, p1, not_p5, p17, not_p12, not_p4, p5, p14}
{p24, p2, p25, p11, not_p5, p17, not_p12, not_p4, p5, p14}
{p24, p11, not_p5, not_p16, p17, not_p12, p9, not_p4, p5}
{p24, p11, p18, p1, not_p5, p17, not_p12, p9, not_p4, p5}
{p24, not_p15, p11, p20, p1, not_p5, p17, not_p12, p9, not_p4, p5, p6}
{p24, not_p15, p7, p11, p20, p1, not_p5, p17, not_p12, p9, not_p4, p5}
{p24, p2, p11, not_p5, p17, not_p12, p9, not_p4, p5}
{p24, p8, p18, p1, not_p5, p17, not_p21, not_p4, p5, p21, not_p3}
{p24, p8, p25, p18, p1, not_p5, p17, not_p21, not_p4, p5, not_p3}
{p24, p8, p18, p1, not_p5, p17, not_p12, not_p4, p5, p21, not_p3}
{p24, p8, p25, p18, p1, not_p5, p17, not_p12, not_p4, p5, not_p3}
{p24, p8, p20, p1, not_p5, p17, not_p21, not_p4, p5, p21, not_p3}
{p24, p8, p25, p20, p1, not_p5, p17, not_p21, not_p4, p5, not_p3}
{p24, p8, p20, p1, not_p5, p17, not_p12, not_p4, p5, p21, not_p3}
{p24, p8, p25, p20, p1, not_p5, p17, not_p12, not_p4, p5, not_p3}
{p24, p8, not_p5, not_p16, p17, not_p21, not_p4, p5, p21, not_p3}
{p24, p8, p25, not_p5, not_p16, p17, not_p21, not_p4, p5, not_p3}
{p24, p8, p2, not_p5, p17, not_p21, not_p4, p5, p21, not_p3}
{p24, p8, p2, p25, not_p5, p17, not_p21, not_p4, p5, not_p3}
{p24, p8, not_p5, not_p16, p17, not_p12, not_p4, p5, p21, not_p3}
{p24, p8, p2, not_p5, p17, not_p12, not_p4, p5, p21, not_p3}
{p24, p8, p25, not_p5, not_p16, p17, not_p12, not_p4, p5, not_p3}
{p24, p8, p2, p25, not_p5, p17, not_p12, not_p4, p5, not_p3}
{p24, p8, p18, p1, not_p5, p17, not_p21, not_p4, p5, p21, not_p25}
{p24, p8, p25, p18, p1, not_p5, p17, not_p21, not_p4, p5, not_p25}
{p24, p8, p18, p1, not_p5, p17, not_p12, not_p4, p5, p21, not_p25}
{p24, p8, p25, p18, p1, not_p5, p17, not_p12, not_p4, p5, not_p25}
{p24, p8, p20, p1, not_p5, p17, not_p21, not_p4, p5, p21, not_p25}
{p24, p8, p25, p20, p1, not_p5, p17, not_p21, not_p4, p5, not_p25}
{p24, p8, p20, p1, not_p5, p17, not_p12, not_p4, p5, p21, not_p25}
{p24, p8, p25, p20, p1, not_p5, p17, not_p12, not_p4, p5, not_p25}
{p24, p8, not_p5, not_p16, p17, not_p21, not_p4, p5, p21, not_p25}
{p24, p8, p25, not_p5, not_p16, p17, not_p21, not_p4, p5, not_p25}
{p24, p8, p2, not_p5, p17, not_p21, not_p4, p5, p21, not_p25}
{p24, p8, p2, p25, not_p5, p17, not_p21, not_p4, p5, not_p25}
{p24, p8, not_p5, not_p16, p17, not_p12, not_p4, p5, p21, not_p25}
{p24, p8, p2, not_p5, p17, not_p12, not_p4, p5, p21, not_p25}
{p24, p8, p25, not_p5, not_p16, p17, not_p12, not_p4, p5, not_p25}
{p24, p8, p2, p25, not_p5, p17, not_p12, not_p4, p5, not_p25}
{p24, p8, p18, p1, not_p5, p17, not_p21, not_p4, p5, p21, p14}
{p24, p8, p18, p1, not_p5, p17, not_p12, not_p4, p5, p21, p14}
{p24, p8, p20, p1, not_p5, p17, not_p21, not_p4, p5, p21, p14}
{p24, p8, p20, p1, not_p5, p17, not_p12, not_p4, p5, p21, p14}
{p24, p8, not_p5, not_p16, p17, not_p21, not_p4, p5, p21, p14}
{p24, p8, p2, not_p5, p17, not_p21, not_p4, p5, p21, p14}
{p24, p8, not_p5, not_p16, p17, not_p12, not_p4, p5, p21, p14}
{p24, p8, p2, not_p5, p17, not_p12, not_p4, p5, p21, p14}
{p24, p8, p25, p18, p1, not_p5, p17, not_p21, not_p4, p5, p14}
{p24, p8, p25, p20, p1, not_p5, p17, not_p21, not_p4, p5, p14}
{p24, p8, p25, not_p5, not_p16, p17, not_p21, not_p4, p5, p14}
{p24, p8, p2, p25, not_p5, p17, not_p21, not_p4, p5, p14}
{p24, p8, p25, p18, p1, not_p5, p17, not_p12, not_p4, p5, p14}
{p24, p8, p25, p20, p1, not_p5, p17, not_p12, not_p4, p5, p14}
{p24, p8, p25, not_p5, not_p16, p17, not_p12, not_p4, p5, p14}
{p24, p8, p2, p25, not_p5, p17, not_p12, not_p4, p5, p14}
{p24, p8, not_p5, not_p16, p17, not_p21, p9, not_p4, p5, p21}
{p24, p8, p18, p1, not_p5, p17, not_p21, p9, not_p4, p5, p21}
{p24, p8, p2, not_p5, p17, not_p21, p9, not_p4, p5, p21}
{p24, p8, not_p5, not_p16, p17, not_p12, p9, not_p4, p5, p21}
{p24, p8, p18, p1, not_p5, p17, not_p12, p9, not_p4, p5, p21}
{p24, p8, p2, not_p5, p17, not_p12, p9, not_p4, p5, p21}
{p24, p8, p25, not_p5, not_p16, p17, not_p21, p9, not_p4, p5}
{p24, p8, p25, p18, p1, not_p5, p17, not_p21, p9, not_p4, p5}
{p24, p8, not_p15, p7, p25, p20, p1, not_p5, p17, not_p21, p9, not_p4, p5}
{p24, p8, p2, p25, not_p5, p17, not_p21, p9, not_p4, p5}
{p24, p8, p25, not_p5, not_p16, p17, not_p12, p9, not_p4, p5}
{p24, p8, p25, p18, p1, not_p5, p17, not_p12, p9, not_p4, p5}
{p24, p8, not_p15, p7, p25, p20, p1, not_p5, p17, not_p12, p9, not_p4, p5}
{p24, p8, p2, p25, not_p5, p17, not_p12, p9, not_p4, p5}
{p24, p8, p22, not_p16, not_p8, not_p4, p5, p14}
{p24, p8, p1, p22, not_p8, not_p4, p5, p14}
{p24, p8, p2, p22, not_p8, not_p4, p5, p14}
{p24, p25, p11, p22, not_p16, not_p8, not_p4, p5, p14}
{p24, p25, p11, p1, p22, not_p8, not_p4, p5, p14}
{p24, p2, p25, p11, p22, not_p8, not_p4, p5, p14}
{p24, p25, p11, not_p19, not_p16, not_p8, not_p21, not_p4, p5, p14}
{p24, p25, p11, p18, p1, not_p19, not_p8, not_p21, not_p4, p5, p14}
{p24, p25, p11, p20, p1, not_p19, not_p8, not_p21, not_p4, p5, p14}
{p24, p2, p25, p11, not_p19, not_p8, not_p21, not_p4, p5, p14}
{p24, p25, p11, not_p5, not_p16, not_p8, not_p21, not_p4, p5, p14}
{p24, p25, p11, p18, p1, not_p5, not_p8, not_p21, not_p4, p5, p14}
{p24, p25, p11, p20, p1, not_p5, not_p8, not_p21, not_p4, p5, p14}
{p24, p2, p25, p11, not_p5, not_p8, not_p21, not_p4, p5, p14}
{p24, p25, p11, not_p19, not_p16, not_p8, not_p12, not_p4, p5, p14}
{p24, p25, p11, p18, p1, not_p19, not_p8, not_p12, not_p4, p5, p14}
{p24, p25, p11, p20, p1, not_p19, not_p8, not_p12, not_p4, p5, p14}
{p24, p2, p25, p11, not_p19, not_p8, not_p12, not_p4, p5, p14}
{p24, p25, p11, not_p5, not_p16, not_p8, not_p12, not_p4, p5, p14}
{p24, p25, p11, p18, p1, not_p5, not_p8, not_p12, not_p4, p5, p14}
{p24, p25, p11, p20, p1, not_p5, not_p8, not_p12, not_p4, p5, p14}
{p24, p2, p25, p11, not_p5, not_p8, not_p12, not_p4, p5, p14}
{p24, p8, p25, not_p5, not_p16, not_p8, not_p12, not_p4, p5, p14}
{p24, p8, p25, p18, p1, not_p5, not_p8, not_p12, not_p4, p5, p14}
{p24, p8, p25, p20, p1, not_p5, not_p8, not_p12, not_p4, p5, p14}
{p24, p8, p2, p25, not_p5, not_p8, not_p12, not_p4, p5, p14}
{p24, p8, p25, not_p5, not_p16, not_p8, not_p21, not_p4, p5, p14}
{p24, p8, p25, p18, p1, not_p5, not_p8, not_p21, not_p4, p5, p14}
{p24, p8, p25, p20, p1, not_p5, not_p8, not_p21, not_p4, p5, p14}
{p24, p8, p2, p25, not_p5, not_p8, not_p21, not_p4, p5, p14}
{p24, p8, p22, not_p19, not_p16, not_p4, p5, p14}
{p24, p8, p1, p22, not_p19, not_p4, p5, p14}
{p24, p8, p2, p22, not_p19, not_p4, p5, p14}
{p24, p8, p25, not_p19, not_p16, not_p12, not_p4, p5, p14}
{p24, p8, p25, p18, p1, not_p19, not_p12, not_p4, p5, p14}
{p24, p8, p25, p20, p1, not_p19, not_p12, not_p4, p5, p14}
{p24, p8, p2, p25, not_p19, not_p12, not_p4, p5, p14}
{p24, p8, p25, not_p19, not_p16, not_p21, not_p4, p5, p14}
{p24, p8, p25, p18, p1, not_p19, not_p21, not_p4, p5, p14}
{p24, p8, p25, p20, p1, not_p19, not_p21, not_p4, p5, p14}
{p24, p8, p2, p25, not_p19, not_p21, not_p4, p5, p14}
{p24, p8, not_p15, p1, p22, not_p4, p5, p14, p6}
{p24, p8, p22, not_p16, not_p4, p5, p14, p6}
{p24, p8, p2, p22, not_p4, p5, p14, p6}
{p24, p8, not_p15, p25, p18, p1, not_p5, not_p12, not_p4, p5, p14, p6}
{p24, p8, not_p15, p25, p20, p1, not_p5, not_p12, not_p4, p5, p14, p6}
{p24, p8, p25, not_p5, not_p16, not_p12, not_p4, p5, p14, p6}
{p24, p8, p2, p25, not_p5, not_p12, not_p4, p5, p14, p6}
{p24, p8, not_p15, p25, p18, p1, not_p5, not_p21, not_p4, p5, p14, p6}
{p24, p8, not_p15, p25, p20, p1, not_p5, not_p21, not_p4, p5, p14, p6}
{p24, p8, p25, not_p5, not_p16, not_p21, not_p4, p5, p14, p6}
{p24, p8, p2, p25, not_p5, not_p21, not_p4, p5, p14, p6}
{p8, p2, p11, not_p5, not_p8, not_p22, not_p21, not_p4, not_p3}
{p8, p2, p11, not_p5, not_p8, not_p22, not_p12, not_p4, not_p3}
{p8, p2, p11, p22, not_p5, not_p8, not_p22, not_p4, not_p3}
{p8, p2, p11, not_p5, not_p19, not_p22, not_p21, not_p4, not_p3}
{p8, p2, p11, not_p5, not_p19, not_p22, not_p12, not_p4, not_p3}
{p8, p2, p11, p22, not_p5, not_p19, not_p22, not_p4, not_p3}
{p8, p2, p11, not_p5, not_p22, not_p21, not_p4, p6, not_p3}
{p8, p2, p11, not_p5, not_p22, not_p12, not_p4, p6, not_p3}
{p8, p2, p11, p22, not_p5, not_p22, not_p4, p6, not_p3}
{p8, p2, p11, not_p5, not_p8, not_p22, not_p21, not_p4, not_p25}
{p8, p2, p11, not_p5, not_p8, not_p22, not_p12, not_p4, not_p25}
{p8, p2, p11, p22, not_p5, not_p8, not_p22, not_p4, not_p25}
{p8, p2, p11, not_p5, not_p19, not_p22, not_p21, not_p4, not_p25}
{p8, p2, p11, not_p5, not_p19, not_p22, not_p12, not_p4, not_p25}
{p8, p2, p11, p22, not_p5, not_p19, not_p22, not_p4, not_p25}
{p8, p2, p11, not_p5, not_p22, not_p21, not_p4, p6, not_p25}
{p8, p2, p11, not_p5, not_p22, not_p12, not_p4, p6, not_p25}
{p8, p2, p11, p22, not_p5, not_p22, not_p4, p6, not_p25}
{p8, p2, p11, not_p5, not_p8, not_p22, not_p21, not_p4, p21, p14}
{p8, p2, p11, not_p5, not_p8, not_p22, not_p12, not_p4, p21, p14}
{p8, p2, p11, p22, not_p5, not_p8, not_p22, not_p4, p21, p14}
{p8, p2, p11, not_p20, p22, not_p5, not_p8, not_p22, not_p4, p14}
{p8, p2, p11, not_p20, not_p5, not_p8, not_p22, not_p21, not_p4, p14}
{p8, p2, p11, not_p20, not_p5, not_p8, not_p22, not_p12, not_p4, p14}
{p8, p2, p11, not_p5, not_p8, not_p22, not_p21, p9, not_p4}
{p8, p2, p11, not_p5, not_p8, not_p22, not_p12, p9, not_p4}
{p8, p2, p11, p22, not_p5, not_p8, not_p22, p9, not_p4}
{p8, p2, p11, not_p5, not_p19, not_p22, not_p21, not_p4, p21, p14}
{p8, p2, p11, not_p5, not_p19, not_p22, not_p12, not_p4, p21, p14}
{p8, p2, p11, p22, not_p5, not_p19, not_p22, not_p4, p21, p14}
{p8, p2, p11, not_p20, p22, not_p5, not_p19, not_p22, not_p4, p14}
{p8, p2, p11, not_p20, not_p5, not_p19, not_p22, not_p21, not_p4, p14}
{p8, p2, p11, not_p20, not_p5, not_p19, not_p22, not_p12, not_p4, p14}
{p8, p2, p11, not_p5, not_p19, not_p22, not_p21, p9, not_p4}
{p8, p2, p11, not_p5, not_p19, not_p22, not_p12, p9, not_p4}
{p8, p2, p11, p22, not_p5, not_p19, not_p22, p9, not_p4}
{p8, p2, p11, not_p5, not_p22, not_p21, not_p4, p21, p14, p6}
{p8, p2, p11, not_p5, not_p22, not_p12, not_p4, p21, p14, p6}
{p8, p2, p11, p22, not_p5, not_p22, not_p4, p21, p14, p6}
{p8, p2, p11, not_p20, p22, not_p5, not_p22, not_p4, p14, p6}
{p8, p2, p11, not_p20, not_p5, not_p22, not_p21, not_p4, p14, p6}
{p8, p2, p11, not_p20, not_p5, not_p22, not_p12, not_p4, p14, p6}
{p8, p2, p11, not_p5, not_p22, not_p21, p9, not_p4, p6}
{p8, p2, p11, not_p5, not_p22, not_p12, p9, not_p4, p6}
{p8, p2, p11, p22, not_p5, not_p22, p9, not_p4, p6}
{p8, p2, p3, not_p5, not_p8, not_p22, not_p21, not_p4, not_p3}
{p8, p2, p3, not_p5, not_p8, not_p22, not_p12, not_p4, not_p3}
{p8, p2, p3, p22, not_p5, not_p8, not_p22, not_p4, not_p3}
{p8, p2, p3, not_p5, not_p19, not_p22, not_p21, not_p4, not_p3}
{p8, p2, p3, not_p5, not_p19, not_p22, not_p12, not_p4, not_p3}
{p8, p2, p3, p22, not_p5, not_p19, not_p22, not_p4, not_p3}
{p8, p2, p3, not_p5, not_p22, not_p21, not_p4, p6, not_p3}
{p8, p2, p3, not_p5, not_p22, not_p12, not_p4, p6, not_p3}
{p8, p2, p3, p22, not_p5, not_p22, not_p4, p6, not_p3}
{p8, p2, not_p20, p3, not_p5, p17, not_p22, not_p21, not_p4, not_p3}
{p8, p2, p25, p3, not_p5, p12, p17, not_p22, not_p21, not_p4, not_p3}
{p8, p2, p25, p3, not_p5, p17, not_p22, not_p21, not_p4, p21, not_p3}
{p8, p2, not_p20, p3, not_p5, p17, not_p22, not_p12, not_p4, not_p3}
{p8, p2, not_p20, p3, p22, not_p5, p17, not_p22, not_p4, not_p3}
{p8, p2, p25, p3, not_p5, p12, p17, not_p22, not_p12, not_p4, not_p3}
{p8, p2, p25, p3, p22, not_p5, p12, p17, not_p22, not_p4, not_p3}
{p8, p2, p25, p3, p22, not_p5, p17, not_p22, not_p4, p21, not_p3}
{p8, p2, p25, p3, not_p5, p17, not_p22, not_p12, not_p4, p21, not_p3}
{p8, p2, p3, not_p5, not_p8, not_p22, not_p21, not_p4, not_p25}
{p8, p2, p3, not_p5, not_p8, not_p22, not_p12, not_p4, not_p25}
{p8, p2, p3, p22, not_p5, not_p8, not_p22, not_p4, not_p25}
{p8, p2, p3, not_p5, not_p19, not_p22, not_p21, not_p4, not_p25}
{p8, p2, p3, not_p5, not_p19, not_p22, not_p12, not_p4, not_p25}
{p8, p2, p3, p22, not_p5, not_p19, not_p22, not_p4, not_p25}
{p8, p2, p3, not_p5, not_p22, not_p21, not_p4, p6, not_p25}
{p8, p2, p3, not_p5, not_p22, not_p12, not_p4, p6, not_p25}
{p8, p2, p3, p22, not_p5, not_p22, not_p4, p6, not_p25}
{p8, p2, not_p20, p3, not_p5, p17, not_p22, not_p21, not_p4, not_p25}
{p8, p2, p25, p3, not_p5, p12, p17, not_p22, not_p21, not_p4, not_p25}
{p8, p2, p25, p3, not_p5, p17, not_p22, not_p21, not_p4, p21, not_p25}
{p8, p2, not_p20, p3, not_p5, p17, not_p22, not_p12, not_p4, not_p25}
{p8, p2, not_p20, p3, p22, not_p5, p17, not_p22, not_p4, not_p25}
{p8, p2, p25, p3, not_p5, p12, p17, not_p22, not_p12, not_p4, not_p25}
{p8, p2, p25, p3, p22, not_p5, p12, p17, not_p22, not_p4, not_p25}
{p8, p2, p25, p3, p22, not_p5, p17, not_p22, not_p4, p21, not_p25}
{p8, p2, p25, p3, not_p5, p17, not_p22, not_p12, not_p4, p21, not_p25}
{p8, p2, p3, not_p5, not_p8, not_p22, not_p21, not_p4, p21, p14}
{p8, p2, p3, not_p5, not_p8, not_p22, not_p12, not_p4, p21, p14}
{p8, p2, p3, p22, not_p5, not_p8, not_p22, not_p4, p21, p14}
{p8, p2, not_p20, p3, not_p5, not_p8, not_p22, not_p21, not_p4, p14}
{p8, p2, not_p20, p3, not_p5, not_p8, not_p22, not_p12, not_p4, p14}
{p8, p2, not_p20, p3, p22, not_p5, not_p8, not_p22, not_p4, p14}
{p8, p2, p3, p22, not_p5, p12, not_p8, not_p22, not_p4, p14}
{p8, p2, p3, not_p5, p12, not_p8, not_p22, not_p21, not_p4, p14}
{p8, p2, p3, not_p5, p12, not_p8, not_p22, not_p12, not_p4, p14}
{p8, p2, p3, not_p5, not_p8, not_p22, not_p21, p9, not_p4}
{p8, p2, p3, not_p5, not_p8, not_p22, not_p12, p9, not_p4}
{p8, p2, p3, p22, not_p5, not_p8, not_p22, p9, not_p4}
{p8, p2, p25, p3, not_p5, p17, not_p22, not_p21, not_p4, p21, p14}
{p8, p2, p25, p3, not_p5, p17, not_p22, not_p12, not_p4, p21, p14}
{p8, p2, p25, p3, p22, not_p5, p17, not_p22, not_p4, p21, p14}
{p8, p2, p3, not_p5, not_p19, not_p22, not_p21, not_p4, p21, p14}
{p8, p2, p3, not_p5, not_p19, not_p22, not_p12, not_p4, p21, p14}
{p8, p2, p3, p22, not_p5, not_p19, not_p22, not_p4, p21, p14}
{p8, p2, p3, not_p5, not_p22, not_p21, not_p4, p21, p14, p6}
{p8, p2, p3, not_p5, not_p22, not_p12, not_p4, p21, p14, p6}
{p8, p2, p3, p22, not_p5, not_p22, not_p4, p21, p14, p6}
{p8, p2, not_p20, p3, not_p5, p17, not_p22, not_p21, not_p4, p14}
{p8, p2, not_p20, p3, not_p5, p17, not_p22, not_p12, not_p4, p14}
{p8, p2, not_p20, p3, p22, not_p5, p17, not_p22, not_p4, p14}
{p8, p2, not_p20, p3, not_p5, not_p19, not_p22, not_p21, not_p4, p14}
{p8, p2, not_p20, p3, not_p5, not_p19, not_p22, not_p12, not_p4, p14}
{p8, p2, not_p20, p3, p22, not_p5, not_p19, not_p22, not_p4, p14}
{p8, p2, not_p20, p3, not_p5, not_p22, not_p21, not_p4, p14, p6}
{p8, p2, not_p20, p3, not_p5, not_p22, not_p12, not_p4, p14, p6}
{p8, p2, not_p20, p3, p22, not_p5, not_p22, not_p4, p14, p6}
{p8, p2, p25, p3, p22, not_p5, p12, p17, not_p22, not_p4, p14}
{p8, p2, p3, p22, not_p5, not_p19, p12, not_p22, not_p4, p14}
{p8, p2, p3, p22, not_p5, p12, not_p22, not_p4, p14, p6}
{p8, p2, p3, not_p5, not_p19, p12, not_p22, not_p21, not_p4, p14}
{p8, p2, p3, not_p5, not_p19, p12, not_p22, not_p12, not_p4, p14}
{p8, p2, p25, p3, not_p5, p12, p17, not_p22, not_p21, not_p4, p14}
{p8, p2, p25, p3, not_p5, p12, p17, not_p22, not_p12, not_p4, p14}
{p8, p2, p3, not_p5, p12, not_p22, not_p21, not_p4, p14, p6}
{p8, p2, p3, not_p5, p12, not_p22, not_p12, not_p4, p14, p6}
{p8, p2, p3, not_p5, not_p19, not_p22, not_p21, p9, not_p4}
{p8, p2, p3, not_p5, not_p19, not_p22, not_p12, p9, not_p4}
{p8, p2, p3, p22, not_p5, not_p19, not_p22, p9, not_p4}
{p8, p2, p3, not_p5, not_p22, not_p21, p9, not_p4, p6}
{p8, p2, p3, not_p5, not_p22, not_p12, p9, not_p4, p6}
{p8, p2, p3, p22, not_p5, not_p22, p9, not_p4, p6}
{p8, p2, not_p20, p3, not_p5, p17, not_p22, not_p21, p9, not_p4}
{p8, p2, not_p20, p3, not_p5, p17, not_p22, not_p12, p9, not_p4}
{p8, p2, not_p20, p3, p22, not_p5, p17, not_p22, p9, not_p4}
{p8, p2, p25, p3, not_p5, p12, p17, not_p22, not_p21, p9, not_p4}
{p8, p2, p25, p3, not_p5, p12, p17, not_p22, not_p12, p9, not_p4}
{p8, p2, p25, p3, p22, not_p5, p12, p17, not_p22, p9, not_p4}
{p8, p2, p25, p3, p22, not_p5, p17, not_p22, p9, not_p4, p21}
{p8, p2, p25, p3, not_p5, p17, not_p22, not_p21, p9, not_p4, p21}
{p8, p2, p25, p3, not_p5, p17, not_p22, not_p12, p9, not_p4, p21}
{p8, p2, p3, p22, p17, not_p22, not_p4, p5, p14}
{p8, p2, p3, p22, p17, not_p22, not_p4, p5, not_p3}
{p8, p2, p3, p22, p17, not_p22, not_p4, p5, not_p25}
{p8, p2, p3, p22, p17, not_p22, p9, not_p4, p5}
{p8, p2, p3, not_p5, p17, not_p22, not_p21, not_p4, p5, p21, not_p3}
{p8, p2, p25, p3, not_p5, p17, not_p22, not_p21, not_p4, p5, not_p3}
{p8, p2, p3, not_p5, p17, not_p22, not_p12, not_p4, p5, p21, not_p3}
{p8, p2, p25, p3, not_p5, p17, not_p22, not_p12, not_p4, p5, not_p3}
{p8, p2, p3, not_p5, p17, not_p22, not_p21, not_p4, p5, p21, not_p25}
{p8, p2, p25, p3, not_p5, p17, not_p22, not_p21, not_p4, p5, not_p25}
{p8, p2, p3, not_p5, p17, not_p22, not_p12, not_p4, p5, p21, not_p25}
{p8, p2, p25, p3, not_p5, p17, not_p22, not_p12, not_p4, p5, not_p25}
{p8, p2, p3, not_p5, p17, not_p22, not_p21, not_p4, p5, p21, p14}
{p8, p2, p3, not_p5, p17, not_p22, not_p12, not_p4, p5, p21, p14}
{p8, p2, p25, p3, not_p5, p17, not_p22, not_p21, not_p4, p5, p14}
{p8, p2, p25, p3, not_p5, p17, not_p22, not_p12, not_p4, p5, p14}
{p8, p2, p3, not_p5, p17, not_p22, not_p21, p9, not_p4, p5, p21}
{p8, p2, p3, not_p5, p17, not_p22, not_p12, p9, not_p4, p5, p21}
{p8, p2, p25, p3, not_p5, p17, not_p22, not_p21, p9, not_p4, p5}
{p8, p2, p25, p3, not_p5, p17, not_p22, not_p12, p9, not_p4, p5}
{p8, p2, p11, p22, not_p8, not_p22, not_p4, p5, p14}
{p8, p2, p3, p22, not_p8, not_p22, not_p4, p5, p14}
{p8, p2, p25, p3, not_p5, not_p8, not_p22, not_p21, not_p4, p5, p14}
{p8, p2, p25, p3, not_p5, not_p8, not_p22, not_p12, not_p4, p5, p14}
{p8, p2, p11, p22, not_p19, not_p22, not_p4, p5, p14}
{p8, p2, p3, p22, not_p19, not_p22, not_p4, p5, p14}
{p8, p2, p25, p11, not_p19, not_p22, not_p21, not_p4, p5, p14}
{p8, p2, p25, p11, not_p19, not_p22, not_p12, not_p4, p5, p14}
{p8, p2, p25, p3, not_p19, not_p22, not_p21, not_p4, p5, p14}
{p8, p2, p25, p3, not_p19, not_p22, not_p12, not_p4, p5, p14}
{p8, p2, p11, p22, not_p22, not_p4, p5, p14, p6}
{p8, p2, p3, p22, not_p22, not_p4, p5, p14, p6}
{p8, p2, p25, p11, not_p5, not_p22, not_p21, not_p4, p5, p14, p6}
{p8, p2, p25, p11, not_p5, not_p22, not_p12, not_p4, p5, p14, p6}
{p8, p2, p25, p3, not_p5, not_p22, not_p21, not_p4, p5, p14, p6}
{p8, p2, p25, p3, not_p5, not_p22, not_p12, not_p4, p5, p14, p6}
{p2, not_p20, p3, not_p5, p17, not_p22, not_p21, p4, p14}
{p2, not_p20, p3, not_p5, p17, not_p22, not_p12, p4, p14}
{p2, not_p20, p3, p22, not_p5, p17, not_p22, p4, p14}
{p2, not_p20, p3, not_p5, p17, not_p22, not_p21, p4, not_p3}
{p2, not_p20, p3, not_p5, p17, not_p22, not_p12, p4, not_p3}
{p2, not_p20, p3, p22, not_p5, p17, not_p22, p4, not_p3}
{p2, not_p20, p3, not_p5, p17, not_p22, not_p21, p4, not_p25}
{p2, not_p20, p3, not_p5, p17, not_p22, not_p12, p4, not_p25}
{p2, not_p20, p3, p22, not_p5, p17, not_p22, p4, not_p25}
{p2, not_p20, p3, not_p5, p17, not_p22, not_p21, p9, p4}
{p2, not_p20, p3, not_p5, p17, not_p22, not_p12, p9, p4}
{p2, not_p20, p3, p22, not_p5, p17, not_p22, p9, p4}
{p2, p25, p3, not_p5, p17, not_p22, not_p21, p4, p21, p14}
{p2, p25, p3, not_p5, p17, not_p22, not_p12, p4, p21, p14}
{p2, p25, p3, p22, not_p5, p17, not_p22, p4, p21, p14}
{p2, p25, p3, not_p5, p17, not_p22, not_p21, p4, p21, not_p3}
{p2, p25, p3, not_p5, p17, not_p22, not_p12, p4, p21, not_p3}
{p2, p25, p3, p22, not_p5, p17, not_p22, p4, p21, not_p3}
{p2, p25, p3, not_p5, p17, not_p22, not_p21, p4, p21, not_p25}
{p2, p25, p3, not_p5, p17, not_p22, not_p12, p4, p21, not_p25}
{p2, p25, p3, p22, not_p5, p17, not_p22, p4, p21, not_p25}
{p2, p25, p3, not_p5, p17, not_p22, not_p21, p9, p4, p21}
{p2, p25, p3, not_p5, p17, not_p22, not_p12, p9, p4, p21}
{p2, p25, p3, p22, not_p5, p17, not_p22, p9, p4, p21}
{p2, p25, p3, p22, not_p5, p12, p17, not_p22, p4, p14}
{p2, p25, p3, p22, not_p5, p12, p17, not_p22, p4, not_p3}
{p2, p25, p3, p22, not_p5, p12, p17, not_p22, p4, not_p25}
{p2, p25, p3, p22, not_p5, p12, p17, not_p22, p9, p4}
{p2, p25, p3, not_p5, p12, p17, not_p22, not_p21, p4, not_p3}
{p2, p25, p3, not_p5, p12, p17, not_p22, not_p12, p4, not_p3}
{p2, p25, p3, not_p5, p12, p17, not_p22, not_p21, p4, p14}
{p2, p25, p3, not_p5, p12, p17, not_p22, not_p21, p4, not_p25}
{p2, p25, p3, not_p5, p12, p17, not_p22, not_p21, p9, p4}
{p2, p25, p3, not_p5, p12, p17, not_p22, not_p12, p4, p14}
{p2, p25, p3, not_p5, p12, p17, not_p22, not_p12, p4, not_p25}
{p2, p25, p3, not_p5, p12, p17, not_p22, not_p12, p9, p4}
{p2, p11, not_p5, p17, not_p22, not_p21, not_p4, p21, not_p3}
{p2, p11, not_p5, p17, not_p22, not_p12, not_p4, p21, not_p3}
{p2, p11, p22, not_p5, p17, not_p22, not_p4, p21, not_p3}
{p2, p11, not_p20, not_p5, p17, not_p22, not_p21, not_p4, not_p3}
{p2, p11, not_p20, not_p5, p17, not_p22, not_p12, not_p4, not_p3}
{p2, p11, not_p20, p22, not_p5, p17, not_p22, not_p4, not_p3}
{p2, p11, not_p5, p12, p17, not_p22, not_p21, not_p4, not_p3}
{p2, p11, not_p5, p12, p17, not_p22, not_p12, not_p4, not_p3}
{p2, p11, p22, not_p5, p12, p17, not_p22, not_p4, not_p3}
{p2, p11, not_p5, p17, not_p22, not_p21, not_p4, p21, not_p25}
{p2, p11, not_p5, p17, not_p22, not_p12, not_p4, p21, not_p25}
{p2, p11, p22, not_p5, p17, not_p22, not_p4, p21, not_p25}
{p2, p11, not_p20, not_p5, p17, not_p22, not_p21, not_p4, not_p25}
{p2, p11, not_p20, not_p5, p17, not_p22, not_p12, not_p4, not_p25}
{p2, p11, not_p20, p22, not_p5, p17, not_p22, not_p4, not_p25}
{p2, p11, not_p5, p12, p17, not_p22, not_p21, not_p4, not_p25}
{p2, p11, not_p5, p12, p17, not_p22, not_p12, not_p4, not_p25}
{p2, p11, p22, not_p5, p12, p17, not_p22, not_p4, not_p25}
{p2, p11, not_p5, p17, not_p22, not_p21, not_p4, p21, p14}
{p2, p11, not_p5, p17, not_p22, not_p12, not_p4, p21, p14}
{p2, p11, p22, not_p5, p17, not_p22, not_p4, p21, p14}
{p2, p11, not_p5, p17, not_p22, not_p21, p9, not_p4, p21}
{p2, p11, not_p5, p17, not_p22, not_p12, p9, not_p4, p21}
{p2, p11, p22, not_p5, p17, not_p22, p9, not_p4, p21}
{p2, p11, not_p20, not_p5, p17, not_p22, not_p21, not_p4, p14}
{p2, p11, not_p20, not_p5, p17, not_p22, not_p12, not_p4, p14}
{p2, p11, not_p20, p22, not_p5, p17, not_p22, not_p4, p14}
{p2, p11, not_p20, not_p5, p17, not_p22, not_p21, p9, not_p4}
{p2, p11, not_p20, not_p5, p17, not_p22, not_p12, p9, not_p4}
{p2, p11, not_p20, p22, not_p5, p17, not_p22, p9, not_p4}
{p2, p11, not_p5, p12, p17, not_p22, not_p21, not_p4, p14}
{p2, p11, not_p5, p12, p17, not_p22, not_p12, not_p4, p14}
{p2, p11, p22, not_p5, p12, p17, not_p22, not_p4, p14}
{p2, p11, not_p5, p12, p17, not_p22, not_p21, p9, not_p4}
{p2, p11, not_p5, p12, p17, not_p22, not_p12, p9, not_p4}
{p2, p11, p22, not_p5, p12, p17, not_p22, p9, not_p4}
{p2, not_p20, p3, not_p5, p17, not_p22, not_p21, not_p4, p14, p16}
{p2, not_p20, p3, not_p5, p17, not_p22, not_p12, not_p4, p14, p16}
{p2, not_p20, p3, p22, not_p5, p17, not_p22, not_p4, p14, p16}
{p2, not_p20, p3, not_p5, p17, not_p22, not_p21, not_p4, p16, not_p3}
{p2, not_p20, p3, not_p5, p17, not_p22, not_p12, not_p4, p16, not_p3}
{p2, not_p20, p3, p22, not_p5, p17, not_p22, not_p4, p16, not_p3}
{p2, not_p20, p3, not_p5, p17, not_p22, not_p21, not_p4, p16, not_p25}
{p2, not_p20, p3, not_p5, p17, not_p22, not_p12, not_p4, p16, not_p25}
{p2, not_p20, p3, p22, not_p5, p17, not_p22, not_p4, p16, not_p25}
{p2, not_p20, p3, not_p5, p17, not_p22, not_p21, p9, not_p4, p16}
{p2, not_p20, p3, not_p5, p17, not_p22, not_p12, p9, not_p4, p16}
{p2, not_p20, p3, p22, not_p5, p17, not_p22, p9, not_p4, p16}
{p2, p25, p3, not_p5, p17, not_p22, not_p21, not_p4, p21, p14, p16}
{p2, p25, p3, not_p5, p17, not_p22, not_p12, not_p4, p21, p14, p16}
{p2, p25, p3, p22, not_p5, p17, not_p22, not_p4, p21, p14, p16}
{p2, p25, p3, not_p5, p17, not_p22, not_p21, not_p4, p21, p16, not_p3}
{p2, p25, p3, not_p5, p17, not_p22, not_p12, not_p4, p21, p16, not_p3}
{p2, p25, p3, p22, not_p5, p17, not_p22, not_p4, p21, p16, not_p3}
{p2, p25, p3, not_p5, p17, not_p22, not_p21, not_p4, p21, p16, not_p25}
{p2, p25, p3, not_p5, p17, not_p22, not_p12, not_p4, p21, p16, not_p25}
{p2, p25, p3, p22, not_p5, p17, not_p22, not_p4, p21, p16, not_p25}
{p2, p25, p3, not_p5, p17, not_p22, not_p21, p9, not_p4, p21, p16}
{p2, p25, p3, not_p5, p17, not_p22, not_p12, p9, not_p4, p21, p16}
{p2, p25, p3, p22, not_p5, p17, not_p22, p9, not_p4, p21, p16}
{p2, p25, p3, not_p5, p12, p17, not_p22, not_p21, not_p4, p14, p16}
{p2, p25, p3, not_p5, p12, p17, not_p22, not_p12, not_p4, p14, p16}
{p2, p25, p3, p22, not_p5, p12, p17, not_p22, not_p4, p14, p16}
{p2, p25, p3, not_p5, p12, p17, not_p22, not_p21, not_p4, p16, not_p3}
{p2, p25, p3, not_p5, p12, p17, not_p22, not_p12, not_p4, p16, not_p3}
{p2, p25, p3, p22, not_p5, p12, p17, not_p22, not_p4, p16, not_p3}
{p2, p25, p3, not_p5, p12, p17, not_p22, not_p21, not_p4, p16, not_p25}
{p2, p25, p3, not_p5, p12, p17, not_p22, not_p12, not_p4, p16, not_p25}
{p2, p25, p3, p22, not_p5, p12, p17, not_p22, not_p4, p16, not_p25}
{p2, p25, p3, not_p5, p12, p17, not_p22, not_p21, p9, not_p4, p16}
{p2, p25, p3, not_p5, p12, p17, not_p22, not_p12, p9, not_p4, p16}
{p2, p25, p3, p22, not_p5, p12, p17, not_p22, p9, not_p4, p16}
{p2, p3, p22, p17, not_p22, p4, p5, p14}
{p2, p3, p22, p17, not_p22, p4, p5, not_p3}
{p2, p3, p22, p17, not_p22, p4, p5, not_p25}
{p2, p3, p22, p17, not_p22, p9, p4, p5}
{p2, p3, not_p19, p17, not_p22, not_p21, p4, p5, p21, not_p3}
{p2, p25, p3, not_p19, p17, not_p22, not_p21, p4, p5, not_p3}
{p2, p3, not_p5, p17, not_p22, not_p21, p4, p5, p21, not_p3}
{p2, p25, p3, not_p5, p17, not_p22, not_p21, p4, p5, not_p3}
{p2, p3, not_p19, p17, not_p22, not_p12, p4, p5, p21, not_p3}
{p2, p25, p3, not_p19, p17, not_p22, not_p12, p4, p5, not_p3}
{p2, p3, not_p5, p17, not_p22, not_p12, p4, p5, p21, not_p3}
{p2, p25, p3, not_p5, p17, not_p22, not_p12, p4, p5, not_p3}
{p2, p3, not_p19, p17, not_p22, not_p21, p4, p5, p21, not_p25}
{p2, p25, p3, not_p19, p17, not_p22, not_p21, p4, p5, not_p25}
{p2, p3, not_p5, p17, not_p22, not_p21, p4, p5, p21, not_p25}
{p2, p25, p3, not_p5, p17, not_p22, not_p21, p4, p5, not_p25}
{p2, p3, not_p19, p17, not_p22, not_p12, p4, p5, p21, not_p25}
{p2, p25, p3, not_p19, p17, not_p22, not_p12, p4, p5, not_p25}
{p2, p3, not_p5, p17, not_p22, not_p12, p4, p5, p21, not_p25}
{p2, p25, p3, not_p5, p17, not_p22, not_p12, p4, p5, not_p25}
{p2, p3, not_p19, p17, not_p22, not_p21, p4, p5, p21, p14}
{p2, p3, not_p5, p17, not_p22, not_p21, p4, p5, p21, p14}
{p2, p3, not_p19, p17, not_p22, not_p12, p4, p5, p21, p14}
{p2, p3, not_p5, p17, not_p22, not_p12, p4, p5, p21, p14}
{p2, p25, p3, not_p19, p17, not_p22, not_p21, p4, p5, p14}
{p2, p25, p3, not_p5, p17, not_p22, not_p21, p4, p5, p14}
{p2, p25, p3, not_p19, p17, not_p22, not_p12, p4, p5, p14}
{p2, p25, p3, not_p5, p17, not_p22, not_p12, p4, p5, p14}
{p2, p3, not_p19, p17, not_p22, not_p21, p9, p4, p5, p21}
{p2, p3, not_p5, p17, not_p22, not_p21, p9, p4, p5, p21}
{p2, p3, not_p19, p17, not_p22, not_p12, p9, p4, p5, p21}
{p2, p3, not_p5, p17, not_p22, not_p12, p9, p4, p5, p21}
{p2, p25, p3, not_p19, p17, not_p22, not_p21, p9, p4, p5}
{p2, p25, p3, not_p5, p17, not_p22, not_p21, p9, p4, p5}
{p2, p25, p3, not_p19, p17, not_p22, not_p12, p9, p4, p5}
{p2, p25, p3, not_p5, p17, not_p22, not_p12, p9, p4, p5}
{p2, p11, p22, p17, not_p22, not_p4, p5, not_p3}
{p2, p11, p22, p17, not_p22, not_p4, p5, not_p25}
{p2, p11, p22, p17, not_p22, not_p4, p5, p14}
{p2, p11, p22, p17, not_p22, p9, not_p4, p5}
{p2, p11, not_p19, p17, not_p22, not_p21, not_p4, p5, not_p3}
{p2, p11, not_p5, p17, not_p22, not_p21, not_p4, p5, not_p3}
{p2, p11, not_p19, p17, not_p22, not_p12, not_p4, p5, not_p3}
{p2, p11, not_p5, p17, not_p22, not_p12, not_p4, p5, not_p3}
{p2, p11, not_p19, p17, not_p22, not_p21, not_p4, p5, not_p25}
{p2, p11, not_p5, p17, not_p22, not_p21, not_p4, p5, not_p25}
{p2, p11, not_p19, p17, not_p22, not_p12, not_p4, p5, not_p25}
{p2, p11, not_p5, p17, not_p22, not_p12, not_p4, p5, not_p25}
{p2, p25, p11, not_p19, p17, not_p22, not_p21, not_p4, p5, p14}
{p2, p25, p11, not_p5, p17, not_p22, not_p21, not_p4, p5, p14}
{p2, p25, p11, not_p19, p17, not_p22, not_p12, not_p4, p5, p14}
{p2, p25, p11, not_p5, p17, not_p22, not_p12, not_p4, p5, p14}
{p2, p11, not_p19, p17, not_p22, not_p21, p9, not_p4, p5}
{p2, p11, not_p5, p17, not_p22, not_p21, p9, not_p4, p5}
{p2, p11, not_p19, p17, not_p22, not_p12, p9, not_p4, p5}
{p2, p11, not_p5, p17, not_p22, not_p12, p9, not_p4, p5}
{p2, p3, p22, p17, not_p22, not_p4, p5, p14, p16}
{p2, p3, p22, p17, not_p22, not_p4, p5, p16, not_p3}
{p2, p3, p22, p17, not_p22, not_p4, p5, p16, not_p25}
{p2, p3, p22, p17, not_p22, p9, not_p4, p5, p16}
{p2, p3, not_p19, p17, not_p22, not_p21, not_p4, p5, p21, p16, not_p3}
{p2, p25, p3, not_p19, p17, not_p22, not_p21, not_p4, p5, p16, not_p3}
{p2, p3, not_p5, p17, not_p22, not_p21, not_p4, p5, p21, p16, not_p3}
{p2, p25, p3, not_p5, p17, not_p22, not_p21, not_p4, p5, p16, not_p3}
{p2, p3, not_p19, p17, not_p22, not_p12, not_p4, p5, p21, p16, not_p3}
{p2, p25, p3, not_p19, p17, not_p22, not_p12, not_p4, p5, p16, not_p3}
{p2, p3, not_p5, p17, not_p22, not_p12, not_p4, p5, p21, p16, not_p3}
{p2, p25, p3, not_p5, p17, not_p22, not_p12, not_p4, p5, p16, not_p3}
{p2, p3, not_p19, p17, not_p22, not_p21, not_p4, p5, p21, p14, p16}
{p2, p3, not_p19, p17, not_p22, not_p21, not_p4, p5, p21, p16, not_p25}
{p2, p3, not_p19, p17, not_p22, not_p21, p9, not_p4, p5, p21, p16}
{p2, p3, not_p5, p17, not_p22, not_p21, not_p4, p5, p21, p14, p16}
{p2, p3, not_p5, p17, not_p22, not_p21, not_p4, p5, p21, p16, not_p25}
{p2, p3, not_p5, p17, not_p22, not_p21, p9, not_p4, p5, p21, p16}
{p2, p3, not_p19, p17, not_p22, not_p12, not_p4, p5, p21, p14, p16}
{p2, p3, not_p19, p17, not_p22, not_p12, not_p4, p5, p21, p16, not_p25}
{p2, p3, not_p19, p17, not_p22, not_p12, p9, not_p4, p5, p21, p16}
{p2, p3, not_p5, p17, not_p22, not_p12, not_p4, p5, p21, p14, p16}
{p2, p3, not_p5, p17, not_p22, not_p12, not_p4, p5, p21, p16, not_p25}
{p2, p3, not_p5, p17, not_p22, not_p12, p9, not_p4, p5, p21, p16}
{p2, p25, p3, not_p19, p17, not_p22, not_p21, not_p4, p5, p14, p16}
{p2, p25, p3, not_p19, p17, not_p22, not_p21, not_p4, p5, p16, not_p25}
{p2, p25, p3, not_p19, p17, not_p22, not_p21, p9, not_p4, p5, p16}
{p2, p25, p3, not_p5, p17, not_p22, not_p21, not_p4, p5, p14, p16}
{p2, p25, p3, not_p5, p17, not_p22, not_p21, not_p4, p5, p16, not_p25}
{p2, p25, p3, not_p5, p17, not_p22, not_p21, p9, not_p4, p5, p16}
{p2, p25, p3, not_p19, p17, not_p22, not_p12, not_p4, p5, p14, p16}
{p2, p25, p3, not_p19, p17, not_p22, not_p12, not_p4, p5, p16, not_p25}
{p2, p25, p3, not_p19, p17, not_p22, not_p12, p9, not_p4, p5, p16}
{p2, p25, p3, not_p5, p17, not_p22, not_p12, not_p4, p5, p14, p16}
{p2, p25, p3, not_p5, p17, not_p22, not_p12, not_p4, p5, p16, not_p25}
{p2, p25, p3, not_p5, p17, not_p22, not_p12, p9, not_p4, p5, p16}
{p2, p3, not_p5, p12, not_p8, not_p22, not_p21, p4, p14}
{p2, p3, not_p5, p12, not_p8, not_p22, not_p12, p4, p14}
{p2, p3, p22, not_p5, p12, not_p8, not_p22, p4, p14}
{p2, p3, not_p5, not_p19, p12, not_p22, not_p21, p4, p14}
{p2, p3, not_p5, not_p19, p12, not_p22, not_p12, p4, p14}
{p2, p3, p22, not_p5, not_p19, p12, not_p22, p4, p14}
{p2, p3, not_p5, p12, not_p22, not_p21, p4, p14, p6}
{p2, p3, not_p5, p12, not_p22, not_p12, p4, p14, p6}
{p2, p3, p22, not_p5, p12, not_p22, p4, p14, p6}
{p2, p3, not_p5, p12, not_p8, not_p22, not_p21, p4, not_p3}
{p2, p3, not_p5, p12, not_p8, not_p22, not_p12, p4, not_p3}
{p2, p3, p22, not_p5, p12, not_p8, not_p22, p4, not_p3}
{p2, p3, not_p5, not_p19, p12, not_p22, not_p21, p4, not_p3}
{p2, p3, not_p5, not_p19, p12, not_p22, not_p12, p4, not_p3}
{p2, p3, p22, not_p5, not_p19, p12, not_p22, p4, not_p3}
{p2, p3, not_p5, p12, not_p22, not_p21, p4, p6, not_p3}
{p2, p3, not_p5, p12, not_p22, not_p12, p4, p6, not_p3}
{p2, p3, p22, not_p5, p12, not_p22, p4, p6, not_p3}
{p2, p3, not_p5, p12, not_p8, not_p22, not_p21, p4, not_p25}
{p2, p3, not_p5, p12, not_p8, not_p22, not_p12, p4, not_p25}
{p2, p3, p22, not_p5, p12, not_p8, not_p22, p4, not_p25}
{p2, p3, not_p5, not_p19, p12, not_p22, not_p21, p4, not_p25}
{p2, p3, not_p5, not_p19, p12, not_p22, not_p12, p4, not_p25}
{p2, p3, p22, not_p5, not_p19, p12, not_p22, p4, not_p25}
{p2, p3, not_p5, p12, not_p22, not_p21, p4, p6, not_p25}
{p2, p3, not_p5, p12, not_p22, not_p12, p4, p6, not_p25}
{p2, p3, p22, not_p5, p12, not_p22, p4, p6, not_p25}
{p2, p3, not_p5, p12, not_p8, not_p22, not_p21, p9, p4}
{p2, p3, not_p5, p12, not_p8, not_p22, not_p12, p9, p4}
{p2, p3, p22, not_p5, p12, not_p8, not_p22, p9, p4}
{p2, p3, not_p5, not_p19, p12, not_p22, not_p21, p9, p4}
{p2, p3, not_p5, not_p19, p12, not_p22, not_p12, p9, p4}
{p2, p3, p22, not_p5, not_p19, p12, not_p22, p9, p4}
{p2, p3, not_p5, p12, not_p22, not_p21, p9, p4, p6}
{p2, p3, not_p5, p12, not_p22, not_p12, p9, p4, p6}
{p2, p3, p22, not_p5, p12, not_p22, p9, p4, p6}
{p2, p11, not_p5, p12, not_p8, not_p22, not_p21, not_p4, p14}
{p2, p11, not_p5, p12, not_p8, not_p22, not_p12, not_p4, p14}
{p2, p11, p22, not_p5, p12, not_p8, not_p22, not_p4, p14}
{p2, p11, not_p5, not_p19, p12, not_p22, not_p21, not_p4, p14}
{p2, p11, not_p5, not_p19, p12, not_p22, not_p12, not_p4, p14}
{p2, p11, p22, not_p5, not_p19, p12, not_p22, not_p4, p14}
{p2, p11, not_p5, p12, not_p22, not_p21, not_p4, p14, p6}
{p2, p11, not_p5, p12, not_p22, not_p12, not_p4, p14, p6}
{p2, p11, p22, not_p5, p12, not_p22, not_p4, p14, p6}
{p2, p11, not_p5, p12, not_p8, not_p22, not_p21, not_p4, not_p3}
{p2, p11, not_p5, p12, not_p8, not_p22, not_p12, not_p4, not_p3}
{p2, p11, p22, not_p5, p12, not_p8, not_p22, not_p4, not_p3}
{p2, p11, not_p5, not_p19, p12, not_p22, not_p21, not_p4, not_p3}
{p2, p11, not_p5, not_p19, p12, not_p22, not_p12, not_p4, not_p3}
{p2, p11, p22, not_p5, not_p19, p12, not_p22, not_p4, not_p3}
{p2, p11, not_p5, p12, not_p22, not_p21, not_p4, p6, not_p3}
{p2, p11, not_p5, p12, not_p22, not_p12, not_p4, p6, not_p3}
{p2, p11, p22, not_p5, p12, not_p22, not_p4, p6, not_p3}
{p2, p11, not_p5, p12, not_p8, not_p22, not_p21, not_p4, not_p25}
{p2, p11, not_p5, p12, not_p8, not_p22, not_p12, not_p4, not_p25}
{p2, p11, p22, not_p5, p12, not_p8, not_p22, not_p4, not_p25}
{p2, p11, not_p5, not_p19, p12, not_p22, not_p21, not_p4, not_p25}
{p2, p11, not_p5, not_p19, p12, not_p22, not_p12, not_p4, not_p25}
{p2, p11, p22, not_p5, not_p19, p12, not_p22, not_p4, not_p25}
{p2, p11, not_p5, p12, not_p22, not_p21, not_p4, p6, not_p25}
{p2, p11, not_p5, p12, not_p22, not_p12, not_p4, p6, not_p25}
{p2, p11, p22, not_p5, p12, not_p22, not_p4, p6, not_p25}
{p2, p11, not_p5, p12, not_p8, not_p22, not_p21, p9, not_p4}
{p2, p11, not_p5, p12, not_p8, not_p22, not_p12, p9, not_p4}
{p2, p11, p22, not_p5, p12, not_p8, not_p22, p9, not_p4}
{p2, p11, not_p5, not_p19, p12, not_p22, not_p21, p9, not_p4}
{p2, p11, not_p5, not_p19, p12, not_p22, not_p12, p9, not_p4}
{p2, p11, p22, not_p5, not_p19, p12, not_p22, p9, not_p4}
{p2, p11, not_p5, p12, not_p22, not_p21, p9, not_p4, p6}
{p2, p11, not_p5, p12, not_p22, not_p12, p9, not_p4, p6}
{p2, p11, p22, not_p5, p12, not_p22, p9, not_p4, p6}
{p2, p3, not_p5, p12, not_p8, not_p22, not_p21, not_p4, p14, p16}
{p2, p3, not_p5, p12, not_p8, not_p22, not_p12, not_p4, p14, p16}
{p2, p3, p22, not_p5, p12, not_p8, not_p22, not_p4, p14, p16}
{p2, p3, not_p5, not_p19, p12, not_p22, not_p21, not_p4, p14, p16}
{p2, p3, not_p5, not_p19, p12, not_p22, not_p12, not_p4, p14, p16}
{p2, p3, p22, not_p5, not_p19, p12, not_p22, not_p4, p14, p16}
{p2, p3, not_p5, p12, not_p22, not_p21, not_p4, p14, p16, p6}
{p2, p3, not_p5, p12, not_p22, not_p12, not_p4, p14, p16, p6}
{p2, p3, p22, not_p5, p12, not_p22, not_p4, p14, p16, p6}
{p2, p3, not_p5, p12, not_p8, not_p22, not_p21, not_p4, p16, not_p3}
{p2, p3, not_p5, p12, not_p8, not_p22, not_p12, not_p4, p16, not_p3}
{p2, p3, p22, not_p5, p12, not_p8, not_p22, not_p4, p16, not_p3}
{p2, p3, not_p5, not_p19, p12, not_p22, not_p21, not_p4, p16, not_p3}
{p2, p3, not_p5, not_p19, p12, not_p22, not_p12, not_p4, p16, not_p3}
{p2, p3, p22, not_p5, not_p19, p12, not_p22, not_p4, p16, not_p3}
{p2, p3, not_p5, p12, not_p22, not_p21, not_p4, p16, p6, not_p3}
{p2, p3, not_p5, p12, not_p22, not_p12, not_p4, p16, p6, not_p3}
{p2, p3, p22, not_p5, p12, not_p22, not_p4, p16, p6, not_p3}
{p2, p3, not_p5, p12, not_p8, not_p22, not_p21, not_p4, p16, not_p25}
{p2, p3, not_p5, p12, not_p8, not_p22, not_p12, not_p4, p16, not_p25}
{p2, p3, p22, not_p5, p12, not_p8, not_p22, not_p4, p16, not_p25}
{p2, p3, not_p5, not_p19, p12, not_p22, not_p21, not_p4, p16, not_p25}
{p2, p3, not_p5, not_p19, p12, not_p22, not_p12, not_p4, p16, not_p25}
{p2, p3, p22, not_p5, not_p19, p12, not_p22, not_p4, p16, not_p25}
{p2, p3, not_p5, p12, not_p22, not_p21, not_p4, p16, p6, not_p25}
{p2, p3, not_p5, p12, not_p22, not_p12, not_p4, p16, p6, not_p25}
{p2, p3, p22, not_p5, p12, not_p22, not_p4, p16, p6, not_p25}
{p2, p3, not_p5, p12, not_p8, not_p22, not_p21, p9, not_p4, p16}
{p2, p3, not_p5, p12, not_p8, not_p22, not_p12, p9, not_p4, p16}
{p2, p3, p22, not_p5, p12, not_p8, not_p22, p9, not_p4, p16}
{p2, p3, not_p5, not_p19, p12, not_p22, not_p21, p9, not_p4, p16}
{p2, p3, not_p5, not_p19, p12, not_p22, not_p12, p9, not_p4, p16}
{p2, p3, p22, not_p5, not_p19, p12, not_p22, p9, not_p4, p16}
{p2, p3, not_p5, p12, not_p22, not_p21, p9, not_p4, p16, p6}
{p2, p3, not_p5, p12, not_p22, not_p12, p9, not_p4, p16, p6}
{p2, p3, p22, not_p5, p12, not_p22, p9, not_p4, p16, p6}
{p2, p25, p11, p22, not_p8, not_p22, not_p4, p5, p14}
{p2, p25, p11, not_p19, not_p8, not_p22, not_p21, not_p4, p5, p14}
{p2, p25, p11, not_p5, not_p8, not_p22, not_p21, not_p4, p5, p14}
{p2, p25, p11, not_p19, not_p8, not_p22, not_p12, not_p4, p5, p14}
{p2, p25, p11, not_p5, not_p8, not_p22, not_p12, not_p4, p5, p14}
{p2, p25, p11, not_p5, not_p8, not_p22, not_p21, not_p4, not_p3}
{p2, p25, p11, not_p5, not_p8, not_p22, not_p12, not_p4, not_p3}
{p2, p25, p11, p22, not_p5, not_p8, not_p22, not_p4, not_p3}
{p2, p25, p11, not_p5, not_p8, not_p22, not_p21, not_p4, not_p25}
{p2, p25, p11, not_p5, not_p8, not_p22, not_p12, not_p4, not_p25}
{p2, p25, p11, p22, not_p5, not_p8, not_p22, not_p4, not_p25}
{p2, p25, p11, not_p5, not_p8, not_p22, not_p21, not_p4, p21, p14}
{p2, p25, p11, not_p5, not_p8, not_p22, not_p12, not_p4, p21, p14}
{p2, p25, p11, p22, not_p5, not_p8, not_p22, not_p4, p21, p14}
{p2, p25, p11, not_p20, not_p5, not_p8, not_p22, not_p21, not_p4, p14}
{p2, p25, p11, not_p20, not_p5, not_p8, not_p22, not_p12, not_p4, p14}
{p2, p25, p11, not_p20, p22, not_p5, not_p8, not_p22, not_p4, p14}
{p2, p25, p11, not_p5, not_p8, not_p22, not_p21, p9, not_p4}
{p2, p25, p11, not_p5, not_p8, not_p22, not_p12, p9, not_p4}
{p2, p25, p11, p22, not_p5, not_p8, not_p22, p9, not_p4}
{p2, p25, p3, p22, not_p8, not_p22, not_p4, p5, p14, p16}
{p2, p25, p3, not_p19, not_p8, not_p22, not_p21, not_p4, p5, p14, p16}
{p2, p25, p3, not_p5, not_p8, not_p22, not_p21, not_p4, p5, p14, p16}
{p2, p25, p3, not_p19, not_p8, not_p22, not_p12, not_p4, p5, p14, p16}
{p2, p25, p3, not_p5, not_p8, not_p22, not_p12, not_p4, p5, p14, p16}
{p2, p25, p3, p22, not_p8, not_p22, p4, p5, p14}
{p2, p25, p3, not_p19, not_p8, not_p22, not_p21, p4, p5, p14}
{p2, p25, p3, not_p5, not_p8, not_p22, not_p21, p4, p5, p14}
{p2, p25, p3, not_p19, not_p8, not_p22, not_p12, p4, p5, p14}
{p2, p25, p3, not_p5, not_p8, not_p22, not_p12, p4, p5, p14}
{p2, p25, p3, not_p5, not_p8, not_p22, not_p21, not_p4, p16, not_p3}
{p2, p25, p3, not_p5, not_p8, not_p22, not_p12, not_p4, p16, not_p3}
{p2, p25, p3, p22, not_p5, not_p8, not_p22, not_p4, p16, not_p3}
{p2, p25, p3, not_p5, not_p8, not_p22, not_p21, not_p4, p16, not_p25}
{p2, p25, p3, not_p5, not_p8, not_p22, not_p12, not_p4, p16, not_p25}
{p2, p25, p3, p22, not_p5, not_p8, not_p22, not_p4, p16, not_p25}
{p2, p25, p3, not_p5, not_p8, not_p22, not_p21, not_p4, p21, p14, p16}
{p2, p25, p3, not_p5, not_p8, not_p22, not_p12, not_p4, p21, p14, p16}
{p2, p25, p3, p22, not_p5, not_p8, not_p22, not_p4, p21, p14, p16}
{p2, p25, not_p20, p3, not_p5, not_p8, not_p22, not_p21, not_p4, p14, p16}
{p2, p25, not_p20, p3, not_p5, not_p8, not_p22, not_p12, not_p4, p14, p16}
{p2, p25, not_p20, p3, p22, not_p5, not_p8, not_p22, not_p4, p14, p16}
{p2, p25, p3, not_p5, not_p8, not_p22, not_p21, p9, not_p4, p16}
{p2, p25, p3, not_p5, not_p8, not_p22, not_p12, p9, not_p4, p16}
{p2, p25, p3, p22, not_p5, not_p8, not_p22, p9, not_p4, p16}
{p2, p25, p3, not_p5, not_p8, not_p22, not_p21, p4, not_p3}
{p2, p25, p3, not_p5, not_p8, not_p22, not_p12, p4, not_p3}
{p2, p25, p3, p22, not_p5, not_p8, not_p22, p4, not_p3}
{p2, p25, p3, not_p5, not_p8, not_p22, not_p21, p4, not_p25}
{p2, p25, p3, not_p5, not_p8, not_p22, not_p12, p4, not_p25}
{p2, p25, p3, p22, not_p5, not_p8, not_p22, p4, not_p25}
{p2, p25, p3, not_p5, not_p8, not_p22, not_p21, p4, p21, p14}
{p2, p25, p3, not_p5, not_p8, not_p22, not_p12, p4, p21, p14}
{p2, p25, p3, p22, not_p5, not_p8, not_p22, p4, p21, p14}
{p2, p25, not_p20, p3, not_p5, not_p8, not_p22, not_p21, p4, p14}
{p2, p25, not_p20, p3, not_p5, not_p8, not_p22, not_p12, p4, p14}
{p2, p25, not_p20, p3, p22, not_p5, not_p8, not_p22, p4, p14}
{p2, p25, p3, not_p5, not_p8, not_p22, not_p21, p9, p4}
{p2, p25, p3, not_p5, not_p8, not_p22, not_p12, p9, p4}
{p2, p25, p3, p22, not_p5, not_p8, not_p22, p9, p4}
"""
| Yarrick13/hwasp | tests/asp/gringo/modelchecker.046.test.py | Python | apache-2.0 | 186,113 |
"""Support for Tasmota fans."""
from hatasmota import const as tasmota_const
from homeassistant.components import fan
from homeassistant.components.fan import FanEntity
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.util.percentage import (
ordered_list_item_to_percentage,
percentage_to_ordered_list_item,
)
from .const import DATA_REMOVE_DISCOVER_COMPONENT
from .discovery import TASMOTA_DISCOVERY_ENTITY_NEW
from .mixins import TasmotaAvailability, TasmotaDiscoveryUpdate
# Tasmota's named speeds in ascending order.  Used with the Home Assistant
# percentage helpers to translate between a 0-100 percentage and Tasmota's
# discrete speed constants.
ORDERED_NAMED_FAN_SPEEDS = [
    tasmota_const.FAN_SPEED_LOW,
    tasmota_const.FAN_SPEED_MEDIUM,
    tasmota_const.FAN_SPEED_HIGH,
]  # off is not included; percentage 0 is handled as a special case
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up Tasmota fan dynamically through discovery."""

    @callback
    def _discovered(tasmota_entity, discovery_hash):
        """Create and register an entity for a newly discovered Tasmota fan."""
        entity = TasmotaFan(
            tasmota_entity=tasmota_entity, discovery_hash=discovery_hash
        )
        async_add_entities([entity])

    # Keep the dispatcher unsubscribe callable so the component can tear the
    # discovery listener down later.
    unsubscribe = async_dispatcher_connect(
        hass,
        TASMOTA_DISCOVERY_ENTITY_NEW.format(fan.DOMAIN),
        _discovered,
    )
    hass.data[DATA_REMOVE_DISCOVER_COMPONENT.format(fan.DOMAIN)] = unsubscribe
class TasmotaFan(
    TasmotaAvailability,
    TasmotaDiscoveryUpdate,
    FanEntity,
):
    """Representation of a Tasmota fan."""

    def __init__(self, **kwds):
        """Initialize the Tasmota fan."""
        # Last known Tasmota speed; None until the first state update arrives.
        self._state = None
        super().__init__(**kwds)

    @property
    def speed_count(self) -> int:
        """Return the number of speeds the fan supports."""
        return len(ORDERED_NAMED_FAN_SPEEDS)

    @property
    def percentage(self):
        """Return the current speed percentage, or None if unknown."""
        state = self._state
        if state is None:
            return None
        if state == 0:
            # Speed 0 means the fan is off.
            return 0
        return ordered_list_item_to_percentage(ORDERED_NAMED_FAN_SPEEDS, state)

    @property
    def supported_features(self):
        """Flag supported features."""
        return fan.SUPPORT_SET_SPEED

    async def async_set_percentage(self, percentage):
        """Set the speed of the fan (0 turns it off)."""
        if percentage == 0:
            await self.async_turn_off()
            return
        tasmota_speed = percentage_to_ordered_list_item(
            ORDERED_NAMED_FAN_SPEEDS, percentage
        )
        self._tasmota_entity.set_speed(tasmota_speed)

    async def async_turn_on(
        self, speed=None, percentage=None, preset_mode=None, **kwargs
    ):
        """Turn the fan on."""
        # Tasmota does not support turning a fan on with implicit speed, so
        # fall back to medium when no percentage was supplied.
        # NOTE(review): the ``or`` also maps an explicit percentage of 0 to
        # medium rather than turning the fan off -- confirm this is intended.
        target = percentage or ordered_list_item_to_percentage(
            ORDERED_NAMED_FAN_SPEEDS, tasmota_const.FAN_SPEED_MEDIUM
        )
        await self.async_set_percentage(target)

    async def async_turn_off(self, **kwargs):
        """Turn the fan off."""
        self._tasmota_entity.set_speed(tasmota_const.FAN_SPEED_OFF)
| kennedyshead/home-assistant | homeassistant/components/tasmota/fan.py | Python | apache-2.0 | 3,093 |
# -*- coding: utf-8 -*-
import time
import urllib
from urlparse import parse_qs
from oauth_provider.tests.auth import BaseOAuthTestCase, METHOD_URL_QUERY, METHOD_AUTHORIZATION_HEADER, METHOD_POST_REQUEST_BODY
class XAuthTestCase(BaseOAuthTestCase):
    """Exercise the xAuth flow: exchanging a username/password pair directly
    for an OAuth access token, for consumers with ``xauth_allowed`` set.
    """
    def setUp(self):
        super(XAuthTestCase, self).setUp()
        # xAuth must be explicitly enabled on the consumer for the flow to
        # be accepted by the provider.
        self.consumer.xauth_allowed = True
        self.consumer.save()
    # NOTE(review): the triple-"s" in this name looks like it keeps the
    # helper from shadowing the base class's ``_access_token`` (which the
    # tests below call) -- confirm before renaming.  This helper itself does
    # not appear to be invoked from this module.
    def _accesss_token(self, method=METHOD_URL_QUERY):
        # PLAINTEXT signature over an empty token secret; xAuth adds the
        # ``x_auth_*`` fields to an otherwise normal access-token request.
        parameters = {
            "oauth_consumer_key": self.CONSUMER_KEY,
            "oauth_consumer_secret": self.CONSUMER_SECRET,
            "oauth_nonce": "12981230918711",
            'oauth_signature_method': 'PLAINTEXT',
            'oauth_signature': "%s&%s" % (self.CONSUMER_SECRET, ""),
            'oauth_timestamp': str(int(time.time())),
            'oauth_version': '1.0',
            'x_auth_mode': "client_auth",
            'x_auth_password': self.password,
            'x_auth_username': self.username,
        }
        if method==METHOD_AUTHORIZATION_HEADER:
            # Credentials carried in the HTTP Authorization header.
            header = self._get_http_authorization_header(parameters)
            response = self.c.get("/oauth/access_token/", HTTP_AUTHORIZATION=header)
        elif method==METHOD_URL_QUERY:
            # Credentials carried in the URL query string.
            response = self.c.get("/oauth/access_token/", parameters)
        elif method==METHOD_POST_REQUEST_BODY:
            # Credentials form-encoded in the POST request body.
            body = urllib.urlencode(parameters)
            response = self.c.post("/oauth/access_token/", body, content_type="application/x-www-form-urlencoded")
        else:
            raise NotImplementedError
        self.assertEqual(response.status_code, 200)
        response_params = parse_qs(response.content)
        # Expose the issued credentials for assertions in the test methods.
        self.ACCESS_TOKEN_KEY = response_params['oauth_token'][0]
        self.ACCESS_TOKEN_SECRET = response_params['oauth_token_secret'][0]
    def test_xauth(self):
        # ``_access_token`` is provided by BaseOAuthTestCase.
        self._access_token(x_auth_mode="client_auth",
                           x_auth_password=self.password,
                           x_auth_username=self.username)
        assert self.ACCESS_TOKEN_KEY
        assert self.ACCESS_TOKEN_SECRET
    def test_xauth_using_email(self):
        # Same flow, but authenticating with the email address instead of
        # the username.
        self._access_token(x_auth_mode="client_auth",
                           x_auth_password=self.password,
                           x_auth_username=self.email)
        assert self.ACCESS_TOKEN_KEY
        assert self.ACCESS_TOKEN_SECRET | frasern/ADL_LRS | oauth_provider/tests/xauth.py | Python | apache-2.0 | 2,360 |
# Copyright ClusterHQ Inc. See LICENSE file for details.
"""
Command to start up the Docker plugin.
"""
from os import umask
from stat import S_IRUSR, S_IWUSR, S_IXUSR
from twisted.python.usage import Options
from twisted.internet.endpoints import serverFromString
from twisted.application.internet import StreamServerEndpointService
from twisted.web.server import Site
from twisted.python.filepath import FilePath
from twisted.internet.address import UNIXAddress
from ..common.script import (
flocker_standard_options, FlockerScriptRunner, main_for_service)
from ._api import VolumePlugin
from ..node.script import get_configuration
from ..apiclient import FlockerClient
from ..control.httpapi import REST_API_PORT
# Unix socket through which Docker discovers and talks to the Flocker
# volume plugin (Docker scans /run/docker/plugins/ for plugin sockets).
PLUGIN_PATH = FilePath("/run/docker/plugins/flocker/flocker.sock")
@flocker_standard_options
class DockerPluginOptions(Options):
    """
    Command-line options accepted by the Docker plugin service.
    """
    optParameters = [
        ["rest-api-port", "p", REST_API_PORT,
         "Port to connect to for control service REST API."],
        ["agent-config", "c", "/etc/flocker/agent.yml",
         "The configuration file for the local agent."],
    ]

    def postOptions(self):
        # Downstream code expects a FilePath, not the raw string the
        # parser produced.
        config_path = FilePath(self['agent-config'])
        self['agent-config'] = config_path
class DockerPluginScript(object):
    """
    Start the Docker plugin: build a Flocker API client from the local
    agent configuration and serve the volume-plugin API on a Unix socket.
    """
    def _create_listening_directory(self, directory_path):
        """
        Ensure the directory holding the Unix socket exists with
        owner-only permissions.

        :param FilePath directory_path: The directory to create.
        """
        previous_mask = umask(0)
        try:
            if not directory_path.exists():
                directory_path.makedirs()
            directory_path.chmod(S_IRUSR | S_IWUSR | S_IXUSR)
        finally:
            umask(previous_mask)

    def main(self, reactor, options):
        # Many places in both twisted.web and Klein are unhappy with
        # listening on Unix socket, e.g.
        # https://twistedmatrix.com/trac/ticket/5406 "fix" that by
        # pretending we have a port number. Yes, I feel guilty.
        UNIXAddress.port = 0

        # /etc/flocker/agent.yml and the certificates next to it supply
        # everything we need to talk to the control service:
        agent_config = get_configuration(options)
        control_hostname = agent_config['control-service']['hostname']
        local_node_id = agent_config['node-credential'].uuid
        certs_dir = options["agent-config"].parent()

        api_client = FlockerClient(
            reactor, control_hostname, options["rest-api-port"],
            certs_dir.child(b"cluster.crt"),
            certs_dir.child(b"plugin.crt"),
            certs_dir.child(b"plugin.key"))

        self._create_listening_directory(PLUGIN_PATH.parent())
        site = Site(
            VolumePlugin(reactor, api_client, local_node_id).app.resource())
        endpoint = serverFromString(
            reactor, "unix:{}:mode=600".format(PLUGIN_PATH.path))
        return main_for_service(
            reactor, StreamServerEndpointService(endpoint, site))
def docker_plugin_main():
    """
    Script entry point that runs the Docker plugin.
    """
    script = DockerPluginScript()
    runner = FlockerScriptRunner(script=script, options=DockerPluginOptions())
    return runner.main()
| hackday-profilers/flocker | flocker/dockerplugin/_script.py | Python | apache-2.0 | 3,412 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to warm-start TF.Learn Estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import six
from tensorflow.python.framework import ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_ops
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training import saver
from tensorflow.python.util.tf_export import tf_export
@tf_export("train.VocabInfo", "estimator.VocabInfo")
class VocabInfo(
    collections.namedtuple("VocabInfo", [
        "new_vocab",
        "new_vocab_size",
        "num_oov_buckets",
        "old_vocab",
        "old_vocab_size",
        "backup_initializer",
    ])):
  """Describes how a variable's vocabulary maps between two checkpoints.

  See @{tf.estimator.WarmStartSettings$WarmStartSettings} for examples of using
  VocabInfo to warm-start.

  Attributes:
    new_vocab: [Required] Path to the vocabulary file used by the model
      being trained.
    new_vocab_size: [Required] Number of entries of the new vocabulary that
      will be used in training.
    num_oov_buckets: [Required] Number of out-of-vocabulary buckets
      associated with the vocabulary.
    old_vocab: [Required] Path to the vocabulary file used by the checkpoint
      being warm-started from.
    old_vocab_size: [Optional] Number of entries of the old vocabulary that
      were used when the checkpoint was created. Defaults to -1, meaning the
      entire old vocabulary is used.
    backup_initializer: [Optional] Initializer for variable entries that
      correspond to new vocabulary entries and OOV buckets. Defaults to
      zero-initialization when not given.
  """
  def __new__(cls,
              new_vocab,
              new_vocab_size,
              num_oov_buckets,
              old_vocab,
              old_vocab_size=-1,
              backup_initializer=None):
    # Delegate to the namedtuple constructor; this wrapper exists solely to
    # supply defaults for the optional fields.
    return super(VocabInfo, cls).__new__(
        cls,
        new_vocab=new_vocab,
        new_vocab_size=new_vocab_size,
        num_oov_buckets=num_oov_buckets,
        old_vocab=old_vocab,
        old_vocab_size=old_vocab_size,
        backup_initializer=backup_initializer,
    )
def _is_variable(x):
  """Returns True if `x` is a `Variable` or `ResourceVariable` instance."""
  return isinstance(
      x, (variables_lib.Variable, resource_variable_ops.ResourceVariable))
def _infer_var_name(var):
  """Returns the single variable name represented by `var`.

  Args:
    var: A list containing either a single `Variable`/`ResourceVariable`, a
      single `PartitionedVariable`, or multiple `Variable` slices of one
      larger variable.

  Returns:
    Name of the `var`.

  Raises:
    TypeError: If `var` maps to more than one variable name.
  """
  name_mapping = saver.BaseSaverBuilder.OpListToDict(var)
  if len(name_mapping) > 1:
    raise TypeError("`var` = %s passed as arg violates the constraints. "
                    "name_to_var_dict = %s" % (var, name_mapping))
  return next(iter(name_mapping))
def _warm_start_var(var, prev_ckpt, prev_tensor_name=None):
  """Initializes `var` from tensor `prev_tensor_name` found in `prev_ckpt`.

  Args:
    var: Current graph's variable that needs to be warm-started. One of:
      a `Variable`, a `ResourceVariable`, a `PartitionedVariable`, or a
      list of `Variable` slices of the same larger variable.
    prev_ckpt: A string naming a checkpoint directory or checkpoint path.
      It must contain a tensor named `prev_tensor_name` (if given) or one
      named like `var`.
    prev_tensor_name: Name of the tensor to look up in `prev_ckpt`; when
      None, the name of `var` itself is used.
  """
  if _is_variable(var):
    current_var_name = _infer_var_name([var])
  elif isinstance(var, variables_lib.PartitionedVariable):
    current_var_name = _infer_var_name([var])
    var = var._get_variable_list()  # pylint: disable=protected-access
  elif isinstance(var, list) and all(_is_variable(v) for v in var):
    current_var_name = _infer_var_name(var)
  else:
    raise TypeError(
        "var MUST be one of the following: a Variable, list of Variable or "
        "PartitionedVariable, but is {}".format(type(var)))
  # Fall back to the current name when no explicit mapping was provided.
  checkpoint_utils.init_from_checkpoint(
      prev_ckpt, {prev_tensor_name or current_var_name: var})
# pylint: disable=protected-access
# Accesses protected members of tf.Variable to reset the variable's internal
# state.
def _warm_start_var_with_vocab(var,
                               current_vocab_path,
                               current_vocab_size,
                               prev_ckpt,
                               prev_vocab_path,
                               previous_vocab_size=-1,
                               current_oov_buckets=0,
                               prev_tensor_name=None,
                               initializer=None):
  """Warm-starts given variable from `prev_tensor_name` tensor in `prev_ckpt`.

  Use this method when the `var` is backed by vocabulary. This method stitches
  the given `var` such that values corresponding to individual features in the
  vocabulary remain consistent irrespective of changing order of the features
  between old and new vocabularies.

  Args:
    var: Current graph's variable that needs to be warm-started (initialized).
      Can be either of the following:
      (i) `Variable`
      (ii) `ResourceVariable`
      (iii) list of `Variable`: The list must contain slices of the same larger
        variable.
      (iv) `PartitionedVariable`
    current_vocab_path: Path to the vocab file used for the given `var`.
    current_vocab_size: An `int` specifying the number of entries in the current
      vocab.
    prev_ckpt: A string specifying the directory with checkpoint file(s) or path
      to checkpoint. The given checkpoint must have tensor with name
      `prev_tensor_name` (if not None) or tensor with name same as given `var`.
    prev_vocab_path: Path to the vocab file used for the tensor in `prev_ckpt`.
    previous_vocab_size: If provided, will constrain previous vocab to the first
      `previous_vocab_size` entries. -1 means use the entire previous vocab.
    current_oov_buckets: An `int` specifying the number of out-of-vocabulary
      buckets used for given `var`.
    prev_tensor_name: Name of the tensor to lookup in provided `prev_ckpt`. If
      None, we lookup tensor with same name as given `var`.
    initializer: Variable initializer to be used for missing entries. If None,
      missing entries will be zero-initialized.

  Raises:
    ValueError: If required args are not provided.
  """
  if not (current_vocab_path and current_vocab_size and prev_ckpt and
          prev_vocab_path):
    raise ValueError("Invalid args: Must provide all of [current_vocab_path, "
                     "current_vocab_size, prev_ckpt, prev_vocab_path}.")
  # Normalize `var` to a list of variable slices.
  if _is_variable(var):
    var = [var]
  elif isinstance(var, list) and all(_is_variable(v) for v in var):
    var = var
  elif isinstance(var, variables_lib.PartitionedVariable):
    var = var._get_variable_list()
  else:
    raise TypeError(
        "var MUST be one of the following: a Variable, list of Variable or "
        "PartitionedVariable, but is {}".format(type(var)))
  if not prev_tensor_name:
    # Assume tensor name remains the same.
    prev_tensor_name = _infer_var_name(var)
  for v in var:
    v_shape = v.get_shape().as_list()
    # For partitioned variables, recover this slice's offset into the full
    # variable so the remapped initializer loads the right rows.
    slice_info = v._get_save_slice_info()
    partition_info = None
    if slice_info:
      partition_info = variable_scope._PartitionInfo(
          full_shape=slice_info.full_shape,
          var_offset=slice_info.var_offset)
    # TODO(eddz): Support cases where class vocabularies need remapping too.
    # Only row (feature) vocabularies are remapped; columns are copied as-is.
    init = checkpoint_ops._load_and_remap_matrix_initializer(
        ckpt_path=checkpoint_utils._get_checkpoint_filename(prev_ckpt),
        old_tensor_name=prev_tensor_name,
        new_row_vocab_size=current_vocab_size,
        new_col_vocab_size=v_shape[1],
        old_row_vocab_size=previous_vocab_size,
        old_row_vocab_file=prev_vocab_path,
        new_row_vocab_file=current_vocab_path,
        old_col_vocab_file=None,
        new_col_vocab_file=None,
        num_row_oov_buckets=current_oov_buckets,
        num_col_oov_buckets=0,
        initializer=initializer)
    new_init_val = ops.convert_to_tensor(
        init(shape=v_shape, partition_info=partition_info))
    # Replace the variable's initializer op so that running it loads the
    # stitched values instead of the original initial values.
    v._initializer_op = state_ops.assign(v, new_init_val)
# pylint: enable=protected-access
@tf_export("train.warm_start")
def warm_start(ckpt_to_initialize_from,
               vars_to_warm_start=".*",
               var_name_to_vocab_info=None,
               var_name_to_prev_var_name=None):
  """Warm-starts a model using the given settings.

  If you are using a tf.estimator.Estimator, this will automatically be called
  during training.

  Args:
    ckpt_to_initialize_from: [Required] A string specifying the directory with
      checkpoint file(s) or path to checkpoint from which to warm-start the
      model parameters.
    vars_to_warm_start: [Optional] A regular expression that captures which
      variables to warm-start (see tf.get_collection). Defaults to `'.*'`,
      which warm-starts all variables. If `None` is explicitly given, only
      variables specified in `var_name_to_vocab_info` will be warm-started.
    var_name_to_vocab_info: [Optional] Dict of variable names (strings) to
      VocabInfo. The variable names should be "full" variables, not the names
      of the partitions. If not explicitly provided, the variable is assumed to
      have no vocabulary.
    var_name_to_prev_var_name: [Optional] Dict of variable names (strings) to
      name of the previously-trained variable in `ckpt_to_initialize_from`. If
      not explicitly provided, the name of the variable is assumed to be same
      between previous checkpoint and current model.

  Raises:
    ValueError: If the WarmStartSettings contains prev_var_name or VocabInfo
      configuration for variable names that are not used. This is to ensure
      a stronger check for variable configuration than relying on users to
      examine the logs.
  """
  if var_name_to_vocab_info is None:
    var_name_to_vocab_info = {}
  if var_name_to_prev_var_name is None:
    var_name_to_prev_var_name = {}
  logging.info("Warm-starting from: %s", (ckpt_to_initialize_from,))
  # We have to deal with partitioned variables, since get_collection flattens
  # out the list.
  # Maps full variable name -> list of that variable's partitions (a list of
  # length 1 for unpartitioned variables).
  grouped_variables = {}
  # Both vars_to_warm_start = '.*' and
  # vars_to_warm_start = None will match everything here.
  for v in ops.get_collection(
      # TODO(eddz): Allow for different collections here (to support
      # warm-starting accumulators).
      ops.GraphKeys.TRAINABLE_VARIABLES,
      scope=vars_to_warm_start):
    if not isinstance(v, list):
      var_name = _infer_var_name([v])
    else:
      var_name = _infer_var_name(v)
    grouped_variables.setdefault(var_name, []).append(v)
  # Keep track of which var_names in var_name_to_prev_var_name and
  # var_name_to_vocab_info have been used. Err on the safer side by throwing an
  # exception if any are unused by the end of the loop. It is easy to misname
  # a variable during this configuration, in which case without this check, we
  # would fail to warm-start silently.
  prev_var_name_used = set()
  vocab_info_used = set()
  for var_name, variable in six.iteritems(grouped_variables):
    prev_var_name = var_name_to_prev_var_name.get(var_name)
    if prev_var_name:
      prev_var_name_used.add(var_name)
    vocab_info = var_name_to_vocab_info.get(var_name)
    if vocab_info:
      vocab_info_used.add(var_name)
      logging.info(
          "Warm-starting variable: {}; current_vocab: {} current_vocab_size: {}"
          " prev_vocab: {} prev_vocab_size: {} current_oov: {} prev_tensor: {}"
          " initializer: {}".format(
              var_name,
              vocab_info.new_vocab,
              vocab_info.new_vocab_size,
              vocab_info.old_vocab,
              (vocab_info.old_vocab_size if vocab_info.old_vocab_size > 0
               else "All"),
              vocab_info.num_oov_buckets,
              prev_var_name or "Unchanged",
              vocab_info.backup_initializer or "zero-initialized"))
      # Vocabulary-backed variable: stitch values per vocab entry.
      _warm_start_var_with_vocab(
          variable,
          current_vocab_path=vocab_info.new_vocab,
          current_vocab_size=vocab_info.new_vocab_size,
          prev_ckpt=ckpt_to_initialize_from,
          prev_vocab_path=vocab_info.old_vocab,
          previous_vocab_size=vocab_info.old_vocab_size,
          current_oov_buckets=vocab_info.num_oov_buckets,
          prev_tensor_name=prev_var_name,
          initializer=vocab_info.backup_initializer)
    else:
      # For the special value of vars_to_warm_start = None,
      # we only warm-start variables with explicitly specified vocabularies.
      if vars_to_warm_start:
        logging.info("Warm-starting variable: {}; prev_var_name: {}".format(
            var_name, prev_var_name or "Unchanged"))
        # Because we use a default empty list in grouped_variables, single
        # unpartitioned variables will be lists here, which we rectify in order
        # for init_from_checkpoint logic to work correctly.
        if len(variable) == 1:
          variable = variable[0]
        _warm_start_var(variable, ckpt_to_initialize_from, prev_var_name)
  # Fail loudly on any configuration entries that matched no variable; a
  # misspelled name would otherwise be silently ignored.
  prev_var_name_not_used = set(
      var_name_to_prev_var_name.keys()) - prev_var_name_used
  vocab_info_not_used = set(var_name_to_vocab_info.keys()) - vocab_info_used
  if prev_var_name_not_used:
    raise ValueError(
        "You provided the following variables in "
        "var_name_to_prev_var_name that were not used: "
        "{0}. Perhaps you misspelled them? Here is the list of viable "
        "variable names: {1}".format(prev_var_name_not_used,
                                     grouped_variables.keys()))
  if vocab_info_not_used:
    raise ValueError(
        "You provided the following variables in "
        "var_name_to_vocab_info that were not used: {0}. "
        " Perhaps you misspelled them? Here is the list of viable variable "
        "names: {1}".format(vocab_info_not_used, grouped_variables.keys()))
| allenlavoie/tensorflow | tensorflow/python/training/warm_starting_util.py | Python | apache-2.0 | 15,397 |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import ast
import math

import torch
import torch.optim
from apex.contrib.optimizers.fused_adam import FusedAdam
from apex.contrib.optimizers.distributed_fused_adam import DistributedFusedAdam
from apex.contrib.optimizers.distributed_fused_adam_v2 import DistributedFusedAdamV2
from apex.contrib.optimizers.distributed_fused_adam_v3 import DistributedFusedAdamV3

from . import FairseqOptimizer, register_optimizer
@register_optimizer('adam')
class FairseqAdam(FairseqOptimizer):
    """Fairseq wrapper around apex's fused Adam implementations.

    Depending on ``--distributed-weight-update`` (DWU) this instantiates one
    of the apex ``DistributedFusedAdam`` variants (modes 2-4) or the plain
    ``FusedAdam`` (mode 0). The vanilla (non-fused) optimizer is no longer
    supported.
    """

    def __init__(self, args, params):
        super().__init__(args, params)
        if self.args.distributed_weight_update == 2:
            dwu_args = self.distributed_weight_update_config
            print("DistributedFusedAdam",dwu_args)
            self._optimizer = DistributedFusedAdam(params, **dwu_args, **self.optimizer_config)
        elif self.args.distributed_weight_update == 3:
            dwu_args = self.distributed_weight_update_config
            print("DistributedFusedAdamV2",dwu_args)
            self._optimizer = DistributedFusedAdamV2(params, **dwu_args, **self.optimizer_config)
        elif self.args.distributed_weight_update == 4:
            dwu_args = self.distributed_weight_update_config
            print("DistributedFusedAdamV3",dwu_args)
            self._optimizer = DistributedFusedAdamV3(params, **dwu_args, **self.optimizer_config)
        else:
            assert (self.args.distributed_weight_update == 0), "Vanilla optimizer not supported anymore"
            self._optimizer = FusedAdam(params, **self.optimizer_config)

    @staticmethod
    def add_args(parser):
        """Add optimizer-specific arguments to the parser."""
        parser.add_argument('--adam-betas', default='(0.9, 0.999)', metavar='B',
                            help='betas for Adam optimizer')
        parser.add_argument('--adam-eps', type=float, default=1e-8, metavar='D',
                            help='epsilon for Adam optimizer')

    @property
    def optimizer_config(self):
        """
        Return a kwarg dictionary that will be used to override optimizer
        args stored in checkpoints. This allows us to load a checkpoint and
        resume training using a different set of optimizer args, e.g., with a
        different learning rate.
        """
        return {
            'lr': self.args.lr[0],
            # ast.literal_eval safely parses the "(0.9, 0.999)" tuple string
            # from the command line; eval() would execute arbitrary code.
            'betas': ast.literal_eval(self.args.adam_betas),
            'eps': self.args.adam_eps,
            'weight_decay': self.args.weight_decay,
        }

    @property
    def distributed_weight_update_config(self):
        """
        Return a kwarg dictionary that provides arguments for the distributed
        weight update feature.
        """
        return {
            'distributed_weight_update': self.args.distributed_weight_update,
            'dwu_group_size': self.args.dwu_group_size,
            'dwu_num_blocks': self.args.dwu_num_blocks,
            'dwu_num_chunks': self.args.dwu_num_chunks,
            'dwu_num_rs_pg': self.args.dwu_num_rs_pg,
            'dwu_num_ar_pg': self.args.dwu_num_ar_pg,
            'dwu_num_ag_pg': self.args.dwu_num_ag_pg,
            'overlap_reductions': self.args.dwu_overlap_reductions,
            'full_pipeline': self.args.dwu_full_pipeline,
            'compute_L2_grad_norm': self.args.dwu_compute_L2_grad_norm,
            'flat_mt': self.args.dwu_flat_mt,
            'e5m2_allgather': self.args.dwu_e5m2_allgather,
            'do_not_flatten_model': self.args.dwu_do_not_flatten_model,
        }
class Adam(torch.optim.Optimizer):
    """Implements Adam algorithm.

    This implementation is modified from torch.optim.Adam based on:
    `Fixed Weight Decay Regularization in Adam`
    (see https://arxiv.org/abs/1711.05101)

    It has been proposed in `Adam: A Method for Stochastic Optimization`_.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_

    .. _Adam\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=0, amsgrad=False):
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay, amsgrad=amsgrad)
        super(Adam, self).__init__(params, defaults)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                amsgrad = group['amsgrad']
                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                    if amsgrad:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = torch.zeros_like(p.data)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                if amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1
                # Decay the first and second moment running average coefficient.
                # NOTE: keyword alpha=/value= forms are used because the
                # scalar-first positional overloads (add_(scalar, tensor),
                # addcmul_(scalar, t1, t2)) were deprecated in torch 1.5 and
                # removed in later releases.
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                if amsgrad:
                    # Maintains the maximum of all 2nd moment running avg. till now
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    # Use the max. for normalizing running avg. of gradient
                    denom = max_exp_avg_sq.sqrt().add_(group['eps'])
                else:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
                if group['weight_decay'] != 0:
                    # Decoupled weight decay: scales the parameter directly
                    # rather than adding an L2 term to the gradient.
                    p.data.add_(p.data, alpha=-group['weight_decay'] * group['lr'])
                p.data.addcdiv_(exp_avg, denom, value=-step_size)
        return loss
| mlperf/training_results_v0.7 | NVIDIA/benchmarks/transformer/implementations/pytorch/fairseq/optim/adam.py | Python | apache-2.0 | 7,859 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_db import exception as db_exc
from oslo_log import log as logging
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.objects import base
from cinder.objects import fields as c_fields
from oslo_versionedobjects import fields
LOG = logging.getLogger(__name__)
@base.CinderObjectRegistry.register
class QualityOfServiceSpecs(base.CinderPersistentObject,
                            base.CinderObject,
                            base.CinderObjectDictCompat,
                            base.CinderComparableObject):
    """Versioned object representing a QoS specs database record.

    Beyond the usual persistent-object behavior, this class tracks changes
    to the ``specs`` dict at individual-key granularity so that ``save()``
    only updates modified keys and explicitly deletes keys removed since
    the last load/reset.
    """
    # Version
    # 1.0: Initial version
    VERSION = "1.0"
    # Fields lazy-loaded on first access (see obj_load_attr) instead of
    # being populated from the base DB row.
    OPTIONAL_FIELDS = ['volume_types']
    fields = {
        'id': fields.UUIDField(),
        'name': fields.StringField(),
        'consumer': c_fields.QoSConsumerField(
            default=c_fields.QoSConsumerValues.BACK_END),
        'specs': fields.DictOfNullableStringsField(nullable=True),
        'volume_types': fields.ObjectField('VolumeTypeList', nullable=True),
    }
    def __init__(self, *args, **kwargs):
        """Initialize with an empty baseline for 'specs' change tracking."""
        super(QualityOfServiceSpecs, self).__init__(*args, **kwargs)
        # Snapshot of 'specs' as of the last load/reset; used by
        # obj_what_changed()/obj_get_changes() to compute per-key deltas.
        self._init_specs = {}
    def __setattr__(self, name, value):
        """Set a field, translating invalid 'consumer' values into
        InvalidQoSSpecs for a more descriptive error.
        """
        try:
            super(QualityOfServiceSpecs, self).__setattr__(name, value)
        except ValueError:
            if name == 'consumer':
                # Give more descriptive error message for invalid 'consumer'
                msg = (_("Valid consumer of QoS specs are: %s") %
                       c_fields.QoSConsumerField())
                raise exception.InvalidQoSSpecs(reason=msg)
            else:
                raise
    def obj_reset_changes(self, fields=None, recursive=False):
        """Reset change tracking and re-snapshot 'specs' as the baseline."""
        super(QualityOfServiceSpecs, self).obj_reset_changes(fields, recursive)
        if fields is None or 'specs' in fields:
            self._init_specs = self.specs.copy() if self.specs else {}
    def obj_what_changed(self):
        """Return changed field names, comparing 'specs' by value."""
        changes = super(QualityOfServiceSpecs, self).obj_what_changed()
        # Do comparison of what's in the dict vs. reference to the specs object
        if self.obj_attr_is_set('id'):
            if self.specs != self._init_specs:
                changes.add('specs')
            else:
                # If both dicts are equal don't consider anything gets changed
                if 'specs' in changes:
                    changes.remove('specs')
        return changes
    def obj_get_changes(self):
        """Return changed fields, narrowing 'specs' to per-key deltas.

        Adds a synthetic 'specs_keys_removed' entry (consumed by save())
        listing keys deleted since the baseline snapshot.
        """
        changes = super(QualityOfServiceSpecs, self).obj_get_changes()
        if 'specs' in changes:
            # For specs, we only want what has changed in the dictionary,
            # because otherwise we'll individually overwrite the DB value for
            # every key in 'specs' even if it hasn't changed
            specs_changes = {}
            for key, val in self.specs.items():
                if val != self._init_specs.get(key):
                    specs_changes[key] = val
            changes['specs'] = specs_changes
            specs_keys_removed = (set(self._init_specs.keys()) -
                                  set(self.specs.keys()))
            if specs_keys_removed:
                # Special key notifying which specs keys have been deleted
                changes['specs_keys_removed'] = specs_keys_removed
        return changes
    def obj_load_attr(self, attrname):
        """Lazy-load an optional field (currently only 'volume_types')."""
        if attrname not in self.OPTIONAL_FIELDS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason=_('attribute %s not lazy-loadable') % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())
        if attrname == 'volume_types':
            self.volume_types = objects.VolumeTypeList.get_all_types_for_qos(
                self._context, self.id)
    @classmethod
    def _from_db_object(cls, context, qos_spec, db_qos_spec,
                        expected_attrs=None):
        """Populate a QualityOfServiceSpecs object from a DB row."""
        if expected_attrs is None:
            expected_attrs = []
        for name, field in qos_spec.fields.items():
            if name not in cls.OPTIONAL_FIELDS:
                value = db_qos_spec.get(name)
                # 'specs' could be null if only a consumer is given, so make
                # it an empty dict instead of None
                if not value and isinstance(field, fields.DictOfStringsField):
                    value = {}
                setattr(qos_spec, name, value)
        if 'volume_types' in expected_attrs:
            volume_types = objects.VolumeTypeList.get_all_types_for_qos(
                context, db_qos_spec['id'])
            qos_spec.volume_types = volume_types
        qos_spec._context = context
        qos_spec.obj_reset_changes()
        return qos_spec
    def create(self):
        """Persist this object as a new DB record.

        :raises ObjectActionError: if the object already has an id.
        :raises QoSSpecsCreateFailed: on generic DB failure.
        """
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        updates = self.cinder_obj_get_changes()
        try:
            create_ret = db.qos_specs_create(self._context, updates)
        except db_exc.DBDataError:
            msg = _('Error writing field to database')
            LOG.exception(msg)
            raise exception.Invalid(msg)
        except db_exc.DBError:
            LOG.exception('DB error occurred when creating QoS specs.')
            raise exception.QoSSpecsCreateFailed(name=self.name,
                                                 qos_specs=self.specs)
        # Save ID with the object
        updates['id'] = create_ret['id']
        self._from_db_object(self._context, self, updates)
    def save(self):
        """Write accumulated changes to the DB.

        Removed specs keys (reported by obj_get_changes() under the
        synthetic 'specs_keys_removed' key) are deleted individually before
        the remaining updates are applied.
        """
        updates = self.cinder_obj_get_changes()
        if updates:
            if 'specs_keys_removed' in updates.keys():
                for specs_key_to_remove in updates['specs_keys_removed']:
                    db.qos_specs_item_delete(
                        self._context, self.id, specs_key_to_remove)
                del updates['specs_keys_removed']
            db.qos_specs_update(self._context, self.id, updates)
        self.obj_reset_changes()
    def destroy(self, force=False):
        """Deletes the QoS spec.

        :param force: when force is True, all volume_type mappings for this QoS
                      are deleted.  When force is False and volume_type
                      mappings still exist, a QoSSpecsInUse exception is thrown
        """
        if self.volume_types:
            if not force:
                raise exception.QoSSpecsInUse(specs_id=self.id)
            # remove all association
            db.qos_specs_disassociate_all(self._context, self.id)
        updated_values = db.qos_specs_delete(self._context, self.id)
        self.update(updated_values)
        self.obj_reset_changes(updated_values.keys())
@base.CinderObjectRegistry.register
class QualityOfServiceSpecsList(base.ObjectListBase, base.CinderObject):
    """List object holding QualityOfServiceSpecs entries."""
    VERSION = '1.0'

    fields = {
        'objects': fields.ListOfObjectsField('QualityOfServiceSpecs'),
    }

    @classmethod
    def get_all(cls, context, *args, **kwargs):
        """Return every QoS specs record as a populated list object."""
        db_specs = db.qos_specs_get_all(context, *args, **kwargs)
        return base.obj_make_list(
            context, cls(context), objects.QualityOfServiceSpecs, db_specs)
| phenoxim/cinder | cinder/objects/qos_specs.py | Python | apache-2.0 | 7,943 |
#!/usr/bin/python
# Copyright 2002 Dave Abrahams
# Copyright 2002, 2003, 2004 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
from BoostBuild import Tester
import os
# Create the test driver. No toolset is passed because this test only
# exercises Boost.Build's project-loading logic; nothing is compiled.
t = Tester("--build-system=project-test1", boost_build_path='', pass_toolset=0)
# This test does no modifications, so run it in the invocation dir
os.chdir(t.original_workdir)
# Expected project-structure dump, split into fragments because the order
# of the two project sections depends on where jam is invoked from.
expected_output1="""Project Roots:
"""
expected_output2="""'%(root-dir-prefix)sdir2':
Module for project-root is 'project-root<%(root-dir-prefix)sdir2>'
Projects:
'/cool-library':
* Parent project: (none)
* Requirements: <include>/home/ghost/build/boost-cvs
* Default build:
* Source location: %(root-dir-prefix)sdir2
* Projects to build:
"""
expected_output3="""'%(root-dir)s':
Module for project-root is 'project-root<%(root-dir)s>'
Projects:
'/boost-build-test-project-1':
* Parent project: (none)
* Requirements: <include>/home/ghost/local/include <threading>multi
* Default build:
* Source location: %(root-dir)s
* Projects to build: dir dir2
'/boost-build-test-project-1/dir':
* Parent project: %(root-dir)s
* Requirements: <include>/home/ghost/local/include <threading>multi
* Default build: <variant>release
* Source location: %(root-dir-prefix)sdir/src
* Projects to build:
"""
# Test that correct project structure is created when jam is invoked
# outside of the source tree.
expected = (expected_output1 + expected_output2 + expected_output3) % \
{"root-dir": "project-test1",
"root-dir-prefix": "project-test1/" }
t.run_build_system(stdout=expected)
# Test that correct project structure is created when jam is invoked
# at the top of the source tree.
expected = (expected_output1 + expected_output3 + expected_output2) % \
{"root-dir": ".",
"root-dir-prefix": "" }
os.chdir("project-test1")
t.run_build_system(stdout=expected)
t.cleanup()
| cesarpazguzman/The-Eternal-Sorrow | dependencies/luabind/boost-build/test/project_test1.py | Python | apache-2.0 | 1,956 |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import cv2
import torch
from torchvision import transforms as T
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from maskrcnn_benchmark.structures.image_list import to_image_list
from maskrcnn_benchmark.modeling.roi_heads.mask_head.inference import Masker
from maskrcnn_benchmark import layers as L
from maskrcnn_benchmark.utils import cv2_util
class COCODemo(object):
# COCO categories for pretty print
CATEGORIES = [
"__background",
"person",
"bicycle",
"car",
"motorcycle",
"airplane",
"bus",
"train",
"truck",
"boat",
"traffic light",
"fire hydrant",
"stop sign",
"parking meter",
"bench",
"bird",
"cat",
"dog",
"horse",
"sheep",
"cow",
"elephant",
"bear",
"zebra",
"giraffe",
"backpack",
"umbrella",
"handbag",
"tie",
"suitcase",
"frisbee",
"skis",
"snowboard",
"sports ball",
"kite",
"baseball bat",
"baseball glove",
"skateboard",
"surfboard",
"tennis racket",
"bottle",
"wine glass",
"cup",
"fork",
"knife",
"spoon",
"bowl",
"banana",
"apple",
"sandwich",
"orange",
"broccoli",
"carrot",
"hot dog",
"pizza",
"donut",
"cake",
"chair",
"couch",
"potted plant",
"bed",
"dining table",
"toilet",
"tv",
"laptop",
"mouse",
"remote",
"keyboard",
"cell phone",
"microwave",
"oven",
"toaster",
"sink",
"refrigerator",
"book",
"clock",
"vase",
"scissors",
"teddy bear",
"hair drier",
"toothbrush",
]
    def __init__(
        self,
        cfg,
        confidence_threshold=0.7,
        show_mask_heatmaps=False,
        masks_per_dim=2,
        min_image_size=224,
    ):
        """Build the detection model, load trained weights and set up helpers.

        Args:
            cfg: config node describing the model; cloned so later mutation
                by the caller cannot affect this instance.
            confidence_threshold (float): detections scoring below this are
                dropped from the visualized output.
            show_mask_heatmaps (bool): if True, render mask probability
                heatmaps instead of the usual overlays.
            masks_per_dim (int): grid dimension used by the mask montage view.
            min_image_size (int): images are resized to this size before
                inference.
        """
        self.cfg = cfg.clone()
        self.model = build_detection_model(cfg)
        self.model.eval()
        self.device = torch.device(cfg.MODEL.DEVICE)
        self.model.to(self.device)
        self.min_image_size = min_image_size
        # Restore trained weights from cfg.MODEL.WEIGHT.
        save_dir = cfg.OUTPUT_DIR
        checkpointer = DetectronCheckpointer(cfg, self.model, save_dir=save_dir)
        _ = checkpointer.load(cfg.MODEL.WEIGHT)
        self.transforms = self.build_transform()
        # Threshold -1 keeps soft mask probabilities for heatmap display;
        # 0.5 binarizes masks for the normal overlay rendering.
        mask_threshold = -1 if show_mask_heatmaps else 0.5
        self.masker = Masker(threshold=mask_threshold, padding=1)
        # used to make colors for each class
        self.palette = torch.tensor([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])
        self.cpu_device = torch.device("cpu")
        self.confidence_threshold = confidence_threshold
        self.show_mask_heatmaps = show_mask_heatmaps
        self.masks_per_dim = masks_per_dim
def build_transform(self):
"""
Creates a basic transformation that was used to train the models
"""
cfg = self.cfg
# we are loading images with OpenCV, so we don't need to convert them
# to BGR, they are already! So all we need to do is to normalize
# by 255 if we want to convert to BGR255 format, or flip the channels
# if we want it to be in RGB in [0-1] range.
if cfg.INPUT.TO_BGR255:
to_bgr_transform = T.Lambda(lambda x: x * 255)
else:
to_bgr_transform = T.Lambda(lambda x: x[[2, 1, 0]])
normalize_transform = T.Normalize(
mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD
)
transform = T.Compose(
[
T.ToPILImage(),
T.Resize(self.min_image_size),
T.ToTensor(),
to_bgr_transform,
normalize_transform,
]
)
return transform
def run_on_opencv_image(self, image):
"""
Arguments:
image (np.ndarray): an image as returned by OpenCV
Returns:
prediction (BoxList): the detected objects. Additional information
of the detection properties can be found in the fields of
the BoxList via `prediction.fields()`
"""
predictions = self.compute_prediction(image)
top_predictions = self.select_top_predictions(predictions)
result = image.copy()
if self.show_mask_heatmaps:
return self.create_mask_montage(result, top_predictions)
result = self.overlay_boxes(result, top_predictions)
if self.cfg.MODEL.MASK_ON:
result = self.overlay_mask(result, top_predictions)
if self.cfg.MODEL.KEYPOINT_ON:
result = self.overlay_keypoints(result, top_predictions)
result = self.overlay_class_names(result, top_predictions)
return result
def compute_prediction(self, original_image):
"""
Arguments:
original_image (np.ndarray): an image as returned by OpenCV
Returns:
prediction (BoxList): the detected objects. Additional information
of the detection properties can be found in the fields of
the BoxList via `prediction.fields()`
"""
# apply pre-processing to image
image = self.transforms(original_image)
# convert to an ImageList, padded so that it is divisible by
# cfg.DATALOADER.SIZE_DIVISIBILITY
image_list = to_image_list(image, self.cfg.DATALOADER.SIZE_DIVISIBILITY)
image_list = image_list.to(self.device)
# compute predictions
with torch.no_grad():
predictions = self.model(image_list)
predictions = [o.to(self.cpu_device) for o in predictions]
# always single image is passed at a time
prediction = predictions[0]
# reshape prediction (a BoxList) into the original image size
height, width = original_image.shape[:-1]
prediction = prediction.resize((width, height))
if prediction.has_field("mask"):
# if we have masks, paste the masks in the right position
# in the image, as defined by the bounding boxes
masks = prediction.get_field("mask")
# always single image is passed at a time
masks = self.masker([masks], [prediction])[0]
prediction.add_field("mask", masks)
return prediction
def select_top_predictions(self, predictions):
"""
Select only predictions which have a `score` > self.confidence_threshold,
and returns the predictions in descending order of score
Arguments:
predictions (BoxList): the result of the computation by the model.
It should contain the field `scores`.
Returns:
prediction (BoxList): the detected objects. Additional information
of the detection properties can be found in the fields of
the BoxList via `prediction.fields()`
"""
scores = predictions.get_field("scores")
keep = torch.nonzero(scores > self.confidence_threshold).squeeze(1)
predictions = predictions[keep]
scores = predictions.get_field("scores")
_, idx = scores.sort(0, descending=True)
return predictions[idx]
def compute_colors_for_labels(self, labels):
"""
Simple function that adds fixed colors depending on the class
"""
colors = labels[:, None] * self.palette
colors = (colors % 255).numpy().astype("uint8")
return colors
def overlay_boxes(self, image, predictions):
"""
Adds the predicted boxes on top of the image
Arguments:
image (np.ndarray): an image as returned by OpenCV
predictions (BoxList): the result of the computation by the model.
It should contain the field `labels`.
"""
labels = predictions.get_field("labels")
boxes = predictions.bbox
colors = self.compute_colors_for_labels(labels).tolist()
for box, color in zip(boxes, colors):
box = box.to(torch.int64)
top_left, bottom_right = box[:2].tolist(), box[2:].tolist()
image = cv2.rectangle(
image, tuple(top_left), tuple(bottom_right), tuple(color), 1
)
return image
def overlay_mask(self, image, predictions):
"""
Adds the instances contours for each predicted object.
Each label has a different color.
Arguments:
image (np.ndarray): an image as returned by OpenCV
predictions (BoxList): the result of the computation by the model.
It should contain the field `mask` and `labels`.
"""
masks = predictions.get_field("mask").numpy()
labels = predictions.get_field("labels")
colors = self.compute_colors_for_labels(labels).tolist()
for mask, color in zip(masks, colors):
thresh = mask[0, :, :, None]
contours, hierarchy = cv2_util.findContours(
thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
)
image = cv2.drawContours(image, contours, -1, color, 3)
composite = image
return composite
def overlay_keypoints(self, image, predictions):
keypoints = predictions.get_field("keypoints")
kps = keypoints.keypoints
scores = keypoints.get_field("logits")
kps = torch.cat((kps[:, :, 0:2], scores[:, :, None]), dim=2).numpy()
for region in kps:
image = vis_keypoints(image, region.transpose((1, 0)))
return image
def create_mask_montage(self, image, predictions):
"""
Create a montage showing the probability heatmaps for each one one of the
detected objects
Arguments:
image (np.ndarray): an image as returned by OpenCV
predictions (BoxList): the result of the computation by the model.
It should contain the field `mask`.
"""
masks = predictions.get_field("mask")
masks_per_dim = self.masks_per_dim
masks = L.interpolate(
masks.float(), scale_factor=1 / masks_per_dim
).byte()
height, width = masks.shape[-2:]
max_masks = masks_per_dim ** 2
masks = masks[:max_masks]
# handle case where we have less detections than max_masks
if len(masks) < max_masks:
masks_padded = torch.zeros(max_masks, 1, height, width, dtype=torch.uint8)
masks_padded[: len(masks)] = masks
masks = masks_padded
masks = masks.reshape(masks_per_dim, masks_per_dim, height, width)
result = torch.zeros(
(masks_per_dim * height, masks_per_dim * width), dtype=torch.uint8
)
for y in range(masks_per_dim):
start_y = y * height
end_y = (y + 1) * height
for x in range(masks_per_dim):
start_x = x * width
end_x = (x + 1) * width
result[start_y:end_y, start_x:end_x] = masks[y, x]
return cv2.applyColorMap(result.numpy(), cv2.COLORMAP_JET)
def overlay_class_names(self, image, predictions):
"""
Adds detected class names and scores in the positions defined by the
top-left corner of the predicted bounding box
Arguments:
image (np.ndarray): an image as returned by OpenCV
predictions (BoxList): the result of the computation by the model.
It should contain the field `scores` and `labels`.
"""
scores = predictions.get_field("scores").tolist()
labels = predictions.get_field("labels").tolist()
labels = [self.CATEGORIES[i] for i in labels]
boxes = predictions.bbox
template = "{}: {:.2f}"
for box, score, label in zip(boxes, scores, labels):
x, y = box[:2]
s = template.format(label, score)
cv2.putText(
image, s, (x, y), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255), 1
)
return image
import numpy as np
import matplotlib.pyplot as plt
from maskrcnn_benchmark.structures.keypoint import PersonKeypoints
def vis_keypoints(img, kps, kp_thresh=2, alpha=0.7):
    """Visualizes keypoints (adapted from vis_one_image).

    kps has shape (4, #keypoints) where 4 rows are (x, y, logit, prob).

    :param img: BGR uint8 image as returned by OpenCV.
    :param kps: per-keypoint columns; row 2 (the logit) is compared against
        ``kp_thresh`` to decide whether a joint / limb is drawn.
    :param kp_thresh: minimum logit score for a keypoint to be drawn.
    :param alpha: blending weight of the keypoint layer over the input image.
    :return: a new image with the skeleton blended in (input is not mutated).
    """
    dataset_keypoints = PersonKeypoints.NAMES
    kp_lines = PersonKeypoints.CONNECTIONS

    # Convert from plt 0-1 RGBA colors to 0-255 BGR colors for opencv.
    cmap = plt.get_cmap('rainbow')
    colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]
    colors = [(c[2] * 255, c[1] * 255, c[0] * 255) for c in colors]

    # Perform the drawing on a copy of the image, to allow for blending.
    kp_mask = np.copy(img)

    def _as_point(xy):
        # cv2.line/cv2.circle require integer pixel coordinates; passing
        # tuples of numpy floats raises a TypeError on OpenCV >= 4.
        return (int(xy[0]), int(xy[1]))

    # Draw mid shoulder / mid hip first for better visualization.
    mid_shoulder = (
        kps[:2, dataset_keypoints.index('right_shoulder')] +
        kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0
    sc_mid_shoulder = np.minimum(
        kps[2, dataset_keypoints.index('right_shoulder')],
        kps[2, dataset_keypoints.index('left_shoulder')])
    mid_hip = (
        kps[:2, dataset_keypoints.index('right_hip')] +
        kps[:2, dataset_keypoints.index('left_hip')]) / 2.0
    sc_mid_hip = np.minimum(
        kps[2, dataset_keypoints.index('right_hip')],
        kps[2, dataset_keypoints.index('left_hip')])
    nose_idx = dataset_keypoints.index('nose')
    if sc_mid_shoulder > kp_thresh and kps[2, nose_idx] > kp_thresh:
        cv2.line(
            kp_mask, _as_point(mid_shoulder), _as_point(kps[:2, nose_idx]),
            color=colors[len(kp_lines)], thickness=2, lineType=cv2.LINE_AA)
    if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:
        cv2.line(
            kp_mask, _as_point(mid_shoulder), _as_point(mid_hip),
            color=colors[len(kp_lines) + 1], thickness=2, lineType=cv2.LINE_AA)

    # Draw the keypoints.
    for l in range(len(kp_lines)):
        i1 = kp_lines[l][0]
        i2 = kp_lines[l][1]
        p1 = int(kps[0, i1]), int(kps[1, i1])
        p2 = int(kps[0, i2]), int(kps[1, i2])
        if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:
            cv2.line(
                kp_mask, p1, p2,
                color=colors[l], thickness=2, lineType=cv2.LINE_AA)
        if kps[2, i1] > kp_thresh:
            cv2.circle(
                kp_mask, p1,
                radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA)
        if kps[2, i2] > kp_thresh:
            cv2.circle(
                kp_mask, p2,
                radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA)

    # Blend the keypoints.
    return cv2.addWeighted(img, 1.0 - alpha, kp_mask, alpha, 0)
| mlperf/training_results_v0.7 | NVIDIA/benchmarks/maskrcnn/implementations/pytorch/demo/predictor.py | Python | apache-2.0 | 15,180 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Campaign Manager hook."""
from typing import Any, Dict, List, Optional, Sequence, Union
from googleapiclient import http
from googleapiclient.discovery import Resource, build
from airflow.exceptions import AirflowException
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class GoogleCampaignManagerHook(GoogleBaseHook):
    """Hook for Google Campaign Manager.

    Thin wrapper around the `dfareporting` discovery API: each method builds
    the corresponding request on the shared connection and executes it with
    the hook's configured retry count.
    """

    # Lazily-created discovery Resource, shared by all methods via get_conn().
    _conn = None  # type: Optional[Resource]

    def __init__(
        self,
        api_version: str = "v3.3",
        gcp_conn_id: str = "google_cloud_default",
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
    ) -> None:
        """
        :param api_version: dfareporting API version to build the client for.
        :param gcp_conn_id: Airflow connection id holding GCP credentials.
        :param delegate_to: account to impersonate using domain-wide
            delegation, if any.
        :param impersonation_chain: service account (or chain of accounts)
            to impersonate via short-term credentials.
        """
        super().__init__(
            gcp_conn_id=gcp_conn_id,
            delegate_to=delegate_to,
            impersonation_chain=impersonation_chain,
        )
        self.api_version = api_version

    def get_conn(self) -> Resource:
        """Retrieves connection to Campaign Manager."""
        # Build the client only once; subsequent calls reuse the cached
        # Resource.
        if not self._conn:
            http_authorized = self._authorize()
            self._conn = build(
                "dfareporting",
                self.api_version,
                http=http_authorized,
                cache_discovery=False,
            )
        return self._conn

    def delete_report(self, profile_id: str, report_id: str) -> Any:
        """
        Deletes a report by its ID.

        :param profile_id: The DFA user profile ID.
        :param report_id: The ID of the report.
        :return: the API response body.
        """
        response = (
            self.get_conn()
            .reports()
            .delete(profileId=profile_id, reportId=report_id)
            .execute(num_retries=self.num_retries)
        )
        return response

    def insert_report(self, profile_id: str, report: Dict[str, Any]) -> Any:
        """
        Creates a report.

        :param profile_id: The DFA user profile ID.
        :param report: The report resource to be inserted.
        :return: the created report resource.
        """
        response = (
            self.get_conn()
            .reports()
            .insert(profileId=profile_id, body=report)
            .execute(num_retries=self.num_retries)
        )
        return response

    def list_reports(
        self,
        profile_id: str,
        max_results: Optional[int] = None,
        scope: Optional[str] = None,
        sort_field: Optional[str] = None,
        sort_order: Optional[str] = None,
    ) -> List[dict]:
        """
        Retrieves list of reports.

        :param profile_id: The DFA user profile ID.
        :param max_results: Maximum number of results to return.
        :param scope: The scope that defines which results are returned.
        :param sort_field: The field by which to sort the list.
        :param sort_order: Order of sorted results.
        :return: all report resources, with pagination followed to the end.
        """
        reports: List[dict] = []
        conn = self.get_conn()
        request = conn.reports().list(
            profileId=profile_id,
            maxResults=max_results,
            scope=scope,
            sortField=sort_field,
            sortOrder=sort_order,
        )
        # list_next() returns None once the last page has been consumed.
        while request is not None:
            response = request.execute(num_retries=self.num_retries)
            reports.extend(response.get("items", []))
            request = conn.reports().list_next(previous_request=request, previous_response=response)
        return reports

    def patch_report(self, profile_id: str, report_id: str, update_mask: dict) -> Any:
        """
        Updates a report. This method supports patch semantics.

        :param profile_id: The DFA user profile ID.
        :param report_id: The ID of the report.
        :param update_mask: The relevant portions of a report resource,
            according to the rules of patch semantics.
        :return: the patched report resource.
        """
        response = (
            self.get_conn()
            .reports()
            .patch(profileId=profile_id, reportId=report_id, body=update_mask)
            .execute(num_retries=self.num_retries)
        )
        return response

    def run_report(self, profile_id: str, report_id: str, synchronous: Optional[bool] = None) -> Any:
        """
        Runs a report.

        :param profile_id: The DFA profile ID.
        :param report_id: The ID of the report.
        :param synchronous: If set and true, tries to run the report synchronously.
        :return: the report file resource produced by the run.
        """
        response = (
            self.get_conn()
            .reports()
            .run(profileId=profile_id, reportId=report_id, synchronous=synchronous)
            .execute(num_retries=self.num_retries)
        )
        return response

    def update_report(self, profile_id: str, report_id: str) -> Any:
        """
        Updates a report.

        :param profile_id: The DFA user profile ID.
        :param report_id: The ID of the report.
        :return: the updated report resource.
        """
        response = (
            self.get_conn()
            .reports()
            .update(profileId=profile_id, reportId=report_id)
            .execute(num_retries=self.num_retries)
        )
        return response

    def get_report(self, file_id: str, profile_id: str, report_id: str) -> Any:
        """
        Retrieves a report file.

        :param profile_id: The DFA user profile ID.
        :param report_id: The ID of the report.
        :param file_id: The ID of the report file.
        :return: the report file metadata (including its processing status).
        """
        response = (
            self.get_conn()
            .reports()
            .files()
            .get(fileId=file_id, profileId=profile_id, reportId=report_id)
            .execute(num_retries=self.num_retries)
        )
        return response

    def get_report_file(self, file_id: str, profile_id: str, report_id: str) -> http.HttpRequest:
        """
        Retrieves a media part of report file.

        :param profile_id: The DFA user profile ID.
        :param report_id: The ID of the report.
        :param file_id: The ID of the report file.
        :return: googleapiclient.http.HttpRequest
        """
        # Deliberately NOT executed here: the caller streams the media via
        # the returned HttpRequest (e.g. with MediaIoBaseDownload).
        request = (
            self.get_conn()
            .reports()
            .files()
            .get_media(fileId=file_id, profileId=profile_id, reportId=report_id)
        )
        return request

    @staticmethod
    def _conversions_batch_request(
        conversions: List[Dict[str, Any]],
        encryption_entity_type: str,
        encryption_entity_id: int,
        encryption_source: str,
        kind: str,
    ) -> Dict[str, Any]:
        # Shared request-body builder for batchinsert/batchupdate; only the
        # `kind` discriminator differs between the two.
        return {
            "kind": kind,
            "conversions": conversions,
            "encryptionInfo": {
                "kind": "dfareporting#encryptionInfo",
                "encryptionEntityType": encryption_entity_type,
                "encryptionEntityId": encryption_entity_id,
                "encryptionSource": encryption_source,
            },
        }

    def conversions_batch_insert(
        self,
        profile_id: str,
        conversions: List[Dict[str, Any]],
        encryption_entity_type: str,
        encryption_entity_id: int,
        encryption_source: str,
        max_failed_inserts: int = 0,
    ) -> Any:
        """
        Inserts conversions.

        :param profile_id: User profile ID associated with this request.
        :param conversions: Conversations to insert, should by type of Conversation:
            https://developers.google.com/doubleclick-advertisers/v3.3/conversions#resource
        :param encryption_entity_type: The encryption entity type. This should match the encryption
            configuration for ad serving or Data Transfer.
        :param encryption_entity_id: The encryption entity ID. This should match the encryption
            configuration for ad serving or Data Transfer.
        :param encryption_source: Describes whether the encrypted cookie was received from ad serving
            (the %m macro) or from Data Transfer.
        :param max_failed_inserts: The maximum number of conversions that failed to be inserted
        :return: the batch-insert API response.
        :raises AirflowException: when more conversions failed than
            ``max_failed_inserts`` allows.
        """
        response = (
            self.get_conn()
            .conversions()
            .batchinsert(
                profileId=profile_id,
                body=self._conversions_batch_request(
                    conversions=conversions,
                    encryption_entity_type=encryption_entity_type,
                    encryption_entity_id=encryption_entity_id,
                    encryption_source=encryption_source,
                    kind="dfareporting#conversionsBatchInsertRequest",
                ),
            )
            .execute(num_retries=self.num_retries)
        )
        # The API reports partial failure via hasFailures + per-conversion
        # status entries; tolerate up to max_failed_inserts of them.
        if response.get('hasFailures', False):
            errored_conversions = [stat['errors'] for stat in response['status'] if 'errors' in stat]
            if len(errored_conversions) > max_failed_inserts:
                raise AirflowException(errored_conversions)
        return response

    def conversions_batch_update(
        self,
        profile_id: str,
        conversions: List[Dict[str, Any]],
        encryption_entity_type: str,
        encryption_entity_id: int,
        encryption_source: str,
        max_failed_updates: int = 0,
    ) -> Any:
        """
        Updates existing conversions.

        :param profile_id: User profile ID associated with this request.
        :param conversions: Conversations to update, should by type of Conversation:
            https://developers.google.com/doubleclick-advertisers/v3.3/conversions#resource
        :param encryption_entity_type: The encryption entity type. This should match the encryption
            configuration for ad serving or Data Transfer.
        :param encryption_entity_id: The encryption entity ID. This should match the encryption
            configuration for ad serving or Data Transfer.
        :param encryption_source: Describes whether the encrypted cookie was received from ad serving
            (the %m macro) or from Data Transfer.
        :param max_failed_updates: The maximum number of conversions that failed to be updated
        :return: the batch-update API response.
        :raises AirflowException: when more conversions failed than
            ``max_failed_updates`` allows.
        """
        response = (
            self.get_conn()
            .conversions()
            .batchupdate(
                profileId=profile_id,
                body=self._conversions_batch_request(
                    conversions=conversions,
                    encryption_entity_type=encryption_entity_type,
                    encryption_entity_id=encryption_entity_id,
                    encryption_source=encryption_source,
                    kind="dfareporting#conversionsBatchUpdateRequest",
                ),
            )
            .execute(num_retries=self.num_retries)
        )
        # Same partial-failure handling as conversions_batch_insert.
        if response.get('hasFailures', False):
            errored_conversions = [stat['errors'] for stat in response['status'] if 'errors' in stat]
            if len(errored_conversions) > max_failed_updates:
                raise AirflowException(errored_conversions)
        return response
| Acehaidrey/incubator-airflow | airflow/providers/google/marketing_platform/hooks/campaign_manager.py | Python | apache-2.0 | 11,618 |
from observertest import BaseObserverToscaTest
from core.models import Site, Deployment, User, ControllerUser
# Note that as a side effect, these tests will also create a Site
class ObserverUserTest(BaseObserverToscaTest):
    """Observer/TOSCA integration test: creating a User (and, as a side
    effect, a Site) via a TOSCA recipe, then checking the observer builds
    the matching ControllerUser record.

    NOTE: this file is Python 2 code (print statement below).
    """

    # Test methods executed by the BaseObserverToscaTest driver.
    tests = ["create_user"]
    # hide_observer_output = False # uncomment to display lots of stuff to screen

    def cleanup(self):
        # We don't want to leak resources, so we make sure to let the observer
        # attempt to delete these objects.
        self.try_to_delete(User, purge=False, email="johndoe@foo.bar")
        self.try_to_delete(Site, purge=False, login_base="testsite")
        self.run_observer()
        # After the observer has had a chance to tear down backend state,
        # purge the records for real.
        self.try_to_delete(User, purge=True, email="johndoe@foo.bar")
        self.try_to_delete(Site, purge=True, login_base="testsite")

    def assert_nouser(self, email):
        # Assert that no User with this email exists.
        assert(not User.objects.filter(email=email))

    def assert_user(self, email, **kwargs):
        # Assert a User with this email exists and that each given attribute
        # matches; returns the User object for further checks.
        obj = User.objects.get(email=email)
        assert(obj)
        for (k,v) in kwargs.items():
            if (getattr(obj,k,None) != v):
                print "Object %s property '%s' is '%s' and should be '%s'" % (obj, k, getattr(obj,k,None), v)
                assert(False)
        return obj

    def create_user(self):
        # Preconditions: neither the site nor the user exist yet.
        self.assert_noobj(Site, "testsite")
        self.assert_nouser("johndoe@foo.bar")
        # Apply a TOSCA recipe that declares the deployment, a site bound to
        # it, and a user who is a member of that site.
        # NOTE(review): the YAML indentation below was reconstructed; it must
        # stay consistent for the TOSCA parser — verify against the original.
        self.execute(self.make_nodetemplate(self.get_usable_deployment(), "tosca.nodes.Deployment",
                                            props={"no-delete": True}) + \
"""
    testsite:
      type: tosca.nodes.Site
      properties:
          site_url: http://opencloud.us/
      requirements:
          - deployment:
              node: %s
              relationship: tosca.relationships.SiteDeployment
              requirements:
                  - controller:
                      node: %s
                      relationship: tosca.relationships.UsesController

    johndoe@foo.bar:
      type: tosca.nodes.User
      properties:
          password: letmein
          firstname: john
          lastname: doe
      requirements:
          - site:
              node: testsite
              relationship: tosca.relationships.MemberOfSite
""" % (self.get_usable_deployment(), self.get_usable_controller()))
        testsite = self.assert_obj(Site, "testsite")
        testuser = self.assert_user("johndoe@foo.bar")
        self.run_model_policy(save_output="/tmp/usertest:create_user:model_policy")
        # make sure a ControllerSite object was created
        cu = ControllerUser.objects.filter(user=testuser)
        assert(len(cu) == 1)
        self.run_observer(save_output="/tmp/usertest:create_user:observer")
        # After the observer run the ControllerUser must carry the backend
        # (keystone) user id.
        testuser = self.assert_user("johndoe@foo.bar")
        cu = ControllerUser.objects.filter(user=testuser)
        assert(len(cu) == 1)
        assert(cu[0].kuser_id is not None)
        assert(cu[0].kuser_id != "")
if __name__ == "__main__":
    # Instantiating the test class runs the test suite (driver in the base
    # class constructor).
    ObserverUserTest()
| xmaruto/mcord | xos/tosca/tests/observerUserTest.py | Python | apache-2.0 | 2,957 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Dict, Optional
from kubernetes import client
from airflow.exceptions import AirflowException
from airflow.providers.cncf.kubernetes.hooks.kubernetes import KubernetesHook
from airflow.sensors.base import BaseSensorOperator
class SparkKubernetesSensor(BaseSensorOperator):
    """
    Checks sparkApplication object in kubernetes cluster:

    .. seealso::
        For more detail about Spark Application Object have a look at the reference:
        https://github.com/GoogleCloudPlatform/spark-on-k8s-operator/blob/v1beta2-1.1.0-2.4.5/docs/api-docs.md#sparkapplication

    :param application_name: spark Application resource name
    :type application_name: str
    :param namespace: the kubernetes namespace where the sparkApplication reside in
    :type namespace: str
    :param kubernetes_conn_id: The :ref:`kubernetes connection<howto/connection:kubernetes>`
        to Kubernetes cluster.
    :type kubernetes_conn_id: str
    :param attach_log: determines whether logs for driver pod should be appended to the sensor log
    :type attach_log: bool
    :param api_group: kubernetes api group of sparkApplication
    :type api_group: str
    :param api_version: kubernetes api version of sparkApplication
    :type api_version: str
    """

    template_fields = ("application_name", "namespace")
    # Terminal states of a SparkApplication custom resource.
    FAILURE_STATES = ("FAILED", "UNKNOWN")
    SUCCESS_STATES = ("COMPLETED",)

    def __init__(
        self,
        *,
        application_name: str,
        attach_log: bool = False,
        namespace: Optional[str] = None,
        kubernetes_conn_id: str = "kubernetes_default",
        api_group: str = 'sparkoperator.k8s.io',
        api_version: str = 'v1beta2',
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.application_name = application_name
        self.attach_log = attach_log
        self.namespace = namespace
        self.kubernetes_conn_id = kubernetes_conn_id
        # One hook per sensor instance; reused across poke() invocations.
        self.hook = KubernetesHook(conn_id=self.kubernetes_conn_id)
        self.api_group = api_group
        self.api_version = api_version

    def _log_driver(self, application_state: str, response: dict) -> None:
        """Append the driver pod's log to the sensor log, if enabled and
        the driver pod is identifiable from *response*."""
        if not self.attach_log:
            return
        status = response["status"]
        if "driverInfo" not in status:
            return
        driver = status["driverInfo"]
        if "podName" not in driver:
            return
        pod_name = driver["podName"]
        pod_namespace = response["metadata"]["namespace"]
        # Failed runs surface the driver log at ERROR level.
        if application_state in self.FAILURE_STATES:
            emit = self.log.error
        else:
            emit = self.log.info
        try:
            chunks = [raw.decode() for raw in self.hook.get_pod_logs(pod_name, namespace=pod_namespace)]
            emit("".join(chunks))
        except client.rest.ApiException as e:
            self.log.warning(
                "Could not read logs for pod %s. It may have been disposed.\n"
                "Make sure timeToLiveSeconds is set on your SparkApplication spec.\n"
                "underlying exception: %s",
                pod_name,
                e,
            )

    def poke(self, context: Dict) -> bool:
        """Return True once the SparkApplication has completed; raise on a
        terminal failure state; False while it is still running."""
        self.log.info("Poking: %s", self.application_name)
        response = self.hook.get_custom_object(
            group=self.api_group,
            version=self.api_version,
            plural="sparkapplications",
            name=self.application_name,
            namespace=self.namespace,
        )
        try:
            state = response["status"]["applicationState"]["state"]
        except KeyError:
            # Status not populated yet by the operator — keep waiting.
            return False
        if self.attach_log and state in self.FAILURE_STATES + self.SUCCESS_STATES:
            self._log_driver(state, response)
        if state in self.FAILURE_STATES:
            raise AirflowException(f"Spark application failed with state: {state}")
        if state in self.SUCCESS_STATES:
            self.log.info("Spark application ended successfully")
            return True
        self.log.info("Spark application is still in state: %s", state)
        return False
| apache/incubator-airflow | airflow/providers/cncf/kubernetes/sensors/spark_kubernetes.py | Python | apache-2.0 | 5,015 |
def __load():
    # py2exe bootstrap shim: this module stands in for the C extension
    # `_winreg`. Find the .pyd that py2exe placed next to the zip archive
    # (or fall back to sys.prefix when not running frozen) and load it
    # under this module's name.
    import imp, os, sys
    try:
        # __loader__.archive exists only inside a py2exe/zipimport bundle.
        dirname = os.path.dirname(__loader__.archive)
    except NameError:
        dirname = sys.prefix
    path = os.path.join(dirname, '_winreg.pyd')
    #print "py2exe extension module", __name__, "->", path
    mod = imp.load_dynamic(__name__, path)
##    mod.frozen = 1
# Run the loader once at import time, then remove it from the namespace so
# the module exposes only the extension's symbols.
__load()
del __load
| slozier/ironpython2 | Src/StdLib/Lib/site-packages/isapi/test/build/bdist.win32/winexe/temp/_winreg.py | Python | apache-2.0 | 344 |
#
# DEPRECATED: implementation for ffi.verify()
#
import sys, imp
from . import model
from .error import VerificationError
class VCPythonEngine(object):
_class_key = 'x'
_gen_python_module = True
def __init__(self, verifier):
self.verifier = verifier
self.ffi = verifier.ffi
self._struct_pending_verification = {}
self._types_of_builtin_functions = {}
def patch_extension_kwds(self, kwds):
pass
def find_module(self, module_name, path, so_suffixes):
try:
f, filename, descr = imp.find_module(module_name, path)
except ImportError:
return None
if f is not None:
f.close()
# Note that after a setuptools installation, there are both .py
# and .so files with the same basename. The code here relies on
# imp.find_module() locating the .so in priority.
if descr[0] not in so_suffixes:
return None
return filename
def collect_types(self):
self._typesdict = {}
self._generate("collecttype")
def _prnt(self, what=''):
self._f.write(what + '\n')
def _gettypenum(self, type):
# a KeyError here is a bug. please report it! :-)
return self._typesdict[type]
def _do_collect_type(self, tp):
if ((not isinstance(tp, model.PrimitiveType)
or tp.name == 'long double')
and tp not in self._typesdict):
num = len(self._typesdict)
self._typesdict[tp] = num
def write_source_to_f(self):
self.collect_types()
#
# The new module will have a _cffi_setup() function that receives
# objects from the ffi world, and that calls some setup code in
# the module. This setup code is split in several independent
# functions, e.g. one per constant. The functions are "chained"
# by ending in a tail call to each other.
#
# This is further split in two chained lists, depending on if we
# can do it at import-time or if we must wait for _cffi_setup() to
# provide us with the <ctype> objects. This is needed because we
# need the values of the enum constants in order to build the
# <ctype 'enum'> that we may have to pass to _cffi_setup().
#
# The following two 'chained_list_constants' items contains
# the head of these two chained lists, as a string that gives the
# call to do, if any.
self._chained_list_constants = ['((void)lib,0)', '((void)lib,0)']
#
prnt = self._prnt
# first paste some standard set of lines that are mostly '#define'
prnt(cffimod_header)
prnt()
# then paste the C source given by the user, verbatim.
prnt(self.verifier.preamble)
prnt()
#
# call generate_cpy_xxx_decl(), for every xxx found from
# ffi._parser._declarations. This generates all the functions.
self._generate("decl")
#
# implement the function _cffi_setup_custom() as calling the
# head of the chained list.
self._generate_setup_custom()
prnt()
#
# produce the method table, including the entries for the
# generated Python->C function wrappers, which are done
# by generate_cpy_function_method().
prnt('static PyMethodDef _cffi_methods[] = {')
self._generate("method")
prnt(' {"_cffi_setup", _cffi_setup, METH_VARARGS, NULL},')
prnt(' {NULL, NULL, 0, NULL} /* Sentinel */')
prnt('};')
prnt()
#
# standard init.
modname = self.verifier.get_module_name()
constants = self._chained_list_constants[False]
prnt('#if PY_MAJOR_VERSION >= 3')
prnt()
prnt('static struct PyModuleDef _cffi_module_def = {')
prnt(' PyModuleDef_HEAD_INIT,')
prnt(' "%s",' % modname)
prnt(' NULL,')
prnt(' -1,')
prnt(' _cffi_methods,')
prnt(' NULL, NULL, NULL, NULL')
prnt('};')
prnt()
prnt('PyMODINIT_FUNC')
prnt('PyInit_%s(void)' % modname)
prnt('{')
prnt(' PyObject *lib;')
prnt(' lib = PyModule_Create(&_cffi_module_def);')
prnt(' if (lib == NULL)')
prnt(' return NULL;')
prnt(' if (%s < 0 || _cffi_init() < 0) {' % (constants,))
prnt(' Py_DECREF(lib);')
prnt(' return NULL;')
prnt(' }')
prnt(' return lib;')
prnt('}')
prnt()
prnt('#else')
prnt()
prnt('PyMODINIT_FUNC')
prnt('init%s(void)' % modname)
prnt('{')
prnt(' PyObject *lib;')
prnt(' lib = Py_InitModule("%s", _cffi_methods);' % modname)
prnt(' if (lib == NULL)')
prnt(' return;')
prnt(' if (%s < 0 || _cffi_init() < 0)' % (constants,))
prnt(' return;')
prnt(' return;')
prnt('}')
prnt()
prnt('#endif')
def load_library(self, flags=None):
# XXX review all usages of 'self' here!
# import it as a new extension module
imp.acquire_lock()
try:
if hasattr(sys, "getdlopenflags"):
previous_flags = sys.getdlopenflags()
try:
if hasattr(sys, "setdlopenflags") and flags is not None:
sys.setdlopenflags(flags)
module = imp.load_dynamic(self.verifier.get_module_name(),
self.verifier.modulefilename)
except ImportError as e:
error = "importing %r: %s" % (self.verifier.modulefilename, e)
raise VerificationError(error)
finally:
if hasattr(sys, "setdlopenflags"):
sys.setdlopenflags(previous_flags)
finally:
imp.release_lock()
#
# call loading_cpy_struct() to get the struct layout inferred by
# the C compiler
self._load(module, 'loading')
#
# the C code will need the <ctype> objects. Collect them in
# order in a list.
revmapping = dict([(value, key)
for (key, value) in self._typesdict.items()])
lst = [revmapping[i] for i in range(len(revmapping))]
lst = list(map(self.ffi._get_cached_btype, lst))
#
# build the FFILibrary class and instance and call _cffi_setup().
# this will set up some fields like '_cffi_types', and only then
# it will invoke the chained list of functions that will really
# build (notably) the constant objects, as <cdata> if they are
# pointers, and store them as attributes on the 'library' object.
class FFILibrary(object):
_cffi_python_module = module
_cffi_ffi = self.ffi
_cffi_dir = []
def __dir__(self):
return FFILibrary._cffi_dir + list(self.__dict__)
library = FFILibrary()
if module._cffi_setup(lst, VerificationError, library):
import warnings
warnings.warn("reimporting %r might overwrite older definitions"
% (self.verifier.get_module_name()))
#
# finally, call the loaded_cpy_xxx() functions. This will perform
# the final adjustments, like copying the Python->C wrapper
# functions from the module to the 'library' object, and setting
# up the FFILibrary class with properties for the global C variables.
self._load(module, 'loaded', library=library)
module._cffi_original_ffi = self.ffi
module._cffi_types_of_builtin_funcs = self._types_of_builtin_functions
return library
def _get_declarations(self):
    """Return the parser's declarations as a list of (name, type) pairs,
    sorted by name.

    The parser stores each declaration as name -> (type, qualifiers);
    the qualifier part is not needed by the code generator and is dropped.
    """
    declarations = self.ffi._parser._declarations
    return sorted((name, tp) for (name, (tp, _qual)) in declarations.items())
def _generate(self, step_name):
    # Run one generation pass over every declaration.  For a declaration
    # named "function foo" and step "decl", this dispatches to
    # self._generate_cpy_function_decl(tp, "foo").
    for name, tp in self._get_declarations():
        kind, realname = name.split(' ', 1)
        try:
            method = getattr(self, '_generate_cpy_%s_%s' % (kind,
                                                            step_name))
        except AttributeError:
            raise VerificationError(
                "not implemented in verify(): %r" % name)
        try:
            method(tp, realname)
        except Exception as e:
            # Tag the exception with the declaration being processed so
            # that error messages point at the offending cdef entry.
            model.attach_exception_info(e, name)
            raise
def _load(self, module, step_name, **kwds):
    # Mirror of _generate() for the load phase: dispatch to
    # self._<step_name>_cpy_<kind>(tp, realname, module, **kwds) for each
    # declaration; step_name is "loading" or "loaded".
    for name, tp in self._get_declarations():
        kind, realname = name.split(' ', 1)
        method = getattr(self, '_%s_cpy_%s' % (step_name, kind))
        try:
            method(tp, realname, module, **kwds)
        except Exception as e:
            # Attach the declaration name for better error reporting.
            model.attach_exception_info(e, name)
            raise

def _generate_nothing(self, tp, name):
    # Shared no-op for generation steps that emit no C code.
    pass

def _loaded_noop(self, tp, name, module, **kwds):
    # Shared no-op for load steps that require no work.
    pass
# ----------
def _convert_funcarg_to_c(self, tp, fromvar, tovar, errcode):
    """Emit C code converting PyObject *'fromvar' into the C variable
    'tovar' of declared type 'tp'.  'errcode' is the C statement to
    execute on conversion failure (e.g. "return NULL").
    """
    extraarg = ''
    if isinstance(tp, model.PrimitiveType):
        if tp.is_integer_type() and tp.name != '_Bool':
            # All integer sizes funnel through the _cffi_to_c_int macro,
            # which picks the right converter from sizeof(type).
            converter = '_cffi_to_c_int'
            extraarg = ', %s' % tp.name
        else:
            converter = '(%s)_cffi_to_c_%s' % (tp.get_c_name(''),
                                               tp.name.replace(' ', '_'))
        errvalue = '-1'
    #
    elif isinstance(tp, model.PointerType):
        self._convert_funcarg_to_c_ptr_or_array(tp, fromvar,
                                                tovar, errcode)
        return
    #
    elif isinstance(tp, (model.StructOrUnion, model.EnumType)):
        # a struct (not a struct pointer) as a function argument
        self._prnt(' if (_cffi_to_c((char *)&%s, _cffi_type(%d), %s) < 0)'
                   % (tovar, self._gettypenum(tp), fromvar))
        self._prnt(' %s;' % errcode)
        return
    #
    elif isinstance(tp, model.FunctionPtrType):
        converter = '(%s)_cffi_to_c_pointer' % tp.get_c_name('')
        extraarg = ', _cffi_type(%d)' % self._gettypenum(tp)
        errvalue = 'NULL'
    #
    else:
        raise NotImplementedError(tp)
    #
    # Error detection follows the CPython convention: the converter
    # returned 'errvalue' AND left an exception set.
    self._prnt(' %s = %s(%s%s);' % (tovar, converter, fromvar, extraarg))
    self._prnt(' if (%s == (%s)%s && PyErr_Occurred())' % (
        tovar, tp.get_c_name(''), errvalue))
    self._prnt(' %s;' % errcode)

def _extra_local_variables(self, tp, localvars, freelines):
    # Pointer arguments need a 'datasize' local plus bookkeeping for
    # possibly heap-allocated temporary buffers (freed via 'freelines').
    if isinstance(tp, model.PointerType):
        localvars.add('Py_ssize_t datasize')
        localvars.add('struct _cffi_freeme_s *large_args_free = NULL')
        freelines.add('if (large_args_free != NULL)'
                      ' _cffi_free_array_arguments(large_args_free);')

def _convert_funcarg_to_c_ptr_or_array(self, tp, fromvar, tovar, errcode):
    # Emit C code for a pointer/array argument: small temporary buffers
    # are stack-allocated (alloca), larger ones go on the heap and are
    # chained onto 'large_args_free' for later release.
    self._prnt(' datasize = _cffi_prepare_pointer_call_argument(')
    self._prnt(' _cffi_type(%d), %s, (char **)&%s);' % (
        self._gettypenum(tp), fromvar, tovar))
    self._prnt(' if (datasize != 0) {')
    self._prnt(' %s = ((size_t)datasize) <= 640 ? '
               'alloca((size_t)datasize) : NULL;' % (tovar,))
    self._prnt(' if (_cffi_convert_array_argument(_cffi_type(%d), %s, '
               '(char **)&%s,' % (self._gettypenum(tp), fromvar, tovar))
    self._prnt(' datasize, &large_args_free) < 0)')
    self._prnt(' %s;' % errcode)
    self._prnt(' }')

def _convert_expr_from_c(self, tp, var, context):
    """Return a C expression that converts the C value 'var' of type
    'tp' into a new PyObject reference.  'context' is only used in
    error messages."""
    if isinstance(tp, model.PrimitiveType):
        if tp.is_integer_type() and tp.name != '_Bool':
            return '_cffi_from_c_int(%s, %s)' % (var, tp.name)
        elif tp.name != 'long double':
            return '_cffi_from_c_%s(%s)' % (tp.name.replace(' ', '_'), var)
        else:
            # 'long double' cannot pass through a plain double; box it
            # by address instead.
            return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % (
                var, self._gettypenum(tp))
    elif isinstance(tp, (model.PointerType, model.FunctionPtrType)):
        return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % (
            var, self._gettypenum(tp))
    elif isinstance(tp, model.ArrayType):
        # Arrays decay to a pointer to their first item.
        return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % (
            var, self._gettypenum(model.PointerType(tp.item)))
    elif isinstance(tp, model.StructOrUnion):
        if tp.fldnames is None:
            raise TypeError("'%s' is used as %s, but is opaque" % (
                tp._get_c_name(), context))
        return '_cffi_from_c_struct((char *)&%s, _cffi_type(%d))' % (
            var, self._gettypenum(tp))
    elif isinstance(tp, model.EnumType):
        return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % (
            var, self._gettypenum(tp))
    else:
        raise NotImplementedError(tp)
# ----------
# typedefs: generates no code so far
# Typedefs generate no C code and need no load-time work; alias every
# step to the shared no-ops.
_generate_cpy_typedef_collecttype = _generate_nothing
_generate_cpy_typedef_decl = _generate_nothing
_generate_cpy_typedef_method = _generate_nothing
_loading_cpy_typedef = _loaded_noop
_loaded_cpy_typedef = _loaded_noop

# ----------
# function declarations

def _generate_cpy_function_collecttype(self, tp, name):
    # Record the <ctype> objects that this function's wrapper will need
    # at runtime.
    assert isinstance(tp, model.FunctionPtrType)
    if tp.ellipsis:
        self._do_collect_type(tp)
    else:
        # don't call _do_collect_type(tp) in this common case,
        # otherwise test_autofilled_struct_as_argument fails
        for type in tp.args:
            self._do_collect_type(type)
        self._do_collect_type(tp.result)
def _generate_cpy_function_decl(self, tp, name):
    """Emit the CPython wrapper _cffi_f_<name> for the C function 'name'.

    The wrapper unpacks the Python-level arguments, converts each one to
    its C type, calls the real C function with the GIL released, and
    converts the result back into a PyObject.
    """
    assert isinstance(tp, model.FunctionPtrType)
    if tp.ellipsis:
        # cannot support vararg functions better than this: check for its
        # exact type (including the fixed arguments), and build it as a
        # constant function pointer (no CPython wrapper)
        self._generate_cpy_const(False, name, tp)
        return
    prnt = self._prnt
    numargs = len(tp.args)
    # The parameter name matches the calling convention chosen in
    # _generate_cpy_function_method: METH_NOARGS / METH_O / METH_VARARGS.
    if numargs == 0:
        argname = 'noarg'
    elif numargs == 1:
        argname = 'arg0'
    else:
        argname = 'args'
    prnt('static PyObject *')
    prnt('_cffi_f_%s(PyObject *self, PyObject *%s)' % (name, argname))
    prnt('{')
    #
    # One C local x<i> per argument, of the declared C type.
    context = 'argument of %s' % name
    for i, type in enumerate(tp.args):
        prnt(' %s;' % type.get_c_name(' x%d' % i, context))
    #
    # Extra locals needed by pointer arguments (datasize bookkeeping).
    localvars = set()
    freelines = set()
    for type in tp.args:
        self._extra_local_variables(type, localvars, freelines)
    for decl in sorted(localvars):
        prnt(' %s;' % (decl,))
    #
    if not isinstance(tp.result, model.VoidType):
        result_code = 'result = '
        context = 'result of %s' % name
        prnt(' %s;' % tp.result.get_c_name(' result', context))
        prnt(' PyObject *pyresult;')
    else:
        result_code = ''
    #
    # METH_VARARGS case: unpack the argument tuple into arg0..argN-1.
    if len(tp.args) > 1:
        rng = range(len(tp.args))
        for i in rng:
            prnt(' PyObject *arg%d;' % i)
        prnt()
        prnt(' if (!PyArg_ParseTuple(args, "%s:%s", %s))' % (
            'O' * numargs, name, ', '.join(['&arg%d' % i for i in rng])))
        prnt(' return NULL;')
        prnt()
    #
    # Convert each PyObject argument into its C counterpart x<i>.
    for i, type in enumerate(tp.args):
        self._convert_funcarg_to_c(type, 'arg%d' % i, 'x%d' % i,
                                   'return NULL')
        prnt()
    #
    # Call the real C function with the GIL released; errno is
    # saved/restored around the call so Python-level activity cannot
    # clobber the value the caller may want to inspect.
    prnt(' Py_BEGIN_ALLOW_THREADS')
    prnt(' _cffi_restore_errno();')
    prnt(' { %s%s(%s); }' % (
        result_code, name,
        ', '.join(['x%d' % i for i in range(len(tp.args))])))
    prnt(' _cffi_save_errno();')
    prnt(' Py_END_ALLOW_THREADS')
    prnt()
    #
    prnt(' (void)self; /* unused */')
    if numargs == 0:
        prnt(' (void)noarg; /* unused */')
    if result_code:
        prnt(' pyresult = %s;' %
             self._convert_expr_from_c(tp.result, 'result', 'result type'))
        # Free any heap-allocated argument buffers before returning.
        for freeline in freelines:
            prnt(' ' + freeline)
        prnt(' return pyresult;')
    else:
        for freeline in freelines:
            prnt(' ' + freeline)
        prnt(' Py_INCREF(Py_None);')
        prnt(' return Py_None;')
    prnt('}')
    prnt()
def _generate_cpy_function_method(self, tp, name):
    """Emit the PyMethodDef table entry for the wrapper of 'name'.

    Vararg (ellipsis) functions get no CPython wrapper, hence no entry.
    The METH_* flag is chosen from the function's arity.
    """
    if tp.ellipsis:
        return
    flag_by_arity = {0: 'METH_NOARGS', 1: 'METH_O'}
    meth = flag_by_arity.get(len(tp.args), 'METH_VARARGS')
    self._prnt(' {"%s", _cffi_f_%s, %s, NULL},' % (name, name, meth))
_loading_cpy_function = _loaded_noop

def _loaded_cpy_function(self, tp, name, module, library):
    # Copy the compiled wrapper from the extension module onto the
    # library object.  Vararg functions were exposed as constants
    # instead and need no copying here.
    if tp.ellipsis:
        return
    func = getattr(module, name)
    setattr(library, name, func)
    # Remember the cdef signature so ffi.typeof(func) keeps working.
    self._types_of_builtin_functions[func] = tp
# ----------
# named structs
#
# Structs and unions share one implementation, parameterized by the
# 'struct'/'union' keyword prefix; these are thin wrappers around it.

_generate_cpy_struct_collecttype = _generate_nothing

def _generate_cpy_struct_decl(self, tp, name):
    assert name == tp.name
    self._generate_struct_or_union_decl(tp, 'struct', name)

def _generate_cpy_struct_method(self, tp, name):
    self._generate_struct_or_union_method(tp, 'struct', name)

def _loading_cpy_struct(self, tp, name, module):
    self._loading_struct_or_union(tp, 'struct', name, module)

def _loaded_cpy_struct(self, tp, name, module, **kwds):
    self._loaded_struct_or_union(tp)

_generate_cpy_union_collecttype = _generate_nothing

def _generate_cpy_union_decl(self, tp, name):
    assert name == tp.name
    self._generate_struct_or_union_decl(tp, 'union', name)

def _generate_cpy_union_method(self, tp, name):
    self._generate_struct_or_union_method(tp, 'union', name)

def _loading_cpy_union(self, tp, name, module):
    self._loading_struct_or_union(tp, 'union', name, module)

def _loaded_cpy_union(self, tp, name, module, **kwds):
    self._loaded_struct_or_union(tp)
def _generate_struct_or_union_decl(self, tp, prefix, name):
    """Emit two C helpers for a non-opaque struct/union:

    - _cffi_check_<prefix>_<name>(): never executed, but compiling it
      produces warnings/errors if the cdef'ed fields do not match the
      real declaration seen by the C compiler;
    - _cffi_layout_<prefix>_<name>(): returns the size, alignment and
      per-field offsets/sizes actually computed by the C compiler,
      checked against ffi's own layout at load time.
    """
    if tp.fldnames is None:
        return # nothing to do with opaque structs
    checkfuncname = '_cffi_check_%s_%s' % (prefix, name)
    layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name)
    cname = ('%s %s' % (prefix, name)).strip()
    #
    prnt = self._prnt
    prnt('static void %s(%s *p)' % (checkfuncname, cname))
    prnt('{')
    prnt(' /* only to generate compile-time warnings or errors */')
    prnt(' (void)p;')
    for fname, ftype, fbitsize, fqual in tp.enumfields():
        if (isinstance(ftype, model.PrimitiveType)
            and ftype.is_integer_type()) or fbitsize >= 0:
            # accept all integers, but complain on float or double
            prnt(' (void)((p->%s) << 1);' % fname)
        else:
            # only accept exactly the type declared.
            try:
                prnt(' { %s = &p->%s; (void)tmp; }' % (
                    ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual),
                    fname))
            except VerificationError as e:
                prnt(' /* %s */' % str(e)) # cannot verify it, ignore
    prnt('}')
    prnt('static PyObject *')
    prnt('%s(PyObject *self, PyObject *noarg)' % (layoutfuncname,))
    prnt('{')
    # The 'char x; <type> y;' trick measures the type's alignment.
    prnt(' struct _cffi_aligncheck { char x; %s y; };' % cname)
    prnt(' static Py_ssize_t nums[] = {')
    prnt(' sizeof(%s),' % cname)
    prnt(' offsetof(struct _cffi_aligncheck, y),')
    for fname, ftype, fbitsize, fqual in tp.enumfields():
        if fbitsize >= 0:
            continue # xxx ignore fbitsize for now
        prnt(' offsetof(%s, %s),' % (cname, fname))
        if isinstance(ftype, model.ArrayType) and ftype.length is None:
            # open-ended array: size unknown, marked as 0
            prnt(' 0, /* %s */' % ftype._get_c_name())
        else:
            prnt(' sizeof(((%s *)0)->%s),' % (cname, fname))
    prnt(' -1')
    prnt(' };')
    prnt(' (void)self; /* unused */')
    prnt(' (void)noarg; /* unused */')
    prnt(' return _cffi_get_struct_layout(nums);')
    prnt(' /* the next line is not executed, but compiled */')
    prnt(' %s(0);' % (checkfuncname,))
    prnt('}')
    prnt()
def _generate_struct_or_union_method(self, tp, prefix, name):
    """Register the layout-probe function of a struct/union in the
    module's method table.  Opaque types have no layout function."""
    if tp.fldnames is None:
        return
    probe = '_cffi_layout_%s_%s' % (prefix, name)
    self._prnt(' {"%s", %s, METH_NOARGS, NULL},' % (probe, probe))
def _loading_struct_or_union(self, tp, prefix, name, module):
    """Call the generated layout-probe function and record its result.

    For partial ('...') structs the compiler-computed layout is forced
    onto 'tp'; for fully-specified ones the layout is stored for a
    cross-check in _loaded_struct_or_union().
    """
    if tp.fldnames is None:
        return # nothing to do with opaque structs
    layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name)
    #
    function = getattr(module, layoutfuncname)
    layout = function()
    if isinstance(tp, model.StructOrUnion) and tp.partial:
        # use the function()'s sizes and offsets to guide the
        # layout of the struct
        totalsize = layout[0]
        totalalignment = layout[1]
        fieldofs = layout[2::2]
        fieldsize = layout[3::2]
        tp.force_flatten()
        assert len(fieldofs) == len(fieldsize) == len(tp.fldnames)
        tp.fixedlayout = fieldofs, fieldsize, totalsize, totalalignment
    else:
        cname = ('%s %s' % (prefix, name)).strip()
        self._struct_pending_verification[tp] = layout, cname

def _loaded_struct_or_union(self, tp):
    # For non-partial structs: verify that the layout ffi computed from
    # the cdef agrees with the one measured by the C compiler.
    if tp.fldnames is None:
        return # nothing to do with opaque structs
    self.ffi._get_cached_btype(tp) # force 'fixedlayout' to be considered
    if tp in self._struct_pending_verification:
        # check that the layout sizes and offsets match the real ones
        def check(realvalue, expectedvalue, msg):
            if realvalue != expectedvalue:
                raise VerificationError(
                    "%s (we have %d, but C compiler says %d)"
                    % (msg, expectedvalue, realvalue))
        ffi = self.ffi
        BStruct = ffi._get_cached_btype(tp)
        layout, cname = self._struct_pending_verification.pop(tp)
        check(layout[0], ffi.sizeof(BStruct), "wrong total size")
        check(layout[1], ffi.alignof(BStruct), "wrong total alignment")
        # layout[2:] alternates (offset, size) per non-bitfield field.
        i = 2
        for fname, ftype, fbitsize, fqual in tp.enumfields():
            if fbitsize >= 0:
                continue # xxx ignore fbitsize for now
            check(layout[i], ffi.offsetof(BStruct, fname),
                  "wrong offset for field %r" % (fname,))
            if layout[i+1] != 0:
                # size 0 means an open-ended array: nothing to compare
                BField = ffi._get_cached_btype(ftype)
                check(layout[i+1], ffi.sizeof(BField),
                      "wrong size for field %r" % (fname,))
            i += 2
        assert i == len(layout)
# ----------
# 'anonymous' declarations.  These are produced for anonymous structs
# or unions; the 'name' is obtained by a typedef.

_generate_cpy_anonymous_collecttype = _generate_nothing

def _generate_cpy_anonymous_decl(self, tp, name):
    # Anonymous enums take the enum code path with an empty prefix;
    # everything else is a struct/union with an empty keyword prefix.
    if isinstance(tp, model.EnumType):
        self._generate_cpy_enum_decl(tp, name, '')
    else:
        self._generate_struct_or_union_decl(tp, '', name)

def _generate_cpy_anonymous_method(self, tp, name):
    # Enums register no layout probe; structs/unions do.
    if not isinstance(tp, model.EnumType):
        self._generate_struct_or_union_method(tp, '', name)

def _loading_cpy_anonymous(self, tp, name, module):
    if isinstance(tp, model.EnumType):
        self._loading_cpy_enum(tp, name, module)
    else:
        self._loading_struct_or_union(tp, '', name, module)

def _loaded_cpy_anonymous(self, tp, name, module, **kwds):
    if isinstance(tp, model.EnumType):
        self._loaded_cpy_enum(tp, name, module, **kwds)
    else:
        self._loaded_struct_or_union(tp)
# ----------
# constants, likely declared with '#define'
def _generate_cpy_const(self, is_int, name, tp=None, category='const',
                        vartp=None, delayed=True, size_too=False,
                        check_value=None):
    """Emit _cffi_<category>_<name>(lib): a C initializer that computes
    the constant/variable 'name', wraps it as a PyObject, and stores it
    as an attribute on the library object.

    The generated functions form a chain (each one tail-calls the
    previously registered one through _chained_list_constants) so that
    one call runs them all; 'delayed' selects which of the two chains
    the function joins.  'size_too' additionally ships sizeof(name),
    used for global arrays of unknown ('...') length.
    """
    prnt = self._prnt
    funcname = '_cffi_%s_%s' % (category, name)
    prnt('static int %s(PyObject *lib)' % funcname)
    prnt('{')
    prnt(' PyObject *o;')
    prnt(' int res;')
    if not is_int:
        prnt(' %s;' % (vartp or tp).get_c_name(' i', name))
    else:
        assert category == 'const'
    #
    if check_value is not None:
        # Also verify at import time that the compiler agrees on the value.
        self._check_int_constant_value(name, check_value)
    #
    if not is_int:
        # 'var' category exposes the address of the global variable.
        if category == 'var':
            realexpr = '&' + name
        else:
            realexpr = name
        prnt(' i = (%s);' % (realexpr,))
        prnt(' o = %s;' % (self._convert_expr_from_c(tp, 'i',
                                                     'variable type'),))
        assert delayed
    else:
        prnt(' o = _cffi_from_c_int_const(%s);' % name)
    prnt(' if (o == NULL)')
    prnt(' return -1;')
    if size_too:
        # Replace o with the tuple (o, sizeof(name)).
        prnt(' {')
        prnt(' PyObject *o1 = o;')
        prnt(' o = Py_BuildValue("On", o1, (Py_ssize_t)sizeof(%s));'
             % (name,))
        prnt(' Py_DECREF(o1);')
        prnt(' if (o == NULL)')
        prnt(' return -1;')
        prnt(' }')
    prnt(' res = PyObject_SetAttrString(lib, "%s", o);' % name)
    prnt(' Py_DECREF(o);')
    prnt(' if (res < 0)')
    prnt(' return -1;')
    # Chain into the list of initializers and register ourselves as the
    # new head of that chain.
    prnt(' return %s;' % self._chained_list_constants[delayed])
    self._chained_list_constants[delayed] = funcname + '(lib)'
    prnt('}')
    prnt()
def _generate_cpy_constant_collecttype(self, tp, name):
    # Only non-integer constants need their <ctype> object at runtime;
    # integers are built directly from the C value.
    is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type()
    if not is_int:
        self._do_collect_type(tp)

def _generate_cpy_constant_decl(self, tp, name):
    is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type()
    self._generate_cpy_const(is_int, name, tp)

_generate_cpy_constant_method = _generate_nothing
_loading_cpy_constant = _loaded_noop
_loaded_cpy_constant = _loaded_noop
# ----------
# enums
def _check_int_constant_value(self, name, value, err_prefix=''):
    """Emit C code that checks, at import time, that the compiler's
    value of the integer constant 'name' equals 'value'; on mismatch
    the generated code raises VerificationError and returns -1."""
    prnt = self._prnt
    # Non-positive expected values are compared as signed longs,
    # positive ones as unsigned longs.
    if value <= 0:
        prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % (
            name, name, value))
    else:
        prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % (
            name, name, value))
    for line in [
        ' char buf[64];',
        ' if ((%s) <= 0)' % name,
        ' snprintf(buf, 63, "%%ld", (long)(%s));' % name,
        ' else',
        ' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % name,
        ' PyErr_Format(_cffi_VerificationError,',
        ' "%s%s has the real value %s, not %s",',
        ' "%s", "%s", buf, "%d");' % (err_prefix, name, value),
        ' return -1;',
        ' }',
    ]:
        prnt(line)
def _enum_funcname(self, prefix, name):
    """Mangle an enum name into a valid C identifier.

    Anonymous enums get parser-invented names containing '$'
    (e.g. "$enum_$1"), which is not legal in C; every '$' becomes
    the marker '___D_'.
    """
    return '_cffi_e_%s_%s' % (prefix, name.replace('$', '___D_'))
def _generate_cpy_enum_decl(self, tp, name, prefix='enum'):
    """Emit the import-time check that every enumerator of a complete
    enum has the value the cdef parser computed.  Partial ('...') enums
    are instead exported as plain integer constants."""
    if tp.partial:
        for enumerator in tp.enumerators:
            self._generate_cpy_const(True, enumerator, delayed=False)
        return
    #
    funcname = self._enum_funcname(prefix, name)
    prnt = self._prnt
    prnt('static int %s(PyObject *lib)' % funcname)
    prnt('{')
    for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues):
        self._check_int_constant_value(enumerator, enumvalue,
                                       "enum %s: " % name)
    # Chain into the 'delayed' list of initializers (see
    # _generate_cpy_const for the chaining scheme).
    prnt(' return %s;' % self._chained_list_constants[True])
    self._chained_list_constants[True] = funcname + '(lib)'
    prnt('}')
    prnt()

_generate_cpy_enum_collecttype = _generate_nothing
_generate_cpy_enum_method = _generate_nothing

def _loading_cpy_enum(self, tp, name, module):
    # For a partial enum, read back the real enumerator values that the
    # C compiler assigned (they were exported as constants above).
    if tp.partial:
        enumvalues = [getattr(module, enumerator)
                      for enumerator in tp.enumerators]
        tp.enumvalues = tuple(enumvalues)
        tp.partial_resolved = True

def _loaded_cpy_enum(self, tp, name, module, library):
    # Expose each enumerator as an integer attribute of the library.
    for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues):
        setattr(library, enumerator, enumvalue)
# ----------
# macros: for now only for integers
def _generate_cpy_macro_decl(self, tp, name):
    # Macros are only supported as integer constants.  'tp' is either
    # the literal string '...' (value unknown at cdef time: just export
    # it) or the integer value from the cdef, which is then verified at
    # import time.
    if tp == '...':
        check_value = None
    else:
        check_value = tp # an integer
    self._generate_cpy_const(True, name, check_value=check_value)

_generate_cpy_macro_collecttype = _generate_nothing
_generate_cpy_macro_method = _generate_nothing
_loading_cpy_macro = _loaded_noop
_loaded_cpy_macro = _loaded_noop
# ----------
# global variables
def _generate_cpy_variable_collecttype(self, tp, name):
    # A global variable is always manipulated through a pointer to it
    # (or to its first item, for arrays); collect that pointer type.
    if isinstance(tp, model.ArrayType):
        tp_ptr = model.PointerType(tp.item)
    else:
        tp_ptr = model.PointerType(tp)
    self._do_collect_type(tp_ptr)

def _generate_cpy_variable_decl(self, tp, name):
    if isinstance(tp, model.ArrayType):
        # Arrays decay: export the array itself as an item pointer.
        # For '...'-length arrays also ship sizeof() so the real length
        # can be recovered at load time (see _loaded_cpy_variable).
        tp_ptr = model.PointerType(tp.item)
        self._generate_cpy_const(False, name, tp, vartp=tp_ptr,
                                 size_too = (tp.length == '...'))
    else:
        # Scalars are exported by address, category 'var'.
        tp_ptr = model.PointerType(tp)
        self._generate_cpy_const(False, name, tp_ptr, category='var')

_generate_cpy_variable_method = _generate_nothing
_loading_cpy_variable = _loaded_noop
def _loaded_cpy_variable(self, tp, name, module, library):
    """Fix up the library attribute for the global variable 'name'.

    Arrays: replace the exported <cdata 'type *'> by a
    <cdata 'type[N]'> once N is known.  Scalars: replace the plain
    attribute by a class-level property that reads/writes through the
    exported pointer, so assignments reach the real C global.
    """
    value = getattr(library, name)
    if isinstance(tp, model.ArrayType): # int a[5] is "constant" in the
                                        # sense that "a=..." is forbidden
        if tp.length == '...':
            # '...'-length arrays were exported as (pointer, sizeof);
            # recover the real length from the total byte size.
            assert isinstance(value, tuple)
            (value, size) = value
            BItemType = self.ffi._get_cached_btype(tp.item)
            length, rest = divmod(size, self.ffi.sizeof(BItemType))
            if rest != 0:
                raise VerificationError(
                    "bad size: %r does not seem to be an array of %s" %
                    (name, tp.item))
            tp = tp.resolve_length(length)
        # 'value' is a <cdata 'type *'> which we have to replace with
        # a <cdata 'type[N]'> if the N is actually known
        if tp.length is not None:
            BArray = self.ffi._get_cached_btype(tp)
            value = self.ffi.cast(BArray, value)
            setattr(library, name, value)
        return
    # remove ptr=<cdata 'int *'> from the library instance, and replace
    # it by a property on the class, which reads/writes into ptr[0].
    ptr = value
    delattr(library, name)
    def getter(library):
        return ptr[0]
    def setter(library, value):
        ptr[0] = value
    setattr(type(library), name, property(getter, setter))
    # keep the name visible in dir(library) despite the delattr above
    type(library)._cffi_dir.append(name)
# ----------
def _generate_setup_custom(self):
    """Emit _cffi_setup_custom(), whose single statement chains through
    every delayed constant-initializer registered so far."""
    emit = self._prnt
    for line in ('static int _cffi_setup_custom(PyObject *lib)',
                 '{',
                 ' return %s;' % self._chained_list_constants[True],
                 '}'):
        emit(line)
# Verbatim C template prepended to every generated module: typedef
# shims for old MSVC, the _cffi_from_c_* / _cffi_to_c_* macros (thin
# wrappers around the function table exported by _cffi_backend), and
# the _cffi_setup() / _cffi_init() plumbing.  Note the warning below:
# the '#if' block must stay identical across the listed files.
cffimod_header = r'''
#include <Python.h>
#include <stddef.h>
/* this block of #ifs should be kept exactly identical between
c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py
and cffi/_cffi_include.h */
#if defined(_MSC_VER)
# include <malloc.h> /* for alloca() */
# if _MSC_VER < 1600 /* MSVC < 2010 */
typedef __int8 int8_t;
typedef __int16 int16_t;
typedef __int32 int32_t;
typedef __int64 int64_t;
typedef unsigned __int8 uint8_t;
typedef unsigned __int16 uint16_t;
typedef unsigned __int32 uint32_t;
typedef unsigned __int64 uint64_t;
typedef __int8 int_least8_t;
typedef __int16 int_least16_t;
typedef __int32 int_least32_t;
typedef __int64 int_least64_t;
typedef unsigned __int8 uint_least8_t;
typedef unsigned __int16 uint_least16_t;
typedef unsigned __int32 uint_least32_t;
typedef unsigned __int64 uint_least64_t;
typedef __int8 int_fast8_t;
typedef __int16 int_fast16_t;
typedef __int32 int_fast32_t;
typedef __int64 int_fast64_t;
typedef unsigned __int8 uint_fast8_t;
typedef unsigned __int16 uint_fast16_t;
typedef unsigned __int32 uint_fast32_t;
typedef unsigned __int64 uint_fast64_t;
typedef __int64 intmax_t;
typedef unsigned __int64 uintmax_t;
# else
# include <stdint.h>
# endif
# if _MSC_VER < 1800 /* MSVC < 2013 */
# ifndef __cplusplus
typedef unsigned char _Bool;
# endif
# endif
#else
# include <stdint.h>
# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) || defined(__hpux)
# include <alloca.h>
# endif
#endif
#if PY_MAJOR_VERSION < 3
# undef PyCapsule_CheckExact
# undef PyCapsule_GetPointer
# define PyCapsule_CheckExact(capsule) (PyCObject_Check(capsule))
# define PyCapsule_GetPointer(capsule, name) \
(PyCObject_AsVoidPtr(capsule))
#endif
#if PY_MAJOR_VERSION >= 3
# define PyInt_FromLong PyLong_FromLong
#endif
#define _cffi_from_c_double PyFloat_FromDouble
#define _cffi_from_c_float PyFloat_FromDouble
#define _cffi_from_c_long PyInt_FromLong
#define _cffi_from_c_ulong PyLong_FromUnsignedLong
#define _cffi_from_c_longlong PyLong_FromLongLong
#define _cffi_from_c_ulonglong PyLong_FromUnsignedLongLong
#define _cffi_from_c__Bool PyBool_FromLong
#define _cffi_to_c_double PyFloat_AsDouble
#define _cffi_to_c_float PyFloat_AsDouble
#define _cffi_from_c_int_const(x) \
(((x) > 0) ? \
((unsigned long long)(x) <= (unsigned long long)LONG_MAX) ? \
PyInt_FromLong((long)(x)) : \
PyLong_FromUnsignedLongLong((unsigned long long)(x)) : \
((long long)(x) >= (long long)LONG_MIN) ? \
PyInt_FromLong((long)(x)) : \
PyLong_FromLongLong((long long)(x)))
#define _cffi_from_c_int(x, type) \
(((type)-1) > 0 ? /* unsigned */ \
(sizeof(type) < sizeof(long) ? \
PyInt_FromLong((long)x) : \
sizeof(type) == sizeof(long) ? \
PyLong_FromUnsignedLong((unsigned long)x) : \
PyLong_FromUnsignedLongLong((unsigned long long)x)) : \
(sizeof(type) <= sizeof(long) ? \
PyInt_FromLong((long)x) : \
PyLong_FromLongLong((long long)x)))
#define _cffi_to_c_int(o, type) \
((type)( \
sizeof(type) == 1 ? (((type)-1) > 0 ? (type)_cffi_to_c_u8(o) \
: (type)_cffi_to_c_i8(o)) : \
sizeof(type) == 2 ? (((type)-1) > 0 ? (type)_cffi_to_c_u16(o) \
: (type)_cffi_to_c_i16(o)) : \
sizeof(type) == 4 ? (((type)-1) > 0 ? (type)_cffi_to_c_u32(o) \
: (type)_cffi_to_c_i32(o)) : \
sizeof(type) == 8 ? (((type)-1) > 0 ? (type)_cffi_to_c_u64(o) \
: (type)_cffi_to_c_i64(o)) : \
(Py_FatalError("unsupported size for type " #type), (type)0)))
#define _cffi_to_c_i8 \
((int(*)(PyObject *))_cffi_exports[1])
#define _cffi_to_c_u8 \
((int(*)(PyObject *))_cffi_exports[2])
#define _cffi_to_c_i16 \
((int(*)(PyObject *))_cffi_exports[3])
#define _cffi_to_c_u16 \
((int(*)(PyObject *))_cffi_exports[4])
#define _cffi_to_c_i32 \
((int(*)(PyObject *))_cffi_exports[5])
#define _cffi_to_c_u32 \
((unsigned int(*)(PyObject *))_cffi_exports[6])
#define _cffi_to_c_i64 \
((long long(*)(PyObject *))_cffi_exports[7])
#define _cffi_to_c_u64 \
((unsigned long long(*)(PyObject *))_cffi_exports[8])
#define _cffi_to_c_char \
((int(*)(PyObject *))_cffi_exports[9])
#define _cffi_from_c_pointer \
((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[10])
#define _cffi_to_c_pointer \
((char *(*)(PyObject *, CTypeDescrObject *))_cffi_exports[11])
#define _cffi_get_struct_layout \
((PyObject *(*)(Py_ssize_t[]))_cffi_exports[12])
#define _cffi_restore_errno \
((void(*)(void))_cffi_exports[13])
#define _cffi_save_errno \
((void(*)(void))_cffi_exports[14])
#define _cffi_from_c_char \
((PyObject *(*)(char))_cffi_exports[15])
#define _cffi_from_c_deref \
((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[16])
#define _cffi_to_c \
((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[17])
#define _cffi_from_c_struct \
((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[18])
#define _cffi_to_c_wchar_t \
((wchar_t(*)(PyObject *))_cffi_exports[19])
#define _cffi_from_c_wchar_t \
((PyObject *(*)(wchar_t))_cffi_exports[20])
#define _cffi_to_c_long_double \
((long double(*)(PyObject *))_cffi_exports[21])
#define _cffi_to_c__Bool \
((_Bool(*)(PyObject *))_cffi_exports[22])
#define _cffi_prepare_pointer_call_argument \
((Py_ssize_t(*)(CTypeDescrObject *, PyObject *, char **))_cffi_exports[23])
#define _cffi_convert_array_from_object \
((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[24])
#define _CFFI_NUM_EXPORTS 25
typedef struct _ctypedescr CTypeDescrObject;
static void *_cffi_exports[_CFFI_NUM_EXPORTS];
static PyObject *_cffi_types, *_cffi_VerificationError;
static int _cffi_setup_custom(PyObject *lib); /* forward */
static PyObject *_cffi_setup(PyObject *self, PyObject *args)
{
PyObject *library;
int was_alive = (_cffi_types != NULL);
(void)self; /* unused */
if (!PyArg_ParseTuple(args, "OOO", &_cffi_types, &_cffi_VerificationError,
&library))
return NULL;
Py_INCREF(_cffi_types);
Py_INCREF(_cffi_VerificationError);
if (_cffi_setup_custom(library) < 0)
return NULL;
return PyBool_FromLong(was_alive);
}
union _cffi_union_alignment_u {
unsigned char m_char;
unsigned short m_short;
unsigned int m_int;
unsigned long m_long;
unsigned long long m_longlong;
float m_float;
double m_double;
long double m_longdouble;
};
struct _cffi_freeme_s {
struct _cffi_freeme_s *next;
union _cffi_union_alignment_u alignment;
};
#ifdef __GNUC__
__attribute__((unused))
#endif
static int _cffi_convert_array_argument(CTypeDescrObject *ctptr, PyObject *arg,
char **output_data, Py_ssize_t datasize,
struct _cffi_freeme_s **freeme)
{
char *p;
if (datasize < 0)
return -1;
p = *output_data;
if (p == NULL) {
struct _cffi_freeme_s *fp = (struct _cffi_freeme_s *)PyObject_Malloc(
offsetof(struct _cffi_freeme_s, alignment) + (size_t)datasize);
if (fp == NULL)
return -1;
fp->next = *freeme;
*freeme = fp;
p = *output_data = (char *)&fp->alignment;
}
memset((void *)p, 0, (size_t)datasize);
return _cffi_convert_array_from_object(p, ctptr, arg);
}
#ifdef __GNUC__
__attribute__((unused))
#endif
static void _cffi_free_array_arguments(struct _cffi_freeme_s *freeme)
{
do {
void *p = (void *)freeme;
freeme = freeme->next;
PyObject_Free(p);
} while (freeme != NULL);
}
static int _cffi_init(void)
{
PyObject *module, *c_api_object = NULL;
module = PyImport_ImportModule("_cffi_backend");
if (module == NULL)
goto failure;
c_api_object = PyObject_GetAttrString(module, "_C_API");
if (c_api_object == NULL)
goto failure;
if (!PyCapsule_CheckExact(c_api_object)) {
PyErr_SetNone(PyExc_ImportError);
goto failure;
}
memcpy(_cffi_exports, PyCapsule_GetPointer(c_api_object, "cffi"),
_CFFI_NUM_EXPORTS * sizeof(void *));
Py_DECREF(module);
Py_DECREF(c_api_object);
return 0;
failure:
Py_XDECREF(module);
Py_XDECREF(c_api_object);
return -1;
}
#define _cffi_type(num) ((CTypeDescrObject *)PyList_GET_ITEM(_cffi_types, num))
/**********/
'''
| xyuanmu/XX-Net | python3.8.2/Lib/site-packages/cffi/vengine_cpy.py | Python | bsd-2-clause | 43,314 |
#
# Copyright (c) 2011, Regents of the University of California
# BSD license, See the COPYING file for more information
# Written by: Derek Kulinski <takeda@takeda.tk>
#
import curses, curses.wrapper, curses.textpad, threading, time, sys
from ChatNet import ChatNet, ChatServer
class ChatGUI(object):
    """Curses front-end for the NDN chat application: a scrolling
    output pane on top and a one-line input box at the bottom."""

    def __init__(self, prefix):
        # NDN prefix (URI) this chat instance publishes/listens under.
        self.prefix = prefix
        self.stdscr = None           # root screen, set in curses_code()
        self.max_size = None         # (max_y, max_x) of the terminal
        self.chat_sc_border = None   # border window around the output pane
        self.chat_sc = None          # scrolling output pane
        self.input_sc_border = None  # border window around the input line
        self.input_sc = None         # one-line input window
        self.textbox = None          # curses Textbox wrapping input_sc

    def window_setup(self):
        """Create the bordered output pane and the bottom input line."""
        self.max_size = self.stdscr.getmaxyx()
        max_y, max_x = self.max_size
        # Input: 3 rows at the bottom (border plus one usable line).
        self.input_sc_border = curses.newwin(3, max_x, max_y - 3, 0)
        self.input_sc_border.border()
        self.input_sc_border.noutrefresh()
        self.input_sc = curses.newwin(1, max_x - 2, max_y - 2, 1)
        self.textbox = curses.textpad.Textbox(self.input_sc)
        # Output: everything above the input area, drawn inside a border.
        self.chat_sc_border = curses.newwin(max_y - 3, max_x)
        self.chat_sc_border.border()
        self.chat_sc_border.noutrefresh()
        self.chat_sc = curses.newwin(max_y - 5, max_x - 2, 1, 1)
        self.chat_sc.scrollok(True)
        self.chat_sc.noutrefresh()

    def write(self, text):
        # Queue one line in the output pane; the caller is responsible
        # for calling curses.doupdate() to flush it to the screen.
        self.chat_sc.addstr(text + "\n")
        self.chat_sc.noutrefresh()

    def callback(self, nick, text):
        # Invoked by ChatNet whenever a remote message arrives.
        self.write("<%s> %s" % (nick, text))
        curses.doupdate()

    def input_thread(self):
        """Read lines from the textbox and publish them.

        Runs in its own thread; also starts the ChatServer listener
        thread.  textbox.edit() blocks until the user finishes a line.
        """
        server = ChatServer(self.prefix)
        thread = threading.Thread(target=server.listen)
        thread.start()
        while True:
            text = self.textbox.edit()
            self.input_sc.erase()
            if text == "":
                continue
            #self.write(text)
            server.send_message(text)

    def curses_code(self, stdscr):
        """Main entry point; intended to be run under curses.wrapper()."""
        self.stdscr = stdscr
        self.window_setup()
        curses.doupdate()
        chatnet = ChatNet(self.prefix, self.callback)
        thread = threading.Thread(target=self.input_thread)
        thread.start()
        while True:
            # Poll for incoming messages once per second.
            chatnet.pullData()
            time.sleep(1)
def usage():
    """Write a usage message to stderr and terminate with exit status 1."""
    message = "Usage: %s <URI>\n" % sys.argv[0]
    sys.stderr.write(message)
    sys.exit(1)
if __name__ == '__main__':
    # Expect exactly one command-line argument: the NDN prefix (URI).
    if len(sys.argv) != 2:
        usage()
    gui = ChatGUI(sys.argv[1])
    # curses.wrapper restores the terminal even if curses_code raises.
    curses.wrapper(gui.curses_code)
| cawka/packaging-PyNDN | examples/ndnChat/chat.py | Python | bsd-3-clause | 2,131 |
"""
********
Matching
********
"""
# Copyright (C) 2004-2015 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
# Copyright (C) 2011 by
# Nicholas Mancuso <nick.mancuso@gmail.com>
# All rights reserved.
# BSD license.
from itertools import repeat
__author__ = """\n""".join(['Joris van Rantwijk',
'Nicholas Mancuso (nick.mancuso@gmail.com)'])
__all__ = ['max_weight_matching', 'maximal_matching']
def maximal_matching(G):
    r"""Find a maximal cardinality matching in the graph.

    A matching is a subset of edges in which no node occurs more than once.
    The cardinality of a matching is the number of matched edges.

    Parameters
    ----------
    G : NetworkX graph
      Undirected graph

    Returns
    -------
    matching : set
        A maximal matching of the graph.

    Notes
    -----
    The algorithm greedily selects a maximal matching M of the graph G
    (i.e. no superset of M exists). It runs in `O(|E|)` time.
    """
    matching = set()
    # Nodes already covered by the matching; an edge may be added only
    # if neither endpoint is covered yet.  Tracking nodes (instead of
    # the original incident-edge set) needs only O(|V|) memory.
    nodes = set()
    for u, v in G.edges():
        # Bug fix: skip self-loops (u == v).  A self-loop can never be
        # part of a matching, since the node would occur twice; the
        # previous version happily added them.
        if u not in nodes and v not in nodes and u != v:
            matching.add((u, v))
            nodes.add(u)
            nodes.add(v)
    return matching
def max_weight_matching(G, maxcardinality=False):
"""Compute a maximum-weighted matching of G.
A matching is a subset of edges in which no node occurs more than once.
The cardinality of a matching is the number of matched edges.
The weight of a matching is the sum of the weights of its edges.
Parameters
----------
G : NetworkX graph
Undirected graph
maxcardinality: bool, optional
If maxcardinality is True, compute the maximum-cardinality matching
with maximum weight among all maximum-cardinality matchings.
Returns
-------
mate : dictionary
The matching is returned as a dictionary, mate, such that
mate[v] == w if node v is matched to node w. Unmatched nodes do not
occur as a key in mate.
Notes
------
If G has edges with 'weight' attribute the edge data are used as
weight values else the weights are assumed to be 1.
This function takes time O(number_of_nodes ** 3).
If all edge weights are integers, the algorithm uses only integer
computations. If floating point weights are used, the algorithm
could return a slightly suboptimal matching due to numeric
precision errors.
This method is based on the "blossom" method for finding augmenting
paths and the "primal-dual" method for finding a matching of maximum
weight, both methods invented by Jack Edmonds [1]_.
Bipartite graphs can also be matched using the functions present in
:mod:`networkx.algorithms.bipartite.matching`.
References
----------
.. [1] "Efficient Algorithms for Finding Maximum Matching in Graphs",
Zvi Galil, ACM Computing Surveys, 1986.
"""
#
# The algorithm is taken from "Efficient Algorithms for Finding Maximum
# Matching in Graphs" by Zvi Galil, ACM Computing Surveys, 1986.
# It is based on the "blossom" method for finding augmenting paths and
# the "primal-dual" method for finding a matching of maximum weight, both
# methods invented by Jack Edmonds.
#
# A C program for maximum weight matching by Ed Rothberg was used
# extensively to validate this new code.
#
# Many terms used in the code comments are explained in the paper
# by Galil. You will probably need the paper to make sense of this code.
#
class NoNode:
"""Dummy value which is different from any node."""
pass
class Blossom:
    """Representation of a non-trivial blossom or sub-blossom."""
    # __slots__ keeps the (potentially many) blossom objects small.
    __slots__ = [ 'childs', 'edges', 'mybestedges' ]
    # b.childs is an ordered list of b's sub-blossoms, starting with
    # the base and going round the blossom.
    # b.edges is the list of b's connecting edges, such that
    # b.edges[i] = (v, w) where v is a vertex in b.childs[i]
    # and w is a vertex in b.childs[wrap(i+1)].
    # If b is a top-level S-blossom,
    # b.mybestedges is a list of least-slack edges to neighbouring
    # S-blossoms, or None if no such list has been computed yet.
    # This is used for efficient computation of delta3.
    # Generate the blossom's leaf vertices.
    def leaves(self):
        """Yield every graph vertex contained (recursively) in this blossom."""
        for t in self.childs:
            if isinstance(t, Blossom):
                # Recurse into nested sub-blossoms.
                for v in t.leaves():
                    yield v
            else:
                # A non-Blossom child is an actual graph vertex.
                yield t
# Get a list of vertices.
gnodes = list(G)
if not gnodes:
    return { } # don't bother with empty graphs
# Find the maximum edge weight.
maxweight = 0
allinteger = True
for i,j,d in G.edges(data=True):
    wt=d.get('weight',1)
    if i != j and wt > maxweight:
        maxweight = wt
    # Track whether every edge weight is integral; this enables exact
    # arithmetic throughout and the final optimality verification.
    allinteger = allinteger and (str(type(wt)).split("'")[1]
                                 in ('int', 'long'))
# If v is a matched vertex, mate[v] is its partner vertex.
# If v is a single vertex, v does not occur as a key in mate.
# Initially all vertices are single; updated during augmentation.
mate = { }
# If b is a top-level blossom,
# label.get(b) is None if b is unlabeled (free),
# 1 if b is an S-blossom,
# 2 if b is a T-blossom.
# The label of a vertex is found by looking at the label of its top-level
# containing blossom.
# If v is a vertex inside a T-blossom, label[v] is 2 iff v is reachable
# from an S-vertex outside the blossom.
# Labels are assigned during a stage and reset after each augmentation.
label = { }
# If b is a labeled top-level blossom,
# labeledge[b] = (v, w) is the edge through which b obtained its label
# such that w is a vertex in b, or None if b's base vertex is single.
# If w is a vertex inside a T-blossom and label[w] == 2,
# labeledge[w] = (v, w) is an edge through which w is reachable from
# outside the blossom.
labeledge = { }
# If v is a vertex, inblossom[v] is the top-level blossom to which v
# belongs.
# If v is a top-level vertex, inblossom[v] == v since v is itself
# a (trivial) top-level blossom.
# Initially all vertices are top-level trivial blossoms.
inblossom = dict(zip(gnodes, gnodes))
# If b is a sub-blossom,
# blossomparent[b] is its immediate parent (sub-)blossom.
# If b is a top-level blossom, blossomparent[b] is None.
blossomparent = dict(zip(gnodes, repeat(None)))
# If b is a (sub-)blossom,
# blossombase[b] is its base VERTEX (i.e. recursive sub-blossom).
blossombase = dict(zip(gnodes, gnodes))
# If w is a free vertex (or an unreached vertex inside a T-blossom),
# bestedge[w] = (v, w) is the least-slack edge from an S-vertex,
# or None if there is no such edge.
# If b is a (possibly trivial) top-level S-blossom,
# bestedge[b] = (v, w) is the least-slack edge to a different S-blossom
# (v inside b), or None if there is no such edge.
# This is used for efficient computation of delta2 and delta3.
bestedge = { }
# If v is a vertex,
# dualvar[v] = 2 * u(v) where u(v) is the v's variable in the dual
# optimization problem (if all edge weights are integers, multiplication
# by two ensures that all values remain integers throughout the algorithm).
# Initially, u(v) = maxweight / 2.
dualvar = dict(zip(gnodes, repeat(maxweight)))
# If b is a non-trivial blossom,
# blossomdual[b] = z(b) where z(b) is b's variable in the dual
# optimization problem.
blossomdual = { }
# If (v, w) in allowedge or (w, v) in allowedge, then the edge
# (v, w) is known to have zero slack in the optimization problem;
# otherwise the edge may or may not have zero slack.
allowedge = { }
# Queue of newly discovered S-vertices.
queue = [ ]
# Return 2 * slack of edge (v, w) (does not work inside blossoms).
def slack(v, w):
    """Return 2 * slack of edge (v, w); doubled to keep integer arithmetic exact."""
    return dualvar[v] + dualvar[w] - 2 * G[v][w].get('weight',1)
# Assign label t to the top-level blossom containing vertex w,
# coming through an edge from vertex v.
def assignLabel(w, t, v):
    """Label w's top-level blossom with t (1=S, 2=T), reached via edge (v, w)."""
    b = inblossom[w]
    # A blossom/vertex must not be labeled twice within one stage.
    assert label.get(w) is None and label.get(b) is None
    label[w] = label[b] = t
    if v is not None:
        labeledge[w] = labeledge[b] = (v, w)
    else:
        # v is None when w's base vertex is single (stage start).
        labeledge[w] = labeledge[b] = None
    bestedge[w] = bestedge[b] = None
    if t == 1:
        # b became an S-vertex/blossom; add it(s vertices) to the queue.
        if isinstance(b, Blossom):
            queue.extend(b.leaves())
        else:
            queue.append(b)
    elif t == 2:
        # b became a T-vertex/blossom; assign label S to its mate.
        # (If b is a non-trivial blossom, its base is the only vertex
        # with an external mate.)
        base = blossombase[b]
        assignLabel(mate[base], 1, base)
# Trace back from vertices v and w to discover either a new blossom
# or an augmenting path. Return the base vertex of the new blossom,
# or NoNode if an augmenting path was found.
def scanBlossom(v, w):
    """Trace back from v and w; return the new blossom's base, or NoNode."""
    # Trace back from v and w, placing breadcrumbs as we go.
    path = [ ]
    base = NoNode
    while v is not NoNode:
        # Look for a breadcrumb in v's blossom or put a new breadcrumb.
        b = inblossom[v]
        if label[b] & 4:
            # Bit 4 marks a breadcrumb (label 5 = 1|4): the two traces
            # met, so b's base is the base of a new blossom.
            base = blossombase[b]
            break
        assert label[b] == 1
        path.append(b)
        # 5 == 1|4: keep the S label but set the breadcrumb bit.
        label[b] = 5
        # Trace one step back.
        if labeledge[b] is None:
            # The base of blossom b is single; stop tracing this path.
            assert blossombase[b] not in mate
            v = NoNode
        else:
            assert labeledge[b][0] == mate[blossombase[b]]
            v = labeledge[b][0]
            b = inblossom[v]
            assert label[b] == 2
            # b is a T-blossom; trace one more step back.
            v = labeledge[b][0]
        # Swap v and w so that we alternate between both paths.
        if w is not NoNode:
            v, w = w, v
    # Remove breadcrumbs.
    for b in path:
        label[b] = 1
    # Return base vertex, if we found one.
    return base
# Construct a new blossom with given base, through S-vertices v and w.
# Label the new blossom as S; set its dual variable to zero;
# relabel its T-vertices to S and add them to the queue.
def addBlossom(base, v, w):
    """Create an S-blossom with the given base through edge (v, w)."""
    bb = inblossom[base]
    bv = inblossom[v]
    bw = inblossom[w]
    # Create blossom.
    b = Blossom()
    blossombase[b] = base
    blossomparent[b] = None
    blossomparent[bb] = b
    # Make list of sub-blossoms and their interconnecting edge endpoints.
    b.childs = path = [ ]
    b.edges = edgs = [ (v, w) ]
    # Trace back from v to base.
    while bv != bb:
        # Add bv to the new blossom.
        blossomparent[bv] = b
        path.append(bv)
        edgs.append(labeledge[bv])
        assert label[bv] == 2 or (label[bv] == 1 and labeledge[bv][0] == mate[blossombase[bv]])
        # Trace one step back.
        v = labeledge[bv][0]
        bv = inblossom[v]
    # Add base sub-blossom; reverse lists.
    path.append(bb)
    path.reverse()
    edgs.reverse()
    # Trace back from w to base.
    while bw != bb:
        # Add bw to the new blossom.
        blossomparent[bw] = b
        path.append(bw)
        # Edge endpoints are stored reversed on the w-side so that every
        # edgs[i] points "forward" round the blossom.
        edgs.append((labeledge[bw][1], labeledge[bw][0]))
        assert label[bw] == 2 or (label[bw] == 1 and labeledge[bw][0] == mate[blossombase[bw]])
        # Trace one step back.
        w = labeledge[bw][0]
        bw = inblossom[w]
    # Set label to S.
    assert label[bb] == 1
    label[b] = 1
    labeledge[b] = labeledge[bb]
    # Set dual variable to zero.
    blossomdual[b] = 0
    # Relabel vertices.
    for v in b.leaves():
        if label[inblossom[v]] == 2:
            # This T-vertex now turns into an S-vertex because it becomes
            # part of an S-blossom; add it to the queue.
            queue.append(v)
        inblossom[v] = b
    # Compute b.mybestedges.
    bestedgeto = { }
    for bv in path:
        if isinstance(bv, Blossom):
            if bv.mybestedges is not None:
                # Walk this subblossom's least-slack edges.
                nblist = bv.mybestedges
                # The sub-blossom won't need this data again.
                bv.mybestedges = None
            else:
                # This subblossom does not have a list of least-slack
                # edges; get the information from the vertices.
                nblist = [ (v, w)
                           for v in bv.leaves()
                           for w in G.neighbors(v)
                           if v != w ]
        else:
            nblist = [ (bv, w)
                       for w in G.neighbors(bv)
                       if bv != w ]
        for k in nblist:
            (i, j) = k
            # Orient each candidate edge so that j lies outside b.
            if inblossom[j] == b:
                i, j = j, i
            bj = inblossom[j]
            if (bj != b and label.get(bj) == 1 and
                ((bj not in bestedgeto) or
                 slack(i, j) < slack(*bestedgeto[bj]))):
                bestedgeto[bj] = k
        # Forget about least-slack edge of the subblossom.
        bestedge[bv] = None
    b.mybestedges = list(bestedgeto.values())
    # Select bestedge[b].
    mybestedge = None
    bestedge[b] = None
    for k in b.mybestedges:
        kslack = slack(*k)
        if mybestedge is None or kslack < mybestslack:
            mybestedge = k
            mybestslack = kslack
    bestedge[b] = mybestedge
# Expand the given top-level blossom.
def expandBlossom(b, endstage):
    """Dissolve blossom b, promoting its sub-blossoms to top level.

    If ``endstage`` is true we are between stages and zero-dual
    sub-blossoms are expanded recursively; otherwise b is a T-blossom
    being expanded mid-stage and its sub-blossoms must be relabeled.
    """
    # Convert sub-blossoms into top-level blossoms.
    for s in b.childs:
        blossomparent[s] = None
        if isinstance(s, Blossom):
            if endstage and blossomdual[s] == 0:
                # Recursively expand this sub-blossom.
                expandBlossom(s, endstage)
            else:
                for v in s.leaves():
                    inblossom[v] = s
        else:
            inblossom[s] = s
    # If we expand a T-blossom during a stage, its sub-blossoms must be
    # relabeled.
    if (not endstage) and label.get(b) == 2:
        # Start at the sub-blossom through which the expanding
        # blossom obtained its label, and relabel sub-blossoms until
        # we reach the base.
        # Figure out through which sub-blossom the expanding blossom
        # obtained its label initially.
        entrychild = inblossom[labeledge[b][1]]
        # Decide in which direction we will go round the blossom.
        j = b.childs.index(entrychild)
        if j & 1:
            # Start index is odd; go forward and wrap.
            j -= len(b.childs)
            jstep = 1
        else:
            # Start index is even; go backward.
            jstep = -1
        # Move along the blossom until we get to the base.
        v, w = labeledge[b]
        while j != 0:
            # Relabel the T-sub-blossom.
            if jstep == 1:
                p, q = b.edges[j]
            else:
                q, p = b.edges[j-1]
            label[w] = None
            label[q] = None
            assignLabel(w, 2, v)
            # Step to the next S-sub-blossom and note its forward edge.
            allowedge[(p, q)] = allowedge[(q, p)] = True
            j += jstep
            if jstep == 1:
                v, w = b.edges[j]
            else:
                w, v = b.edges[j-1]
            # Step to the next T-sub-blossom.
            allowedge[(v, w)] = allowedge[(w, v)] = True
            j += jstep
        # Relabel the base T-sub-blossom WITHOUT stepping through to
        # its mate (so don't call assignLabel).
        bw = b.childs[j]
        label[w] = label[bw] = 2
        labeledge[w] = labeledge[bw] = (v, w)
        bestedge[bw] = None
        # Continue along the blossom until we get back to entrychild.
        j += jstep
        while b.childs[j] != entrychild:
            # Examine the vertices of the sub-blossom to see whether
            # it is reachable from a neighbouring S-vertex outside the
            # expanding blossom.
            bv = b.childs[j]
            if label.get(bv) == 1:
                # This sub-blossom just got label S through one of its
                # neighbours; leave it be.
                j += jstep
                continue
            if isinstance(bv, Blossom):
                # Find a labeled leaf, if any (for-else: v keeps the
                # last leaf when none is labeled).
                for v in bv.leaves():
                    if label.get(v):
                        break
            else:
                v = bv
            # If the sub-blossom contains a reachable vertex, assign
            # label T to the sub-blossom.
            if label.get(v):
                assert label[v] == 2
                assert inblossom[v] == bv
                label[v] = None
                label[mate[blossombase[bv]]] = None
                assignLabel(v, 2, labeledge[v][0])
            j += jstep
    # Remove the expanded blossom entirely.
    label.pop(b, None)
    labeledge.pop(b, None)
    bestedge.pop(b, None)
    del blossomparent[b]
    del blossombase[b]
    del blossomdual[b]
# Swap matched/unmatched edges over an alternating path through blossom b
# between vertex v and the base vertex. Keep blossom bookkeeping consistent.
def augmentBlossom(b, v):
    """Augment inside blossom b so that v becomes its new base vertex."""
    # Bubble up through the blossom tree from vertex v to an immediate
    # sub-blossom of b.
    t = v
    while blossomparent[t] != b:
        t = blossomparent[t]
    # Recursively deal with the first sub-blossom.
    if isinstance(t, Blossom):
        augmentBlossom(t, v)
    # Decide in which direction we will go round the blossom.
    i = j = b.childs.index(t)
    if i & 1:
        # Start index is odd; go forward and wrap.
        j -= len(b.childs)
        jstep = 1
    else:
        # Start index is even; go backward.
        jstep = -1
    # Move along the blossom until we get to the base.
    while j != 0:
        # Step to the next sub-blossom and augment it recursively.
        j += jstep
        t = b.childs[j]
        if jstep == 1:
            w, x = b.edges[j]
        else:
            x, w = b.edges[j-1]
        if isinstance(t, Blossom):
            augmentBlossom(t, w)
        # Step to the next sub-blossom and augment it recursively.
        j += jstep
        t = b.childs[j]
        if isinstance(t, Blossom):
            augmentBlossom(t, x)
        # Match the edge connecting those sub-blossoms.
        mate[w] = x
        mate[x] = w
    # Rotate the list of sub-blossoms to put the new base at the front.
    b.childs = b.childs[i:] + b.childs[:i]
    b.edges = b.edges[i:] + b.edges[:i]
    blossombase[b] = blossombase[b.childs[0]]
    assert blossombase[b] == v
# Swap matched/unmatched edges over an alternating path between two
# single vertices. The augmenting path runs through S-vertices v and w.
def augmentMatching(v, w):
    """Augment the matching along the path through S-vertices v and w."""
    # Handle both halves of the augmenting path symmetrically.
    for (s, j) in ((v, w), (w, v)):
        # Match vertex s to vertex j. Then trace back from s
        # until we find a single vertex, swapping matched and unmatched
        # edges as we go.
        while 1:
            bs = inblossom[s]
            assert label[bs] == 1
            assert (labeledge[bs] is None and blossombase[bs] not in mate) or (labeledge[bs][0] == mate[blossombase[bs]])
            # Augment through the S-blossom from s to base.
            if isinstance(bs, Blossom):
                augmentBlossom(bs, s)
            # Update mate[s]
            mate[s] = j
            # Trace one step back.
            if labeledge[bs] is None:
                # Reached single vertex; stop.
                break
            t = labeledge[bs][0]
            bt = inblossom[t]
            assert label[bt] == 2
            # Trace one more step back.
            s, j = labeledge[bt]
            # Augment through the T-blossom from j to base.
            assert blossombase[bt] == t
            if isinstance(bt, Blossom):
                augmentBlossom(bt, j)
            # Update mate[j]
            mate[j] = s
# Verify that the optimum solution has been reached.
def verifyOptimum():
    """Assert the complementary-slackness conditions of the LP dual hold.

    Only called when all edge weights are integers, so every check is
    exact. Raises AssertionError if the matching is not optimal.
    """
    if maxcardinality:
        # Vertices may have negative dual;
        # find a constant non-negative number to add to all vertex duals.
        vdualoffset = max(0, -min(dualvar.values()))
    else:
        vdualoffset = 0
    # 0. all dual variables are non-negative
    assert min(dualvar.values()) + vdualoffset >= 0
    assert len(blossomdual) == 0 or min(blossomdual.values()) >= 0
    # 0. all edges have non-negative slack and
    # 1. all matched edges have zero slack;
    for i,j,d in G.edges(data=True):
        wt=d.get('weight',1)
        if i == j:
            continue # ignore self-loops
        s = dualvar[i] + dualvar[j] - 2 * wt
        iblossoms = [ i ]
        jblossoms = [ j ]
        # Walk up to the top-level blossoms containing i and j.
        while blossomparent[iblossoms[-1]] is not None:
            iblossoms.append(blossomparent[iblossoms[-1]])
        while blossomparent[jblossoms[-1]] is not None:
            jblossoms.append(blossomparent[jblossoms[-1]])
        iblossoms.reverse()
        jblossoms.reverse()
        # Duals of blossoms that contain BOTH endpoints contribute to
        # the edge's slack.
        for (bi, bj) in zip(iblossoms, jblossoms):
            if bi != bj:
                break
            s += 2 * blossomdual[bi]
        assert s >= 0
        if mate.get(i) == j or mate.get(j) == i:
            assert mate[i] == j and mate[j] == i
            assert s == 0
    # 2. all single vertices have zero dual value;
    for v in gnodes:
        assert (v in mate) or dualvar[v] + vdualoffset == 0
    # 3. all blossoms with positive dual value are full.
    for b in blossomdual:
        if blossomdual[b] > 0:
            assert len(b.edges) % 2 == 1
            for (i, j) in b.edges[1::2]:
                assert mate[i] == j and mate[j] == i
    # Ok.
# Main loop: continue until no further improvement is possible.
while 1:
    # Each iteration of this loop is a "stage".
    # A stage finds an augmenting path and uses that to improve
    # the matching.
    # Remove labels from top-level blossoms/vertices.
    label.clear()
    labeledge.clear()
    # Forget all about least-slack edges.
    bestedge.clear()
    for b in blossomdual:
        b.mybestedges = None
    # Loss of labeling means that we can not be sure that currently
    # allowable edges remain allowable throughout this stage.
    allowedge.clear()
    # Make queue empty.
    queue[:] = [ ]
    # Label single blossoms/vertices with S and put them in the queue.
    for v in gnodes:
        if (v not in mate) and label.get(inblossom[v]) is None:
            assignLabel(v, 1, None)
    # Loop until we succeed in augmenting the matching.
    augmented = 0
    while 1:
        # Each iteration of this loop is a "substage".
        # A substage tries to find an augmenting path;
        # if found, the path is used to improve the matching and
        # the stage ends. If there is no augmenting path, the
        # primal-dual method is used to pump some slack out of
        # the dual variables.
        # Continue labeling until all vertices which are reachable
        # through an alternating path have got a label.
        while queue and not augmented:
            # Take an S vertex from the queue.
            v = queue.pop()
            assert label[inblossom[v]] == 1
            # Scan its neighbours:
            for w in G.neighbors(v):
                if w == v:
                    continue # ignore self-loops
                # w is a neighbour to v
                bv = inblossom[v]
                bw = inblossom[w]
                if bv == bw:
                    # this edge is internal to a blossom; ignore it
                    continue
                if (v, w) not in allowedge:
                    kslack = slack(v, w)
                    if kslack <= 0:
                        # edge k has zero slack => it is allowable
                        allowedge[(v, w)] = allowedge[(w, v)] = True
                if (v, w) in allowedge:
                    if label.get(bw) is None:
                        # (C1) w is a free vertex;
                        # label w with T and label its mate with S (R12).
                        assignLabel(w, 2, v)
                    elif label.get(bw) == 1:
                        # (C2) w is an S-vertex (not in the same blossom);
                        # follow back-links to discover either an
                        # augmenting path or a new blossom.
                        base = scanBlossom(v, w)
                        if base is not NoNode:
                            # Found a new blossom; add it to the blossom
                            # bookkeeping and turn it into an S-blossom.
                            addBlossom(base, v, w)
                        else:
                            # Found an augmenting path; augment the
                            # matching and end this stage.
                            augmentMatching(v, w)
                            augmented = 1
                            break
                    elif label.get(w) is None:
                        # w is inside a T-blossom, but w itself has not
                        # yet been reached from outside the blossom;
                        # mark it as reached (we need this to relabel
                        # during T-blossom expansion).
                        assert label[bw] == 2
                        label[w] = 2
                        labeledge[w] = (v, w)
                elif label.get(bw) == 1:
                    # keep track of the least-slack non-allowable edge to
                    # a different S-blossom.
                    if bestedge.get(bv) is None or kslack < slack(*bestedge[bv]):
                        bestedge[bv] = (v, w)
                elif label.get(w) is None:
                    # w is a free vertex (or an unreached vertex inside
                    # a T-blossom) but we can not reach it yet;
                    # keep track of the least-slack edge that reaches w.
                    if bestedge.get(w) is None or kslack < slack(*bestedge[w]):
                        bestedge[w] = (v, w)
        if augmented:
            break
        # There is no augmenting path under these constraints;
        # compute delta and reduce slack in the optimization problem.
        # (Note that our vertex dual variables, edge slacks and delta's
        # are pre-multiplied by two.)
        deltatype = -1
        delta = deltaedge = deltablossom = None
        # Compute delta1: the minimum value of any vertex dual.
        if not maxcardinality:
            deltatype = 1
            delta = min(dualvar.values())
        # Compute delta2: the minimum slack on any edge between
        # an S-vertex and a free vertex.
        for v in G.nodes():
            if label.get(inblossom[v]) is None and bestedge.get(v) is not None:
                d = slack(*bestedge[v])
                if deltatype == -1 or d < delta:
                    delta = d
                    deltatype = 2
                    deltaedge = bestedge[v]
        # Compute delta3: half the minimum slack on any edge between
        # a pair of S-blossoms.
        for b in blossomparent:
            if ( blossomparent[b] is None and label.get(b) == 1 and
                 bestedge.get(b) is not None ):
                kslack = slack(*bestedge[b])
                if allinteger:
                    assert (kslack % 2) == 0
                    d = kslack // 2
                else:
                    d = kslack / 2.0
                if deltatype == -1 or d < delta:
                    delta = d
                    deltatype = 3
                    deltaedge = bestedge[b]
        # Compute delta4: minimum z variable of any T-blossom.
        for b in blossomdual:
            if ( blossomparent[b] is None and label.get(b) == 2 and
                 (deltatype == -1 or blossomdual[b] < delta) ):
                delta = blossomdual[b]
                deltatype = 4
                deltablossom = b
        if deltatype == -1:
            # No further improvement possible; max-cardinality optimum
            # reached. Do a final delta update to make the optimum
            # verifyable.
            assert maxcardinality
            deltatype = 1
            delta = max(0, min(dualvar.values()))
        # Update dual variables according to delta.
        for v in gnodes:
            if label.get(inblossom[v]) == 1:
                # S-vertex: 2*u = 2*u - 2*delta
                dualvar[v] -= delta
            elif label.get(inblossom[v]) == 2:
                # T-vertex: 2*u = 2*u + 2*delta
                dualvar[v] += delta
        for b in blossomdual:
            if blossomparent[b] is None:
                if label.get(b) == 1:
                    # top-level S-blossom: z = z + 2*delta
                    blossomdual[b] += delta
                elif label.get(b) == 2:
                    # top-level T-blossom: z = z - 2*delta
                    blossomdual[b] -= delta
        # Take action at the point where minimum delta occurred.
        if deltatype == 1:
            # No further improvement possible; optimum reached.
            break
        elif deltatype == 2:
            # Use the least-slack edge to continue the search.
            (v, w) = deltaedge
            assert label[inblossom[v]] == 1
            allowedge[(v, w)] = allowedge[(w, v)] = True
            queue.append(v)
        elif deltatype == 3:
            # Use the least-slack edge to continue the search.
            (v, w) = deltaedge
            allowedge[(v, w)] = allowedge[(w, v)] = True
            assert label[inblossom[v]] == 1
            queue.append(v)
        elif deltatype == 4:
            # Expand the least-z blossom.
            expandBlossom(deltablossom, False)
        # End of a this substage.
    # Paranoia check that the matching is symmetric.
    for v in mate:
        assert mate[mate[v]] == v
    # Stop when no more augmenting path can be found.
    if not augmented:
        break
    # End of a stage; expand all S-blossoms which have zero dual.
    for b in list(blossomdual.keys()):
        if b not in blossomdual:
            continue # already expanded
        if ( blossomparent[b] is None and label.get(b) == 1 and
             blossomdual[b] == 0 ):
            expandBlossom(b, True)
# Verify that we reached the optimum solution (only for integer weights).
if allinteger:
    verifyOptimum()
return mate
| ltiao/networkx | networkx/algorithms/matching.py | Python | bsd-3-clause | 32,997 |
import pytest
from github3 import AuthenticationFailed, GitHubError
from github3.github import GitHub
from .helper import UnitHelper, UnitIteratorHelper
def url_for(path=''):
    """Simple function to generate URLs with the base GitHub URL."""
    # Trim any surrounding slashes from the path before appending it.
    trimmed = path.strip('/')
    return 'https://api.github.com/{0}'.format(trimmed)
class TestGitHub(UnitHelper):
    """Unit tests for non-iterator methods of the GitHub session object.

    Each test calls a method on a GitHub instance backed by a mocked
    session and asserts the exact HTTP call (URL, payload) it produced,
    or that no call was made for invalid input.
    """
    described_class = GitHub
    # No canned JSON fixture is needed for these tests.
    example_data = None

    def test_authorization(self):
        """Show that a user can retrieve a specific authorization by id."""
        self.instance.authorization(10)
        self.session.get.assert_called_once_with(
            url_for('authorizations/10'),
        )

    def test_authorize(self):
        """Show an authorization can be created for a user."""
        self.instance.authorize('username', 'password', ['user', 'repo'])
        # Credentials must be sent via temporary basic auth, not stored.
        self.session.temporary_basic_auth.assert_called_once_with(
            'username', 'password'
        )
        self.post_called_with(
            url_for('authorizations'),
            data={'note': '', 'note_url': '', 'client_id': '',
                  'client_secret': '', 'scopes': ['user', 'repo']}
        )

    def test_check_authorization(self):
        """Test an app's ability to check a authorization token."""
        self.instance.set_client_id('client-id', 'client-secret')
        self.instance.check_authorization('super-fake-access-token')
        self.session.get.assert_called_once_with(
            url_for('applications/client-id/tokens/super-fake-access-token'),
            # None values neutralize any session-level client params;
            # the real credentials travel in the auth tuple.
            params={'client_id': None, 'client_secret': None},
            auth=('client-id', 'client-secret')
        )

    def test_create_gist(self):
        """Test the request to create a gist."""
        self.instance.create_gist('description', {
            'example.py': {'content': '# example contents'}
        })
        self.post_called_with(
            url_for('gists'),
            data={
                'description': 'description',
                'files': {
                    'example.py': {
                        'content': '# example contents'
                    }
                },
                # Gists default to public.
                'public': True,
            }
        )

    def test_create_key(self):
        """Test the request to create a key."""
        self.instance.create_key('key_name', 'key text')
        self.post_called_with(
            url_for('user/keys'),
            data={
                'title': 'key_name',
                'key': 'key text'
            }
        )

    def test_create_key_requires_a_key(self):
        """Test that no request is made with an empty key."""
        self.instance.create_key('title', '')
        assert self.session.post.called is False

    def test_create_key_requires_a_title(self):
        """Test that no request is made with an empty title."""
        self.instance.create_key('', 'key text')
        assert self.session.post.called is False

    def test_create_repository(self):
        """Test the request to create a repository."""
        self.instance.create_repository('repo-name')
        self.post_called_with(
            url_for('user/repos'),
            # All optional fields fall back to their documented defaults.
            data={
                'name': 'repo-name',
                'description': '',
                'homepage': '',
                'private': False,
                'has_issues': True,
                'has_wiki': True,
                'auto_init': False,
                'gitignore_template': ''
            }
        )

    def test_emojis(self):
        """Test the request to retrieve GitHub's emojis."""
        self.instance.emojis()
        self.session.get.assert_called_once_with(url_for('emojis'))

    def test_follow(self):
        """Test the request to follow a user."""
        self.instance.follow('username')
        self.session.put.assert_called_once_with(
            url_for('user/following/username')
        )

    def test_follow_requires_a_username(self):
        """Test that GitHub#follow requires a username."""
        self.instance.follow(None)
        assert self.session.put.called is False

    def test_gist(self):
        """Test the request to retrieve a specific gist."""
        self.instance.gist(10)
        self.session.get.assert_called_once_with(url_for('gists/10'))

    def test_gitignore_template(self):
        """Test the request to retrieve a gitignore template."""
        self.instance.gitignore_template('Python')
        self.session.get.assert_called_once_with(
            url_for('gitignore/templates/Python')
        )

    def test_gitignore_templates(self):
        """Test the request to retrieve gitignore templates."""
        self.instance.gitignore_templates()
        self.session.get.assert_called_once_with(
            url_for('gitignore/templates')
        )

    def test_is_following(self):
        """Test the request to check if the user is following a user."""
        self.instance.is_following('username')
        self.session.get.assert_called_once_with(
            url_for('user/following/username')
        )

    def test_is_starred(self):
        """Test the request to check if the user starred a repository."""
        self.instance.is_starred('username', 'repository')
        self.session.get.assert_called_once_with(
            url_for('user/starred/username/repository')
        )

    def test_is_starred_requires_an_owner(self):
        """Test that GitHub#is_starred requires an owner."""
        self.instance.is_starred(None, 'repo')
        assert self.session.get.called is False

    def test_is_starred_requires_a_repo(self):
        """Test that GitHub#is_starred requires an repo."""
        self.instance.is_starred('username', None)
        assert self.session.get.called is False

    def test_issue(self):
        """Test the request to retrieve a single issue."""
        self.instance.issue('owner', 'repo', 1)
        self.session.get.assert_called_once_with(
            url_for('repos/owner/repo/issues/1')
        )

    def test_issue_requires_username(self):
        """Test GitHub#issue requires a non-None username."""
        self.instance.issue(None, 'foo', 1)
        assert self.session.get.called is False

    def test_issue_requires_repository(self):
        """Test GitHub#issue requires a non-None repository."""
        self.instance.issue('foo', None, 1)
        assert self.session.get.called is False

    def test_issue_requires_positive_issue_id(self):
        """Test GitHub#issue requires positive issue id."""
        self.instance.issue('foo', 'bar', -1)
        assert self.session.get.called is False

    def test_me(self):
        """Test the ability to retrieve the authenticated user's info."""
        self.instance.me()
        self.session.get.assert_called_once_with(url_for('user'))

    def test_repository(self):
        """Verify the GET request for a repository."""
        self.instance.repository('user', 'repo')
        self.session.get.assert_called_once_with(url_for('repos/user/repo'))

    def test_repository_with_invalid_repo(self):
        """Verify there is no call made for invalid repo combos."""
        self.instance.repository('user', None)
        assert self.session.get.called is False

    def test_repository_with_invalid_user(self):
        """Verify there is no call made for invalid username combos."""
        self.instance.repository(None, 'repo')
        assert self.session.get.called is False

    def test_repository_with_invalid_user_and_repo(self):
        """Verify there is no call made for invalid user/repo combos."""
        self.instance.repository(None, None)
        assert self.session.get.called is False

    def test_repository_with_id(self):
        """Test the ability to retrieve a repository by its id."""
        self.instance.repository_with_id(10)
        self.session.get.assert_called_once_with(url_for('repositories/10'))

    def test_repository_with_id_requires_a_positive_id(self):
        """Test the ability to retrieve a repository by its id."""
        self.instance.repository_with_id(-10)
        assert self.session.get.called is False

    def test_repository_with_id_accepts_a_string(self):
        """Test the ability to retrieve a repository by its id."""
        self.instance.repository_with_id('10')
        self.session.get.assert_called_once_with(url_for('repositories/10'))

    def test_two_factor_login(self):
        """Test the ability to pass two_factor_callback."""
        # NOTE(review): only verifies that passing the callback does not
        # raise; no assertion on the resulting session state.
        self.instance.login('username', 'password',
                            two_factor_callback=lambda *args: 'foo')

    def test_can_login_without_two_factor_callback(self):
        """Test that two_factor_callback is not required."""
        self.instance.login('username', 'password')
        self.instance.login(token='token')

    def test_update_me(self):
        """Verify the request to update the authenticated user's profile."""
        self.instance.update_me(name='New name', email='email@example.com',
                                blog='http://blog.example.com', company='Corp',
                                location='here')
        self.patch_called_with(
            url_for('user'),
            data={'name': 'New name', 'email': 'email@example.com',
                  'blog': 'http://blog.example.com', 'company': 'Corp',
                  'location': 'here', 'hireable': False}
        )

    def test_user(self):
        """Test that a user can retrieve information about any user."""
        self.instance.user('username')
        self.session.get.assert_called_once_with(
            url_for('users/username'),
        )

    def test_user_with_id(self):
        """Test that any user's information can be retrieved by id."""
        self.instance.user_with_id(10)
        self.session.get.assert_called_once_with(url_for('user/10'))

    def test_user_with_id_requires_a_positive_id(self):
        """Test that user_with_id requires a positive parameter."""
        self.instance.user_with_id(-10)
        assert self.session.get.called is False

    def test_user_with_id_accepts_a_string(self):
        """Test that any user's information can be retrieved by id."""
        self.instance.user_with_id('10')
        self.session.get.assert_called_once_with(url_for('user/10'))
class TestGitHubIterators(UnitIteratorHelper):
described_class = GitHub
example_data = None
def test_all_events(self):
    """Show that one can iterate over all public events."""
    i = self.instance.all_events()
    # Consume one item so the lazy iterator issues its HTTP request.
    self.get_next(i)
    self.session.get.assert_called_once_with(
        url_for('events'),
        params={'per_page': 100},
        headers={}
    )
def test_all_organizations(self):
    """Show that one can iterate over all organizations."""
    i = self.instance.all_organizations()
    # Consume one item so the lazy iterator issues its HTTP request.
    self.get_next(i)
    self.session.get.assert_called_once_with(
        url_for('organizations'),
        params={'per_page': 100},
        headers={}
    )
def test_all_organizations_per_page(self):
    """Show that one can iterate over all organizations with per_page."""
    i = self.instance.all_organizations(per_page=25)
    self.get_next(i)
    # per_page must be forwarded verbatim instead of the 100 default.
    self.session.get.assert_called_once_with(
        url_for('organizations'),
        params={'per_page': 25},
        headers={}
    )
def test_all_organizations_since(self):
    """Show that one can limit the organizations returned."""
    since = 100000
    i = self.instance.all_organizations(since=since)
    self.get_next(i)
    # 'since' is an id cursor appended alongside the default per_page.
    self.session.get.assert_called_once_with(
        url_for('organizations'),
        params={'per_page': 100, 'since': since},
        headers={}
    )
def test_all_repositories(self):
    """Show that one can iterate over all repositories."""
    i = self.instance.all_repositories()
    # Consume one item so the lazy iterator issues its HTTP request.
    self.get_next(i)
    self.session.get.assert_called_once_with(
        url_for('repositories'),
        params={'per_page': 100},
        headers={}
    )
def test_all_repositories_per_page(self):
    """Show that one can iterate over all repositories with per_page."""
    i = self.instance.all_repositories(per_page=25)
    self.get_next(i)
    # per_page must be forwarded verbatim instead of the 100 default.
    self.session.get.assert_called_once_with(
        url_for('repositories'),
        params={'per_page': 25},
        headers={}
    )
def test_all_repositories_since(self):
    """Show that one can limit the repositories returned."""
    since = 100000
    i = self.instance.all_repositories(since=since)
    self.get_next(i)
    # 'since' is an id cursor appended alongside the default per_page.
    self.session.get.assert_called_once_with(
        url_for('repositories'),
        params={'per_page': 100, 'since': since},
        headers={}
    )
def test_all_users(self):
    """Show that one can iterate over all users."""
    i = self.instance.all_users()
    # Consume one item so the lazy iterator issues its HTTP request.
    self.get_next(i)
    self.session.get.assert_called_once_with(
        url_for('users'),
        params={'per_page': 100},
        headers={}
    )
def test_all_users_per_page(self):
    """Show that one can iterate over all users with per_page."""
    i = self.instance.all_users(per_page=25)
    self.get_next(i)
    # per_page must be forwarded verbatim instead of the 100 default.
    self.session.get.assert_called_once_with(
        url_for('users'),
        params={'per_page': 25},
        headers={}
    )
def test_all_users_since(self):
    """Show that one can limit the users returned."""
    since = 100000
    i = self.instance.all_users(since=since)
    self.get_next(i)
    # 'since' is an id cursor appended alongside the default per_page.
    self.session.get.assert_called_once_with(
        url_for('users'),
        params={'per_page': 100, 'since': since},
        headers={}
    )
def test_authorizations(self):
    """
    Show that an authenticated user can iterate over their authorizations.
    """
    i = self.instance.authorizations()
    # Consume one item so the lazy iterator issues its HTTP request.
    self.get_next(i)
    self.session.get.assert_called_once_with(
        url_for('authorizations'),
        params={'per_page': 100},
        headers={}
    )
def test_emails(self):
    """Show that an authenticated user can iterate over their emails."""
    i = self.instance.emails()
    # Consume one item so the lazy iterator issues its HTTP request.
    self.get_next(i)
    self.session.get.assert_called_once_with(
        url_for('user/emails'),
        params={'per_page': 100},
        headers={}
    )
def test_followers(self):
"""
Show that an authenticated user can iterate over their followers.
"""
i = self.instance.followers()
self.get_next(i)
self.session.get.assert_called_once_with(
url_for('user/followers'),
params={'per_page': 100},
headers={}
)
def test_followers_require_auth(self):
"""Show that one needs to authenticate to use #followers."""
self.session.has_auth.return_value = False
with pytest.raises(GitHubError):
self.instance.followers()
    def test_followers_of(self):
        """Show that one can iterate over the followers of a user."""
        i = self.instance.followers_of('sigmavirus24')
        self.get_next(i)
        self.session.get.assert_called_once_with(
            url_for('users/sigmavirus24/followers'),
            params={'per_page': 100},
            headers={}
        )
def test_following(self):
"""
Show that an authenticated user can iterate the users they are
following.
"""
i = self.instance.following()
self.get_next(i)
self.session.get.assert_called_once_with(
url_for('user/following'),
params={'per_page': 100},
headers={}
)
def test_following_require_auth(self):
"""Show that one needs to authenticate to use #following."""
self.session.has_auth.return_value = False
with pytest.raises(GitHubError):
self.instance.following()
    def test_followed_by(self):
        """
        Show that one can iterate over the users followed by another.
        """
        i = self.instance.followed_by('sigmavirus24')
        self.get_next(i)
        self.session.get.assert_called_once_with(
            url_for('users/sigmavirus24/following'),
            params={'per_page': 100},
            headers={}
        )
def test_gists(self):
"""Show that an authenticated user can iterate over their gists."""
i = self.instance.gists()
self.get_next(i)
self.session.get.assert_called_once_with(
url_for('gists'),
params={'per_page': 100},
headers={}
)
    def test_gists_by(self):
        """Show that a user's gists can be iterated over."""
        i = self.instance.gists_by('sigmavirus24')
        self.get_next(i)
        self.session.get.assert_called_once_with(
            url_for('users/sigmavirus24/gists'),
            params={'per_page': 100},
            headers={}
        )
def test_issues(self):
"""Show that an authenticated user can iterate over their issues."""
i = self.instance.issues()
self.get_next(i)
self.session.get.assert_called_once_with(
url_for('issues'),
params={'per_page': 100},
headers={}
)
def test_issues_with_params(self):
"""Show that issues can be filtered."""
params = {'filter': 'assigned', 'state': 'closed', 'labels': 'bug',
'sort': 'created', 'direction': 'asc',
'since': '2012-05-20T23:10:27Z'}
p = {'per_page': 100}
p.update(params)
i = self.instance.issues(**params)
self.get_next(i)
self.session.get.assert_called_once_with(
url_for('issues'),
params=p,
headers={}
)
def test_keys(self):
"""
Show that an authenticated user can iterate over their public keys.
"""
i = self.instance.keys()
self.get_next(i)
self.session.get.assert_called_once_with(
url_for('user/keys'),
params={'per_page': 100},
headers={}
)
def test_notifications(self):
"""
Show that an authenticated user can iterate over their notifications.
"""
i = self.instance.notifications()
self.get_next(i)
self.session.get.assert_called_once_with(
url_for('notifications'),
params={'per_page': 100},
headers={},
)
def test_notifications_participating_in(self):
"""Show that the user can filter by pariticpating."""
i = self.instance.notifications(participating=True)
self.get_next(i)
self.session.get.assert_called_once_with(
url_for('notifications'),
params={'per_page': 100, 'participating': 'true'},
headers={}
)
def test_notifications_all(self):
"""Show that the user can iterate over all of their notifications."""
i = self.instance.notifications(all=True)
self.get_next(i)
self.session.get.assert_called_once_with(
url_for('notifications'),
params={'per_page': 100, 'all': 'true'},
headers={}
)
def test_organization_issues(self):
"""Show that one can iterate over an organization's issues."""
i = self.instance.organization_issues('org')
self.get_next(i)
self.session.get.assert_called_once_with(
url_for('orgs/org/issues'),
params={'per_page': 100},
headers={}
)
def test_organization_issues_with_params(self):
"""Show that one can pass parameters to #organization_issues."""
params = {'filter': 'assigned', 'state': 'closed', 'labels': 'bug',
'sort': 'created', 'direction': 'asc',
'since': '2012-05-20T23:10:27Z'}
i = self.instance.organization_issues('org', **params)
self.get_next(i)
p = {'per_page': 100}
p.update(params)
self.session.get.assert_called_once_with(
url_for('orgs/org/issues'),
params=p,
headers={}
)
def test_organizations(self):
"""
Show that one can iterate over all of the authenticated user's orgs.
"""
i = self.instance.organizations()
self.get_next(i)
self.session.get.assert_called_once_with(
url_for('user/orgs'),
params={'per_page': 100},
headers={}
)
def test_organizations_with(self):
"""Show that one can iterate over all of a user's orgs."""
i = self.instance.organizations_with('sigmavirus24')
self.get_next(i)
self.session.get.assert_called_once_with(
url_for('users/sigmavirus24/orgs'),
params={'per_page': 100},
headers={}
)
def test_public_gists(self):
"""Show that all public gists can be iterated over."""
i = self.instance.public_gists()
self.get_next(i)
self.session.get.assert_called_once_with(
url_for('gists/public'),
params={'per_page': 100},
headers={}
)
    def test_respositories(self):
        """
        Show that an authenticated user can iterate over their repositories.
        """
        # NOTE(review): method name misspells "repositories"; left unchanged
        # so tooling keyed to the existing test id keeps working.
        i = self.instance.repositories()
        self.get_next(i)
        self.session.get.assert_called_once_with(
            url_for('user/repos'),
            params={'per_page': 100},
            headers={}
        )
    def test_respositories_accepts_params(self):
        """Show that #repositories accepts params."""
        # NOTE(review): method name misspells "repositories"; kept for
        # continuity with the sibling test above.
        i = self.instance.repositories(type='all',
                                       direction='desc',
                                       sort='created')
        self.get_next(i)
        self.session.get.assert_called_once_with(
            url_for('user/repos'),
            params={'per_page': 100, 'type': 'all', 'direction': 'desc',
                    'sort': 'created'},
            headers={}
        )
def test_issues_on(self):
"""Show that a user can iterate over a repository's issues."""
i = self.instance.issues_on('owner', 'repo')
self.get_next(i)
self.session.get.assert_called_once_with(
url_for('repos/owner/repo/issues'),
params={'per_page': 100},
headers={}
)
def test_issues_on_with_params(self):
"""Show that #issues_on accepts multiple parameters."""
params = {'milestone': 1, 'state': 'all', 'assignee': 'owner',
'mentioned': 'someone', 'labels': 'bug,high'}
i = self.instance.issues_on('owner', 'repo', **params)
self.get_next(i)
params.update(per_page=100)
self.session.get.assert_called_once_with(
url_for('repos/owner/repo/issues'),
params=params,
headers={}
)
def test_starred(self):
"""
Show that one can iterate over an authenticated user's stars.
"""
i = self.instance.starred()
self.get_next(i)
self.session.get.assert_called_once_with(
url_for('user/starred'),
params={'per_page': 100},
headers={}
)
def test_starred_by(self):
"""Show that one can iterate over a user's stars."""
i = self.instance.starred_by('sigmavirus24')
self.get_next(i)
self.session.get.assert_called_once_with(
url_for('users/sigmavirus24/starred'),
params={'per_page': 100},
headers={}
)
def test_subscriptions(self):
"""
Show that one can iterate over an authenticated user's subscriptions.
"""
i = self.instance.subscriptions()
self.get_next(i)
self.session.get.assert_called_once_with(
url_for('user/subscriptions'),
params={'per_page': 100},
headers={}
)
def test_subscriptions_for(self):
"""Show that one can iterate over a user's subscriptions."""
i = self.instance.subscriptions_for('sigmavirus24')
self.get_next(i)
self.session.get.assert_called_once_with(
url_for('users/sigmavirus24/subscriptions'),
params={'per_page': 100},
headers={}
)
def test_user_issues(self):
"""Test that one can iterate over a user's issues."""
i = self.instance.user_issues()
# Get the next item from the iterator
self.get_next(i)
self.session.get.assert_called_once_with(
url_for('user/issues'),
params={'per_page': 100},
headers={}
)
def test_user_issues_with_parameters(self):
"""Test that one may pass parameters to GitHub#user_issues."""
# Set up the parameters to be sent
params = {'filter': 'assigned', 'state': 'closed', 'labels': 'bug',
'sort': 'created', 'direction': 'asc',
'since': '2012-05-20T23:10:27Z', 'per_page': 25}
# Make the call with the paramters
i = self.instance.user_issues(**params)
self.get_next(i)
self.session.get.assert_called_once_with(
url_for('user/issues'),
params=params,
headers={}
)
def test_repositories_by(self):
"""Test that one can iterate over a user's repositories."""
i = self.instance.repositories_by('sigmavirus24')
# Get the next item from the iterator
self.get_next(i)
self.session.get.assert_called_once_with(
url_for('users/sigmavirus24/repos'),
params={'per_page': 100},
headers={}
)
def test_repositories_by_with_type(self):
"""
Test that one can iterate over a user's repositories with a type.
"""
i = self.instance.repositories_by('sigmavirus24', 'all')
self.get_next(i)
self.session.get.assert_called_once_with(
url_for('users/sigmavirus24/repos'),
params={'per_page': 100, 'type': 'all'},
headers={}
)
class TestGitHubRequiresAuthentication(UnitHelper):

    """Test methods that require authentication."""

    described_class = GitHub
    example_data = None

    def after_setup(self):
        """Disable authentication on the session."""
        self.session.auth = None
        self.session.has_auth.return_value = False

    def test_authorization(self):
        """A user must be authenticated to retrieve an authorization."""
        with pytest.raises(AuthenticationFailed):
            self.instance.authorization(1)

    def test_authorizations(self):
        """Show that one needs to authenticate to use #authorizations."""
        with pytest.raises(AuthenticationFailed):
            self.instance.authorizations()

    def test_create_issue(self):
        """Show that GitHub#create_issue requires auth."""
        with pytest.raises(AuthenticationFailed):
            self.instance.create_issue('owner', 'repo', 'title')

    def test_create_key(self):
        """Show that GitHub#create_key requires auth."""
        with pytest.raises(AuthenticationFailed):
            self.instance.create_key('title', 'key')

    def test_create_repository(self):
        """Show that GitHub#create_repository requires auth."""
        with pytest.raises(AuthenticationFailed):
            self.instance.create_repository('repo')

    def test_emails(self):
        """Show that one needs to authenticate to use #emails."""
        with pytest.raises(AuthenticationFailed):
            self.instance.emails()

    def test_follow(self):
        """Show that one needs to authenticate to use #follow."""
        with pytest.raises(AuthenticationFailed):
            self.instance.follow('foo')

    def test_gists(self):
        """Show that one needs to authenticate to use #gists."""
        with pytest.raises(AuthenticationFailed):
            self.instance.gists()

    def test_is_following(self):
        """Show that GitHub#is_following requires authentication."""
        with pytest.raises(AuthenticationFailed):
            self.instance.is_following('foo')

    def test_is_starred(self):
        """Show that GitHub#is_starred requires authentication."""
        with pytest.raises(AuthenticationFailed):
            self.instance.is_starred('foo', 'bar')

    def test_issues(self):
        """Show that one needs to authenticate to use #issues."""
        with pytest.raises(AuthenticationFailed):
            self.instance.issues()

    def test_keys(self):
        """Show that one needs to authenticate to use #keys."""
        with pytest.raises(AuthenticationFailed):
            self.instance.keys()

    def test_me(self):
        """Show that GitHub#me requires authentication."""
        with pytest.raises(AuthenticationFailed):
            self.instance.me()

    def test_notifications(self):
        """Show that one needs to authenticate to use #notifications."""
        with pytest.raises(AuthenticationFailed):
            self.instance.notifications()

    def test_organization_issues(self):
        """Show that one needs to authenticate to use #organization_issues."""
        with pytest.raises(AuthenticationFailed):
            self.instance.organization_issues('org')

    def test_organizations(self):
        """Show that one needs to authenticate to use #organizations."""
        with pytest.raises(AuthenticationFailed):
            self.instance.organizations()

    def test_repositories(self):
        """Show that one needs to authenticate to use #repositories."""
        with pytest.raises(AuthenticationFailed):
            self.instance.repositories()

    def test_starred(self):
        """Show that one needs to authenticate to use #starred."""
        with pytest.raises(AuthenticationFailed):
            self.instance.starred()

    def test_user_issues(self):
        """Show that GitHub#user_issues requires authentication."""
        with pytest.raises(AuthenticationFailed):
            self.instance.user_issues()
class TestGitHubAuthorizations(UnitHelper):

    """Test GitHub methods that operate on OAuth authorizations."""

    described_class = GitHub
    example_data = None

    def create_session_mock(self, *args):
        """Extend the session mock with registered OAuth app credentials.

        The revoke_* endpoints look the credentials up on the session.
        """
        session = super(TestGitHubAuthorizations,
                        self).create_session_mock(*args)
        session.retrieve_client_credentials.return_value = ('id', 'secret')
        return session

    def test_revoke_authorization(self):
        """Test that GitHub#revoke_authorization calls the expected methods.

        It should use the session's delete and temporary_basic_auth methods.
        """
        self.instance.revoke_authorization('access_token')
        self.session.delete.assert_called_once_with(
            'https://api.github.com/applications/id/tokens/access_token',
            params={'client_id': None, 'client_secret': None}
        )
        self.session.temporary_basic_auth.assert_called_once_with(
            'id', 'secret'
        )

    def test_revoke_authorizations(self):
        """Test that GitHub#revoke_authorizations calls the expected methods.

        It should use the session's delete and temporary_basic_auth methods.
        """
        self.instance.revoke_authorizations()
        self.session.delete.assert_called_once_with(
            'https://api.github.com/applications/id/tokens',
            params={'client_id': None, 'client_secret': None}
        )
        self.session.temporary_basic_auth.assert_called_once_with(
            'id', 'secret'
        )
| icio/github3.py | tests/unit/test_github.py | Python | bsd-3-clause | 31,906 |
from __future__ import unicode_literals
from django.db.models import signals
from django.dispatch import receiver
from django.test import TestCase
from django.utils import six
from .models import Person, Car
# #8285: signals can be any callable
class PostDeleteHandler(object):
    """Callable post_delete receiver that records each deleted instance.

    Regression helper for #8285: any callable object (not just a plain
    function) may be connected as a signal receiver.
    """

    def __init__(self, data):
        # Shared list the test inspects after signals have fired.
        self.data = data

    def __call__(self, signal, sender, instance, **kwargs):
        # Record the instance and whether its id was cleared by deletion.
        self.data.append(
            (instance, instance.id is None)
        )
class MyReceiver(object):
    """Receiver that disconnects itself the first time it is called.

    Used to verify that disconnecting during dispatch does not break
    delivery to the remaining receivers.
    """

    def __init__(self, param):
        self.param = param
        # Set to True once the receiver has fired.
        self._run = False

    def __call__(self, signal, sender, **kwargs):
        self._run = True
        # Disconnect while the signal is still being dispatched.
        signal.disconnect(receiver=self, sender=sender)
class SignalTests(TestCase):

    """Exercise pre/post save and delete signal dispatch ordering."""

    def test_basic(self):
        """Connect receivers of several styles and verify dispatch order.

        The expected `data` lists encode the exact firing order:
        pre_save receivers fire in connection order (plain receiver,
        then decorator receivers), followed by post_save.
        """
        # Save up the number of connected signals so that we can check at the
        # end that all the signals we register get properly unregistered (#9989)
        pre_signals = (
            len(signals.pre_save.receivers),
            len(signals.post_save.receivers),
            len(signals.pre_delete.receivers),
            len(signals.post_delete.receivers),
        )

        data = []

        def pre_save_test(signal, sender, instance, **kwargs):
            data.append(
                (instance, kwargs.get("raw", False))
            )
        signals.pre_save.connect(pre_save_test)

        def post_save_test(signal, sender, instance, **kwargs):
            data.append(
                (instance, kwargs.get("created"), kwargs.get("raw", False))
            )
        signals.post_save.connect(post_save_test)

        def pre_delete_test(signal, sender, instance, **kwargs):
            data.append(
                (instance, instance.id is None)
            )
        signals.pre_delete.connect(pre_delete_test)

        # #8285: a callable object works as a receiver too.
        post_delete_test = PostDeleteHandler(data)
        signals.post_delete.connect(post_delete_test)

        # throw a decorator syntax receiver into the mix
        @receiver(signals.pre_save)
        def pre_save_decorator_test(signal, sender, instance, **kwargs):
            data.append(instance)

        @receiver(signals.pre_save, sender=Car)
        def pre_save_decorator_sender_test(signal, sender, instance, **kwargs):
            data.append(instance)

        p1 = Person(first_name="John", last_name="Smith")
        self.assertEqual(data, [])
        p1.save()
        self.assertEqual(data, [
            (p1, False),
            p1,
            (p1, True, False),
        ])
        data[:] = []

        p1.first_name = "Tom"
        p1.save()
        self.assertEqual(data, [
            (p1, False),
            p1,
            (p1, False, False),
        ])
        data[:] = []

        # Car signal (sender defined)
        c1 = Car(make="Volkswagon", model="Passat")
        c1.save()
        self.assertEqual(data, [
            (c1, False),
            c1,
            c1,
            (c1, True, False),
        ])
        data[:] = []

        # Calling an internal method purely so that we can trigger a "raw" save.
        p1.save_base(raw=True)
        self.assertEqual(data, [
            (p1, True),
            p1,
            (p1, False, True),
        ])
        data[:] = []

        p1.delete()
        self.assertEqual(data, [
            (p1, False),
            (p1, False),
        ])
        data[:] = []

        p2 = Person(first_name="James", last_name="Jones")
        p2.id = 99999
        p2.save()
        self.assertEqual(data, [
            (p2, False),
            p2,
            (p2, True, False),
        ])
        data[:] = []
        p2.id = 99998
        p2.save()
        self.assertEqual(data, [
            (p2, False),
            p2,
            (p2, True, False),
        ])
        data[:] = []
        p2.delete()
        self.assertEqual(data, [
            (p2, False),
            (p2, False)
        ])

        self.assertQuerysetEqual(
            Person.objects.all(), [
                "James Jones",
            ],
            six.text_type
        )

        signals.post_delete.disconnect(post_delete_test)
        signals.pre_delete.disconnect(pre_delete_test)
        signals.post_save.disconnect(post_save_test)
        signals.pre_save.disconnect(pre_save_test)
        signals.pre_save.disconnect(pre_save_decorator_test)
        signals.pre_save.disconnect(pre_save_decorator_sender_test, sender=Car)

        # Check that all our signals got disconnected properly.
        post_signals = (
            len(signals.pre_save.receivers),
            len(signals.post_save.receivers),
            len(signals.pre_delete.receivers),
            len(signals.post_delete.receivers),
        )
        self.assertEqual(pre_signals, post_signals)

    def test_disconnect_in_dispatch(self):
        """
        Test that signals that disconnect when being called don't mess future
        dispatching.
        """
        a, b = MyReceiver(1), MyReceiver(2)
        signals.post_save.connect(sender=Person, receiver=a)
        signals.post_save.connect(sender=Person, receiver=b)
        p = Person.objects.create(first_name='John', last_name='Smith')

        self.assertTrue(a._run)
        self.assertTrue(b._run)
        # Both receivers disconnected themselves during dispatch.
        self.assertEqual(signals.post_save.receivers, [])
| denisenkom/django | tests/signals/tests.py | Python | bsd-3-clause | 5,273 |
# Authors:
# Trevor Perrin
# Martin von Loewis - python 3 port
# Yngve Pettersen (ported by Paul Sokolovsky) - TLS 1.2
#
# See the LICENSE file for legal information regarding use of this file.
"""cryptomath module
This module has basic math/crypto code."""
from __future__ import print_function
import os
import math
import base64
import binascii
from .compat import *
# **************************************************************************
# Load Optional Modules
# **************************************************************************
# Try to load M2Crypto/OpenSSL
try:
from M2Crypto import m2
m2cryptoLoaded = True
except ImportError:
m2cryptoLoaded = False
#Try to load GMPY
try:
import gmpy
gmpyLoaded = True
except ImportError:
gmpyLoaded = False
#Try to load pycrypto
try:
import Crypto.Cipher.AES
pycryptoLoaded = True
except ImportError:
pycryptoLoaded = False
# **************************************************************************
# PRNG Functions
# **************************************************************************
# Check that os.urandom works
import zlib
length = len(zlib.compress(os.urandom(1000)))
assert(length > 900)
def getRandomBytes(howMany):
    """Return howMany cryptographically strong random bytes.

    Backed by os.urandom; the assertion guards against a short read.
    """
    randomData = bytearray(os.urandom(howMany))
    assert(len(randomData) == howMany)
    return randomData
prngName = "os.urandom"
# **************************************************************************
# Simple hash functions
# **************************************************************************
import hmac
import hashlib
def MD5(b):
    """Return the MD5 digest of b as a bytearray (legacy TLS use)."""
    return bytearray(hashlib.md5(compat26Str(b)).digest())
def SHA1(b):
    """Return the SHA-1 digest of b as a bytearray."""
    return bytearray(hashlib.sha1(compat26Str(b)).digest())
def SHA256(b):
    """Return the SHA-256 digest of b as a bytearray."""
    return bytearray(hashlib.sha256(compat26Str(b)).digest())
def HMAC_MD5(k, b):
    """Return HMAC-MD5 of message b under key k as a bytearray."""
    k = compatHMAC(k)
    b = compatHMAC(b)
    return bytearray(hmac.new(k, b, hashlib.md5).digest())
def HMAC_SHA1(k, b):
    """Return HMAC-SHA1 of message b under key k as a bytearray."""
    k = compatHMAC(k)
    b = compatHMAC(b)
    return bytearray(hmac.new(k, b, hashlib.sha1).digest())
def HMAC_SHA256(k, b):
    """Return HMAC-SHA256 of message b under key k as a bytearray."""
    k = compatHMAC(k)
    b = compatHMAC(b)
    return bytearray(hmac.new(k, b, hashlib.sha256).digest())
# **************************************************************************
# Converter Functions
# **************************************************************************
def bytesToNumber(b):
    """Interpret a byte sequence as a big-endian unsigned integer.

    An empty sequence yields 0.
    """
    result = 0
    for byte in b:
        # Shift the accumulator left one byte and fold in the next one.
        result = (result << 8) | byte
    return result
def numberToByteArray(n, howManyBytes=None):
    """Convert an integer into a bytearray, zero-pad to howManyBytes.

    The returned bytearray may be smaller than howManyBytes, but will
    not be larger.  The returned bytearray will contain a big-endian
    encoding of the input integer (n).  If howManyBytes is smaller
    than needed, the value is silently truncated to the low-order
    bytes (unchanged historical behavior).
    """
    # Fix: compare to None with `is`, not `==` (PEP 8; `==` can invoke
    # arbitrary __eq__ on n's type).
    if howManyBytes is None:
        howManyBytes = numBytes(n)
    b = bytearray(howManyBytes)
    for count in range(howManyBytes-1, -1, -1):
        # Fill from the least-significant end backwards.
        b[count] = int(n % 256)
        n >>= 8
    return b
def mpiToNumber(mpi): #mpi is an openssl-format bignum string
    """Convert an OpenSSL MPI (4-byte length prefix + big-endian
    magnitude) into an integer.

    Raises AssertionError if the sign bit of the first magnitude byte
    is set (negative MPIs are not supported).
    NOTE(review): ord(mpi[4]) assumes mpi indexes to a 1-char string
    (py2 str); on py3 bytes, mpi[4] is already an int and ord() would
    raise TypeError -- confirm callers only pass py2-style strings.
    """
    if (ord(mpi[4]) & 0x80) !=0: #Make sure this is a positive number
        raise AssertionError()
    b = bytearray(mpi[4:])
    return bytesToNumber(b)
def numberToMPI(n):
    """Convert a non-negative integer to an OpenSSL MPI byte string.

    Format: 4-byte big-endian length prefix followed by the big-endian
    magnitude.  Since MPI values are signed, a zero byte is prepended
    whenever the magnitude's high-order bit would otherwise be set.
    """
    b = numberToByteArray(n)
    ext = 0
    #If the high-order bit is going to be set,
    #add an extra byte of zeros
    # (numBits(n) & 0x7) == 0 means n exactly fills its top byte, so
    # bit 7 of b[0] is set and a sign-padding byte is required.
    if (numBits(n) & 0x7)==0:
        ext = 1
    length = numBytes(n) + ext
    b = bytearray(4+ext) + b
    b[0] = (length >> 24) & 0xFF
    b[1] = (length >> 16) & 0xFF
    b[2] = (length >> 8) & 0xFF
    b[3] = length & 0xFF
    return bytes(b)
# **************************************************************************
# Misc. Utility Functions
# **************************************************************************
def numBits(n):
    """Return the number of bits needed to represent n (0 for n == 0).

    Fix: the original ended with an unreachable second `return`
    statement (dead code) after the hex-digit lookup table.  The table
    computed exactly int.bit_length() for non-negative integers, so use
    that directly; it is exact for arbitrarily large ints (the dead
    float-log fallback would not have been).
    """
    if n == 0:
        return 0
    return n.bit_length()
def numBytes(n):
    """Return the number of bytes needed to represent n (0 for n == 0).

    Fix: uses integer arithmetic instead of int(math.ceil(bits / 8.0)).
    The float division loses precision once the bit count exceeds 2**53,
    so the integer form is exact for arbitrarily large n.  Also makes
    this function self-contained (no numBits call), same result.
    """
    if n == 0:
        return 0
    return (n.bit_length() + 7) // 8
# **************************************************************************
# Big Number Math
# **************************************************************************
def getRandomNumber(low, high):
    """Return a uniformly random integer n with low <= n < high.

    Raises AssertionError if low >= high.  Uses rejection sampling:
    draw numBytes(high) random bytes, mask the top byte down to
    numBits(high) bits (keeping the rejection rate below 50%), and
    retry until the value lands in range.

    Fix: the original bound its byte buffer to a local named `bytes`,
    shadowing the builtin; renamed.  Range test uses a chained
    comparison.
    """
    if low >= high:
        raise AssertionError()
    howManyBits = numBits(high)
    howManyBytes = numBytes(high)
    lastBits = howManyBits % 8
    while 1:
        randomBytes = getRandomBytes(howManyBytes)
        if lastBits:
            # Zero the excess high bits so the draw is at most
            # numBits(high) bits wide.
            randomBytes[0] = randomBytes[0] % (1 << lastBits)
        n = bytesToNumber(randomBytes)
        if low <= n < high:
            return n
def gcd(a, b):
    """Return the greatest common divisor of a and b (Euclid)."""
    # Order the pair so the larger value leads, then iterate Euclid's
    # remainder step until the smaller value reaches zero.
    larger, smaller = max(a, b), min(a, b)
    while smaller:
        larger, smaller = smaller, larger % smaller
    return larger
def lcm(a, b):
    """Return the least common multiple of a and b."""
    return (a * b) // gcd(a, b)
#Returns inverse of a mod b, zero if none
#Uses Extended Euclidean Algorithm
def invMod(a, b):
    """Return the inverse of a modulo b, or 0 if no inverse exists.

    Extended Euclidean algorithm: tracks the Bezout coefficient of `a`
    alongside the remainder sequence.
    """
    remainder, modulus = a, b
    coeff, prevCoeff = 1, 0
    while remainder != 0:
        quotient = modulus // remainder
        remainder, modulus = modulus - quotient * remainder, remainder
        coeff, prevCoeff = prevCoeff - quotient * coeff, coeff
    # gcd(a, b) ended up in `modulus`; an inverse exists only if it is 1.
    if modulus == 1:
        return prevCoeff % b
    return 0
if gmpyLoaded:
    def powMod(base, power, modulus):
        """Return (base ** power) % modulus using gmpy bignums.

        NOTE(review): `long` is a Python 2 builtin; this branch would
        fail on Python 3 if gmpy were importable there -- confirm this
        path is py2-only.
        """
        base = gmpy.mpz(base)
        power = gmpy.mpz(power)
        modulus = gmpy.mpz(modulus)
        result = pow(base, power, modulus)
        return long(result)
else:
    def powMod(base, power, modulus):
        """Return (base ** power) % modulus.

        A negative power is interpreted as a modular inverse of
        base**|power| (0 if no inverse exists), via invMod.
        """
        if power < 0:
            result = pow(base, power*-1, modulus)
            result = invMod(result, modulus)
            return result
        else:
            return pow(base, power, modulus)
#Pre-calculate a sieve of the ~100 primes < 1000:
def makeSieve(n):
    """Return a list of all primes strictly less than n (Eratosthenes)."""
    isPrimeFlags = [True] * n
    primes = []
    for candidate in range(2, n):
        if not isPrimeFlags[candidate]:
            continue
        primes.append(candidate)
        # Cross off every multiple of this prime.
        for multiple in range(candidate * 2, n, candidate):
            isPrimeFlags[multiple] = False
    return primes
sieve = makeSieve(1000)
def isPrime(n, iterations=5, display=False):
    """Probabilistic primality test: trial division then Rabin-Miller.

    Relies on the module-level `sieve` of primes below 1000.  With
    iterations=0 only trial division runs (callers use this as a cheap
    pre-filter).  `display` prints progress markers.
    """
    #Trial division with sieve
    for x in sieve:
        if x >= n: return True
        if n % x == 0: return False
    #Passed trial division, proceed to Rabin-Miller
    #Rabin-Miller implemented per Ferguson & Schneier
    #Compute s, t for Rabin-Miller
    if display: print("*", end=' ')
    # Factor n-1 as s * 2**t with s odd.
    s, t = n-1, 0
    while s % 2 == 0:
        s, t = s//2, t+1
    #Repeat Rabin-Miller x times
    a = 2 #Use 2 as a base for first iteration speedup, per HAC
    for count in range(iterations):
        v = powMod(a, s, n)
        if v==1:
            continue
        i = 0
        # Square v up to t-1 times looking for n-1; if never found,
        # a is a witness that n is composite.
        while v != n-1:
            if i == t-1:
                return False
            else:
                v, i = powMod(v, 2, n), i+1
        # Pick a fresh random base for the next round.
        a = getRandomNumber(2, n)
    return True
def getRandomPrime(bits, display=False):
    """Return a random prime with the requested bit length (>= 10 bits).

    The two most-significant bits of the candidate range are forced on
    so that an RSA modulus built from two such primes has its MSB set.
    Candidates are stepped in increments of 30 holding p % 30 == 29,
    which skips multiples of 2, 3 and 5 for free.
    """
    if bits < 10:
        raise AssertionError()
    #The 1.5 ensures the 2 MSBs are set
    #Thus, when used for p,q in RSA, n will have its MSB set
    #
    #Since 30 is lcm(2,3,5), we'll set our test numbers to
    #29 % 30 and keep them there
    low = ((2 ** (bits-1)) * 3) // 2
    high = 2 ** bits - 30
    p = getRandomNumber(low, high)
    # Align on the 29 (mod 30) residue class.
    p += 29 - (p % 30)
    while 1:
        if display: print(".", end=' ')
        p += 30
        if p >= high:
            # Walked past the top of the range; re-draw and re-align.
            p = getRandomNumber(low, high)
            p += 29 - (p % 30)
        if isPrime(p, display=display):
            return p
#Unused at the moment...
#Unused at the moment...
def getRandomSafePrime(bits, display=False):
    """Return a random safe prime p = 2q + 1 (q also prime), `bits` long.

    Same candidate-stepping scheme as getRandomPrime, applied to q.
    Currently unused by the rest of the module.
    """
    if bits < 10:
        raise AssertionError()
    #The 1.5 ensures the 2 MSBs are set
    #Thus, when used for p,q in RSA, n will have its MSB set
    #
    #Since 30 is lcm(2,3,5), we'll set our test numbers to
    #29 % 30 and keep them there
    low = (2 ** (bits-2)) * 3//2
    high = (2 ** (bits-1)) - 30
    q = getRandomNumber(low, high)
    q += 29 - (q % 30)
    while 1:
        if display: print(".", end=' ')
        q += 30
        if (q >= high):
            q = getRandomNumber(low, high)
            q += 29 - (q % 30)
        #Ideas from Tom Wu's SRP code
        #Do trial division on p and q before Rabin-Miller
        # First call uses iterations=0: trial division only, as a cheap
        # filter before the expensive full tests below.
        if isPrime(q, 0, display=display):
            p = (2 * q) + 1
            if isPrime(p, display=display):
                if isPrime(q, display=display):
                    return p
| scheib/chromium | third_party/tlslite/tlslite/utils/cryptomath.py | Python | bsd-3-clause | 8,434 |
#!/usr/bin/python
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import mox
import os
import sys
import shutil
import time
import constants
sys.path.insert(0, constants.SOURCE_ROOT)
from chromite.lib import cros_build_lib
from chromite.lib import cros_test_lib
from chromite.lib import git
from chromite.buildbot import remote_try
from chromite.buildbot import repository
from chromite.scripts import cbuildbot
class RemoteTryJobMock(remote_try.RemoteTryJob):
    """Test double whose EXT_SSH_URL/INT_SSH_URL are repointed at local
    git mirrors by RemoteTryTests._SetupMirrors."""
    pass
# pylint: disable=W0212,R0904,E1101
class RemoteTryTests(cros_test_lib.MoxTempDirTestCase):

  """Tests for submitting remote tryjobs against local git mirrors."""

  PATCHES = ('5555', '6666')
  BOTS = ('x86-generic-paladin', 'arm-generic-paladin')

  def setUp(self):
    self.parser = cbuildbot._CreateParser()
    args = ['-r', '/tmp/test_build1', '-g', '5555', '-g',
            '6666', '--remote']
    args.extend(self.BOTS)
    self.options, args = cbuildbot._ParseCommandLine(self.parser, args)
    self.checkout_dir = os.path.join(self.tempdir, 'test_checkout')
    self.int_mirror, self.ext_mirror = None, None

  def _RunCommandSingleOutput(self, cmd, cwd):
    """Run cmd in cwd and return its single line of output.

    Asserts the command printed exactly one whitespace-separated token.
    """
    result = cros_build_lib.RunCommandCaptureOutput(cmd, cwd=cwd)
    out_lines = result.output.split()
    self.assertEqual(len(out_lines), 1)
    return out_lines[0]

  def _GetNewestFile(self, dirname, basehash):
    """Return the single file added by the single commit after basehash."""
    newhash = git.GetGitRepoRevision(dirname)
    self.assertNotEqual(basehash, newhash)
    cmd = ['git', 'log', '--format=%H', '%s..' % basehash]
    # Make sure we have a single commit.
    self._RunCommandSingleOutput(cmd, cwd=dirname)
    cmd = ['git', 'diff', '--name-only', 'HEAD^']
    # Make sure only one file per commit.
    return self._RunCommandSingleOutput(cmd, cwd=dirname)

  def _SubmitJob(self, checkout_dir, job, version=None):
    """Returns the path to the tryjob description."""
    self.assertTrue(isinstance(job, RemoteTryJobMock))
    basehash = git.GetGitRepoRevision(job.ssh_url)
    if version is not None:
      self._SetMirrorVersion(version)
    job.Submit(workdir=checkout_dir, dryrun=True)
    # Get the file that was just created.
    created_file = self._GetNewestFile(checkout_dir, basehash)
    return os.path.join(checkout_dir, created_file)

  def _SetupMirrors(self):
    """Clone local external and internal tryjob mirrors and point
    RemoteTryJobMock's ssh URLs at them."""
    mirror = os.path.join(self.tempdir, 'tryjobs_mirror')
    os.mkdir(mirror)
    url = '%s/%s' % (constants.GIT_HTTP_URL, 'chromiumos/tryjobs')
    repository.CloneGitRepo(mirror, url,
                            bare=True)
    self.ext_mirror = mirror
    mirror = os.path.join(self.tempdir, 'tryjobs_int_mirror')
    os.mkdir(mirror)
    repository.CloneGitRepo(mirror, self.ext_mirror, reference=self.ext_mirror,
                            bare=True)

    self.int_mirror = mirror
    RemoteTryJobMock.EXT_SSH_URL = self.ext_mirror
    RemoteTryJobMock.INT_SSH_URL = self.int_mirror
    self._SetMirrorVersion(remote_try.RemoteTryJob.TRYJOB_FORMAT_VERSION, True)

  def _SetMirrorVersion(self, version, only_if_missing=False):
    """Commit the given tryjob format version file into both mirrors."""
    for path in (self.ext_mirror, self.int_mirror):
      vpath = os.path.join(path, remote_try.RemoteTryJob.TRYJOB_FORMAT_FILE)
      if os.path.exists(vpath) and only_if_missing:
        continue
      # Get ourselves a working dir.
      tmp_repo = os.path.join(self.tempdir, 'tmp-repo')
      git.RunGit(self.tempdir, ['clone', path, tmp_repo])
      vpath = os.path.join(tmp_repo, remote_try.RemoteTryJob.TRYJOB_FORMAT_FILE)
      with open(vpath, 'w') as f:
        f.write(str(version))
      git.RunGit(tmp_repo, ['add', vpath])
      git.RunGit(tmp_repo, ['commit', '-m', 'setting version to %s' % version])
      git.RunGit(tmp_repo, ['push', path, 'master:master'])
      shutil.rmtree(tmp_repo)

  def _CreateJob(self, mirror=True):
    """Build a tryjob; with mirror=True it targets the local mirrors."""
    job_class = remote_try.RemoteTryJob
    if mirror:
      job_class = RemoteTryJobMock
      self._SetupMirrors()

    job = job_class(self.options, self.BOTS, [])
    return job

  def testJobTimestamp(self):
    """Verify jobs have unique names."""
    def submit_helper(dirname):
      work_dir = os.path.join(self.tempdir, dirname)
      return os.path.basename(self._SubmitJob(work_dir, job))

    self.mox.StubOutWithMock(repository, 'IsARepoRoot')
    repository.IsARepoRoot(mox.IgnoreArg()).AndReturn(False)
    self.mox.ReplayAll()
    job = self._CreateJob()

    file1 = submit_helper('test1')
    # Tryjob file names are based on timestamp, so delay one second to avoid two
    # jobfiles having the same name.
    time.sleep(1)
    file2 = submit_helper('test2')
    self.assertNotEqual(file1, file2)

  def testSimpleTryJob(self, version=None):
    """Test that a tryjob spec file is created and pushed properly."""
    self.mox.StubOutWithMock(repository, 'IsARepoRoot')
    repository.IsARepoRoot(mox.IgnoreArg()).AndReturn(True)
    self.mox.StubOutWithMock(repository, 'IsInternalRepoCheckout')
    repository.IsInternalRepoCheckout(mox.IgnoreArg()).AndReturn(False)

    self.mox.ReplayAll()
    try:
      os.environ["GIT_AUTHOR_EMAIL"] = "Elmer Fudd <efudd@google.com>"
      os.environ["GIT_COMMITTER_EMAIL"] = "Elmer Fudd <efudd@google.com>"
      job = self._CreateJob()
    finally:
      os.environ.pop("GIT_AUTHOR_EMAIL", None)
      os.environ.pop("GIT_COMMITTER_EMAIL", None)
    created_file = self._SubmitJob(self.checkout_dir, job, version=version)
    with open(created_file, 'rb') as job_desc_file:
      values = json.load(job_desc_file)

    self.assertTrue('efudd@google.com' in values['email'][0])

    for patch in self.PATCHES:
      self.assertTrue(patch in values['extra_args'],
                      msg="expected patch %s in args %s" %
                          (patch, values['extra_args']))

    self.assertTrue(set(self.BOTS).issubset(values['bot']))

    remote_url = cros_build_lib.RunCommand(
        ['git', 'config', 'remote.origin.url'], redirect_stdout=True,
        cwd=self.checkout_dir).output.strip()
    self.assertEqual(remote_url, self.ext_mirror)

  def testClientVersionAwareness(self):
    # A mirror advertising a newer tryjob format must make submission fail.
    self.assertRaises(
        remote_try.ChromiteUpgradeNeeded,
        self.testSimpleTryJob,
        version=remote_try.RemoteTryJob.TRYJOB_FORMAT_VERSION + 1)

  def testInternalTryJob(self):
    """Verify internal tryjobs are pushed properly."""
    self.mox.StubOutWithMock(repository, 'IsARepoRoot')
    repository.IsARepoRoot(mox.IgnoreArg()).AndReturn(True)
    self.mox.StubOutWithMock(repository, 'IsInternalRepoCheckout')
    repository.IsInternalRepoCheckout(mox.IgnoreArg()).AndReturn(True)

    self.mox.ReplayAll()
    job = self._CreateJob()
    self._SubmitJob(self.checkout_dir, job)

    remote_url = cros_build_lib.RunCommand(
        ['git', 'config', 'remote.origin.url'], redirect_stdout=True,
        cwd=self.checkout_dir).output.strip()
    self.assertEqual(remote_url, self.int_mirror)

  def testBareTryJob(self):
    """Verify submitting a tryjob from just a chromite checkout works."""
    self.mox.StubOutWithMock(repository, 'IsARepoRoot')
    repository.IsARepoRoot(mox.IgnoreArg()).AndReturn(False)
    self.mox.StubOutWithMock(repository, 'IsInternalRepoCheckout')

    self.mox.ReplayAll()
    job = self._CreateJob(mirror=False)
    self.assertEqual(job.ssh_url, remote_try.RemoteTryJob.EXT_SSH_URL)
if __name__ == '__main__':
cros_test_lib.main()
| espadrine/opera | chromium/src/third_party/chromite/buildbot/remote_try_unittest.py | Python | bsd-3-clause | 7,349 |
"""
Custom manager for HelpEntry objects.
"""
from django.db import models
from src.utils import logger, utils
__all__ = ("HelpEntryManager",)
class HelpEntryManager(models.Manager):
    """
    This HelpEntryManager implements methods for searching
    and manipulating HelpEntries directly from the database.
    These methods will all return database objects
    (or QuerySets) directly.

    Evennia-specific:
    find_topicmatch
    find_apropos
    find_topicsuggestions
    find_topics_with_category
    all_to_category
    search_help (equivalent to ev.search_helpentry)
    """
    def find_topicmatch(self, topicstr, exact=False):
        """
        Searches for matching topics based on player's input.
        """
        # A #dbref input short-circuits all name matching.
        dbref = utils.dbref(topicstr)
        if dbref:
            return self.filter(id=dbref)
        # Progressively relax the match: exact title, then prefix
        # (unless exact matching was requested), then substring.
        matches = self.filter(db_key__iexact=topicstr)
        if not exact and not matches:
            matches = self.filter(db_key__istartswith=topicstr)
        if not matches:
            matches = self.filter(db_key__icontains=topicstr)
        return matches

    def find_apropos(self, topicstr):
        """
        Do a very loose search, returning all help entries containing
        the search criterion in their titles.
        """
        return self.filter(db_key__icontains=topicstr)

    def find_topicsuggestions(self, topicstr):
        """
        Do a fuzzy match, preferably within the category of the
        current topic.
        """
        # Substring matches, minus the topic itself.
        candidates = self.filter(db_key__icontains=topicstr)
        return candidates.exclude(db_key__iexact=topicstr)

    def find_topics_with_category(self, help_category):
        """
        Search topics having a particular category
        """
        return self.filter(db_help_category__iexact=help_category)

    def get_all_topics(self):
        """
        Return all topics.
        """
        return self.all()

    def get_all_categories(self, pobject):
        """
        Return all defined category names with at least one
        topic in them.
        """
        categories = set()
        for entry in self.all():
            categories.add(entry.help_category)
        return list(categories)

    def all_to_category(self, default_category):
        """
        Shifts all help entries in database to default_category.
        This action cannot be reverted. It is used primarily by
        the engine when importing a default help database, making
        sure this ends up in one easily separated category.
        """
        for entry in self.all():
            entry.help_category = default_category
            entry.save()
        logger.log_infomsg("Help database moved to category %s" % default_category)

    def search_help(self, ostring, help_category=None):
        """
        Retrieve a search entry object.

        ostring - the help topic to look for
        category - limit the search to a particular help topic
        """
        ostring = ostring.strip().lower()
        if not help_category:
            return self.filter(db_key__iexact=ostring)
        return self.filter(db_key__iexact=ostring,
                           db_help_category__iexact=help_category)
| google-code-export/evennia | src/help/manager.py | Python | bsd-3-clause | 3,168 |
"""Pgbouncer check
Collects metrics from the pgbouncer database.
"""
# 3p
import psycopg2 as pg
# project
from checks import AgentCheck, CheckException
class ShouldRestartException(Exception):
    """Raised when the pgbouncer connection is broken and the check
    should reconnect (bypassing the cached connection) and retry."""
    pass
class PgBouncer(AgentCheck):
    """Collects metrics from pgbouncer
    """
    # Shorthand for the AgentCheck reporting methods referenced in the
    # metric tables below.
    RATE = AgentCheck.rate
    GAUGE = AgentCheck.gauge
    # Name of pgbouncer's own admin pseudo-database; rows for it are skipped.
    DB_NAME = 'pgbouncer'
    SERVICE_CHECK_NAME = 'pgbouncer.can_connect'

    # Metrics from `SHOW STATS`. 'descriptors' maps leading result columns
    # to tag names; 'metrics' maps the remaining columns to
    # (metric name, reporting method) pairs, in column order.
    STATS_METRICS = {
        'descriptors': [
            ('database', 'db'),
        ],
        'metrics': [
            ('total_requests', ('pgbouncer.stats.requests_per_second', RATE)),
            ('total_received', ('pgbouncer.stats.bytes_received_per_second', RATE)),
            ('total_sent', ('pgbouncer.stats.bytes_sent_per_second', RATE)),
            ('total_query_time', ('pgbouncer.stats.total_query_time', GAUGE)),
            ('avg_req', ('pgbouncer.stats.avg_req', GAUGE)),
            ('avg_recv', ('pgbouncer.stats.avg_recv', GAUGE)),
            ('avg_sent', ('pgbouncer.stats.avg_sent', GAUGE)),
            ('avg_query', ('pgbouncer.stats.avg_query', GAUGE)),
        ],
        'query': """SHOW STATS""",
    }

    # Metrics from `SHOW POOLS`, tagged by database and user.
    POOLS_METRICS = {
        'descriptors': [
            ('database', 'db'),
            ('user', 'user'),
        ],
        'metrics': [
            ('cl_active', ('pgbouncer.pools.cl_active', GAUGE)),
            ('cl_waiting', ('pgbouncer.pools.cl_waiting', GAUGE)),
            ('sv_active', ('pgbouncer.pools.sv_active', GAUGE)),
            ('sv_idle', ('pgbouncer.pools.sv_idle', GAUGE)),
            ('sv_used', ('pgbouncer.pools.sv_used', GAUGE)),
            ('sv_tested', ('pgbouncer.pools.sv_tested', GAUGE)),
            ('sv_login', ('pgbouncer.pools.sv_login', GAUGE)),
            ('maxwait', ('pgbouncer.pools.maxwait', GAUGE)),
        ],
        'query': """SHOW POOLS""",
    }

    def __init__(self, name, init_config, agentConfig, instances=None):
        AgentCheck.__init__(self, name, init_config, agentConfig, instances)
        # Cache of connections, keyed by 'host:port'.
        self.dbs = {}

    def _get_service_checks_tags(self, host, port):
        """Build the tag list attached to the can_connect service check."""
        service_checks_tags = [
            "host:%s" % host,
            "port:%s" % port,
            "db:%s" % self.DB_NAME
        ]
        return service_checks_tags

    def _collect_stats(self, db, instance_tags):
        """Query pgbouncer for various metrics

        Runs SHOW STATS and SHOW POOLS on the given connection and reports
        each configured metric, tagged per the scope's descriptors plus
        instance_tags. Raises ShouldRestartException on connection errors
        so the caller can reconnect and retry.
        """
        metric_scope = [self.STATS_METRICS, self.POOLS_METRICS]
        try:
            cursor = db.cursor()
            results = None
            for scope in metric_scope:
                metrics = scope['metrics']
                cols = [m[0] for m in metrics]
                try:
                    query = scope['query']
                    self.log.debug("Running query: %s" % query)
                    cursor.execute(query)
                    results = cursor.fetchall()
                except pg.Error, e:
                    # One failing query shouldn't abort the other scope.
                    self.log.warning("Not all metrics may be available: %s" % str(e))
                    continue
                for row in results:
                    # Skip the row for pgbouncer's own admin database.
                    if row[0] == self.DB_NAME:
                        continue
                    desc = scope['descriptors']
                    if len(row) == len(cols) + len(desc) + 1:
                        # Some versions of pgbouncer have an extra field at the end of show pools
                        row = row[:-1]
                    assert len(row) == len(cols) + len(desc)
                    tags = list(instance_tags)
                    # Leading columns become tags per the descriptor mapping.
                    tags += ["%s:%s" % (d[0][1], d[1]) for d in zip(desc, row[:len(desc)])]
                    for i, (key_name, (mname, mtype)) in enumerate(metrics):
                        value = row[i + len(desc)]
                        # mtype is AgentCheck.rate/gauge, called unbound with self.
                        mtype(self, mname, value, tags)
            if not results:
                self.warning('No results were found for query: "%s"' % query)
            cursor.close()
        except pg.Error, e:
            self.log.error("Connection error: %s" % str(e))
            raise ShouldRestartException

    def _get_connection(self, key, host, port, user, password, use_cached=True):
        "Get and memoize connections to instances"
        if key in self.dbs and use_cached:
            return self.dbs[key]
        elif host != "" and user != "":
            try:
                if host == 'localhost' and password == '':
                    # Use ident method
                    connection = pg.connect("user=%s dbname=%s" % (user, self.DB_NAME))
                elif port != '':
                    connection = pg.connect(host=host, port=port, user=user,
                                            password=password, database=self.DB_NAME)
                else:
                    connection = pg.connect(host=host, user=user, password=password,
                                            database=self.DB_NAME)
                # Autocommit is required: pgbouncer's admin console does not
                # support transactions.
                connection.set_isolation_level(pg.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
                self.log.debug('pgbouncer status: %s' % AgentCheck.OK)
            except Exception:
                # Emit a CRITICAL service check before re-raising so the
                # failure is visible even though the check aborts.
                message = u'Cannot establish connection to pgbouncer://%s:%s/%s' % (host, port, self.DB_NAME)
                self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
                                   tags=self._get_service_checks_tags(host, port),
                                   message=message)
                self.log.debug('pgbouncer status: %s' % AgentCheck.CRITICAL)
                raise
        else:
            if not host:
                raise CheckException("Please specify a PgBouncer host to connect to.")
            elif not user:
                raise CheckException("Please specify a user to connect to PgBouncer as.")
        self.dbs[key] = connection
        return connection

    def check(self, instance):
        """Main check entry point: connect, collect, and report status."""
        host = instance.get('host', '')
        port = instance.get('port', '')
        user = instance.get('username', '')
        password = instance.get('password', '')
        tags = instance.get('tags', [])
        key = '%s:%s' % (host, port)
        if tags is None:
            tags = []
        else:
            # De-duplicate user-supplied tags.
            tags = list(set(tags))
        try:
            db = self._get_connection(key, host, port, user, password)
            self._collect_stats(db, tags)
        except ShouldRestartException:
            # Stale cached connection: reconnect once and retry.
            self.log.info("Resetting the connection")
            db = self._get_connection(key, host, port, user, password, use_cached=False)
            self._collect_stats(db, tags)
        message = u'Established connection to pgbouncer://%s:%s/%s' % (host, port, self.DB_NAME)
        self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK,
                           tags=self._get_service_checks_tags(host, port),
                           message=message)
        self.log.debug('pgbouncer status: %s' % AgentCheck.OK)
| mderomph-coolblue/dd-agent | checks.d/pgbouncer.py | Python | bsd-3-clause | 7,003 |
#!/usr/bin/env python
# Copyright 2020 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Helper script to execute a single-processed fuzzing session.

Creates fuzz tests in workdir/output/dir-<dir number>/fuzz-XXX.js.
Expects the <dir number> as single parameter.
"""

import os
import subprocess
import sys
import time

# All paths are resolved relative to the repository root (two levels above
# this file).
BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
APP_DIR = os.path.join(BASE_PATH, 'workdir', 'app_dir')
FUZZ_EXE = os.path.join(BASE_PATH, 'workdir', 'fuzzer', 'ochang_js_fuzzer')
INPUT_DIR = os.path.join(BASE_PATH, 'workdir', 'input')
TEST_CASES = os.path.join(BASE_PATH, 'workdir', 'output')

# Number of fuzz tests generated per session.
COUNT = 64
# Shell command template; still needs the output dir and log file substituted.
FUZZ = ('FUZZ_MODE=foozzie APP_NAME=d8 APP_DIR=%s %s -o %%s -n %s -i %s > %%s'
        % (APP_DIR, FUZZ_EXE, COUNT, INPUT_DIR))

# The single argument is the non-negative output-directory number.
assert(len(sys.argv) > 1)
dir_number = int(sys.argv[1])
assert(dir_number >= 0)

path = os.path.join(TEST_CASES, 'dir-%d' % dir_number)
assert not os.path.exists(path), 'Need fresh workdir for fuzzing'
os.makedirs(path)

# Run the fuzzer, capturing stdout in out.log inside the output dir.
start = time.time()
subprocess.check_call(
    FUZZ % (path, os.path.join(path, 'out.log')), shell=True)
duration = int(time.time() - start)

# Record the session duration (seconds) next to the generated tests.
with open(os.path.join(path, 'duration.log'), 'w') as f:
    f.write(str(duration))
| youtube/cobalt | third_party/v8/tools/clusterfuzz/js_fuzzer/tools/fuzz_one.py | Python | bsd-3-clause | 1,328 |
#!/usr/bin/env python
#
# Copyright 2011, Toru Maesaka
#
# Redistribution and use of this source code is licensed under
# the BSD license. See COPYING file for license description.
import os
import sys
# Make the package under test importable by prepending the project root
# (the parent of this tests/ directory) to the module search path.
sys.path.insert(0, os.path.join(os.path.split(__file__)[0], '..'))
| sapo/python-kyototycoon | tests/config.py | Python | bsd-3-clause | 271 |
"""
Accessors for related objects.
When a field defines a relation between two models, each model class provides
an attribute to access related instances of the other model class (unless the
reverse accessor has been disabled with related_name='+').
Accessors are implemented as descriptors in order to customize access and
assignment. This module defines the descriptor classes.
Forward accessors follow foreign keys. Reverse accessors trace them back. For
example, with the following models::
class Parent(Model):
pass
class Child(Model):
parent = ForeignKey(Parent, related_name='children')
``child.parent`` is a forward many-to-one relation. ``parent.children`` is a
reverse many-to-one relation.
There are three types of relations (many-to-one, one-to-one, and many-to-many)
and two directions (forward and reverse) for a total of six combinations.
1. Related instance on the forward side of a many-to-one or one-to-one
relation: ``ForwardManyToOneDescriptor``.
Uniqueness of foreign key values is irrelevant to accessing the related
instance, making the many-to-one and one-to-one cases identical as far as
the descriptor is concerned. The constraint is checked upstream (unicity
validation in forms) or downstream (unique indexes in the database).
If you're looking for ``ForwardOneToOneDescriptor``, use
``ForwardManyToOneDescriptor`` instead.
2. Related instance on the reverse side of a one-to-one relation:
``ReverseOneToOneDescriptor``.
One-to-one relations are asymmetrical, despite the apparent symmetry of the
name, because they're implemented in the database with a foreign key from
one table to another. As a consequence ``ReverseOneToOneDescriptor`` is
slightly different from ``ForwardManyToOneDescriptor``.
3. Related objects manager for related instances on the reverse side of a
many-to-one relation: ``ReverseManyToOneDescriptor``.
Unlike the previous two classes, this one provides access to a collection
of objects. It returns a manager rather than an instance.
4. Related objects manager for related instances on the forward or reverse
sides of a many-to-many relation: ``ManyToManyDescriptor``.
Many-to-many relations are symmetrical. The syntax of Django models
requires declaring them on one side but that's an implementation detail.
They could be declared on the other side without any change in behavior.
Therefore the forward and reverse descriptors can be the same.
If you're looking for ``ForwardManyToManyDescriptor`` or
``ReverseManyToManyDescriptor``, use ``ManyToManyDescriptor`` instead.
"""
from __future__ import unicode_literals
import warnings
from operator import attrgetter
from django.db import connections, router, transaction
from django.db.models import Q, signals
from django.db.models.query import QuerySet
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.functional import cached_property
class ForwardManyToOneDescriptor(object):
    """
    Accessor to the related object on the forward side of a many-to-one or
    one-to-one relation.

    In the example::

        class Child(Model):
            parent = ForeignKey(Parent, related_name='children')

    ``child.parent`` is a ``ForwardManyToOneDescriptor`` instance.
    """

    def __init__(self, field_with_rel):
        self.field = field_with_rel
        self.cache_name = self.field.get_cache_name()

    @cached_property
    def RelatedObjectDoesNotExist(self):
        # The exception can't be created at initialization time since the
        # related model might not be resolved yet; `rel.model` might still be
        # a string model reference.
        return type(
            str('RelatedObjectDoesNotExist'),
            (self.field.remote_field.model.DoesNotExist, AttributeError),
            {}
        )

    def is_cached(self, instance):
        """Return True if the related object is already cached on *instance*."""
        return hasattr(instance, self.cache_name)

    def get_queryset(self, **hints):
        manager = self.field.remote_field.model._default_manager
        # If the related manager indicates that it should be used for
        # related fields, respect that.
        if not getattr(manager, 'use_for_related_fields', False):
            manager = self.field.remote_field.model._base_manager
        return manager.db_manager(hints=hints).all()

    def get_prefetch_queryset(self, instances, queryset=None):
        # Return the (queryset, rel_obj_attr, instance_attr, single,
        # cache_name) tuple consumed by the prefetch machinery.
        if queryset is None:
            queryset = self.get_queryset()
        queryset._add_hints(instance=instances[0])

        rel_obj_attr = self.field.get_foreign_related_value
        instance_attr = self.field.get_local_related_value
        instances_dict = {instance_attr(inst): inst for inst in instances}
        related_field = self.field.foreign_related_fields[0]

        # FIXME: This will need to be revisited when we introduce support for
        # composite fields. In the meantime we take this practical approach to
        # solve a regression on 1.6 when the reverse manager in hidden
        # (related_name ends with a '+'). Refs #21410.
        # The check for len(...) == 1 is a special case that allows the query
        # to be join-less and smaller. Refs #21760.
        if self.field.remote_field.is_hidden() or len(self.field.foreign_related_fields) == 1:
            query = {'%s__in' % related_field.name: set(instance_attr(inst)[0] for inst in instances)}
        else:
            query = {'%s__in' % self.field.related_query_name(): instances}
        queryset = queryset.filter(**query)

        # Since we're going to assign directly in the cache,
        # we must manage the reverse relation cache manually.
        if not self.field.remote_field.multiple:
            rel_obj_cache_name = self.field.remote_field.get_cache_name()
            for rel_obj in queryset:
                instance = instances_dict[rel_obj_attr(rel_obj)]
                setattr(rel_obj, rel_obj_cache_name, instance)
        return queryset, rel_obj_attr, instance_attr, True, self.cache_name

    def __get__(self, instance, cls=None):
        """
        Get the related instance through the forward relation.

        With the example above, when getting ``child.parent``:

        - ``self`` is the descriptor managing the ``parent`` attribute
        - ``instance`` is the ``child`` instance
        - ``cls`` is the ``Child`` class (we don't need it)
        """
        if instance is None:
            return self

        # The related instance is loaded from the database and then cached in
        # the attribute defined in self.cache_name. It can also be pre-cached
        # by the reverse accessor (ReverseOneToOneDescriptor).
        try:
            rel_obj = getattr(instance, self.cache_name)
        except AttributeError:
            val = self.field.get_local_related_value(instance)
            if None in val:
                # A null foreign key value means there is no related object.
                rel_obj = None
            else:
                qs = self.get_queryset(instance=instance)
                qs = qs.filter(self.field.get_reverse_related_filter(instance))
                # Assuming the database enforces foreign keys, this won't fail.
                rel_obj = qs.get()
                # If this is a one-to-one relation, set the reverse accessor
                # cache on the related object to the current instance to avoid
                # an extra SQL query if it's accessed later on.
                if not self.field.remote_field.multiple:
                    setattr(rel_obj, self.field.remote_field.get_cache_name(), instance)
            setattr(instance, self.cache_name, rel_obj)

        if rel_obj is None and not self.field.null:
            raise self.RelatedObjectDoesNotExist(
                "%s has no %s." % (self.field.model.__name__, self.field.name)
            )
        else:
            return rel_obj

    def __set__(self, instance, value):
        """
        Set the related instance through the forward relation.

        With the example above, when setting ``child.parent = parent``:

        - ``self`` is the descriptor managing the ``parent`` attribute
        - ``instance`` is the ``child`` instance
        - ``value`` is the ``parent`` instance on the right of the equal sign
        """
        # An object must be an instance of the related class.
        if value is not None and not isinstance(value, self.field.remote_field.model._meta.concrete_model):
            raise ValueError(
                'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (
                    value,
                    instance._meta.object_name,
                    self.field.name,
                    self.field.remote_field.model._meta.object_name,
                )
            )
        elif value is not None:
            # Propagate database state between the two objects and make sure
            # the router allows the relation.
            if instance._state.db is None:
                instance._state.db = router.db_for_write(instance.__class__, instance=value)
            elif value._state.db is None:
                value._state.db = router.db_for_write(value.__class__, instance=instance)
            elif value._state.db is not None and instance._state.db is not None:
                if not router.allow_relation(value, instance):
                    raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)

        # If we're setting the value of a OneToOneField to None, we need to clear
        # out the cache on any old related object. Otherwise, deleting the
        # previously-related object will also cause this object to be deleted,
        # which is wrong.
        if value is None:
            # Look up the previously-related object, which may still be available
            # since we've not yet cleared out the related field.
            # Use the cache directly, instead of the accessor; if we haven't
            # populated the cache, then we don't care - we're only accessing
            # the object to invalidate the accessor cache, so there's no
            # need to populate the cache just to expire it again.
            related = getattr(instance, self.cache_name, None)

            # If we've got an old related object, we need to clear out its
            # cache. This cache also might not exist if the related object
            # hasn't been accessed yet.
            if related is not None:
                setattr(related, self.field.remote_field.get_cache_name(), None)

            for lh_field, rh_field in self.field.related_fields:
                setattr(instance, lh_field.attname, None)

        # Set the values of the related field.
        else:
            for lh_field, rh_field in self.field.related_fields:
                setattr(instance, lh_field.attname, getattr(value, rh_field.attname))

        # Set the related instance cache used by __get__ to avoid a SQL query
        # when accessing the attribute we just set.
        setattr(instance, self.cache_name, value)

        # If this is a one-to-one relation, set the reverse accessor cache on
        # the related object to the current instance to avoid an extra SQL
        # query if it's accessed later on.
        if value is not None and not self.field.remote_field.multiple:
            setattr(value, self.field.remote_field.get_cache_name(), instance)
class ReverseOneToOneDescriptor(object):
    """
    Accessor to the related object on the reverse side of a one-to-one
    relation.

    In the example::

        class Restaurant(Model):
            place = OneToOneField(Place, related_name='restaurant')

    ``place.restaurant`` is a ``ReverseOneToOneDescriptor`` instance.
    """

    def __init__(self, related):
        self.related = related
        self.cache_name = related.get_cache_name()

    @cached_property
    def RelatedObjectDoesNotExist(self):
        # The exception isn't created at initialization time for the sake of
        # consistency with `ForwardManyToOneDescriptor`.
        return type(
            str('RelatedObjectDoesNotExist'),
            (self.related.related_model.DoesNotExist, AttributeError),
            {}
        )

    def is_cached(self, instance):
        """Return True if the related object is already cached on *instance*."""
        return hasattr(instance, self.cache_name)

    def get_queryset(self, **hints):
        manager = self.related.related_model._default_manager
        # If the related manager indicates that it should be used for
        # related fields, respect that.
        if not getattr(manager, 'use_for_related_fields', False):
            manager = self.related.related_model._base_manager
        return manager.db_manager(hints=hints).all()

    def get_prefetch_queryset(self, instances, queryset=None):
        # Return the (queryset, rel_obj_attr, instance_attr, single,
        # cache_name) tuple consumed by the prefetch machinery.
        if queryset is None:
            queryset = self.get_queryset()
        queryset._add_hints(instance=instances[0])

        rel_obj_attr = attrgetter(self.related.field.attname)

        def instance_attr(obj):
            return obj._get_pk_val()

        instances_dict = {instance_attr(inst): inst for inst in instances}
        query = {'%s__in' % self.related.field.name: instances}
        queryset = queryset.filter(**query)

        # Since we're going to assign directly in the cache,
        # we must manage the reverse relation cache manually.
        rel_obj_cache_name = self.related.field.get_cache_name()
        for rel_obj in queryset:
            instance = instances_dict[rel_obj_attr(rel_obj)]
            setattr(rel_obj, rel_obj_cache_name, instance)
        return queryset, rel_obj_attr, instance_attr, True, self.cache_name

    def __get__(self, instance, cls=None):
        """
        Get the related instance through the reverse relation.

        With the example above, when getting ``place.restaurant``:

        - ``self`` is the descriptor managing the ``restaurant`` attribute
        - ``instance`` is the ``place`` instance
        - ``cls`` is the ``Place`` class (we don't need it)

        Keep in mind that ``Restaurant`` holds the foreign key to ``Place``.
        """
        if instance is None:
            return self

        # The related instance is loaded from the database and then cached in
        # the attribute defined in self.cache_name. It can also be pre-cached
        # by the forward accessor (ForwardManyToOneDescriptor).
        try:
            rel_obj = getattr(instance, self.cache_name)
        except AttributeError:
            related_pk = instance._get_pk_val()
            if related_pk is None:
                # An unsaved instance can't have a related object yet.
                rel_obj = None
            else:
                filter_args = self.related.field.get_forward_related_filter(instance)
                try:
                    rel_obj = self.get_queryset(instance=instance).get(**filter_args)
                except self.related.related_model.DoesNotExist:
                    rel_obj = None
                else:
                    # Set the forward accessor cache on the related object to
                    # the current instance to avoid an extra SQL query if it's
                    # accessed later on.
                    setattr(rel_obj, self.related.field.get_cache_name(), instance)
            setattr(instance, self.cache_name, rel_obj)

        if rel_obj is None:
            raise self.RelatedObjectDoesNotExist(
                "%s has no %s." % (
                    instance.__class__.__name__,
                    self.related.get_accessor_name()
                )
            )
        else:
            return rel_obj

    def __set__(self, instance, value):
        """
        Set the related instance through the reverse relation.

        With the example above, when setting ``place.restaurant = restaurant``:

        - ``self`` is the descriptor managing the ``restaurant`` attribute
        - ``instance`` is the ``place`` instance
        - ``value`` is the ``restaurant`` instance on the right of the equal sign

        Keep in mind that ``Restaurant`` holds the foreign key to ``Place``.
        """
        # The similarity of the code below to the code in
        # ForwardManyToOneDescriptor is annoying, but there's a bunch
        # of small differences that would make a common base class convoluted.

        if value is None:
            # Update the cached related instance (if any) & clear the cache.
            try:
                rel_obj = getattr(instance, self.cache_name)
            except AttributeError:
                pass
            else:
                delattr(instance, self.cache_name)
                setattr(rel_obj, self.related.field.name, None)
        elif not isinstance(value, self.related.related_model):
            # An object must be an instance of the related class.
            raise ValueError(
                'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (
                    value,
                    instance._meta.object_name,
                    self.related.get_accessor_name(),
                    self.related.related_model._meta.object_name,
                )
            )
        else:
            # Propagate database state between the two objects and make sure
            # the router allows the relation.
            if instance._state.db is None:
                instance._state.db = router.db_for_write(instance.__class__, instance=value)
            elif value._state.db is None:
                value._state.db = router.db_for_write(value.__class__, instance=instance)
            elif value._state.db is not None and instance._state.db is not None:
                if not router.allow_relation(value, instance):
                    raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)

            related_pk = tuple(getattr(instance, field.attname) for field in self.related.field.foreign_related_fields)
            # Set the value of the related field to the value of the related object's related field
            for index, field in enumerate(self.related.field.local_related_fields):
                setattr(value, field.attname, related_pk[index])

            # Set the related instance cache used by __get__ to avoid a SQL query
            # when accessing the attribute we just set.
            setattr(instance, self.cache_name, value)

            # Set the forward accessor cache on the related object to the current
            # instance to avoid an extra SQL query if it's accessed later on.
            setattr(value, self.related.field.get_cache_name(), instance)
class ReverseManyToOneDescriptor(object):
    """
    Accessor to the related objects manager on the reverse side of a
    many-to-one relation.

    In the example::

        class Child(Model):
            parent = ForeignKey(Parent, related_name='children')

    ``parent.children`` is a ``ReverseManyToOneDescriptor`` instance.

    Most of the implementation is delegated to a dynamically defined manager
    class built by ``create_reverse_many_to_one_manager()`` defined below.
    """

    def __init__(self, rel):
        self.rel = rel
        self.field = rel.field

    @cached_property
    def related_manager_cls(self):
        # Built lazily (and cached) because the related model's default
        # manager class may not be available at descriptor creation time.
        return create_reverse_many_to_one_manager(
            self.rel.related_model._default_manager.__class__,
            self.rel,
        )

    def __get__(self, instance, cls=None):
        """
        Get the related objects through the reverse relation.

        With the example above, when getting ``parent.children``:

        - ``self`` is the descriptor managing the ``children`` attribute
        - ``instance`` is the ``parent`` instance
        - ``cls`` is the ``Parent`` class (we don't need it)
        """
        if instance is None:
            return self

        return self.related_manager_cls(instance)

    def __set__(self, instance, value):
        """
        Set the related objects through the reverse relation.

        With the example above, when setting ``parent.children = children``:

        - ``self`` is the descriptor managing the ``children`` attribute
        - ``instance`` is the ``parent`` instance
        - ``value`` is the ``children`` sequence on the right of the equal sign
        """
        warnings.warn(
            'Direct assignment to the reverse side of a related set is '
            'deprecated due to the implicit save() that happens. Use %s.set() '
            'instead.' % self.rel.get_accessor_name(), RemovedInDjango20Warning, stacklevel=2,
        )
        manager = self.__get__(instance)
        manager.set(value)
def create_reverse_many_to_one_manager(superclass, rel):
    """
    Create a manager for the reverse side of a many-to-one relation.

    This manager subclasses another manager, generally the default manager of
    the related model, and adds behaviors specific to many-to-one relations.
    """

    class RelatedManager(superclass):
        def __init__(self, instance):
            super(RelatedManager, self).__init__()

            self.instance = instance
            self.model = rel.related_model
            self.field = rel.field

            self.core_filters = {self.field.name: instance}

        def __call__(self, **kwargs):
            # We use **kwargs rather than a kwarg argument to enforce the
            # `manager='manager_name'` syntax.
            manager = getattr(self.model, kwargs.pop('manager'))
            manager_class = create_reverse_many_to_one_manager(manager.__class__, rel)
            return manager_class(self.instance)
        do_not_call_in_templates = True

        def _apply_rel_filters(self, queryset):
            """
            Filter the queryset for the instance this manager is bound to.
            """
            db = self._db or router.db_for_read(self.model, instance=self.instance)
            empty_strings_as_null = connections[db].features.interprets_empty_strings_as_nulls
            queryset._add_hints(instance=self.instance)
            if self._db:
                queryset = queryset.using(self._db)
            queryset = queryset.filter(**self.core_filters)
            for field in self.field.foreign_related_fields:
                val = getattr(self.instance, field.attname)
                # A null (or empty-as-null) key can't relate to anything.
                if val is None or (val == '' and empty_strings_as_null):
                    return queryset.none()
            queryset._known_related_objects = {self.field: {self.instance.pk: self.instance}}
            return queryset

        def get_queryset(self):
            try:
                # Reuse the prefetched result when available.
                return self.instance._prefetched_objects_cache[self.field.related_query_name()]
            except (AttributeError, KeyError):
                queryset = super(RelatedManager, self).get_queryset()
                return self._apply_rel_filters(queryset)

        def get_prefetch_queryset(self, instances, queryset=None):
            # Return the (queryset, rel_obj_attr, instance_attr, single,
            # cache_name) tuple consumed by the prefetch machinery.
            if queryset is None:
                queryset = super(RelatedManager, self).get_queryset()

            queryset._add_hints(instance=instances[0])
            queryset = queryset.using(queryset._db or self._db)

            rel_obj_attr = self.field.get_local_related_value
            instance_attr = self.field.get_foreign_related_value
            instances_dict = {instance_attr(inst): inst for inst in instances}
            query = {'%s__in' % self.field.name: instances}
            queryset = queryset.filter(**query)

            # Since we just bypassed this class' get_queryset(), we must manage
            # the reverse relation manually.
            for rel_obj in queryset:
                instance = instances_dict[rel_obj_attr(rel_obj)]
                setattr(rel_obj, self.field.name, instance)

            cache_name = self.field.related_query_name()
            return queryset, rel_obj_attr, instance_attr, False, cache_name

        def add(self, *objs, **kwargs):
            """Add *objs* to this related set by updating their foreign key."""
            bulk = kwargs.pop('bulk', True)
            objs = list(objs)
            db = router.db_for_write(self.model, instance=self.instance)

            def check_and_update_obj(obj):
                # Ensure the object is of the right type, then point its
                # foreign key at our instance.
                if not isinstance(obj, self.model):
                    raise TypeError("'%s' instance expected, got %r" % (
                        self.model._meta.object_name, obj,
                    ))
                setattr(obj, self.field.name, self.instance)

            if bulk:
                # One UPDATE for all objects; they must already be saved.
                pks = []
                for obj in objs:
                    check_and_update_obj(obj)
                    if obj._state.adding or obj._state.db != db:
                        raise ValueError(
                            "%r instance isn't saved. Use bulk=False or save "
                            "the object first." % obj
                        )
                    pks.append(obj.pk)
                self.model._base_manager.using(db).filter(pk__in=pks).update(**{
                    self.field.name: self.instance,
                })
            else:
                # Individual save() per object, inside one transaction.
                with transaction.atomic(using=db, savepoint=False):
                    for obj in objs:
                        check_and_update_obj(obj)
                        obj.save()
        add.alters_data = True

        def create(self, **kwargs):
            kwargs[self.field.name] = self.instance
            db = router.db_for_write(self.model, instance=self.instance)
            return super(RelatedManager, self.db_manager(db)).create(**kwargs)
        create.alters_data = True

        def get_or_create(self, **kwargs):
            kwargs[self.field.name] = self.instance
            db = router.db_for_write(self.model, instance=self.instance)
            return super(RelatedManager, self.db_manager(db)).get_or_create(**kwargs)
        get_or_create.alters_data = True

        def update_or_create(self, **kwargs):
            kwargs[self.field.name] = self.instance
            db = router.db_for_write(self.model, instance=self.instance)
            return super(RelatedManager, self.db_manager(db)).update_or_create(**kwargs)
        update_or_create.alters_data = True

        # remove() and clear() are only provided if the ForeignKey can have a value of null.
        if rel.field.null:
            def remove(self, *objs, **kwargs):
                if not objs:
                    return
                bulk = kwargs.pop('bulk', True)
                val = self.field.get_foreign_related_value(self.instance)
                old_ids = set()
                for obj in objs:
                    # Is obj actually part of this descriptor set?
                    if self.field.get_local_related_value(obj) == val:
                        old_ids.add(obj.pk)
                    else:
                        raise self.field.remote_field.model.DoesNotExist(
                            "%r is not related to %r." % (obj, self.instance)
                        )
                self._clear(self.filter(pk__in=old_ids), bulk)
            remove.alters_data = True

            def clear(self, **kwargs):
                bulk = kwargs.pop('bulk', True)
                self._clear(self, bulk)
            clear.alters_data = True

            def _clear(self, queryset, bulk):
                # Null out the foreign key for everything in *queryset*.
                db = router.db_for_write(self.model, instance=self.instance)
                queryset = queryset.using(db)
                if bulk:
                    # `QuerySet.update()` is intrinsically atomic.
                    queryset.update(**{self.field.name: None})
                else:
                    with transaction.atomic(using=db, savepoint=False):
                        for obj in queryset:
                            setattr(obj, self.field.name, None)
                            obj.save(update_fields=[self.field.name])
            _clear.alters_data = True

        def set(self, objs, **kwargs):
            # Force evaluation of `objs` in case it's a queryset whose value
            # could be affected by `manager.clear()`. Refs #19816.
            objs = tuple(objs)

            bulk = kwargs.pop('bulk', True)
            clear = kwargs.pop('clear', False)

            if self.field.null:
                db = router.db_for_write(self.model, instance=self.instance)
                with transaction.atomic(using=db, savepoint=False):
                    if clear:
                        self.clear()
                        self.add(*objs, bulk=bulk)
                    else:
                        # Compute the minimal remove/add delta instead of
                        # clearing everything.
                        old_objs = set(self.using(db).all())
                        new_objs = []
                        for obj in objs:
                            if obj in old_objs:
                                old_objs.remove(obj)
                            else:
                                new_objs.append(obj)

                        self.remove(*old_objs, bulk=bulk)
                        self.add(*new_objs, bulk=bulk)
            else:
                self.add(*objs, bulk=bulk)
        set.alters_data = True

    return RelatedManager
class ManyToManyDescriptor(ReverseManyToOneDescriptor):
    """
    Accessor to the related objects manager on the forward and reverse sides of
    a many-to-many relation.

    In the example::

        class Pizza(Model):
            toppings = ManyToManyField(Topping, related_name='pizzas')

    ``pizza.toppings`` and ``topping.pizzas`` are ``ManyToManyDescriptor``
    instances.

    Most of the implementation is delegated to a dynamically defined manager
    class built by ``create_forward_many_to_many_manager()`` defined below.
    """

    def __init__(self, rel, reverse=False):
        super(ManyToManyDescriptor, self).__init__(rel)

        # Whether this descriptor sits on the reverse side of the relation.
        self.reverse = reverse

    @property
    def through(self):
        # through is provided so that you have easy access to the through
        # model (Book.authors.through) for inlines, etc. This is done as
        # a property to ensure that the fully resolved value is returned.
        return self.rel.through

    @cached_property
    def related_manager_cls(self):
        model = self.rel.related_model if self.reverse else self.rel.model

        return create_forward_many_to_many_manager(
            model._default_manager.__class__,
            self.rel,
            reverse=self.reverse,
        )
def create_forward_many_to_many_manager(superclass, rel, reverse):
    """
    Create a manager for either side of a many-to-many relation.

    This manager subclasses another manager, generally the default manager of
    the related model, and adds behaviors specific to many-to-many relations.
    """
    class ManyRelatedManager(superclass):
        def __init__(self, instance=None):
            """Bind the manager to *instance*, one end of the m2m relation."""
            super(ManyRelatedManager, self).__init__()
            self.instance = instance
            if not reverse:
                # Forward side (e.g. pizza.toppings).
                self.model = rel.model
                self.query_field_name = rel.field.related_query_name()
                self.prefetch_cache_name = rel.field.name
                self.source_field_name = rel.field.m2m_field_name()
                self.target_field_name = rel.field.m2m_reverse_field_name()
                self.symmetrical = rel.symmetrical
            else:
                # Reverse side (e.g. topping.pizzas): swap source/target.
                self.model = rel.related_model
                self.query_field_name = rel.field.name
                self.prefetch_cache_name = rel.field.related_query_name()
                self.source_field_name = rel.field.m2m_reverse_field_name()
                self.target_field_name = rel.field.m2m_field_name()
                self.symmetrical = False
            self.through = rel.through
            self.reverse = reverse
            self.source_field = self.through._meta.get_field(self.source_field_name)
            self.target_field = self.through._meta.get_field(self.target_field_name)
            # Filters restricting querysets to rows related to `instance`.
            self.core_filters = {}
            for lh_field, rh_field in self.source_field.related_fields:
                core_filter_key = '%s__%s' % (self.query_field_name, rh_field.name)
                self.core_filters[core_filter_key] = getattr(instance, rh_field.attname)
            self.related_val = self.source_field.get_foreign_related_value(instance)
            if None in self.related_val:
                raise ValueError('"%r" needs to have a value for field "%s" before '
                                 'this many-to-many relationship can be used.' %
                                 (instance, self.source_field_name))
            # Even if this relation is not to pk, we require still pk value.
            # The wish is that the instance has been already saved to DB,
            # although having a pk value isn't a guarantee of that.
            if instance.pk is None:
                raise ValueError("%r instance needs to have a primary key value before "
                                 "a many-to-many relationship can be used." %
                                 instance.__class__.__name__)

        def __call__(self, **kwargs):
            # We use **kwargs rather than a kwarg argument to enforce the
            # `manager='manager_name'` syntax.
            manager = getattr(self.model, kwargs.pop('manager'))
            manager_class = create_forward_many_to_many_manager(manager.__class__, rel, reverse)
            return manager_class(instance=self.instance)
        do_not_call_in_templates = True

        def _build_remove_filters(self, removed_vals):
            """Build a Q() selecting through-table rows to delete for
            *removed_vals*; for symmetrical self-relations the mirrored
            rows are included as well."""
            filters = Q(**{self.source_field_name: self.related_val})
            # No need to add a subquery condition if removed_vals is a QuerySet without
            # filters.
            removed_vals_filters = (not isinstance(removed_vals, QuerySet) or
                                    removed_vals._has_filters())
            if removed_vals_filters:
                filters &= Q(**{'%s__in' % self.target_field_name: removed_vals})
            if self.symmetrical:
                symmetrical_filters = Q(**{self.target_field_name: self.related_val})
                if removed_vals_filters:
                    symmetrical_filters &= Q(
                        **{'%s__in' % self.source_field_name: removed_vals})
                filters |= symmetrical_filters
            return filters

        def _apply_rel_filters(self, queryset):
            """
            Filter the queryset for the instance this manager is bound to.
            """
            queryset._add_hints(instance=self.instance)
            if self._db:
                queryset = queryset.using(self._db)
            return queryset._next_is_sticky().filter(**self.core_filters)

        def get_queryset(self):
            # Serve from the prefetch cache when one has been populated.
            try:
                return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
            except (AttributeError, KeyError):
                queryset = super(ManyRelatedManager, self).get_queryset()
                return self._apply_rel_filters(queryset)

        def get_prefetch_queryset(self, instances, queryset=None):
            """Return the 5-tuple consumed by prefetch_related() for
            fetching related objects of all *instances* in one query."""
            if queryset is None:
                queryset = super(ManyRelatedManager, self).get_queryset()
            queryset._add_hints(instance=instances[0])
            queryset = queryset.using(queryset._db or self._db)
            query = {'%s__in' % self.query_field_name: instances}
            queryset = queryset._next_is_sticky().filter(**query)
            # M2M: need to annotate the query in order to get the primary model
            # that the secondary model was actually related to. We know that
            # there will already be a join on the join table, so we can just add
            # the select.
            # For non-autocreated 'through' models, can't assume we are
            # dealing with PK values.
            fk = self.through._meta.get_field(self.source_field_name)
            join_table = self.through._meta.db_table
            connection = connections[queryset.db]
            qn = connection.ops.quote_name
            queryset = queryset.extra(select={
                '_prefetch_related_val_%s' % f.attname:
                '%s.%s' % (qn(join_table), qn(f.column)) for f in fk.local_related_fields})
            return (
                queryset,
                # Key of a fetched row: the source value(s) annotated above.
                lambda result: tuple(
                    getattr(result, '_prefetch_related_val_%s' % f.attname)
                    for f in fk.local_related_fields
                ),
                # Key of an input instance, for matching rows to instances.
                lambda inst: tuple(
                    f.get_db_prep_value(getattr(inst, f.attname), connection)
                    for f in fk.foreign_related_fields
                ),
                False,
                self.prefetch_cache_name,
            )

        def add(self, *objs):
            """Link *objs* (instances or primary keys) to the instance."""
            if not rel.through._meta.auto_created:
                opts = self.through._meta
                raise AttributeError(
                    "Cannot use add() on a ManyToManyField which specifies an "
                    "intermediary model. Use %s.%s's Manager instead." %
                    (opts.app_label, opts.object_name)
                )
            db = router.db_for_write(self.through, instance=self.instance)
            with transaction.atomic(using=db, savepoint=False):
                self._add_items(self.source_field_name, self.target_field_name, *objs)
                # If this is a symmetrical m2m relation to self, add the mirror entry in the m2m table
                if self.symmetrical:
                    self._add_items(self.target_field_name, self.source_field_name, *objs)
        add.alters_data = True

        def remove(self, *objs):
            """Unlink *objs* (instances or primary keys) from the instance."""
            if not rel.through._meta.auto_created:
                opts = self.through._meta
                raise AttributeError(
                    "Cannot use remove() on a ManyToManyField which specifies "
                    "an intermediary model. Use %s.%s's Manager instead." %
                    (opts.app_label, opts.object_name)
                )
            self._remove_items(self.source_field_name, self.target_field_name, *objs)
        remove.alters_data = True

        def clear(self):
            """Delete every through-table row for this instance, with
            pre_clear/post_clear signals around the deletion."""
            db = router.db_for_write(self.through, instance=self.instance)
            with transaction.atomic(using=db, savepoint=False):
                signals.m2m_changed.send(sender=self.through, action="pre_clear",
                                         instance=self.instance, reverse=self.reverse,
                                         model=self.model, pk_set=None, using=db)
                filters = self._build_remove_filters(super(ManyRelatedManager, self).get_queryset().using(db))
                self.through._default_manager.using(db).filter(filters).delete()
                signals.m2m_changed.send(sender=self.through, action="post_clear",
                                         instance=self.instance, reverse=self.reverse,
                                         model=self.model, pk_set=None, using=db)
        clear.alters_data = True

        def set(self, objs, **kwargs):
            """Replace the related set with *objs*; with ``clear=True``
            empty the relation first instead of diffing."""
            if not rel.through._meta.auto_created:
                opts = self.through._meta
                raise AttributeError(
                    "Cannot set values on a ManyToManyField which specifies an "
                    "intermediary model. Use %s.%s's Manager instead." %
                    (opts.app_label, opts.object_name)
                )
            # Force evaluation of `objs` in case it's a queryset whose value
            # could be affected by `manager.clear()`. Refs #19816.
            objs = tuple(objs)
            clear = kwargs.pop('clear', False)
            db = router.db_for_write(self.through, instance=self.instance)
            with transaction.atomic(using=db, savepoint=False):
                if clear:
                    self.clear()
                    self.add(*objs)
                else:
                    # Diff target FK values so existing links are untouched.
                    old_ids = set(self.using(db).values_list(self.target_field.target_field.attname, flat=True))
                    new_objs = []
                    for obj in objs:
                        fk_val = (self.target_field.get_foreign_related_value(obj)[0]
                                  if isinstance(obj, self.model) else obj)
                        if fk_val in old_ids:
                            old_ids.remove(fk_val)
                        else:
                            new_objs.append(obj)
                    self.remove(*old_ids)
                    self.add(*new_objs)
        set.alters_data = True

        def create(self, **kwargs):
            """Create a new related object and link it to the instance."""
            # This check needs to be done here, since we can't later remove this
            # from the method lookup table, as we do with add and remove.
            if not self.through._meta.auto_created:
                opts = self.through._meta
                raise AttributeError(
                    "Cannot use create() on a ManyToManyField which specifies "
                    "an intermediary model. Use %s.%s's Manager instead." %
                    (opts.app_label, opts.object_name)
                )
            db = router.db_for_write(self.instance.__class__, instance=self.instance)
            new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs)
            self.add(new_obj)
            return new_obj
        create.alters_data = True

        def get_or_create(self, **kwargs):
            db = router.db_for_write(self.instance.__class__, instance=self.instance)
            obj, created = super(ManyRelatedManager, self.db_manager(db)).get_or_create(**kwargs)
            # We only need to add() if created because if we got an object back
            # from get() then the relationship already exists.
            if created:
                self.add(obj)
            return obj, created
        get_or_create.alters_data = True

        def update_or_create(self, **kwargs):
            db = router.db_for_write(self.instance.__class__, instance=self.instance)
            obj, created = super(ManyRelatedManager, self.db_manager(db)).update_or_create(**kwargs)
            # We only need to add() if created because if we got an object back
            # from get() then the relationship already exists.
            if created:
                self.add(obj)
            return obj, created
        update_or_create.alters_data = True

        def _add_items(self, source_field_name, target_field_name, *objs):
            # source_field_name: the PK fieldname in join table for the source object
            # target_field_name: the PK fieldname in join table for the target object
            # *objs - objects to add. Either object instances, or primary keys of object instances.
            # If there aren't any objects, there is nothing to do.
            from django.db.models import Model
            if objs:
                new_ids = set()
                for obj in objs:
                    if isinstance(obj, self.model):
                        if not router.allow_relation(obj, self.instance):
                            raise ValueError(
                                'Cannot add "%r": instance is on database "%s", value is on database "%s"' %
                                (obj, self.instance._state.db, obj._state.db)
                            )
                        fk_val = self.through._meta.get_field(
                            target_field_name).get_foreign_related_value(obj)[0]
                        if fk_val is None:
                            raise ValueError(
                                'Cannot add "%r": the value for field "%s" is None' %
                                (obj, target_field_name)
                            )
                        new_ids.add(fk_val)
                    elif isinstance(obj, Model):
                        # A model instance of the wrong type.
                        raise TypeError(
                            "'%s' instance expected, got %r" %
                            (self.model._meta.object_name, obj)
                        )
                    else:
                        # Assume a raw primary-key value.
                        new_ids.add(obj)
                db = router.db_for_write(self.through, instance=self.instance)
                # Skip ids that are already linked to avoid duplicate rows.
                vals = (self.through._default_manager.using(db)
                        .values_list(target_field_name, flat=True)
                        .filter(**{
                            source_field_name: self.related_val[0],
                            '%s__in' % target_field_name: new_ids,
                        }))
                new_ids = new_ids - set(vals)
                with transaction.atomic(using=db, savepoint=False):
                    if self.reverse or source_field_name == self.source_field_name:
                        # Don't send the signal when we are inserting the
                        # duplicate data row for symmetrical reverse entries.
                        signals.m2m_changed.send(sender=self.through, action='pre_add',
                                                 instance=self.instance, reverse=self.reverse,
                                                 model=self.model, pk_set=new_ids, using=db)
                    # Add the ones that aren't there already
                    self.through._default_manager.using(db).bulk_create([
                        self.through(**{
                            '%s_id' % source_field_name: self.related_val[0],
                            '%s_id' % target_field_name: obj_id,
                        })
                        for obj_id in new_ids
                    ])
                    if self.reverse or source_field_name == self.source_field_name:
                        # Don't send the signal when we are inserting the
                        # duplicate data row for symmetrical reverse entries.
                        signals.m2m_changed.send(sender=self.through, action='post_add',
                                                 instance=self.instance, reverse=self.reverse,
                                                 model=self.model, pk_set=new_ids, using=db)

        def _remove_items(self, source_field_name, target_field_name, *objs):
            # source_field_name: the PK colname in join table for the source object
            # target_field_name: the PK colname in join table for the target object
            # *objs - objects to remove
            if not objs:
                return
            # Check that all the objects are of the right type
            old_ids = set()
            for obj in objs:
                if isinstance(obj, self.model):
                    fk_val = self.target_field.get_foreign_related_value(obj)[0]
                    old_ids.add(fk_val)
                else:
                    old_ids.add(obj)
            db = router.db_for_write(self.through, instance=self.instance)
            with transaction.atomic(using=db, savepoint=False):
                # Send a signal to the other end if need be.
                signals.m2m_changed.send(sender=self.through, action="pre_remove",
                                         instance=self.instance, reverse=self.reverse,
                                         model=self.model, pk_set=old_ids, using=db)
                target_model_qs = super(ManyRelatedManager, self).get_queryset()
                if target_model_qs._has_filters():
                    old_vals = target_model_qs.using(db).filter(**{
                        '%s__in' % self.target_field.target_field.attname: old_ids})
                else:
                    old_vals = old_ids
                filters = self._build_remove_filters(old_vals)
                self.through._default_manager.using(db).filter(filters).delete()
                signals.m2m_changed.send(sender=self.through, action="post_remove",
                                         instance=self.instance, reverse=self.reverse,
                                         model=self.model, pk_set=old_ids, using=db)

    return ManyRelatedManager
| vincepandolfo/django | django/db/models/fields/related_descriptors.py | Python | bsd-3-clause | 47,154 |
"""
Various bayesian regression
"""
from __future__ import print_function
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_arrays
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
    """Bayesian ridge regression

    Fit a Bayesian ridge model and optimize the regularization parameters
    lambda (precision of the weights) and alpha (precision of the noise).

    Parameters
    ----------
    X : array, shape = (n_samples, n_features)
        Training vectors.

    y : array, shape = (length)
        Target values for training vectors

    n_iter : int, optional
        Maximum number of iterations.  Default is 300.

    tol : float, optional
        Stop the algorithm if w has converged. Default is 1.e-3.

    alpha_1 : float, optional
        Hyper-parameter : shape parameter for the Gamma distribution prior
        over the alpha parameter. Default is 1.e-6

    alpha_2 : float, optional
        Hyper-parameter : inverse scale parameter (rate parameter) for the
        Gamma distribution prior over the alpha parameter.
        Default is 1.e-6.

    lambda_1 : float, optional
        Hyper-parameter : shape parameter for the Gamma distribution prior
        over the lambda parameter. Default is 1.e-6.

    lambda_2 : float, optional
        Hyper-parameter : inverse scale parameter (rate parameter) for the
        Gamma distribution prior over the lambda parameter.
        Default is 1.e-6

    compute_score : boolean, optional
        If True, compute the objective function at each step of the model.
        Default is False

    fit_intercept : boolean, optional
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
        Default is True.

    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.

    verbose : boolean, optional, default False
        Verbose mode when fitting the model.

    Attributes
    ----------
    `coef_` : array, shape = (n_features)
        Coefficients of the regression model (mean of distribution)

    `alpha_` : float
       estimated precision of the noise.

    `lambda_` : array, shape = (n_features)
       estimated precisions of the weights.

    `scores_` : float
        if computed, value of the objective function (to be maximized)

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.BayesianRidge()
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
    ... # doctest: +NORMALIZE_WHITESPACE
    BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
            copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
            n_iter=300, normalize=False, tol=0.001, verbose=False)
    >>> clf.predict([[1, 1]])
    array([ 1.])

    Notes
    -----
    See examples/linear_model/plot_bayesian_ridge.py for an example.
    """
    def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
                 lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
                 fit_intercept=True, normalize=False, copy_X=True,
                 verbose=False):
        # Store constructor parameters only; all work happens in fit().
        self.n_iter = n_iter
        self.tol = tol
        self.alpha_1 = alpha_1
        self.alpha_2 = alpha_2
        self.lambda_1 = lambda_1
        self.lambda_2 = lambda_2
        self.compute_score = compute_score
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.copy_X = copy_X
        self.verbose = verbose

    def fit(self, X, y):
        """Fit the model

        Parameters
        ----------
        X : numpy array of shape [n_samples,n_features]
            Training data
        y : numpy array of shape [n_samples]
            Target values

        Returns
        -------
        self : returns an instance of self.
        """
        X, y = check_arrays(X, y, sparse_format='dense',
                            dtype=np.float)
        X, y, X_mean, y_mean, X_std = self._center_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X)
        n_samples, n_features = X.shape

        ### Initialization of the values of the parameters
        alpha_ = 1. / np.var(y)
        lambda_ = 1.
        verbose = self.verbose
        lambda_1 = self.lambda_1
        lambda_2 = self.lambda_2
        alpha_1 = self.alpha_1
        alpha_2 = self.alpha_2

        self.scores_ = list()
        coef_old_ = None
        XT_y = np.dot(X.T, y)
        # The SVD is computed once; each iteration reuses the singular
        # values/vectors to solve the (regularized) normal equations.
        U, S, Vh = linalg.svd(X, full_matrices=False)
        eigen_vals_ = S ** 2

        ### Convergence loop of the bayesian ridge regression
        for iter_ in range(self.n_iter):
            ### Compute mu and sigma
            # sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
            # coef_ = sigma_^-1 * XT * y
            if n_samples > n_features:
                coef_ = np.dot(Vh.T,
                               Vh / (eigen_vals_ + lambda_ / alpha_)[:, None])
                coef_ = np.dot(coef_, XT_y)
                if self.compute_score:
                    logdet_sigma_ = - np.sum(
                        np.log(lambda_ + alpha_ * eigen_vals_))
            else:
                # Wide data (n_features > n_samples): work in sample space.
                coef_ = np.dot(X.T, np.dot(
                    U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
                coef_ = np.dot(coef_, y)
                if self.compute_score:
                    logdet_sigma_ = lambda_ * np.ones(n_features)
                    logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
                    logdet_sigma_ = - np.sum(np.log(logdet_sigma_))

            ### Update alpha and lambda
            rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
            # gamma_ = effective number of well-determined parameters.
            gamma_ = (np.sum((alpha_ * eigen_vals_)
                      / (lambda_ + alpha_ * eigen_vals_)))
            lambda_ = ((gamma_ + 2 * lambda_1)
                       / (np.sum(coef_ ** 2) + 2 * lambda_2))
            alpha_ = ((n_samples - gamma_ + 2 * alpha_1)
                      / (rmse_ + 2 * alpha_2))

            ### Compute the objective function
            if self.compute_score:
                s = lambda_1 * log(lambda_) - lambda_2 * lambda_
                s += alpha_1 * log(alpha_) - alpha_2 * alpha_
                s += 0.5 * (n_features * log(lambda_)
                            + n_samples * log(alpha_)
                            - alpha_ * rmse_
                            - (lambda_ * np.sum(coef_ ** 2))
                            - logdet_sigma_
                            - n_samples * log(2 * np.pi))
                self.scores_.append(s)

            ### Check for convergence
            if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
                if verbose:
                    print("Convergence after ", str(iter_), " iterations")
                break
            coef_old_ = np.copy(coef_)

        self.alpha_ = alpha_
        self.lambda_ = lambda_
        self.coef_ = coef_

        self._set_intercept(X_mean, y_mean, X_std)
        return self
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
    """Bayesian ARD regression.

    Fit the weights of a regression model, using an ARD prior. The weights of
    the regression model are assumed to be in Gaussian distributions.
    Also estimate the parameters lambda (precisions of the distributions of the
    weights) and alpha (precision of the distribution of the noise).
    The estimation is done by an iterative procedures (Evidence Maximization)

    Parameters
    ----------
    X : array, shape = (n_samples, n_features)
        Training vectors.

    y : array, shape = (n_samples)
        Target values for training vectors

    n_iter : int, optional
        Maximum number of iterations. Default is 300

    tol : float, optional
        Stop the algorithm if w has converged. Default is 1.e-3.

    alpha_1 : float, optional
        Hyper-parameter : shape parameter for the Gamma distribution prior
        over the alpha parameter. Default is 1.e-6.

    alpha_2 : float, optional
        Hyper-parameter : inverse scale parameter (rate parameter) for the
        Gamma distribution prior over the alpha parameter. Default is 1.e-6.

    lambda_1 : float, optional
        Hyper-parameter : shape parameter for the Gamma distribution prior
        over the lambda parameter. Default is 1.e-6.

    lambda_2 : float, optional
        Hyper-parameter : inverse scale parameter (rate parameter) for the
        Gamma distribution prior over the lambda parameter. Default is 1.e-6.

    compute_score : boolean, optional
        If True, compute the objective function at each step of the model.
        Default is False.

    threshold_lambda : float, optional
        threshold for removing (pruning) weights with high precision from
        the computation. Default is 1.e+4.

    fit_intercept : boolean, optional
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
        Default is True.

    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True.
        If True, X will be copied; else, it may be overwritten.

    verbose : boolean, optional, default False
        Verbose mode when fitting the model.

    Attributes
    ----------
    `coef_` : array, shape = (n_features)
        Coefficients of the regression model (mean of distribution)

    `alpha_` : float
       estimated precision of the noise.

    `lambda_` : array, shape = (n_features)
       estimated precisions of the weights.

    `sigma_` : array, shape = (n_features, n_features)
        estimated variance-covariance matrix of the weights

    `scores_` : float
        if computed, value of the objective function (to be maximized)

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.ARDRegression()
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
    ... # doctest: +NORMALIZE_WHITESPACE
    ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
            copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
            n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
            verbose=False)
    >>> clf.predict([[1, 1]])
    array([ 1.])

    Notes
    --------
    See examples/linear_model/plot_ard.py for an example.
    """
    def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
                 lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
                 threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
                 copy_X=True, verbose=False):
        # Store constructor parameters only; all work happens in fit().
        self.n_iter = n_iter
        self.tol = tol
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.alpha_1 = alpha_1
        self.alpha_2 = alpha_2
        self.lambda_1 = lambda_1
        self.lambda_2 = lambda_2
        self.compute_score = compute_score
        self.threshold_lambda = threshold_lambda
        self.copy_X = copy_X
        self.verbose = verbose

    def fit(self, X, y):
        """Fit the ARDRegression model according to the given training data
        and parameters.

        Iterative procedure to maximize the evidence

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.

        y : array, shape = [n_samples]
            Target values (integers)

        Returns
        -------
        self : returns an instance of self.
        """
        X, y = check_arrays(X, y, sparse_format='dense',
                            dtype=np.float)

        n_samples, n_features = X.shape
        coef_ = np.zeros(n_features)

        X, y, X_mean, y_mean, X_std = self._center_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X)

        ### Launch the convergence loop
        # keep_lambda marks the features that have not been pruned yet.
        keep_lambda = np.ones(n_features, dtype=bool)

        lambda_1 = self.lambda_1
        lambda_2 = self.lambda_2
        alpha_1 = self.alpha_1
        alpha_2 = self.alpha_2
        verbose = self.verbose

        ### Initialization of the values of the parameters
        alpha_ = 1. / np.var(y)
        lambda_ = np.ones(n_features)

        self.scores_ = list()
        coef_old_ = None

        ### Iterative procedure of ARDRegression
        for iter_ in range(self.n_iter):
            ### Compute mu and sigma (using Woodbury matrix identity)
            # The identity lets us invert an (n_samples x n_samples) matrix
            # instead of an (n_features x n_features) one.
            sigma_ = pinvh(np.eye(n_samples) / alpha_ +
                           np.dot(X[:, keep_lambda] *
                           np.reshape(1. / lambda_[keep_lambda], [1, -1]),
                           X[:, keep_lambda].T))
            sigma_ = np.dot(sigma_, X[:, keep_lambda]
                            * np.reshape(1. / lambda_[keep_lambda], [1, -1]))
            sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1])
                              * X[:, keep_lambda].T, sigma_)
            sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
            coef_[keep_lambda] = alpha_ * np.dot(
                sigma_, np.dot(X[:, keep_lambda].T, y))

            ### Update alpha and lambda
            rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
            gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
            lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1)
                                    / ((coef_[keep_lambda]) ** 2
                                       + 2. * lambda_2))
            alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1)
                      / (rmse_ + 2. * alpha_2))

            ### Prune the weights with a precision over a threshold
            keep_lambda = lambda_ < self.threshold_lambda
            coef_[~keep_lambda] = 0

            ### Compute the objective function
            if self.compute_score:
                s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
                s += alpha_1 * log(alpha_) - alpha_2 * alpha_
                s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_)
                            + np.sum(np.log(lambda_)))
                s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
                self.scores_.append(s)

            ### Check for convergence
            if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
                if verbose:
                    print("Converged after %s iterations" % iter_)
                break
            coef_old_ = np.copy(coef_)

        self.coef_ = coef_
        self.alpha_ = alpha_
        self.sigma_ = sigma_

        self._set_intercept(X_mean, y_mean, X_std)
        return self
| jmargeta/scikit-learn | sklearn/linear_model/bayes.py | Python | bsd-3-clause | 15,486 |
# Copyright 2015 Cedraro Andrea <a.cedraro@gmail.com>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
if sys.version_info[0] >= 3:
    # On Python 3, provide the Python 2 text-type names that the
    # helpers below rely on, so one code path works on both versions.
    basestring = str
    unicode = str
def encode_string(value):
    """Return *value* as bytes, UTF-8-encoding text when necessary."""
    if isinstance(value, unicode):
        return value.encode('utf-8')
    return value
def decode_string(value):
    """Return *value* as text, UTF-8-decoding bytes when necessary."""
    if isinstance(value, basestring):
        return value
    return value.decode('utf-8')
# hmac.compare_digest were introduced in python 2.7.7
if sys.version_info >= ( 2, 7, 7 ):
from hmac import compare_digest as SecureStringsEqual
else:
# This is the compare_digest function from python 3.4, adapted for 2.6:
# http://hg.python.org/cpython/file/460407f35aa9/Lib/hmac.py#l16
#
# Stolen from https://github.com/Valloric/ycmd
def SecureStringsEqual( a, b ):
"""Returns the equivalent of 'a == b', but avoids content based short
circuiting to reduce the vulnerability to timing attacks."""
# Consistent timing matters more here than data type flexibility
if not ( isinstance( a, str ) and isinstance( b, str ) ):
raise TypeError( "inputs must be str instances" )
# We assume the length of the expected digest is public knowledge,
# thus this early return isn't leaking anything an attacker wouldn't
# already know
if len( a ) != len( b ):
return False
# We assume that integers in the bytes range are all cached,
# thus timing shouldn't vary much due to integer object creation
result = 0
for x, y in zip( a, b ):
result |= ord( x ) ^ ord( y )
return result == 0
def compare_digest( a, b ):
    """Constant-time equality check; uniform wrapper over whichever
    SecureStringsEqual implementation was selected above."""
    return SecureStringsEqual( a, b )
| NcLang/vimrc | sources_non_forked/YouCompleteMe/third_party/ycmd/third_party/JediHTTP/jedihttp/compatibility.py | Python | mit | 2,108 |
'''
Tests for fileinput module.
Nick Mathewson
'''
import unittest
from test.support import verbose, TESTFN, run_unittest
from test.support import unlink as safe_unlink
import sys, re
from io import StringIO
from fileinput import FileInput, hook_encoded
# The fileinput module has 2 interfaces: the FileInput class which does
# all the work, and a few functions (input, etc.) that use a global _state
# variable. We only test the FileInput class, since the other functions
# only provide a thin facade over FileInput.
# Write lines (a list of lines) to temp file number i, and return the
# temp file's name.
def writeTmp(i, lines, mode='w'):  # opening in text mode is the default
    """Write *lines* (a list of lines) to temp file number *i* and return
    the temp file's name.

    Fix: the original opened the file without a context manager, leaking
    the handle if any write raised; ``with`` guarantees the file is closed.
    """
    name = TESTFN + str(i)
    with open(name, mode) as f:
        f.writelines(lines)
    return name
def remove_tempfiles(*names):
    """Best-effort removal of the given temp files; falsy names are skipped."""
    for name in filter(None, names):
        safe_unlink(name)
class BufferSizesTests(unittest.TestCase):
    """Exercise FileInput behavior with the default and a tiny buffer size."""

    def test_buffer_sizes(self):
        # First, run the tests with default and teeny buffer size.
        for round, bs in (0, 0), (1, 30):
            t1 = t2 = t3 = t4 = None
            try:
                # Four files of decreasing length (15, 10, 5, 1 lines).
                t1 = writeTmp(1, ["Line %s of file 1\n" % (i+1) for i in range(15)])
                t2 = writeTmp(2, ["Line %s of file 2\n" % (i+1) for i in range(10)])
                t3 = writeTmp(3, ["Line %s of file 3\n" % (i+1) for i in range(5)])
                t4 = writeTmp(4, ["Line %s of file 4\n" % (i+1) for i in range(1)])
                self.buffer_size_test(t1, t2, t3, t4, bs, round)
            finally:
                remove_tempfiles(t1, t2, t3, t4)

    def buffer_size_test(self, t1, t2, t3, t4, bs=0, round=0):
        """Run the full battery of FileInput checks over files t1..t4 with
        buffer size *bs*; *round* only offsets the verbose step numbering."""
        pat = re.compile(r'LINE (\d+) OF FILE (\d+)')

        start = 1 + round*6
        if verbose:
            print('%s. Simple iteration (bs=%s)' % (start+0, bs))
        fi = FileInput(files=(t1, t2, t3, t4), bufsize=bs)
        lines = list(fi)
        fi.close()
        # 15 + 10 + 5 + 1 lines in total.
        self.assertEqual(len(lines), 31)
        self.assertEqual(lines[4], 'Line 5 of file 1\n')
        self.assertEqual(lines[30], 'Line 1 of file 4\n')
        self.assertEqual(fi.lineno(), 31)
        self.assertEqual(fi.filename(), t4)

        if verbose:
            print('%s. Status variables (bs=%s)' % (start+1, bs))
        fi = FileInput(files=(t1, t2, t3, t4), bufsize=bs)
        s = "x"
        while s and s != 'Line 6 of file 2\n':
            s = fi.readline()
        self.assertEqual(fi.filename(), t2)
        self.assertEqual(fi.lineno(), 21)
        self.assertEqual(fi.filelineno(), 6)
        # NOTE(review): failIf is a deprecated alias of assertFalse.
        self.failIf(fi.isfirstline())
        self.failIf(fi.isstdin())

        if verbose:
            print('%s. Nextfile (bs=%s)' % (start+2, bs))
        fi.nextfile()
        self.assertEqual(fi.readline(), 'Line 1 of file 3\n')
        self.assertEqual(fi.lineno(), 22)
        fi.close()

        if verbose:
            print('%s. Stdin (bs=%s)' % (start+3, bs))
        fi = FileInput(files=(t1, t2, t3, t4, '-'), bufsize=bs)
        savestdin = sys.stdin
        try:
            sys.stdin = StringIO("Line 1 of stdin\nLine 2 of stdin\n")
            lines = list(fi)
            self.assertEqual(len(lines), 33)
            self.assertEqual(lines[32], 'Line 2 of stdin\n')
            self.assertEqual(fi.filename(), '<stdin>')
            fi.nextfile()
        finally:
            sys.stdin = savestdin

        if verbose:
            print('%s. Boundary conditions (bs=%s)' % (start+4, bs))
        # Before any reading, and after nextfile() with nothing read,
        # the state accessors report "nothing yet".
        fi = FileInput(files=(t1, t2, t3, t4), bufsize=bs)
        self.assertEqual(fi.lineno(), 0)
        self.assertEqual(fi.filename(), None)
        fi.nextfile()
        self.assertEqual(fi.lineno(), 0)
        self.assertEqual(fi.filename(), None)

        if verbose:
            print('%s. Inplace (bs=%s)' % (start+5, bs))
        savestdout = sys.stdout
        try:
            # inplace=1 redirects stdout into the current file; uppercase
            # every line, then verify the rewrite below.
            fi = FileInput(files=(t1, t2, t3, t4), inplace=1, bufsize=bs)
            for line in fi:
                line = line[:-1].upper()
                print(line)
            fi.close()
        finally:
            sys.stdout = savestdout

        fi = FileInput(files=(t1, t2, t3, t4), bufsize=bs)
        for line in fi:
            self.assertEqual(line[-1], '\n')
            m = pat.match(line[:-1])
            self.assertNotEqual(m, None)
            self.assertEqual(int(m.group(1)), fi.filelineno())
        fi.close()
class FileInputTests(unittest.TestCase):
    """Edge-case behavior of FileInput: empty files, missing final newline,
    fileno(), opening modes and opening hooks."""

    def test_zero_byte_files(self):
        # Empty files must be skipped transparently; lineno/filelineno
        # reflect only lines actually read.
        t1 = t2 = t3 = t4 = None
        try:
            t1 = writeTmp(1, [""])
            t2 = writeTmp(2, [""])
            t3 = writeTmp(3, ["The only line there is.\n"])
            t4 = writeTmp(4, [""])
            fi = FileInput(files=(t1, t2, t3, t4))

            line = fi.readline()
            self.assertEqual(line, 'The only line there is.\n')
            self.assertEqual(fi.lineno(), 1)
            self.assertEqual(fi.filelineno(), 1)
            self.assertEqual(fi.filename(), t3)

            line = fi.readline()
            # NOTE(review): failIf is a deprecated alias of assertFalse.
            self.failIf(line)
            self.assertEqual(fi.lineno(), 1)
            self.assertEqual(fi.filelineno(), 0)
            self.assertEqual(fi.filename(), t4)
            fi.close()
        finally:
            remove_tempfiles(t1, t2, t3, t4)

    def test_files_that_dont_end_with_newline(self):
        # The last partial line of one file must not be merged with the
        # first line of the next file.
        t1 = t2 = None
        try:
            t1 = writeTmp(1, ["A\nB\nC"])
            t2 = writeTmp(2, ["D\nE\nF"])
            fi = FileInput(files=(t1, t2))
            lines = list(fi)
            self.assertEqual(lines, ["A\n", "B\n", "C", "D\n", "E\n", "F"])
            self.assertEqual(fi.filelineno(), 3)
            self.assertEqual(fi.lineno(), 6)
        finally:
            remove_tempfiles(t1, t2)

##     def test_unicode_filenames(self):
##         # XXX A unicode string is always returned by writeTmp.
##         #     So is this needed?
##         try:
##             t1 = writeTmp(1, ["A\nB"])
##             encoding = sys.getfilesystemencoding()
##             if encoding is None:
##                 encoding = 'ascii'
##             fi = FileInput(files=str(t1, encoding))
##             lines = list(fi)
##             self.assertEqual(lines, ["A\n", "B"])
##         finally:
##             remove_tempfiles(t1)

    def test_fileno(self):
        # fileno() is -1 whenever no underlying file is currently open.
        t1 = t2 = None
        try:
            t1 = writeTmp(1, ["A\nB"])
            t2 = writeTmp(2, ["C\nD"])
            fi = FileInput(files=(t1, t2))
            self.assertEqual(fi.fileno(), -1)
            line = next(fi)
            self.assertNotEqual(fi.fileno(), -1)
            fi.nextfile()
            self.assertEqual(fi.fileno(), -1)
            line = list(fi)
            self.assertEqual(fi.fileno(), -1)
        finally:
            remove_tempfiles(t1, t2)

    def test_opening_mode(self):
        try:
            # invalid mode, should raise ValueError
            fi = FileInput(mode="w")
            self.fail("FileInput should reject invalid mode argument")
        except ValueError:
            pass
        t1 = None
        try:
            # try opening in universal newline mode
            t1 = writeTmp(1, [b"A\nB\r\nC\rD"], mode="wb")
            fi = FileInput(files=t1, mode="U")
            lines = list(fi)
            self.assertEqual(lines, ["A\n", "B\n", "C\n", "D"])
        finally:
            remove_tempfiles(t1)

    def test_file_opening_hook(self):
        try:
            # cannot use openhook and inplace mode
            fi = FileInput(inplace=1, openhook=lambda f, m: None)
            self.fail("FileInput should raise if both inplace "
                      "and openhook arguments are given")
        except ValueError:
            pass
        try:
            fi = FileInput(openhook=1)
            self.fail("FileInput should check openhook for being callable")
        except ValueError:
            pass
        # XXX The rot13 codec was removed.
        #     So this test needs to be changed to use something else.
        #     (Or perhaps the API needs to change so we can just pass
        #     an encoding rather than using a hook?)
##         try:
##             t1 = writeTmp(1, ["A\nB"], mode="wb")
##             fi = FileInput(files=t1, openhook=hook_encoded("rot13"))
##             lines = list(fi)
##             self.assertEqual(lines, ["N\n", "O"])
##         finally:
##             remove_tempfiles(t1)
def test_main():
    # Drive both suites through regrtest's helper so the stdlib test
    # runner accounts for them correctly.
    run_unittest(BufferSizesTests, FileInputTests)
# Allow running this test file directly (outside of regrtest).
if __name__ == "__main__":
    test_main()
| MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.1/Lib/test/test_fileinput.py | Python | mit | 8,498 |
# coding: utf-8
# pylint: disable=missing-docstring, invalid-name
from __future__ import absolute_import
from google.appengine.api import users
import flask
import auth
import model
import util
from main import app
@app.route('/signin/google/')
def signin_google():
    """Start Google sign-in by bouncing the user to the GAE login URL."""
    auth.save_request_params()
    return flask.redirect(
        users.create_login_url(flask.url_for('google_authorized')))
@app.route('/_s/callback/google/authorized/')
def google_authorized():
    """Finish Google sign-in once the user returns from the login page."""
    google_user = users.get_current_user()
    if google_user is None:
        # The user backed out of (or was denied) the Google login.
        flask.flash('You denied the request to sign in.')
        return flask.redirect(flask.url_for('index'))
    return auth.signin_via_social(retrieve_user_from_google(google_user))
def retrieve_user_from_google(google_user):
    """Map an App Engine user to our User model, creating one if needed."""
    auth_id = 'federated_%s' % google_user.user_id()
    existing = model.User.get_by('auth_ids', auth_id)
    if existing:
        # Promote the account if GAE now reports admin status.
        if users.is_current_user_admin() and not existing.admin:
            existing.admin = True
            existing.put()
        return existing
    email = google_user.email()
    return auth.create_or_get_user_db(
        auth_id=auth_id,
        name=util.create_name_from_email(email),
        username=email,
        email=email,
        verified=True,
        admin=users.is_current_user_admin(),
    )
| sidharta/hansel-app | main/auth/google.py | Python | mit | 1,359 |
from __future__ import generators
"""
httplib2
A caching http interface that supports ETags and gzip
to conserve bandwidth.
Requires Python 2.3 or later
Changelog:
2007-08-18, Rick: Modified so it's able to use a socks proxy if needed.
"""
__author__ = "Joe Gregorio (joe@bitworking.org)"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = ["Thomas Broyer (t.broyer@ltgt.net)",
"James Antill",
"Xavier Verges Farrero",
"Jonathan Feinberg",
"Blair Zajac",
"Sam Ruby",
"Louis Nyffenegger"]
__license__ = "MIT"
__version__ = "0.7.5"
import re
import sys
import email
import email.Utils
import email.Message
import email.FeedParser
import StringIO
import gzip
import zlib
import httplib
import urlparse
import urllib
import base64
import os
import copy
import calendar
import time
import random
import errno
import logging
try:
from hashlib import sha1 as _sha, md5 as _md5
except ImportError:
# prior to Python 2.5, these were separate modules
import sha
import md5
_sha = sha.new
_md5 = md5.new
import hmac
from gettext import gettext as _
import socket
try:
from httplib2 import socks
except ImportError:
try:
import socks
except ImportError:
socks = None
# Build the appropriate socket wrapper for ssl
try:
    import ssl # python 2.6
    ssl_SSLError = ssl.SSLError
    def _ssl_wrap_socket(sock, key_file, cert_file,
                         disable_validation, ca_certs):
        # NOTE(review): this line is a local patch that unconditionally
        # disables server certificate validation, regardless of what the
        # caller asked for. That makes every HTTPS connection vulnerable
        # to man-in-the-middle attacks -- confirm this is intentional.
        disable_validation = True
        # Force to disable validation
        if disable_validation:
            cert_reqs = ssl.CERT_NONE
            ca_certs = None
        else:
            cert_reqs = ssl.CERT_REQUIRED
        # We should be specifying SSL version 3 or TLS v1, but the ssl module
        # doesn't expose the necessary knobs. So we need to go with the default
        # of SSLv23.
        return ssl.wrap_socket(sock, keyfile=key_file, certfile=cert_file,
                               cert_reqs=cert_reqs, ca_certs=ca_certs)
except (AttributeError, ImportError):
    # Pre-2.6 fallback: the legacy socket.ssl API cannot validate
    # certificates at all, so refuse unless validation was disabled.
    ssl_SSLError = None
    def _ssl_wrap_socket(sock, key_file, cert_file,
                         disable_validation, ca_certs):
        if not disable_validation:
            raise CertificateValidationUnsupported(
                "SSL certificate validation is not supported without "
                "the ssl module installed. To avoid this error, install "
                "the ssl module, or explicity disable validation.")
        ssl_sock = socket.ssl(sock, key_file, cert_file)
        return httplib.FakeSocket(sock, ssl_sock)
# IRI -> URI translation needs the companion iri2uri module (2.3+);
# on older pythons fall back to the identity function.
if sys.version_info >= (2,3):
    from iri2uri import iri2uri
else:
    def iri2uri(uri):
        # Pre-2.3 fallback: pass the IRI through untouched.
        return uri
def has_timeout(timeout): # python 2.6
    """Return True when *timeout* is an actual timeout value.

    On Python 2.6+ the sentinel socket._GLOBAL_DEFAULT_TIMEOUT means
    "use the default" and therefore does not count as a timeout.
    """
    if timeout is None:
        return False
    if hasattr(socket, '_GLOBAL_DEFAULT_TIMEOUT'):
        return timeout is not socket._GLOBAL_DEFAULT_TIMEOUT
    return True
# Public API of the module.
__all__ = ['Http', 'Response', 'ProxyInfo', 'HttpLib2Error',
           'RedirectMissingLocation', 'RedirectLimit', 'FailedToDecompressContent',
           'UnimplementedDigestAuthOptionError', 'UnimplementedHmacDigestAuthOptionError',
           'debuglevel', 'ProxiesUnavailableError']

# The httplib debug level, set to a non-zero value to get debug output
debuglevel = 0

# A request will be tried 'RETRIES' times if it fails at the socket/connection level.
RETRIES = 2
# Python 2.3 support
if sys.version_info < (2,4):
    def sorted(seq):
        # Pre-2.4 shim for the sorted() builtin. Note it sorts *in place*
        # and returns the same list, unlike the real builtin.
        seq.sort()
        return seq
# Python 2.3 support
def HTTPResponse__getheaders(self):
    """Return list of (header, value) tuples."""
    if self.msg is None:
        raise httplib.ResponseNotReady()
    return self.msg.items()

# Monkey-patch getheaders onto HTTPResponse only when the running
# httplib predates it (added in 2.4).
if not hasattr(httplib.HTTPResponse, 'getheaders'):
    httplib.HTTPResponse.getheaders = HTTPResponse__getheaders
# All exceptions raised here derive from HttpLib2Error
class HttpLib2Error(Exception): pass

# Some exceptions can be caught and optionally
# be turned back into responses.
class HttpLib2ErrorWithResponse(HttpLib2Error):
    def __init__(self, desc, response, content):
        # Keep the failed response/content around so callers can choose
        # to treat the error as a (degraded) response instead.
        self.response = response
        self.content = content
        HttpLib2Error.__init__(self, desc)

# Redirect-handling failures (carry the offending response).
class RedirectMissingLocation(HttpLib2ErrorWithResponse): pass
class RedirectLimit(HttpLib2ErrorWithResponse): pass
class FailedToDecompressContent(HttpLib2ErrorWithResponse): pass
class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse): pass
class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse): pass

# Plain errors with no associated response object.
class MalformedHeader(HttpLib2Error): pass
class RelativeURIError(HttpLib2Error): pass
class ServerNotFoundError(HttpLib2Error): pass
class ProxiesUnavailableError(HttpLib2Error): pass
class CertificateValidationUnsupported(HttpLib2Error): pass
class SSLHandshakeError(HttpLib2Error): pass
class NotSupportedOnThisPlatform(HttpLib2Error): pass
class CertificateHostnameMismatch(SSLHandshakeError):
    def __init__(self, desc, host, cert):
        HttpLib2Error.__init__(self, desc)
        # The host we were connecting to and the certificate it presented.
        self.host = host
        self.cert = cert
# Open Items:
# -----------
# Proxy support
# Are we removing the cached content too soon on PUT (only delete on 200 Maybe?)
# Pluggable cache storage (supports storing the cache in
#   flat files by default. We need a plug-in architecture
#   that can support Berkeley DB and Squid)

# == Known Issues ==
# Does not handle a resource that uses conneg and Last-Modified but no ETag as a cache validator.
# Does not handle Cache-Control: max-stale
# Does not use Age: headers when calculating cache freshness.

# The number of redirections to follow before giving up.
# Note that only GET redirects are automatically followed.
# Will also honor 301 requests by saving that info and never
# requesting that URI again.
DEFAULT_MAX_REDIRECTS = 5

# Default CA certificates file bundled with httplib2.
# Resolved relative to this module so it works regardless of cwd.
CA_CERTS = os.path.join(
    os.path.dirname(os.path.abspath(__file__ )), "cacerts.txt")
# Which headers are hop-by-hop headers by default
HOP_BY_HOP = ['connection', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers', 'transfer-encoding', 'upgrade']
def _get_end2end_headers(response):
hopbyhop = list(HOP_BY_HOP)
hopbyhop.extend([x.strip() for x in response.get('connection', '').split(',')])
return [header for header in response.keys() if header not in hopbyhop]
URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")

def parse_uri(uri):
    """Split *uri* into (scheme, authority, path, query, fragment).

    Uses the regex given in Appendix B of RFC 3986; absent components
    come back as None.
    """
    parts = URI.match(uri).groups()
    return parts[1], parts[3], parts[4], parts[6], parts[8]
def urlnorm(uri):
    """Normalize *uri* and return (scheme, authority, request_uri, defrag_uri).

    Scheme and authority are lower-cased, an empty path becomes "/", and
    the fragment is dropped from defrag_uri. Raises RelativeURIError for
    relative URIs.

    Fix: the original lower-cased the scheme twice; the duplicate
    statement is removed.
    """
    (scheme, authority, path, query, fragment) = parse_uri(uri)
    if not scheme or not authority:
        raise RelativeURIError("Only absolute URIs are allowed. uri = %s" % uri)
    # Scheme and authority are case-insensitive.
    authority = authority.lower()
    scheme = scheme.lower()
    if not path:
        path = "/"
    # Could do syntax based normalization of the URI before
    # computing the digest. See Section 6.2.2 of Std 66.
    request_uri = query and "?".join([path, query]) or path
    defrag_uri = scheme + "://" + authority + request_uri
    return scheme, authority, request_uri, defrag_uri
# Cache filename construction (original borrowed from Venus http://intertwingly.net/code/venus/)
re_url_scheme = re.compile(r'^\w+://')
re_slash = re.compile(r'[?/:|]+')

def safename(filename):
    """Return a filename suitable for the cache.

    Strips dangerous and common characters to create a filename we
    can use to store the cache in.
    """
    try:
        if re_url_scheme.match(filename):
            if isinstance(filename,str):
                # Python 2 byte string: decode to unicode first so the
                # hostname can be IDNA-encoded.
                filename = filename.decode('utf-8')
                filename = filename.encode('idna')
            else:
                filename = filename.encode('idna')
    except UnicodeError:
        pass
    if isinstance(filename,unicode):
        filename=filename.encode('utf-8')
    # Append an md5 of the full name so truncation below cannot cause
    # two different URIs to collide on the same cache file.
    filemd5 = _md5(filename).hexdigest()
    filename = re_url_scheme.sub("", filename)
    filename = re_slash.sub(",", filename)

    # limit length of filename
    if len(filename)>200:
        filename=filename[:200]
    return ",".join((filename, filemd5))
NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+')
def _normalize_headers(headers):
return dict([ (key.lower(), NORMALIZE_SPACE.sub(value, ' ').strip()) for (key, value) in headers.iteritems()])
def _parse_cache_control(headers):
retval = {}
if headers.has_key('cache-control'):
parts = headers['cache-control'].split(',')
parts_with_args = [tuple([x.strip().lower() for x in part.split("=", 1)]) for part in parts if -1 != part.find("=")]
parts_wo_args = [(name.strip().lower(), 1) for name in parts if -1 == name.find("=")]
retval = dict(parts_with_args + parts_wo_args)
return retval
# Whether to use a strict mode to parse WWW-Authenticate headers
# Might lead to bad results in case of ill-formed header value,
# so disabled by default, falling back to relaxed parsing.
# Set to true to turn on, usefull for testing servers.
USE_WWW_AUTH_STRICT_PARSING = 0
# In regex below:
# [^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+ matches a "token" as defined by HTTP
# "(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?" matches a "quoted-string" as defined by HTTP, when LWS have already been replaced by a single space
# Actually, as an auth-param value can be either a token or a quoted-string, they are combined in a single pattern which matches both:
# \"?((?<=\")(?:[^\0-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x08\x0A-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?
WWW_AUTH_STRICT = re.compile(r"^(?:\s*(?:,\s*)?([^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+)\s*=\s*\"?((?<=\")(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?)(.*)$")
WWW_AUTH_RELAXED = re.compile(r"^(?:\s*(?:,\s*)?([^ \t\r\n=]+)\s*=\s*\"?((?<=\")(?:[^\\\"]|\\.)*?(?=\")|(?<!\")[^ \t\r\n,]+(?!\"))\"?)(.*)$")
UNQUOTE_PAIRS = re.compile(r'\\(.)')
def _parse_www_authenticate(headers, headername='www-authenticate'):
"""Returns a dictionary of dictionaries, one dict
per auth_scheme."""
retval = {}
if headers.has_key(headername):
try:
authenticate = headers[headername].strip()
www_auth = USE_WWW_AUTH_STRICT_PARSING and WWW_AUTH_STRICT or WWW_AUTH_RELAXED
while authenticate:
# Break off the scheme at the beginning of the line
if headername == 'authentication-info':
(auth_scheme, the_rest) = ('digest', authenticate)
else:
(auth_scheme, the_rest) = authenticate.split(" ", 1)
# Now loop over all the key value pairs that come after the scheme,
# being careful not to roll into the next scheme
match = www_auth.search(the_rest)
auth_params = {}
while match:
if match and len(match.groups()) == 3:
(key, value, the_rest) = match.groups()
auth_params[key.lower()] = UNQUOTE_PAIRS.sub(r'\1', value) # '\\'.join([x.replace('\\', '') for x in value.split('\\\\')])
match = www_auth.search(the_rest)
retval[auth_scheme.lower()] = auth_params
authenticate = the_rest.strip()
except ValueError:
raise MalformedHeader("WWW-Authenticate")
return retval
def _entry_disposition(response_headers, request_headers):
    """Determine freshness from the Date, Expires and Cache-Control headers.

    We don't handle the following:

    1. Cache-Control: max-stale
    2. Age: headers are not used in the calculations.

    Not that this algorithm is simpler than you might think
    because we are operating as a private (non-shared) cache.
    This lets us ignore 's-maxage'. We can also ignore
    'proxy-invalidate' since we aren't a proxy.
    We will never return a stale document as
    fresh as a design decision, and thus the non-implementation
    of 'max-stale'. This also lets us safely ignore 'must-revalidate'
    since we operate as if every server has sent 'must-revalidate'.
    Since we are private we get to ignore both 'public' and
    'private' parameters. We also ignore 'no-transform' since
    we don't do any transformations.
    The 'no-store' parameter is handled at a higher level.
    So the only Cache-Control parameters we look at are:

    no-cache
    only-if-cached
    max-age
    min-fresh

    (Updated to use the ``in`` operator instead of the removed
    dict.has_key().)
    """
    retval = "STALE"
    cc = _parse_cache_control(request_headers)
    cc_response = _parse_cache_control(response_headers)

    if 'pragma' in request_headers and request_headers['pragma'].lower().find('no-cache') != -1:
        retval = "TRANSPARENT"
        if 'cache-control' not in request_headers:
            request_headers['cache-control'] = 'no-cache'
    elif 'no-cache' in cc:
        retval = "TRANSPARENT"
    elif 'no-cache' in cc_response:
        retval = "STALE"
    elif 'only-if-cached' in cc:
        retval = "FRESH"
    elif 'date' in response_headers:
        date = calendar.timegm(email.Utils.parsedate_tz(response_headers['date']))
        now = time.time()
        current_age = max(0, now - date)
        if 'max-age' in cc_response:
            try:
                freshness_lifetime = int(cc_response['max-age'])
            except ValueError:
                freshness_lifetime = 0
        elif 'expires' in response_headers:
            expires = email.Utils.parsedate_tz(response_headers['expires'])
            if None == expires:
                freshness_lifetime = 0
            else:
                freshness_lifetime = max(0, calendar.timegm(expires) - date)
        else:
            freshness_lifetime = 0
        # The request's own max-age overrides the response's.
        if 'max-age' in cc:
            try:
                freshness_lifetime = int(cc['max-age'])
            except ValueError:
                freshness_lifetime = 0
        # min-fresh: the entry must still be fresh that far in the future.
        if 'min-fresh' in cc:
            try:
                min_fresh = int(cc['min-fresh'])
            except ValueError:
                min_fresh = 0
            current_age += min_fresh
        if freshness_lifetime > current_age:
            retval = "FRESH"
    return retval
def _decompressContent(response, new_content):
    # Decompress a gzip- or deflate-encoded body, updating content-length
    # and moving the content-encoding header aside.
    content = new_content
    try:
        encoding = response.get('content-encoding', None)
        if encoding in ['gzip', 'deflate']:
            if encoding == 'gzip':
                content = gzip.GzipFile(fileobj=StringIO.StringIO(new_content)).read()
            if encoding == 'deflate':
                # NOTE(review): zlib.decompress raises zlib.error, which is
                # not an IOError subclass, so deflate failures would escape
                # the except clause below -- confirm intended.
                content = zlib.decompress(content)
            response['content-length'] = str(len(content))
            # Record the historical presence of the encoding in a way the won't interfere.
            response['-content-encoding'] = response['content-encoding']
            del response['content-encoding']
    except IOError:
        content = ""
        raise FailedToDecompressContent(_("Content purported to be compressed with %s but failed to decompress.") % response.get('content-encoding'), response, content)
    return content
def _updateCache(request_headers, response_headers, content, cache, cachekey):
    # Store (or evict) the response under cachekey, honoring no-store and
    # recording Vary-related request headers for later validation.
    if cachekey:
        cc = _parse_cache_control(request_headers)
        cc_response = _parse_cache_control(response_headers)
        if cc.has_key('no-store') or cc_response.has_key('no-store'):
            cache.delete(cachekey)
        else:
            # Serialize the headers through an email.Message so they are
            # written in standard RFC 822 form.
            info = email.Message.Message()
            for key, value in response_headers.iteritems():
                if key not in ['status','content-encoding','transfer-encoding']:
                    info[key] = value

            # Add annotations to the cache to indicate what headers
            # are variant for this request.
            vary = response_headers.get('vary', None)
            if vary:
                vary_headers = vary.lower().replace(' ', '').split(',')
                for header in vary_headers:
                    key = '-varied-%s' % header
                    try:
                        info[key] = request_headers[header]
                    except KeyError:
                        pass

            status = response_headers.status
            # A 304 validates the cached entry, so persist it as a 200.
            if status == 304:
                status = 200

            status_header = 'status: %d\r\n' % status

            header_str = info.as_string()

            # Normalize all line endings to CRLF before concatenating.
            header_str = re.sub("\r(?!\n)|(?<!\r)\n", "\r\n", header_str)
            text = "".join([status_header, header_str, content])

            cache.set(cachekey, text)
def _cnonce():
    """Return a random 16-hex-character client nonce.

    Fix: random.randrange(0, 9) excludes its upper bound, so the digit
    '9' could never appear in the seed material; randrange(0, 10)
    covers all ten digits as the table literal intends.
    """
    dig = _md5("%s:%s" % (time.ctime(), ["0123456789"[random.randrange(0, 10)] for i in range(20)])).hexdigest()
    return dig[:16]
def _wsse_username_token(cnonce, iso_now, password):
    """Compute the WSSE PasswordDigest: b64(sha1(nonce + created + password))."""
    seed = "%s%s%s" % (cnonce, iso_now, password)
    digest = _sha(seed).digest()
    return base64.b64encode(digest).strip()
# For credentials we need two things, first
# a pool of credential to try (not necesarily tied to BAsic, Digest, etc.)
# Then we also need a list of URIs that have already demanded authentication
# That list is tricky since sub-URIs can take the same auth, or the
# auth scheme may change as you descend the tree.
# So we also need each Auth instance to be able to tell us
# how close to the 'top' it is.
class Authentication(object):
    """Base class for the HTTP authentication schemes.

    An instance is scoped to a (host, path-prefix) pair taken from the
    URI that triggered the challenge; subclasses override request() and
    response() to inject and refresh credentials.
    """

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        # Only the path component of the challenged URI matters for scoping.
        (scheme, authority, path, query, fragment) = parse_uri(request_uri)
        self.path = path
        self.host = host
        self.credentials = credentials
        self.http = http

    def depth(self, request_uri):
        """Return how many path segments *request_uri* sits below our root.

        Fix: removed a dead parse_uri() call whose unpacked results were
        never used.
        """
        return request_uri[len(self.path):].count("/")

    def inscope(self, host, request_uri):
        """True when (host, request_uri) is covered by this auth's scope."""
        # XXX Should we normalize the request_uri?
        (scheme, authority, path, query, fragment) = parse_uri(request_uri)
        return (host == self.host) and path.startswith(self.path)

    def request(self, method, request_uri, headers, content):
        """Modify the request headers to add the appropriate
        Authorization header. Over-ride this in sub-classes."""
        pass

    def response(self, response, content):
        """Gives us a chance to update with new nonces
        or such returned from the last authorized response.
        Over-rise this in sub-classes if necessary.

        Return TRUE is the request is to be retried, for
        example Digest may return stale=true.
        """
        return False
class BasicAuthentication(Authentication):
    """Performs HTTP Basic auth by attaching a base64 credentials header."""

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)

    def request(self, method, request_uri, headers, content):
        """Attach the Basic Authorization header to *headers*."""
        token = base64.b64encode("%s:%s" % self.credentials).strip()
        headers['authorization'] = 'Basic ' + token
class DigestAuthentication(Authentication):
    """Only do qop='auth' and MD5, since that
    is all Apache currently implements"""
    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
        challenge = _parse_www_authenticate(response, 'www-authenticate')
        self.challenge = challenge['digest']
        # Default to qop=auth and reject any challenge we don't implement.
        qop = self.challenge.get('qop', 'auth')
        self.challenge['qop'] = ('auth' in [x.strip() for x in qop.split()]) and 'auth' or None
        if self.challenge['qop'] is None:
            raise UnimplementedDigestAuthOptionError( _("Unsupported value for qop: %s." % qop))
        self.challenge['algorithm'] = self.challenge.get('algorithm', 'MD5').upper()
        if self.challenge['algorithm'] != 'MD5':
            raise UnimplementedDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
        # A1 = username ":" realm ":" password (per RFC 2617's MD5 scheme).
        self.A1 = "".join([self.credentials[0], ":", self.challenge['realm'], ":", self.credentials[1]])
        # nc (nonce count) increments on every request with this nonce.
        self.challenge['nc'] = 1

    def request(self, method, request_uri, headers, content, cnonce = None):
        """Modify the request headers"""
        # H and KD follow the RFC 2617 notation: hash and keyed digest.
        H = lambda x: _md5(x).hexdigest()
        KD = lambda s, d: H("%s:%s" % (s, d))
        A2 = "".join([method, ":", request_uri])
        self.challenge['cnonce'] = cnonce or _cnonce()
        request_digest  = '"%s"' % KD(H(self.A1), "%s:%s:%s:%s:%s" % (self.challenge['nonce'],
                    '%08x' % self.challenge['nc'],
                    self.challenge['cnonce'],
                    self.challenge['qop'], H(A2)
                    ))
        headers['authorization'] = 'Digest username="%s", realm="%s", nonce="%s", uri="%s", algorithm=%s, response=%s, qop=%s, nc=%08x, cnonce="%s"' % (
                self.credentials[0],
                self.challenge['realm'],
                self.challenge['nonce'],
                request_uri,
                self.challenge['algorithm'],
                request_digest,
                self.challenge['qop'],
                self.challenge['nc'],
                self.challenge['cnonce'],
                )
        if self.challenge.get('opaque'):
            headers['authorization'] += ', opaque="%s"' % self.challenge['opaque']
        self.challenge['nc'] += 1

    def response(self, response, content):
        # On a stale nonce, adopt the server's new nonce and retry;
        # otherwise pick up nextnonce from Authentication-Info if present.
        if not response.has_key('authentication-info'):
            challenge = _parse_www_authenticate(response, 'www-authenticate').get('digest', {})
            if 'true' == challenge.get('stale'):
                self.challenge['nonce'] = challenge['nonce']
                self.challenge['nc'] = 1
                return True
        else:
            updated_challenge = _parse_www_authenticate(response, 'authentication-info').get('digest', {})

            if updated_challenge.has_key('nextnonce'):
                self.challenge['nonce'] = updated_challenge['nextnonce']
                self.challenge['nc'] = 1
        return False
class HmacDigestAuthentication(Authentication):
    """Adapted from Robert Sayre's code and DigestAuthentication above."""
    __author__ = "Thomas Broyer (t.broyer@ltgt.net)"

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
        challenge = _parse_www_authenticate(response, 'www-authenticate')
        self.challenge = challenge['hmacdigest']
        # TODO: self.challenge['domain']
        self.challenge['reason'] = self.challenge.get('reason', 'unauthorized')
        if self.challenge['reason'] not in ['unauthorized', 'integrity']:
            self.challenge['reason'] = 'unauthorized'
        self.challenge['salt'] = self.challenge.get('salt', '')
        if not self.challenge.get('snonce'):
            raise UnimplementedHmacDigestAuthOptionError( _("The challenge doesn't contain a server nonce, or this one is empty."))
        self.challenge['algorithm'] = self.challenge.get('algorithm', 'HMAC-SHA-1')
        if self.challenge['algorithm'] not in ['HMAC-SHA-1', 'HMAC-MD5']:
            raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
        self.challenge['pw-algorithm'] = self.challenge.get('pw-algorithm', 'SHA-1')
        if self.challenge['pw-algorithm'] not in ['SHA-1', 'MD5']:
            raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for pw-algorithm: %s." % self.challenge['pw-algorithm']))
        if self.challenge['algorithm'] == 'HMAC-MD5':
            self.hashmod = _md5
        else:
            self.hashmod = _sha
        if self.challenge['pw-algorithm'] == 'MD5':
            self.pwhashmod = _md5
        else:
            self.pwhashmod = _sha
        # NOTE(review): .new() exists on the legacy md5/sha modules but not
        # on the hashlib constructors aliased at the top of this file --
        # confirm this path is exercised only on pre-2.5 pythons.
        self.key = "".join([self.credentials[0], ":",
                    self.pwhashmod.new("".join([self.credentials[1], self.challenge['salt']])).hexdigest().lower(),
                    ":", self.challenge['realm']
                    ])
        self.key = self.pwhashmod.new(self.key).hexdigest().lower()

    def request(self, method, request_uri, headers, content):
        """Modify the request headers"""
        # The digest covers the method, URI, nonces and every end-to-end
        # header value, keyed with self.key computed in __init__.
        keys = _get_end2end_headers(headers)
        keylist = "".join(["%s " % k for k in keys])
        headers_val = "".join([headers[k] for k in keys])
        created = time.strftime('%Y-%m-%dT%H:%M:%SZ',time.gmtime())
        cnonce = _cnonce()
        request_digest = "%s:%s:%s:%s:%s" % (method, request_uri, cnonce, self.challenge['snonce'], headers_val)
        request_digest  = hmac.new(self.key, request_digest, self.hashmod).hexdigest().lower()
        headers['authorization'] = 'HMACDigest username="%s", realm="%s", snonce="%s", cnonce="%s", uri="%s", created="%s", response="%s", headers="%s"' % (
                self.credentials[0],
                self.challenge['realm'],
                self.challenge['snonce'],
                cnonce,
                request_uri,
                created,
                request_digest,
                keylist,
                )

    def response(self, response, content):
        # Retry when the server reports an integrity failure or stale nonce.
        challenge = _parse_www_authenticate(response, 'www-authenticate').get('hmacdigest', {})
        if challenge.get('reason') in ['integrity', 'stale']:
            return True
        return False
class WsseAuthentication(Authentication):
    """WSSE UsernameToken authentication (thinly tested, use with care).

    There is no third-party server to test against at this time. Blogger
    and TypePad implemented this algorithm at one point, but Blogger has
    since switched to Basic over HTTPS and TypePad implemented it wrong
    (never issuing a 401 challenge, instead requiring the client to just
    know the endpoint expects WSSE profile="UsernameToken").
    """

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)

    def request(self, method, request_uri, headers, content):
        """Add the WSSE Authorization and X-WSSE headers to *headers*."""
        created = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
        nonce = _cnonce()
        digest = _wsse_username_token(nonce, created, self.credentials[1])
        headers['authorization'] = 'WSSE profile="UsernameToken"'
        headers['X-WSSE'] = 'UsernameToken Username="%s", PasswordDigest="%s", Nonce="%s", Created="%s"' % (
                self.credentials[0],
                digest,
                nonce,
                created)
class GoogleLoginAuthentication(Authentication):
    # Google ClientLogin authentication. NOTE: __init__ performs a live
    # HTTPS POST to fetch the Auth token.
    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        from urllib import urlencode
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
        challenge = _parse_www_authenticate(response, 'www-authenticate')
        service = challenge['googlelogin'].get('service', 'xapi')
        # Bloggger actually returns the service in the challenge
        # For the rest we guess based on the URI
        if service == 'xapi' and  request_uri.find("calendar") > 0:
            service = "cl"
        # No point in guessing Base or Spreadsheet
        #elif request_uri.find("spreadsheets") > 0:
        #    service = "wise"

        auth = dict(Email=credentials[0], Passwd=credentials[1], service=service, source=headers['user-agent'])
        resp, content = self.http.request("https://www.google.com/accounts/ClientLogin", method="POST", body=urlencode(auth), headers={'Content-Type': 'application/x-www-form-urlencoded'})
        # The response body is newline-separated key=value pairs.
        lines = content.split('\n')
        d = dict([tuple(line.split("=", 1)) for line in lines if line])
        if resp.status == 403:
            # Login rejected: leave an empty token rather than raising.
            self.Auth = ""
        else:
            self.Auth = d['Auth']

    def request(self, method, request_uri, headers, content):
        """Modify the request headers to add the appropriate
        Authorization header."""
        headers['authorization'] = 'GoogleLogin Auth=' + self.Auth
# Registry mapping a lower-cased scheme name (as parsed from the
# WWW-Authenticate header) to its handler class.
AUTH_SCHEME_CLASSES = {
    "basic": BasicAuthentication,
    "wsse": WsseAuthentication,
    "digest": DigestAuthentication,
    "hmacdigest": HmacDigestAuthentication,
    "googlelogin": GoogleLoginAuthentication
}

# Order in which schemes are tried when the server offers several.
AUTH_SCHEME_ORDER = ["hmacdigest", "googlelogin", "digest", "wsse", "basic"]
class FileCache(object):
    """Uses a local directory as a store for cached files.
    Not really safe to use if multiple threads or processes are going to
    be running on the same cache.

    Fixes: the Python-2-only file() builtin is replaced with open()
    (available on every supported version), and reads/writes close the
    handle in a finally clause so a failed read()/write() cannot leak it.
    """
    def __init__(self, cache, safe=safename): # use safe=lambda x: md5.new(x).hexdigest() for the old behavior
        # *cache* is the directory path; *safe* maps a key to a filename.
        self.cache = cache
        self.safe = safe
        if not os.path.exists(cache):
            os.makedirs(self.cache)

    def get(self, key):
        """Return the cached bytes for *key*, or None when absent/unreadable."""
        retval = None
        cacheFullPath = os.path.join(self.cache, self.safe(key))
        try:
            f = open(cacheFullPath, "rb")
            try:
                retval = f.read()
            finally:
                f.close()
        except IOError:
            pass
        return retval

    def set(self, key, value):
        """Store *value* under *key*, overwriting any previous entry."""
        cacheFullPath = os.path.join(self.cache, self.safe(key))
        f = open(cacheFullPath, "wb")
        try:
            f.write(value)
        finally:
            f.close()

    def delete(self, key):
        """Remove the entry for *key* if it exists."""
        cacheFullPath = os.path.join(self.cache, self.safe(key))
        if os.path.exists(cacheFullPath):
            os.remove(cacheFullPath)
class Credentials(object):
    """A pool of (domain, name, password) credentials."""

    def __init__(self):
        self.credentials = []

    def add(self, name, password, domain=""):
        """Register *name*/*password*, optionally scoped to *domain*."""
        self.credentials.append((domain.lower(), name, password))

    def clear(self):
        """Forget every stored credential."""
        self.credentials = []

    def iter(self, domain):
        """Yield (name, password) pairs valid for *domain* (or unscoped)."""
        for stored_domain, name, password in self.credentials:
            if stored_domain in ("", domain):
                yield (name, password)
class KeyCerts(Credentials):
    """Identical to Credentials except that
    name/password are mapped to key/cert."""
    pass
# Sentinel: assigning AllHosts to ProxyInfo.bypass_hosts means
# "bypass the proxy for every host" (used for no_proxy=*).
class AllHosts(object):
    pass
class ProxyInfo(object):
    """Collect information required to use a proxy."""

    # Hosts excluded from proxying; the AllHosts sentinel means "all".
    bypass_hosts = ()

    def __init__(self, proxy_type, proxy_host, proxy_port,
                 proxy_rdns=True, proxy_user=None, proxy_pass=None):
        """The parameter proxy_type must be set to one of socks.PROXY_TYPE_XXX
        constants. For example:

        p = ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP,
            proxy_host='localhost', proxy_port=8000)
        """
        self.proxy_type = proxy_type
        self.proxy_host = proxy_host
        self.proxy_port = proxy_port
        self.proxy_rdns = proxy_rdns
        self.proxy_user = proxy_user
        self.proxy_pass = proxy_pass

    def astuple(self):
        """Return the settings as (type, host, port, rdns, user, pass)."""
        return (self.proxy_type, self.proxy_host, self.proxy_port,
                self.proxy_rdns, self.proxy_user, self.proxy_pass)

    def isgood(self):
        """True when both a host and a port are configured."""
        # 'is not None' replaces the '!= None' comparisons (identity test
        # is the idiomatic check against the None singleton).
        return (self.proxy_host is not None) and (self.proxy_port is not None)

    @classmethod
    def from_environment(cls, method='http'):
        """
        Read proxy info from the environment variables.
        """
        if method not in ['http', 'https']:
            return

        env_var = method + '_proxy'
        url = os.environ.get(env_var, os.environ.get(env_var.upper()))
        if not url:
            return
        pi = cls.from_url(url, method)

        no_proxy = os.environ.get('no_proxy', os.environ.get('NO_PROXY', ''))
        bypass_hosts = []
        if no_proxy:
            bypass_hosts = no_proxy.split(',')
        # special case, no_proxy=* means all hosts bypassed
        if no_proxy == '*':
            bypass_hosts = AllHosts

        pi.bypass_hosts = bypass_hosts
        return pi

    @classmethod
    def from_url(cls, url, method='http'):
        """
        Construct a ProxyInfo from a URL (such as http_proxy env var)
        """
        url = urlparse.urlparse(url)
        username = None
        password = None
        port = None
        if '@' in url[1]:
            ident, host_port = url[1].split('@', 1)
            if ':' in ident:
                username, password = ident.split(':', 1)
            else:
                password = ident
        else:
            host_port = url[1]
        if ':' in host_port:
            host, port = host_port.split(':', 1)
        else:
            host = host_port

        if port:
            port = int(port)
        else:
            # Fall back to the scheme's default port.
            port = dict(https=443, http=80)[method]

        proxy_type = 3 # socks.PROXY_TYPE_HTTP
        return cls(
            proxy_type = proxy_type,
            proxy_host = host,
            proxy_port = port,
            proxy_user = username or None,
            proxy_pass = password or None,
        )

    def applies_to(self, hostname):
        """True when requests to *hostname* should go through the proxy."""
        return not self.bypass_host(hostname)

    def bypass_host(self, hostname):
        """Has this host been excluded from the proxy config"""
        if self.bypass_hosts is AllHosts:
            return True
        # Early return instead of the original accumulator flag.
        for domain in self.bypass_hosts:
            if hostname.endswith(domain):
                return True
        return False
# NOTE: Python 2 code (print statements, 'except socket.error, msg').
class HTTPConnectionWithTimeout(httplib.HTTPConnection):
    """
    HTTPConnection subclass that supports timeouts

    All timeouts are in seconds. If None is passed for timeout then
    Python's default timeout for sockets will be used. See for example
    the docs of socket.setdefaulttimeout():
    http://docs.python.org/library/socket.html#socket.setdefaulttimeout
    """

    def __init__(self, host, port=None, strict=None, timeout=None, proxy_info=None):
        httplib.HTTPConnection.__init__(self, host, port, strict)
        self.timeout = timeout
        self.proxy_info = proxy_info

    def connect(self):
        """Connect to the host and port specified in __init__."""
        # Mostly verbatim from httplib.py.
        if self.proxy_info and socks is None:
            raise ProxiesUnavailableError(
                'Proxy support missing but proxy use was requested!')
        msg = "getaddrinfo returns an empty list"
        if self.proxy_info and self.proxy_info.isgood():
            use_proxy = True
            proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass = self.proxy_info.astuple()
        else:
            use_proxy = False
        # With remote DNS (rdns), resolve/connect to the proxy's address
        # instead of resolving the target host locally.
        if use_proxy and proxy_rdns:
            host = proxy_host
            port = proxy_port
        else:
            host = self.host
            port = self.port

        # Try every address getaddrinfo returns until one connects.
        for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
            af, socktype, proto, canonname, sa = res
            try:
                if use_proxy:
                    self.sock = socks.socksocket(af, socktype, proto)
                    self.sock.setproxy(proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass)
                else:
                    self.sock = socket.socket(af, socktype, proto)
                    self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
                # Different from httplib: support timeouts.
                if has_timeout(self.timeout):
                    self.sock.settimeout(self.timeout)
                    # End of difference from httplib.
                if self.debuglevel > 0:
                    print "connect: (%s, %s) ************" % (self.host, self.port)
                    if use_proxy:
                        print "proxy: %s ************" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass))

                self.sock.connect((self.host, self.port) + sa[2:])
            except socket.error, msg:
                if self.debuglevel > 0:
                    print "connect fail: (%s, %s)" % (self.host, self.port)
                    if use_proxy:
                        print "proxy: %s" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass))
                # Clean up the failed socket and try the next address.
                if self.sock:
                    self.sock.close()
                self.sock = None
                continue
            break
        if not self.sock:
            raise socket.error, msg
class HTTPSConnectionWithTimeout(httplib.HTTPSConnection):
    """
    This class allows communication via SSL.
    All timeouts are in seconds. If None is passed for timeout then
    Python's default timeout for sockets will be used. See for example
    the docs of socket.setdefaulttimeout():
    http://docs.python.org/library/socket.html#socket.setdefaulttimeout
    """
    def __init__(self, host, port=None, key_file=None, cert_file=None,
                 strict=None, timeout=None, proxy_info=None,
                 ca_certs=None, disable_ssl_certificate_validation=True):
        httplib.HTTPSConnection.__init__(self, host, port=port, key_file=key_file,
                cert_file=cert_file, strict=strict)
        self.timeout = timeout
        self.proxy_info = proxy_info
        if ca_certs is None:
            ca_certs = CA_CERTS
        self.ca_certs = ca_certs
        self.disable_ssl_certificate_validation = \
                disable_ssl_certificate_validation
        # NOTE(review): this vendored copy force-disables certificate
        # validation for every connection, regardless of what the caller
        # asked for. Any caller relying on validation gets none.
        if not self.disable_ssl_certificate_validation:
            print("err")
            self.disable_ssl_certificate_validation = True
    # The following two methods were adapted from https_wrapper.py, released
    # with the Google Appengine SDK at
    # http://googleappengine.googlecode.com/svn-history/r136/trunk/python/google/appengine/tools/https_wrapper.py
    # under the following license:
    #
    # Copyright 2007 Google Inc.
    #
    # Licensed under the Apache License, Version 2.0 (the "License");
    # you may not use this file except in compliance with the License.
    # You may obtain a copy of the License at
    #
    # http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS,
    # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    # See the License for the specific language governing permissions and
    # limitations under the License.
    #
    def _GetValidHostsForCert(self, cert):
        """Returns a list of valid host globs for an SSL certificate.
        Args:
        cert: A dictionary representing an SSL certificate.
        Returns:
        list: A list of valid host globs.
        """
        # subjectAltName entries take precedence over the subject CN.
        if 'subjectAltName' in cert:
            return [x[1] for x in cert['subjectAltName']
                    if x[0].lower() == 'dns']
        else:
            return [x[0][1] for x in cert['subject']
                    if x[0][0].lower() == 'commonname']
    def _ValidateCertificateHostname(self, cert, hostname):
        """Validates that a given hostname is valid for an SSL certificate.
        Args:
        cert: A dictionary representing an SSL certificate.
        hostname: The hostname to test.
        Returns:
        bool: Whether or not the hostname is valid for this certificate.
        """
        hosts = self._GetValidHostsForCert(cert)
        for host in hosts:
            # Convert the certificate glob into a regex: '*' matches one label.
            host_re = host.replace('.', '\.').replace('*', '[^.]*')
            if re.search('^%s$' % (host_re,), hostname, re.I):
                return True
        return False
    def connect(self):
        "Connect to a host on a given (SSL) port."
        msg = "getaddrinfo returns an empty list"
        if self.proxy_info and self.proxy_info.isgood():
            use_proxy = True
            proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass = self.proxy_info.astuple()
        else:
            use_proxy = False
        # Resolve the proxy address when proxying, otherwise the target.
        if use_proxy:
            host = proxy_host
            port = proxy_port
        else:
            host = self.host
            port = self.port
        # Try each resolved address in turn until one connects.
        for family, socktype, proto, canonname, sockaddr in socket.getaddrinfo(
            host, port, 0, socket.SOCK_STREAM):
            try:
                if use_proxy:
                    sock = socks.socksocket(family, socktype, proto)
                    sock.setproxy(proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass)
                else:
                    sock = socket.socket(family, socktype, proto)
                    sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
                if has_timeout(self.timeout):
                    sock.settimeout(self.timeout)
                sock.connect((self.host, self.port))
                # TLS handshake happens here, on the (possibly proxied) socket.
                self.sock = _ssl_wrap_socket(
                    sock, self.key_file, self.cert_file,
                    self.disable_ssl_certificate_validation, self.ca_certs)
                if self.debuglevel > 0:
                    print "connect: (%s, %s)" % (self.host, self.port)
                    if use_proxy:
                        print "proxy: %s" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass))
                if not self.disable_ssl_certificate_validation:
                    cert = self.sock.getpeercert()
                    # NOTE(review): split(':', 0) performs zero splits, so
                    # hostname keeps any ':port' suffix — likely 'split(":")'
                    # was intended. Unreachable while __init__ forces
                    # disable_ssl_certificate_validation=True; confirm upstream.
                    hostname = self.host.split(':', 0)[0]
                    if not self._ValidateCertificateHostname(cert, hostname):
                        raise CertificateHostnameMismatch(
                            'Server presented certificate that does not match '
                            'host %s: %s' % (hostname, cert), hostname, cert)
            except ssl_SSLError as e:
                logging.exception("connect error:%r", e)
                if sock:
                    sock.close()
                if self.sock:
                    self.sock.close()
                self.sock = None
                # Unfortunately the ssl module doesn't seem to provide any way
                # to get at more detailed error information, in particular
                # whether the error is due to certificate validation or
                # something else (such as SSL protocol mismatch).
                if e.errno == ssl.SSL_ERROR_SSL:
                    raise SSLHandshakeError(e)
                else:
                    raise
            except (socket.timeout, socket.gaierror):
                raise
            except socket.error, msg:
                if self.debuglevel > 0:
                    print "connect fail: (%s, %s)" % (self.host, self.port)
                    if use_proxy:
                        print "proxy: %s" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass))
                if self.sock:
                    self.sock.close()
                self.sock = None
                # This address failed; try the next getaddrinfo result.
                continue
            break
        if not self.sock:
            # Every address failed; re-raise the last socket error seen.
            raise socket.error, msg
# Default mapping from URI scheme to the connection class that services it
# (replaced below when running on Google App Engine).
SCHEME_TO_CONNECTION = {
    'http': HTTPConnectionWithTimeout,
    'https': HTTPSConnectionWithTimeout
}
# Use a different connection object for Google App Engine
try:
    from google.appengine.api import apiproxy_stub_map
    if apiproxy_stub_map.apiproxy.GetStub('urlfetch') is None:
        raise ImportError  # Bail out; we're not actually running on App Engine.
    from google.appengine.api.urlfetch import fetch
    from google.appengine.api.urlfetch import InvalidURLError
    from google.appengine.api.urlfetch import DownloadError
    from google.appengine.api.urlfetch import ResponseTooLargeError
    from google.appengine.api.urlfetch import SSLCertificateError
    class ResponseDict(dict):
        """Is a dictionary that also has a read() method, so
        that it can pass itself off as an httlib.HTTPResponse()."""
        def read(self):
            # Placeholder; replaced per-request via setattr in request() below.
            pass
    class AppEngineHttpConnection(object):
        """Emulates an httplib.HTTPConnection object, but actually uses the Google
        App Engine urlfetch library. This allows the timeout to be properly used on
        Google App Engine, and avoids using httplib, which on Google App Engine is
        just another wrapper around urlfetch.
        """
        def __init__(self, host, port=None, key_file=None, cert_file=None,
                     strict=None, timeout=None, proxy_info=None, ca_certs=None,
                     disable_ssl_certificate_validation=False):
            self.host = host
            self.port = port
            self.timeout = timeout
            # urlfetch cannot do client certs or proxies, so refuse them loudly.
            if key_file or cert_file or proxy_info or ca_certs:
                raise NotSupportedOnThisPlatform()
            self.response = None
            self.scheme = 'http'
            self.validate_certificate = not disable_ssl_certificate_validation
            # There is no real socket; truthy so callers skip connect().
            self.sock = True
        def request(self, method, url, body, headers):
            # Calculate the absolute URI, which fetch requires
            netloc = self.host
            if self.port:
                netloc = '%s:%s' % (self.host, self.port)
            absolute_uri = '%s://%s%s' % (self.scheme, netloc, url)
            try:
                response = fetch(absolute_uri, payload=body, method=method,
                    headers=headers, allow_truncated=False, follow_redirects=False,
                    deadline=self.timeout,
                    validate_certificate=self.validate_certificate)
                self.response = ResponseDict(response.headers)
                self.response['status'] = str(response.status_code)
                self.response['reason'] = httplib.responses.get(response.status_code, 'Ok')
                self.response.status = response.status_code
                setattr(self.response, 'read', lambda : response.content)
            # Make sure the exceptions raised match the exceptions expected.
            except InvalidURLError:
                raise socket.gaierror('')
            except (DownloadError, ResponseTooLargeError, SSLCertificateError):
                raise httplib.HTTPException()
        def getresponse(self):
            if self.response:
                return self.response
            else:
                raise httplib.HTTPException()
        def set_debuglevel(self, level):
            # Debug output is not supported by urlfetch; intentionally a no-op.
            pass
        def connect(self):
            # urlfetch opens its own connection; nothing to do here.
            pass
        def close(self):
            # No persistent connection to close.
            pass
    class AppEngineHttpsConnection(AppEngineHttpConnection):
        """Same as AppEngineHttpConnection, but for HTTPS URIs."""
        def __init__(self, host, port=None, key_file=None, cert_file=None,
                     strict=None, timeout=None, proxy_info=None, ca_certs=None,
                     disable_ssl_certificate_validation=False):
            AppEngineHttpConnection.__init__(self, host, port, key_file, cert_file,
                strict, timeout, proxy_info, ca_certs, disable_ssl_certificate_validation)
            self.scheme = 'https'
    # Update the connection classes to use the Google App Engine specific ones.
    SCHEME_TO_CONNECTION = {
        'http': AppEngineHttpConnection,
        'https': AppEngineHttpsConnection
    }
except ImportError:
    pass
class Http(object):
    """An HTTP client that handles:
    - all methods
    - caching
    - ETags
    - compression,
    - HTTPS
    - Basic
    - Digest
    - WSSE
    and more.
    """
    def __init__(self, cache=None, timeout=None,
                 proxy_info=ProxyInfo.from_environment,
                 ca_certs=None, disable_ssl_certificate_validation=False):
        """If 'cache' is a string then it is used as a directory name for
        a disk cache. Otherwise it must be an object that supports the
        same interface as FileCache.
        All timeouts are in seconds. If None is passed for timeout
        then Python's default timeout for sockets will be used. See
        for example the docs of socket.setdefaulttimeout():
        http://docs.python.org/library/socket.html#socket.setdefaulttimeout
        `proxy_info` may be:
        - a callable that takes the http scheme ('http' or 'https') and
        returns a ProxyInfo instance per request. By default, uses
        ProxyInfo.from_environment.
        - a ProxyInfo instance (static proxy config).
        - None (proxy disabled).
        ca_certs is the path of a file containing root CA certificates for SSL
        server certificate validation. By default, a CA cert file bundled with
        httplib2 is used.
        If disable_ssl_certificate_validation is true, SSL cert validation will
        not be performed.
        """
        # The default proxy_info is the classmethod itself (a callable);
        # _get_proxy_info invokes it per request when callable.
        self.proxy_info = proxy_info
        self.ca_certs = ca_certs
        self.disable_ssl_certificate_validation = \
                disable_ssl_certificate_validation
        # Map domain name to an httplib connection
        self.connections = {}
        # The location of the cache, for now a directory
        # where cached responses are held.
        if cache and isinstance(cache, basestring):
            self.cache = FileCache(cache)
        else:
            self.cache = cache
        # Name/password
        self.credentials = Credentials()
        # Key/cert
        self.certificates = KeyCerts()
        # authorization objects
        self.authorizations = []
        # If set to False then no redirects are followed, even safe ones.
        self.follow_redirects = True
        # Which HTTP methods do we apply optimistic concurrency to, i.e.
        # which methods get an "if-match:" etag header added to them.
        self.optimistic_concurrency_methods = ["PUT", "PATCH"]
        # If 'follow_redirects' is True, and this is set to True then
        # all redirects are followed, including unsafe ones.
        self.follow_all_redirects = False
        self.ignore_etag = False
        self.force_exception_to_status_code = False
        self.timeout = timeout
        # Keep Authorization: headers on a redirect.
        self.forward_authorization_headers = False
    def _auth_from_challenge(self, host, request_uri, headers, response, content):
        """A generator that creates Authorization objects
        that can be applied to requests.
        """
        challenges = _parse_www_authenticate(response, 'www-authenticate')
        # Yield one handler per (credential, supported scheme) combination,
        # in the preference order defined by AUTH_SCHEME_ORDER.
        for cred in self.credentials.iter(host):
            for scheme in AUTH_SCHEME_ORDER:
                if challenges.has_key(scheme):
                    yield AUTH_SCHEME_CLASSES[scheme](cred, host, request_uri, headers, response, content, self)
    def add_credentials(self, name, password, domain=""):
        """Add a name and password that will be used
        any time a request requires authentication."""
        self.credentials.add(name, password, domain)
    def add_certificate(self, key, cert, domain):
        """Add a key and cert that will be used
        any time a request requires authentication."""
        self.certificates.add(key, cert, domain)
    def clear_credentials(self):
        """Remove all the names and passwords
        that are used for authentication"""
        self.credentials.clear()
        self.authorizations = []
    def _conn_request(self, conn, request_uri, method, body, headers):
        """Issue one request on an open connection, retrying up to RETRIES
        times on transient connection failures. Returns (response, content)."""
        for i in range(RETRIES):
            try:
                if conn.sock is None:
                    conn.connect()
                conn.request(method, request_uri, body, headers)
            except socket.timeout:
                raise
            except socket.gaierror:
                conn.close()
                raise ServerNotFoundError("Unable to find the server at %s" % conn.host)
            except ssl_SSLError:
                conn.close()
                raise
            except socket.error, e:
                err = 0
                if hasattr(e, 'args'):
                    err = getattr(e, 'args')[0]
                else:
                    err = e.errno
                if err == errno.ECONNREFUSED: # Connection refused
                    raise
            except httplib.HTTPException:
                # Just because the server closed the connection doesn't apparently mean
                # that the server didn't send a response.
                if conn.sock is None:
                    if i < RETRIES-1:
                        conn.close()
                        conn.connect()
                        continue
                    else:
                        conn.close()
                        raise
                if i < RETRIES-1:
                    conn.close()
                    conn.connect()
                    continue
            try:
                response = conn.getresponse()
            except (socket.error, httplib.HTTPException):
                if i < RETRIES-1:
                    conn.close()
                    conn.connect()
                    continue
                else:
                    raise
            else:
                content = ""
                if method == "HEAD":
                    # HEAD has no body; close so the connection isn't left
                    # waiting for one.
                    conn.close()
                else:
                    content = response.read()
                response = Response(response)
                if method != "HEAD":
                    content = _decompressContent(response, content)
            break
        return (response, content)
    def _request(self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey):
        """Do the actual request using the connection object
        and also follow one level of redirects if necessary"""
        # Pick the most specific in-scope authorization (smallest depth).
        auths = [(auth.depth(request_uri), auth) for auth in self.authorizations if auth.inscope(host, request_uri)]
        auth = auths and sorted(auths)[0][1] or None
        if auth:
            auth.request(method, request_uri, headers, body)
        (response, content) = self._conn_request(conn, request_uri, method, body, headers)
        if auth:
            # Digest auth may require a retry with an updated nonce.
            if auth.response(response, body):
                auth.request(method, request_uri, headers, body)
                (response, content) = self._conn_request(conn, request_uri, method, body, headers )
                response._stale_digest = 1
        if response.status == 401:
            # Try each credential/scheme handler until one is accepted.
            for authorization in self._auth_from_challenge(host, request_uri, headers, response, content):
                authorization.request(method, request_uri, headers, body)
                (response, content) = self._conn_request(conn, request_uri, method, body, headers, )
                if response.status != 401:
                    self.authorizations.append(authorization)
                    authorization.response(response, body)
                    break
        if (self.follow_all_redirects or (method in ["GET", "HEAD"]) or response.status == 303):
            if self.follow_redirects and response.status in [300, 301, 302, 303, 307]:
                # Pick out the location header and basically start from the beginning
                # remembering first to strip the ETag header and decrement our 'depth'
                if redirections:
                    if not response.has_key('location') and response.status != 300:
                        raise RedirectMissingLocation( _("Redirected but the response is missing a Location: header."), response, content)
                    # Fix-up relative redirects (which violate an RFC 2616 MUST)
                    if response.has_key('location'):
                        location = response['location']
                        (scheme, authority, path, query, fragment) = parse_uri(location)
                        if authority == None:
                            response['location'] = urlparse.urljoin(absolute_uri, location)
                    if response.status == 301 and method in ["GET", "HEAD"]:
                        # Remember permanent redirects so future requests can
                        # skip straight to the new URL from cache.
                        response['-x-permanent-redirect-url'] = response['location']
                        if not response.has_key('content-location'):
                            response['content-location'] = absolute_uri
                        _updateCache(headers, response, content, self.cache, cachekey)
                    # Do not forward validators or credentials to the new URL.
                    if headers.has_key('if-none-match'):
                        del headers['if-none-match']
                    if headers.has_key('if-modified-since'):
                        del headers['if-modified-since']
                    if 'authorization' in headers and not self.forward_authorization_headers:
                        del headers['authorization']
                    if response.has_key('location'):
                        location = response['location']
                        old_response = copy.deepcopy(response)
                        if not old_response.has_key('content-location'):
                            old_response['content-location'] = absolute_uri
                        redirect_method = method
                        if response.status in [302, 303]:
                            # 302/303 are followed with a body-less GET.
                            redirect_method = "GET"
                            body = None
                        (response, content) = self.request(location, redirect_method, body=body, headers = headers, redirections = redirections - 1)
                        response.previous = old_response
                else:
                    raise RedirectLimit("Redirected more times than rediection_limit allows.", response, content)
            elif response.status in [200, 203] and method in ["GET", "HEAD"]:
                # Don't cache 206's since we aren't going to handle byte range requests
                if not response.has_key('content-location'):
                    response['content-location'] = absolute_uri
                _updateCache(headers, response, content, self.cache, cachekey)
        return (response, content)
    def _normalize_headers(self, headers):
        """Lower-case and canonicalize the given header dict."""
        return _normalize_headers(headers)
    # Need to catch and rebrand some exceptions
    # Then need to optionally turn all exceptions into status codes
    # including all socket.* and httplib.* exceptions.
    def request(self, uri, method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS, connection_type=None):
        """ Performs a single HTTP request.
        The 'uri' is the URI of the HTTP resource and can begin
        with either 'http' or 'https'. The value of 'uri' must be an absolute URI.
        The 'method' is the HTTP method to perform, such as GET, POST, DELETE, etc.
        There is no restriction on the methods allowed.
        The 'body' is the entity body to be sent with the request. It is a string
        object.
        Any extra headers that are to be sent with the request should be provided in the
        'headers' dictionary.
        The maximum number of redirect to follow before raising an
        exception is 'redirections. The default is 5.
        The return value is a tuple of (response, content), the first
        being and instance of the 'Response' class, the second being
        a string that contains the response entity body.
        """
        try:
            if headers is None:
                headers = {}
            else:
                headers = self._normalize_headers(headers)
            if not headers.has_key('user-agent'):
                headers['user-agent'] = "Python-httplib2/%s (gzip)" % __version__
            uri = iri2uri(uri)
            (scheme, authority, request_uri, defrag_uri) = urlnorm(uri)
            # Treat http://host:443 as https://host.
            domain_port = authority.split(":")[0:2]
            if len(domain_port) == 2 and domain_port[1] == '443' and scheme == 'http':
                scheme = 'https'
                authority = domain_port[0]
            proxy_info = self._get_proxy_info(scheme, authority)
            # Reuse a cached connection for this scheme+authority if we have one.
            conn_key = scheme+":"+authority
            if conn_key in self.connections:
                conn = self.connections[conn_key]
            else:
                if not connection_type:
                    connection_type = SCHEME_TO_CONNECTION[scheme]
                certs = list(self.certificates.iter(authority))
                if scheme == 'https':
                    if certs:
                        conn = self.connections[conn_key] = connection_type(
                                authority, key_file=certs[0][0],
                                cert_file=certs[0][1], timeout=self.timeout,
                                proxy_info=proxy_info,
                                ca_certs=self.ca_certs,
                                disable_ssl_certificate_validation=
                                        self.disable_ssl_certificate_validation)
                    else:
                        conn = self.connections[conn_key] = connection_type(
                                authority, timeout=self.timeout,
                                proxy_info=proxy_info,
                                ca_certs=self.ca_certs,
                                disable_ssl_certificate_validation=
                                        self.disable_ssl_certificate_validation)
                else:
                    conn = self.connections[conn_key] = connection_type(
                            authority, timeout=self.timeout,
                            proxy_info=proxy_info)
                conn.set_debuglevel(debuglevel)
            if 'range' not in headers and 'accept-encoding' not in headers:
                headers['accept-encoding'] = 'gzip, deflate'
            info = email.Message.Message()
            cached_value = None
            if self.cache:
                cachekey = defrag_uri
                cached_value = self.cache.get(cachekey)
                if cached_value:
                    # info = email.message_from_string(cached_value)
                    #
                    # Need to replace the line above with the kludge below
                    # to fix the non-existent bug not fixed in this
                    # bug report: http://mail.python.org/pipermail/python-bugs-list/2005-September/030289.html
                    try:
                        info, content = cached_value.split('\r\n\r\n', 1)
                        feedparser = email.FeedParser.FeedParser()
                        feedparser.feed(info)
                        info = feedparser.close()
                        feedparser._parse = None
                    except (IndexError, ValueError):
                        # Corrupt cache entry; drop it and fetch fresh.
                        self.cache.delete(cachekey)
                        cachekey = None
                        cached_value = None
            else:
                cachekey = None
            if method in self.optimistic_concurrency_methods and self.cache and info.has_key('etag') and not self.ignore_etag and 'if-match' not in headers:
                # http://www.w3.org/1999/04/Editing/
                headers['if-match'] = info['etag']
            if method not in ["GET", "HEAD"] and self.cache and cachekey:
                # RFC 2616 Section 13.10
                self.cache.delete(cachekey)
            # Check the vary header in the cache to see if this request
            # matches what varies in the cache.
            if method in ['GET', 'HEAD'] and 'vary' in info:
                vary = info['vary']
                vary_headers = vary.lower().replace(' ', '').split(',')
                for header in vary_headers:
                    key = '-varied-%s' % header
                    value = info[key]
                    if headers.get(header, None) != value:
                        cached_value = None
                        break
            if cached_value and method in ["GET", "HEAD"] and self.cache and 'range' not in headers:
                if info.has_key('-x-permanent-redirect-url'):
                    # Should cached permanent redirects be counted in our redirection count? For now, yes.
                    if redirections <= 0:
                        raise RedirectLimit("Redirected more times than rediection_limit allows.", {}, "")
                    (response, new_content) = self.request(info['-x-permanent-redirect-url'], "GET", headers = headers, redirections = redirections - 1)
                    response.previous = Response(info)
                    response.previous.fromcache = True
                else:
                    # Determine our course of action:
                    # Is the cached entry fresh or stale?
                    # Has the client requested a non-cached response?
                    #
                    # There seems to be three possible answers:
                    # 1. [FRESH] Return the cache entry w/o doing a GET
                    # 2. [STALE] Do the GET (but add in cache validators if available)
                    # 3. [TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request
                    entry_disposition = _entry_disposition(info, headers)
                    if entry_disposition == "FRESH":
                        if not cached_value:
                            info['status'] = '504'
                            content = ""
                        response = Response(info)
                        if cached_value:
                            response.fromcache = True
                        return (response, content)
                    if entry_disposition == "STALE":
                        if info.has_key('etag') and not self.ignore_etag and not 'if-none-match' in headers:
                            headers['if-none-match'] = info['etag']
                        if info.has_key('last-modified') and not 'last-modified' in headers:
                            headers['if-modified-since'] = info['last-modified']
                    elif entry_disposition == "TRANSPARENT":
                        pass
                    (response, new_content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
                    if response.status == 304 and method == "GET":
                        # Rewrite the cache entry with the new end-to-end headers
                        # Take all headers that are in response
                        # and overwrite their values in info.
                        # unless they are hop-by-hop, or are listed in the connection header.
                        for key in _get_end2end_headers(response):
                            info[key] = response[key]
                        merged_response = Response(info)
                        if hasattr(response, "_stale_digest"):
                            merged_response._stale_digest = response._stale_digest
                        _updateCache(headers, merged_response, content, self.cache, cachekey)
                        response = merged_response
                        response.status = 200
                        response.fromcache = True
                    elif response.status == 200:
                        content = new_content
                    else:
                        self.cache.delete(cachekey)
                        content = new_content
            else:
                # No usable cache entry; honor only-if-cached, else go to the net.
                cc = _parse_cache_control(headers)
                if cc.has_key('only-if-cached'):
                    info['status'] = '504'
                    response = Response(info)
                    content = ""
                else:
                    (response, content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
        except Exception, e:
            if self.force_exception_to_status_code:
                # Convert the failure into a synthetic Response instead of raising.
                if isinstance(e, HttpLib2ErrorWithResponse):
                    response = e.response
                    content = e.content
                    response.status = 500
                    response.reason = str(e)
                elif isinstance(e, socket.timeout):
                    content = "Request Timeout"
                    response = Response( {
                            "content-type": "text/plain",
                            "status": "408",
                            "content-length": len(content)
                            })
                    response.reason = "Request Timeout"
                else:
                    content = str(e)
                    response = Response( {
                            "content-type": "text/plain",
                            "status": "400",
                            "content-length": len(content)
                            })
                    response.reason = "Bad Request"
            else:
                raise
        return (response, content)
    def _get_proxy_info(self, scheme, authority):
        """Return a ProxyInfo instance (or None) based on the scheme
        and authority.
        """
        hostname, port = urllib.splitport(authority)
        proxy_info = self.proxy_info
        if callable(proxy_info):
            # Per-request factory, e.g. ProxyInfo.from_environment.
            proxy_info = proxy_info(scheme)
        if (hasattr(proxy_info, 'applies_to')
            and not proxy_info.applies_to(hostname)):
            proxy_info = None
        return proxy_info
class Response(dict):
    """An object more like email.Message than httplib.HTTPResponse."""
    """Is this response from our local cache"""
    fromcache = False
    """HTTP protocol version used by server. 10 for HTTP/1.0, 11 for HTTP/1.1. """
    version = 11
    "Status code returned by server. "
    status = 200
    """Reason phrase returned by server."""
    reason = "Ok"
    # Previous Response in a redirect chain (set by Http._request), or None.
    previous = None
    def __init__(self, info):
        # info is either an email.Message or
        # an httplib.HTTPResponse object.
        if isinstance(info, httplib.HTTPResponse):
            for key, value in info.getheaders():
                self[key.lower()] = value
            self.status = info.status
            self['status'] = str(self.status)
            self.reason = info.reason
            self.version = info.version
        elif isinstance(info, email.Message.Message):
            for key, value in info.items():
                self[key.lower()] = value
            self.status = int(self['status'])
        else:
            # Plain mapping, e.g. a cached header dict.
            for key, value in info.iteritems():
                self[key.lower()] = value
            self.status = int(self.get('status', self.status))
            self.reason = self.get('reason', self.reason)
    def __getattr__(self, name):
        # Backwards compatibility: old code accessed response.dict.
        if name == 'dict':
            return self
        else:
            raise AttributeError, name
| viger/docker | proxy/proxy/code/default/gae_proxy/server/lib/httplib2/__init__.py | Python | mit | 69,728 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, print_function
"""
Sync's doctype and docfields from txt files to database
perms will get synced only if none exist
"""
import frappe
import os
from frappe.modules.import_file import import_file_by_path
from frappe.modules.patch_handler import block_user
from frappe.utils import update_progress_bar
def sync_all(force=0, verbose=False, reset_permissions=False):
	"""Sync doctype definitions from files for every installed app,
	blocking user access for the duration."""
	block_user(True)
	for app_name in frappe.get_installed_apps():
		sync_for(app_name, force, verbose=verbose, reset_permissions=reset_permissions)
	block_user(False)
	frappe.clear_cache()
def sync_for(app_name, force=0, sync_everything=False, verbose=False, reset_permissions=False):
	"""Collect and import all document definition JSON files for one app."""
	files = []
	if app_name == "frappe":
		# these need to go first at time of install, in this exact order
		for module, doctype in (("core", "docfield"), ("core", "docperm"), ("core", "has_role"), ("core", "doctype"),
			("core", "user"), ("core", "role"), ("custom", "custom_field"),
			("custom", "property_setter"), ("website", "web_form"),
			("website", "web_form_field"), ("website", "portal_menu_item")):
			files.append(os.path.join(frappe.get_app_path("frappe"), module,
				"doctype", doctype, doctype + ".json"))
	for module_name in frappe.local.app_modules.get(app_name) or []:
		folder = os.path.dirname(frappe.get_module(app_name + "." + module_name).__file__)
		get_doc_files(files, folder, force, sync_everything, verbose=verbose)
	total = len(files)
	if total:
		for index, doc_path in enumerate(files):
			import_file_by_path(doc_path, force=force, ignore_version=True,
				reset_permissions=reset_permissions, for_sync=True)
			frappe.db.commit()
			# show progress bar
			update_progress_bar("Updating DocTypes for {0}".format(app_name), index, total)
		print()
def get_doc_files(files, start_path, force=0, sync_everything = False, verbose=False):
	"""walk and sync all doctypes and pages

	Appends to `files` (in place) the path of every `<docname>/<docname>.json`
	found under the known document-type folders of `start_path`, skipping
	paths that are already in the list.

	`force`, `sync_everything` and `verbose` are accepted for backward
	compatibility but are currently unused.
	"""
	# load in sequence - warning for devs
	document_types = ['doctype', 'page', 'report', 'print_format',
		'website_theme', 'web_form', 'email_alert', 'print_style',
		'data_migration_mapping', 'data_migration_plan']
	for doctype in document_types:
		doctype_path = os.path.join(start_path, doctype)
		if not os.path.exists(doctype_path):
			continue
		for docname in os.listdir(doctype_path):
			if not os.path.isdir(os.path.join(doctype_path, docname)):
				continue
			doc_path = os.path.join(doctype_path, docname, docname) + ".json"
			# only pick up folders that actually contain their JSON definition,
			# and never add the same path twice
			if os.path.exists(doc_path) and doc_path not in files:
				files.append(doc_path)
| bohlian/frappe | frappe/model/sync.py | Python | mit | 2,605 |