| repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, may be null) |
|---|---|---|---|---|
ladybug-analysis-tools/honeybee-core | refs/heads/master | honeybee/radiance/analysispoint.py | 1 |
# """Honeybee PointGroup and TestPointGroup."""
from __future__ import division
from ..vectormath.euclid import Point3, Vector3
from ..schedule import Schedule
from collections import defaultdict, OrderedDict
try:
from itertools import izip as zip
except ImportError:
# python 3
pass
import types
import copy
import ladybug.dt as dt
class AnalysisPoint(object):
"""A radiance analysis point.
Attributes:
location: Location of analysis points as (x, y, z).
direction: Direction of analysis point as (x, y, z).
    This class is developed to enable honeybee to run daylight control
    studies with dynamic shadings without going back and forth to several files.
    Each AnalysisPoint can load annual total and direct results for every state of
    each source assigned to it. As a result one can end up with a lot of data for
    a single point (8760 * sources * states for each source). The data are stored as
    integers in separate lists for each source. There are several methods to
    set or get the data but if you're interested in more details read the comments
    under __init__ to see how the data is stored.
In this class:
- Id stands for 'the id of a blind state'. Each state has a name and an ID will
be assigned to it based on the order of loading.
    - coupledValue stands for a tuple of (total, direct) values. If one of the
      values is not available it will be set to None.
"""
__slots__ = ('_loc', '_dir', '_sources', '_values', '_is_directLoaded', 'logic')
def __init__(self, location, direction):
"""Create an analysis point."""
self.location = location
self.direction = direction
# name of sources and their state. It's only meaningful in multi-phase daylight
# analysis. In analysis for a single time it will be {None: [None]}
# It is set inside _create_data_structure method on setting values.
self._sources = OrderedDict()
# an empty list for values
# for each source there will be a new list
# inside each source list there will be a dictionary for each state
# in each dictionary the key is the hoy and the values are a list which
# is [total, direct]. If the value is not available it will be None
self._values = []
self._is_directLoaded = False
self.logic = self._logic
# TODO(mostapha): Restructure analysis points and write a class to keep track of
# results.
# Note to self! This is a hack!
# assume it's only a single source
@classmethod
def from_json(cls, ap_json):
"""Create an analysis point from json object.
{"location": [x, y, z], "direction": [x, y, z]}
"""
_cls = cls(ap_json['location'], ap_json['direction'])
if 'values' in ap_json:
sid, stateid = _cls._create_data_structure(None, None)
values = []
hoys = []
try:
state_res = ap_json['values'][0]
except IndexError:
state_res = []
for item in state_res:
for k, v in item.items():
values.append(v)
                    hoys.append(float(k) / 60.0)  # keys are minutes of the year
# set the values
_cls.set_coupled_values(values, hoys, source=None, state=None)
return _cls
@classmethod
def from_raw_values(cls, x, y, z, x1, y1, z1):
"""Create an analysis point from 6 values.
x, y, z are the location of the point and x1, y1 and z1 is the direction.
"""
return cls((x, y, z), (x1, y1, z1))
@property
def location(self):
"""Location of analysis points as Point3."""
return self._loc
@location.setter
def location(self, location):
try:
self._loc = Point3(*(float(l) for l in location))
except TypeError:
try:
# Dynamo Points!
self._loc = Point3(location.X, location.Y, location.Z)
except Exception as e:
raise TypeError(
'Failed to convert {} to location.\n'
'location should be a list or a tuple with 3 values.\n{}'
.format(location, e))
@property
def direction(self):
"""Direction of analysis points as Point3."""
return self._dir
@direction.setter
def direction(self, direction):
try:
self._dir = Vector3(*(float(d) for d in direction))
except TypeError:
try:
# Dynamo Points!
self._dir = Vector3(direction.X, direction.Y, direction.Z)
except Exception as e:
raise TypeError(
'Failed to convert {} to direction.\n'
                    'direction should be a list or a tuple with 3 values.\n{}'
.format(direction, e))
@property
def sources(self):
"""Get sorted list of light sources.
In most of the cases light sources are window groups.
"""
        srcs = [None] * len(self._sources)
for name, d in self._sources.items():
srcs[d['id']] = name
return srcs
@property
def details(self):
"""Human readable details."""
header = 'Location: {}\nDirection: {}\n#hours: {}\n#window groups: {}\n'.format(
', '.join(str(c) for c in self.location),
', '.join(str(c) for c in self.direction),
len(self.hoys), len(self._sources)
)
sep = '-' * 15
wg = '\nWindow Group {}: {}\n'
st = '....State {}: {}\n'
# sort sources based on ids
        sources = [None] * len(self._sources)
for s, d in self._sources.items():
sources[d['id']] = (s, d)
        # create the string for each window group
notes = [header, sep]
for count, s in enumerate(sources):
name, states = s
notes.append(wg.format(count, name))
for count, name in enumerate(states['state']):
notes.append(st.format(count, name))
return ''.join(notes)
@property
def has_values(self):
"""Check if this point has results values."""
return len(self._values) != 0
@property
def has_direct_values(self):
"""Check if direct values are loaded for this point.
In some cases and based on the recipe only total values are available.
"""
return self._is_directLoaded
@property
def hoys(self):
"""Return hours of the year for results if any."""
if not self.has_values:
return []
else:
return sorted(key / 60.0 for key in self._values[0][0].keys())
@property
def moys(self):
"""Return minutes of the year for results if any."""
if not self.has_values:
return []
else:
return sorted(self._values[0][0].keys())
@staticmethod
def _logic(*args, **kwargs):
"""Dynamic blinds state logic.
If the logic is not met the blind will be moved to the next state.
Overwrite this method for optional blind control.
"""
return args[0] > 3000
def source_id(self, source):
"""Get source id from source name."""
# find the id for source and state
try:
return self._sources[source]['id']
except KeyError:
raise ValueError('Invalid source input: {}'.format(source))
def blind_state_id(self, source, state):
"""Get state id if available."""
try:
return int(state)
        except (TypeError, ValueError):
            # state is None or a state name; fall through to the lookup below
pass
try:
return self._sources[source]['state'].index(state)
except ValueError:
raise ValueError('Invalid state input: {}'.format(state))
@property
def states(self):
"""Get list of states names for each source."""
return tuple(s[1]['state'] for s in self._sources.items())
@property
def longest_state_ids(self):
"""Get longest combination between blind states as blinds_state_ids."""
states = tuple(len(s[1]['state']) - 1 for s in self._sources.items())
if not states:
raise ValueError('This sensor is associated with no dynamic blinds.')
return tuple(tuple(min(s, i) for s in states)
for i in range(max(states) + 1))
def _create_data_structure(self, source, state):
"""Create place holders for sources and states if needed.
Returns:
source id and state id as a tuple.
"""
def double():
return [None, None]
current_sources = self._sources.keys()
if source not in current_sources:
self._sources[source] = {
'id': len(current_sources),
'state': []
}
# append a new list to values for the new source
self._values.append([])
# find the id for source and state
sid = self._sources[source]['id']
if state not in self._sources[source]['state']:
# add sources
self._sources[source]['state'].append(state)
# append a new dictionary for this state
self._values[sid].append(defaultdict(double))
# find the state id
stateid = self._sources[source]['state'].index(state)
return sid, stateid
def set_value(self, value, hoy, source=None, state=None, is_direct=False):
"""Set value for a specific hour of the year.
Args:
value: Value as a number.
hoy: The hour of the year that corresponds to this value.
source: Name of the source of light. Only needed in case of multiple
sources / window groups (default: None).
state: State of the source if any (default: None).
is_direct: Set to True if the value is direct contribution of sunlight.
"""
if hoy is None:
return
sid, stateid = self._create_data_structure(source, state)
if is_direct:
self._is_directLoaded = True
ind = 1 if is_direct else 0
self._values[sid][stateid][int(hoy * 60)][ind] = value
def set_values(self, values, hoys, source=None, state=None, is_direct=False):
"""Set values for several hours of the year.
Args:
values: List of values as numbers.
hoys: List of hours of the year that corresponds to input values.
source: Name of the source of light. Only needed in case of multiple
sources / window groups (default: None).
state: State of the source if any (default: None).
is_direct: Set to True if the value is direct contribution of sunlight.
"""
if not (isinstance(values, types.GeneratorType) or
isinstance(hoys, types.GeneratorType)):
assert len(values) == len(hoys), \
ValueError(
'Length of values [%d] is not equal to length of hoys [%d].'
% (len(values), len(hoys)))
sid, stateid = self._create_data_structure(source, state)
if is_direct:
self._is_directLoaded = True
ind = 1 if is_direct else 0
for hoy, value in zip(hoys, values):
if hoy is None:
continue
try:
self._values[sid][stateid][int(hoy * 60)][ind] = value
except Exception as e:
raise ValueError(
'Failed to load {} results for window_group [{}], state[{}]'
' for hour {}.\n{}'.format('direct' if is_direct else 'total',
sid, stateid, hoy, e)
)
def set_coupled_value(self, value, hoy, source=None, state=None):
"""Set both total and direct values for a specific hour of the year.
Args:
value: Value as as tuples (total, direct).
hoy: The hour of the year that corresponds to this value.
source: Name of the source of light. Only needed in case of multiple
sources / window groups (default: None).
state: State of the source if any (default: None).
"""
sid, stateid = self._create_data_structure(source, state)
if hoy is None:
return
        try:
            self._values[sid][stateid][int(hoy * 60)] = value[0], value[1]
        except TypeError:
            raise ValueError(
                "Wrong input: {}. Input values must be of length 2.".format(value)
            )
        except IndexError:
            raise ValueError(
                "Wrong input: {}. Input values must be of length 2.".format(value)
            )
else:
self._is_directLoaded = True
def set_coupled_values(self, values, hoys, source=None, state=None):
"""Set total and direct values for several hours of the year.
Args:
values: List of values as tuples (total, direct).
hoys: List of hours of the year that corresponds to input values.
source: Name of the source of light. Only needed in case of multiple
sources / window groups (default: None).
state: State of the source if any (default: None).
"""
if not (isinstance(values, types.GeneratorType) or
isinstance(hoys, types.GeneratorType)):
assert len(values) == len(hoys), \
ValueError(
'Length of values [%d] is not equal to length of hoys [%d].'
% (len(values), len(hoys)))
sid, stateid = self._create_data_structure(source, state)
for hoy, value in zip(hoys, values):
if hoy is None:
continue
            try:
                self._values[sid][stateid][int(hoy * 60)] = value[0], value[1]
            except TypeError:
                raise ValueError(
                    "Wrong input: {}. Input values must be of length 2.".format(value)
                )
            except IndexError:
                raise ValueError(
                    "Wrong input: {}. Input values must be of length 2.".format(value)
                )
self._is_directLoaded = True
def value(self, hoy, source=None, state=None):
"""Get total value for an hour of the year."""
# find the id for source and state
sid = self.source_id(source)
# find the state id
stateid = self.blind_state_id(source, state)
if int(hoy * 60) not in self._values[sid][stateid]:
raise ValueError('Hourly values are not available for {}.'
.format(dt.DateTime.from_hoy(hoy)))
return self._values[sid][stateid][int(hoy * 60)][0]
def direct_value(self, hoy, source=None, state=None):
"""Get direct value for an hour of the year."""
# find the id for source and state
sid = self.source_id(source)
# find the state id
stateid = self.blind_state_id(source, state)
if int(hoy * 60) not in self._values[sid][stateid]:
raise ValueError('Hourly values are not available for {}.'
.format(dt.DateTime.from_hoy(hoy)))
return self._values[sid][stateid][int(hoy * 60)][1]
def values(self, hoys=None, source=None, state=None):
"""Get values for several hours of the year."""
# find the id for source and state
sid = self.source_id(source)
# find the state id
stateid = self.blind_state_id(source, state)
hoys = hoys or self.hoys
for hoy in hoys:
if int(hoy * 60) not in self._values[sid][stateid]:
raise ValueError('Hourly values are not available for {}.'
.format(dt.DateTime.from_hoy(hoy)))
return tuple(self._values[sid][stateid][int(hoy * 60)][0] for hoy in hoys)
def direct_values(self, hoys=None, source=None, state=None):
"""Get direct values for several hours of the year."""
# find the id for source and state
sid = self.source_id(source)
# find the state id
stateid = self.blind_state_id(source, state)
hoys = hoys or self.hoys
for hoy in hoys:
if int(hoy * 60) not in self._values[sid][stateid]:
raise ValueError('Hourly values are not available for {}.'
.format(dt.DateTime.from_hoy(hoy)))
return tuple(self._values[sid][stateid][int(hoy * 60)][1] for hoy in hoys)
def coupled_value(self, hoy, source=None, state=None):
"""Get total and direct values for an hoy."""
# find the id for source and state
sid = self.source_id(source)
# find the state id
stateid = self.blind_state_id(source, state)
if int(hoy * 60) not in self._values[sid][stateid]:
raise ValueError('Hourly values are not available for {}.'
.format(dt.DateTime.from_hoy(hoy)))
return self._values[sid][stateid][int(hoy * 60)]
def coupled_values(self, hoys=None, source=None, state=None):
"""Get total and direct values for several hours of year."""
# find the id for source and state
sid = self.source_id(source)
# find the state id
stateid = self.blind_state_id(source, state)
hoys = hoys or self.hoys
for hoy in hoys:
if int(hoy * 60) not in self._values[sid][stateid]:
raise ValueError('Hourly values are not available for {}.'
.format(dt.DateTime.from_hoy(hoy)))
return tuple(self._values[sid][stateid][int(hoy * 60)] for hoy in hoys)
def coupled_value_by_id(self, hoy, source_id=None, state_id=None):
"""Get total and direct values for an hoy."""
# find the id for source and state
sid = source_id or 0
# find the state id
stateid = state_id or 0
if int(hoy * 60) not in self._values[sid][stateid]:
raise ValueError('Hourly values are not available for {}.'
.format(dt.DateTime.from_hoy(hoy)))
return self._values[sid][stateid][int(hoy * 60)]
def coupled_values_by_id(self, hoys=None, source_id=None, state_id=None):
"""Get total and direct values for several hours of year by source id.
Use this method to load the values if you have the ids for source and state.
Args:
hoys: A collection of hoys.
source_id: Id of source as an integer (default: 0).
state_id: Id of state as an integer (default: 0).
"""
sid = source_id or 0
stateid = state_id or 0
hoys = hoys or self.hoys
for hoy in hoys:
if int(hoy * 60) not in self._values[sid][stateid]:
raise ValueError('Hourly values are not available for {}.'
.format(dt.DateTime.from_hoy(hoy)))
return tuple(self._values[sid][stateid][int(hoy * 60)] for hoy in hoys)
def combined_value_by_id(self, hoy, blinds_state_ids=None):
"""Get combined value from all sources based on state_id.
Args:
hoy: hour of the year.
blinds_state_ids: List of state ids for all the sources for an hour. If you
want a source to be removed set the state to -1.
Returns:
total, direct values.
"""
total = 0
direct = 0 if self._is_directLoaded else None
if not blinds_state_ids:
blinds_state_ids = [0] * len(self._sources)
assert len(self._sources) == len(blinds_state_ids), \
'There should be a state for each source. #sources[{}] != #states[{}]' \
.format(len(self._sources), len(blinds_state_ids))
for sid, stateid in enumerate(blinds_state_ids):
if stateid == -1:
t = 0
d = 0
else:
if int(hoy * 60) not in self._values[sid][stateid]:
raise ValueError('Hourly values are not available for {}.'
.format(dt.DateTime.from_hoy(hoy)))
t, d = self._values[sid][stateid][int(hoy * 60)]
try:
total += t
direct += d
except TypeError:
# direct value is None
pass
return total, direct
def combined_values_by_id(self, hoys=None, blinds_state_ids=None):
"""Get combined value from all sources based on state_id.
Args:
hoys: A collection of hours of the year.
blinds_state_ids: List of state ids for all the sources for input hoys. If
you want a source to be removed set the state to -1.
Returns:
Return a generator for (total, direct) values.
"""
hoys = hoys or self.hoys
if not blinds_state_ids:
try:
hours_count = len(hoys)
except TypeError:
raise TypeError('hoys must be an iterable object: {}'.format(hoys))
blinds_state_ids = [[0] * len(self._sources)] * hours_count
assert len(hoys) == len(blinds_state_ids), \
'There should be a list of states for each hour. #states[{}] != #hours[{}]' \
.format(len(blinds_state_ids), len(hoys))
dir_value = 0 if self._is_directLoaded else None
for count, hoy in enumerate(hoys):
total = 0
direct = dir_value
for sid, stateid in enumerate(blinds_state_ids[count]):
if stateid == -1:
t = 0
d = 0
else:
if int(hoy * 60) not in self._values[sid][stateid]:
raise ValueError('Hourly values are not available for {}.'
.format(dt.DateTime.from_hoy(hoy)))
t, d = self._values[sid][stateid][int(hoy * 60)]
try:
total += t
direct += d
except TypeError:
# direct value is None
pass
yield total, direct
def sum_values_by_id(self, hoys=None, blinds_state_ids=None):
"""Get sum of value for all the hours.
This method is mostly useful for radiation and solar access analysis.
Args:
hoys: A collection of hours of the year.
blinds_state_ids: List of state ids for all the sources for input hoys. If
you want a source to be removed set the state to -1.
Returns:
Return a tuple for sum of (total, direct) values.
"""
values = tuple(self.combined_values_by_id(hoys, blinds_state_ids))
total = sum(v[0] for v in values)
try:
direct = sum(v[1] for v in values)
except TypeError as e:
if "'long' and 'NoneType'" in str(e):
# direct value is not loaded
direct = 0
else:
raise TypeError(e)
return total, direct
def max_values_by_id(self, hoys=None, blinds_state_ids=None):
"""Get maximum value for all the hours.
Args:
hoys: A collection of hours of the year.
blinds_state_ids: List of state ids for all the sources for input hoys. If
you want a source to be removed set the state to -1.
Returns:
            Return a tuple for maximum of (total, direct) values.
"""
values = tuple(self.combined_values_by_id(hoys, blinds_state_ids))
total = max(v[0] for v in values)
direct = max(v[1] for v in values)
return total, direct
def blinds_state(self, hoys=None, blinds_state_ids=None, *args, **kwargs):
"""Calculte blinds state based on a control logic.
Overwrite self.logic to overwrite the logic for this point.
Args:
hoys: List of hours of year. If None default is self.hoys.
blinds_state_ids: List of state ids for all the sources for an hour. If you
want a source to be removed set the state to -1. If not provided
a longest combination of states from sources (window groups) will
be used. Length of each item in states should be equal to number
of sources.
args: Additional inputs for self.logic. args will be passed to self.logic
kwargs: Additional inputs for self.logic. kwargs will be passed to self.logic
"""
hoys = hoys or self.hoys
if blinds_state_ids:
# recreate the states in case the inputs are the names of the states
# and not the numbers.
sources = self.sources
comb_ids = copy.deepcopy(blinds_state_ids)
# find state ids for each state if inputs are state names
try:
for c, comb in enumerate(comb_ids):
for count, source in enumerate(sources):
comb_ids[c][count] = self.blind_state_id(source, comb[count])
except IndexError:
raise ValueError(
'Length of each state should be equal to number of sources: {}'
.format(len(sources))
)
else:
comb_ids = self.longest_state_ids
print("Blinds combinations:\n{}".format(
'\n'.join(str(ids) for ids in comb_ids)))
# collect the results for each combination
        results = [None] * len(comb_ids)
for count, state in enumerate(comb_ids):
results[count] = tuple(self.combined_values_by_id(hoys, [state] * len(hoys)))
# assume the last state happens for all
hours_count = len(hoys)
blinds_index = [len(comb_ids) - 1] * hours_count
ill_values = [None] * hours_count
dir_values = [None] * hours_count
success = [0] * hours_count
for count, h in enumerate(hoys):
for state in range(len(comb_ids)):
ill, ill_dir = results[state][count]
if not self.logic(ill, ill_dir, h, args, kwargs):
blinds_index[count] = state
ill_values[count] = ill
dir_values[count] = ill_dir
if state > 0:
success[count] = 1
break
else:
success[count] = -1
ill_values[count] = ill
dir_values[count] = ill_dir
blinds_state = tuple(comb_ids[ids] for ids in blinds_index)
return blinds_state, blinds_index, ill_values, dir_values, success
def annual_metrics(self, da_threshhold=None, udi_min_max=None, blinds_state_ids=None,
occ_schedule=None):
"""Calculate annual metrics.
        Daylight autonomy, continuous daylight autonomy and useful daylight illuminance.
Args:
            da_threshhold: Threshold for daylight autonomy in lux (default: 300).
udi_min_max: A tuple of min, max value for useful daylight illuminance
(default: (100, 2000)).
blinds_state_ids: List of state ids for all the sources for input hoys. If
you want a source to be removed set the state to -1.
occ_schedule: An annual occupancy schedule (default: Office Schedule).
Returns:
            Daylight autonomy, Continuous daylight autonomy, Useful daylight illuminance,
Less than UDI, More than UDI
"""
hours = self.hoys
values = tuple(v[0] for v in self.combined_values_by_id(hours, blinds_state_ids))
return self._calculate_annual_metrics(
values, hours, da_threshhold, udi_min_max, blinds_state_ids, occ_schedule)
def useful_daylight_illuminance(self, udi_min_max=None, blinds_state_ids=None,
occ_schedule=None):
"""Calculate useful daylight illuminance.
Args:
udi_min_max: A tuple of min, max value for useful daylight illuminance
(default: (100, 2000)).
blinds_state_ids: List of state ids for all the sources for input hoys. If
you want a source to be removed set the state to -1.
occ_schedule: An annual occupancy schedule.
Returns:
Useful daylight illuminance, Less than UDI, More than UDI
"""
udi_min_max = udi_min_max or (100, 2000)
udiMin, udiMax = udi_min_max
hours = self.hoys
schedule = occ_schedule or Schedule.eight_am_to_six_pm()
udi = 0
udi_l = 0
udi_m = 0
total_hour_count = len(hours)
values = tuple(v[0] for v in self.combined_values_by_id(hours, blinds_state_ids))
for h, v in zip(hours, values):
if h not in schedule:
total_hour_count -= 1
continue
if v < udiMin:
udi_l += 1
elif v > udiMax:
udi_m += 1
else:
udi += 1
if total_hour_count == 0:
            raise ValueError('There are 0 hours available in the schedule.')
return 100 * udi / total_hour_count, 100 * udi_l / total_hour_count, \
100 * udi_m / total_hour_count
def daylight_autonomy(self, da_threshhold=None, blinds_state_ids=None,
occ_schedule=None):
"""Calculate daylight autonomy and continious daylight autonomy.
Args:
            da_threshhold: Threshold for daylight autonomy in lux (default: 300).
blinds_state_ids: List of state ids for all the sources for input hoys. If
you want a source to be removed set the state to -1.
occ_schedule: An annual occupancy schedule.
Returns:
            Daylight autonomy, Continuous daylight autonomy
"""
da_threshhold = da_threshhold or 300
hours = self.hoys
schedule = occ_schedule or Schedule.eight_am_to_six_pm()
DA = 0
cda = 0
total_hour_count = len(hours)
values = tuple(v[0] for v in self.combined_values_by_id(hours, blinds_state_ids))
for h, v in zip(hours, values):
if h not in schedule:
total_hour_count -= 1
continue
if v >= da_threshhold:
DA += 1
cda += 1
else:
cda += v / da_threshhold
if total_hour_count == 0:
            raise ValueError('There are 0 hours available in the schedule.')
return 100 * DA / total_hour_count, 100 * cda / total_hour_count
def annual_sunlight_exposure(self, threshhold=None, blinds_state_ids=None,
occ_schedule=None, target_hours=None):
"""Annual Solar Exposure (ASE).
Calculate number of hours that this point is exposed to more than 1000lux
of direct sunlight. The point meets the traget in the number of hours is
less than 250 hours per year.
Args:
threshhold: Threshhold for daylight autonomy in lux (default: 1000).
blinds_state_ids: List of state ids for all the sources for input hoys.
If you want a source to be removed set the state to -1. ase must
be calculated without dynamic blinds but you can use this option
to study the effect of different blind states.
occ_schedule: An annual occupancy schedule.
target_hours: Target minimum hours (default: 250).
Returns:
Success as a Boolean, Number of hours, Problematic hours
"""
if not self.has_direct_values:
raise ValueError(
'Direct values are not loaded. Data is not available to calculate ASE.')
hoys = self.hoys
values = tuple(v[1] for v in self.combined_values_by_id(hoys, blinds_state_ids))
return self._calculate_annual_sunlight_exposure(
values, hoys, threshhold, blinds_state_ids, occ_schedule, target_hours)
@staticmethod
def _calculate_annual_sunlight_exposure(
values, hoys, threshhold=None, blinds_state_ids=None, occ_schedule=None,
target_hours=None):
threshhold = threshhold or 1000
target_hours = target_hours or 250
schedule = occ_schedule or Schedule.eight_am_to_six_pm()
ase = 0
problematic_hours = []
for h, v in zip(hoys, values):
if h not in schedule:
continue
if v > threshhold:
ase += 1
problematic_hours.append(h)
return ase < target_hours, ase, problematic_hours
@staticmethod
def _calculate_annual_metrics(
values, hours, da_threshhold=None, udi_min_max=None, blinds_state_ids=None,
occ_schedule=None):
total_hour_count = len(hours)
        udi_min_max = udi_min_max or (100, 2000)
        udiMin, udiMax = udi_min_max
da_threshhold = da_threshhold or 300.0
schedule = occ_schedule or Schedule.eight_am_to_six_pm()
DA = 0
cda = 0
udi = 0
udi_l = 0
udi_m = 0
for h, v in zip(hours, values):
if h not in schedule:
total_hour_count -= 1
continue
if v >= da_threshhold:
DA += 1
cda += 1
else:
cda += v / da_threshhold
if v < udiMin:
udi_l += 1
elif v > udiMax:
udi_m += 1
else:
udi += 1
if total_hour_count == 0:
            raise ValueError('There are 0 hours available in the schedule.')
return 100 * DA / total_hour_count, 100 * cda / total_hour_count, \
100 * udi / total_hour_count, 100 * udi_l / total_hour_count, \
100 * udi_m / total_hour_count
@staticmethod
def _calculate_daylight_autonomy(
values, hoys, da_threshhold=None, blinds_state_ids=None, occ_schedule=None):
"""Calculate daylight autonomy and continious daylight autonomy.
Args:
            da_threshhold: Threshold for daylight autonomy in lux (default: 300).
blinds_state_ids: List of state ids for all the sources for input hoys. If
you want a source to be removed set the state to -1.
occ_schedule: An annual occupancy schedule.
Returns:
            Daylight autonomy, Continuous daylight autonomy
"""
da_threshhold = da_threshhold or 300
hours = hoys
schedule = occ_schedule or Schedule.eight_am_to_six_pm()
DA = 0
cda = 0
total_hour_count = len(hours)
for h, v in zip(hours, values):
if h not in schedule:
total_hour_count -= 1
continue
if v >= da_threshhold:
DA += 1
cda += 1
else:
cda += v / da_threshhold
if total_hour_count == 0:
            raise ValueError('There are 0 hours available in the schedule.')
return 100 * DA / total_hour_count, 100 * cda / total_hour_count
@staticmethod
def parse_blind_states(blinds_state_ids):
"""Parse input blind states.
        The method tries to convert each input state to a list of state ids. Use
        this method to parse the input from plugins.
Args:
blinds_state_ids: List of state ids for all the sources for an hour. If you
want a source to be removed set the state to -1. If not provided
a longest combination of states from sources (window groups) will
be used. Length of each item in states should be equal to number
of sources.
"""
try:
combs = [list(eval(cc)) for cc in blinds_state_ids]
except Exception as e:
            raise ValueError('Failed to convert input blind states:\n{}'.format(e))
return combs
def unload(self):
"""Unload values and sources."""
self._values = []
self._sources = OrderedDict()
def duplicate(self):
"""Duplicate the analysis point."""
ap = AnalysisPoint(self._loc, self._dir)
# This should be good enough as most of the time an analysis point will be
# copied with no values assigned.
ap._values = copy.copy(self._values)
if len(ap._values) == len(self._sources):
ap._sources = self._sources
ap._is_directLoaded = bool(self._is_directLoaded)
ap.logic = copy.copy(self.logic)
return ap
def ToString(self):
"""Overwrite .NET ToString."""
return self.__repr__()
def to_rad_string(self):
"""Return Radiance string for a test point."""
return "%s %s" % (self.location, self.direction)
def to_json(self):
"""Create an analysis point from json object.
{"location": [x, y, z], "direction": [x, y, z]}
"""
return {"location": tuple(self.location),
"direction": tuple(self.direction),
"values": self._values}
def __repr__(self):
"""Print an analysis point."""
return 'AnalysisPoint::(%s)::(%s)' % (self.location, self.direction)
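
# A minimal usage sketch (added for illustration; not part of the original
# module). It assumes the honeybee package layout imported above, and the
# location, direction and illuminance numbers are made up.
if __name__ == '__main__':
    ap = AnalysisPoint((0, 0, 0.76), (0, 0, 1))
    # store a total illuminance of 450 lux for hour 12 of the year
    ap.set_value(450, 12)
    assert ap.hoys == [12.0]
    assert ap.value(12) == 450
    # overwrite the default control logic, e.g. close the blinds above 2000 lux
    ap.logic = lambda ill, ill_dir, hoy, args, kwargs: ill > 2000
    print(ap.to_rad_string())
    print(ap.to_json())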
|
allanlei/django-multischema | refs/heads/master | multischema/management/commands/renameschema.py | 1 |
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS, connections, transaction
from optparse import make_option
from multischema import namespace
class Command(BaseCommand):
    help = 'Rename namespace'
option_list = BaseCommand.option_list + (
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to synchronize. '
'Defaults to the "default" database.'),
)
    def handle(self, old_namespace, new_namespace, **options):
        # 'old_namespace' avoids shadowing the imported namespace module
        cursor = connections[options.get('database', DEFAULT_DB_ALIAS)].cursor()
        namespace.rename(old_namespace, new_namespace, cursor=cursor)
transaction.commit_unless_managed()
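
# Example invocation (hypothetical schema names; assumes this app is installed
# and the target database is configured):
#
#   python manage.py renameschema old_schema new_schema --database=default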
|
szha/mxnet | refs/heads/master | example/recommenders/movielens_data.py | 13 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""MovieLens data handling: download, parse, and expose as DataIter
"""
import os
import mxnet as mx
from mxnet import gluon
def load_mldataset(filename):
"""Not particularly fast code to parse the text file and load it into three NDArray's
and product an NDArrayIter
"""
user = []
item = []
score = []
with open(filename) as f:
for line in f:
tks = line.strip().split('\t')
if len(tks) != 4:
continue
user.append(int(tks[0]))
item.append(int(tks[1]))
score.append(float(tks[2]))
user = mx.nd.array(user)
item = mx.nd.array(item)
score = mx.nd.array(score)
return gluon.data.ArrayDataset(user, item, score)
def ensure_local_data(prefix):
if not os.path.exists("%s.zip" % prefix):
print("Downloading MovieLens data: %s" % prefix)
# MovieLens 100k dataset from https://grouplens.org/datasets/movielens/
        # This dataset is copyrighted by the GroupLens Research Group at the University of Minnesota,
# and licensed under their usage license.
# For full text of the usage license, see http://files.grouplens.org/datasets/movielens/ml-100k-README.txt
os.system("wget http://files.grouplens.org/datasets/movielens/%s.zip" % prefix)
os.system("unzip %s.zip" % prefix)
def get_dataset(prefix='ml-100k'):
"""Returns a pair of NDArrayDataIter, one for train, one for test.
"""
ensure_local_data(prefix)
return (load_mldataset('./%s/u1.base' % prefix),
load_mldataset('./%s/u1.test' % prefix))
def max_id(fname):
mu = 0
mi = 0
for line in open(fname):
tks = line.strip().split('\t')
if len(tks) != 4:
continue
mu = max(mu, int(tks[0]))
mi = max(mi, int(tks[1]))
return mu + 1, mi + 1
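
# A minimal usage sketch (added for illustration; not part of the original
# example): batching the returned ArrayDatasets with a gluon DataLoader.
# The batch size is an arbitrary choice; note that get_dataset downloads
# the ml-100k archive on the first run.
if __name__ == '__main__':
    train, test = get_dataset('ml-100k')
    train_iter = gluon.data.DataLoader(train, batch_size=64, shuffle=True)
    for user, item, score in train_iter:
        print(user.shape, item.shape, score.shape)  # (64,) (64,) (64,)
        break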
|
alphafoobar/intellij-community | refs/heads/master | python/testData/refactoring/changeSignature/nonDefaultAfterDefault.py | 73 |
def foo(b):
pass
|
GFZ-Centre-for-Early-Warning/caravan | refs/heads/master | qmlreceiver.py | 1 |
#!/usr/bin/env python3
import sys
import zlib
from optparse import OptionParser
from simpleclient import HMB
VERSION = "0.1 (2015.288)"
RETRY_WAIT = 10
def handleEvent(gdacs, data):
print("{dateTime} M={magnitude} {location} -> {eventID}.xml".format(**gdacs))
with open(gdacs['eventID'] + '.xml', 'wb') as f:
f.write(zlib.decompress(data))
def worker(source):
while True:
for obj in source.recv():
try:
if obj['type'] == 'QUAKEML':
handleEvent(obj['gdacs'], obj['data'])
elif obj['type'] == 'EOF':
print("Waiting for next events in real time")
except (KeyError, TypeError) as e:
print("invalid data received: " + str(e))
def main():
parser = OptionParser(usage="usage: %prog [options]", version="%prog v" + VERSION)
parser.set_defaults(timeout = 120, backfill = 10)
parser.add_option("-u", "--user", type="string", dest="user",
help="Source HMB username")
parser.add_option("-p", "--password", type="string", dest="password",
help="Source HMB password")
parser.add_option("-s", "--source", type="string", dest="source",
help="Source HMB URL")
parser.add_option("-t", "--timeout", type="int", dest="timeout",
help="Timeout in seconds (default %default)")
parser.add_option("-b", "--backfill", type="int", dest="backfill",
help="Number of messages to backfill (default %default)")
(opt, args) = parser.parse_args()
if args:
parser.error("incorrect number of arguments")
if opt.source is None:
parser.error("missing source HMB")
param = {
'heartbeat': opt.timeout//2,
'queue': {
'QUAKEML': {
'seq': -opt.backfill-1
}
}
}
auth = (opt.user, opt.password) if opt.user and opt.password else None
source = HMB(opt.source, param, retry_wait=RETRY_WAIT,
timeout=opt.timeout, auth=auth, verify=False)
print("Retrieving past {} events".format(opt.backfill))
worker(source)
if __name__ == "__main__":
main()
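
# Example invocation (hypothetical HMB endpoint and credentials), backfilling
# the last 20 QuakeML messages before waiting for real-time events:
#
#   ./qmlreceiver.py -s http://example.org/hmb/quakeml -u user -p secret -b 20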
|
kushalbhola/MyStuff | refs/heads/master | venv/Lib/site-packages/pip/_vendor/chardet/big5freq.py | 342 |
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Big5 frequency table
# by Taiwan's Mandarin Promotion Council
# <http://www.edu.tw:81/mandr/>
#
# 128 --> 0.42261
# 256 --> 0.57851
# 512 --> 0.74851
# 1024 --> 0.89384
# 2048 --> 0.97583
#
# Ideal Distribution Ratio = 0.74851/(1-0.74851) = 2.98
# Random Distribution Ratio = 512/(5401-512) = 0.105
#
# Typical Distribution Ratio is about 25% of the ideal one, still much higher than RDR
BIG5_TYPICAL_DISTRIBUTION_RATIO = 0.75
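
# A quick sanity check of the ratios above (added for illustration; 5401 is
# presumably the total number of characters ranked by the table authors):
#   0.74851 / (1 - 0.74851) = 0.74851 / 0.25149 ~ 2.98   (ideal ratio)
#   512.0 / (5401 - 512)    = 512.0 / 4889.0    ~ 0.105  (random ratio)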
# Char to FreqOrder table
BIG5_TABLE_SIZE = 5376
BIG5_CHAR_TO_FREQ_ORDER = (
1,1801,1506, 255,1431, 198, 9, 82, 6,5008, 177, 202,3681,1256,2821, 110, # 16
3814, 33,3274, 261, 76, 44,2114, 16,2946,2187,1176, 659,3971, 26,3451,2653, # 32
1198,3972,3350,4202, 410,2215, 302, 590, 361,1964, 8, 204, 58,4510,5009,1932, # 48
63,5010,5011, 317,1614, 75, 222, 159,4203,2417,1480,5012,3555,3091, 224,2822, # 64
3682, 3, 10,3973,1471, 29,2787,1135,2866,1940, 873, 130,3275,1123, 312,5013, # 80
4511,2052, 507, 252, 682,5014, 142,1915, 124, 206,2947, 34,3556,3204, 64, 604, # 96
5015,2501,1977,1978, 155,1991, 645, 641,1606,5016,3452, 337, 72, 406,5017, 80, # 112
630, 238,3205,1509, 263, 939,1092,2654, 756,1440,1094,3453, 449, 69,2987, 591, # 128
179,2096, 471, 115,2035,1844, 60, 50,2988, 134, 806,1869, 734,2036,3454, 180, # 144
995,1607, 156, 537,2907, 688,5018, 319,1305, 779,2145, 514,2379, 298,4512, 359, # 160
2502, 90,2716,1338, 663, 11, 906,1099,2553, 20,2441, 182, 532,1716,5019, 732, # 176
1376,4204,1311,1420,3206, 25,2317,1056, 113, 399, 382,1950, 242,3455,2474, 529, # 192
3276, 475,1447,3683,5020, 117, 21, 656, 810,1297,2300,2334,3557,5021, 126,4205, # 208
706, 456, 150, 613,4513, 71,1118,2037,4206, 145,3092, 85, 835, 486,2115,1246, # 224
1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,5022,2128,2359, 347,3815, 221, # 240
3558,3135,5023,1956,1153,4207, 83, 296,1199,3093, 192, 624, 93,5024, 822,1898, # 256
2823,3136, 795,2065, 991,1554,1542,1592, 27, 43,2867, 859, 139,1456, 860,4514, # 272
437, 712,3974, 164,2397,3137, 695, 211,3037,2097, 195,3975,1608,3559,3560,3684, # 288
3976, 234, 811,2989,2098,3977,2233,1441,3561,1615,2380, 668,2077,1638, 305, 228, # 304
1664,4515, 467, 415,5025, 262,2099,1593, 239, 108, 300, 200,1033, 512,1247,2078, # 320
5026,5027,2176,3207,3685,2682, 593, 845,1062,3277, 88,1723,2038,3978,1951, 212, # 336
266, 152, 149, 468,1899,4208,4516, 77, 187,5028,3038, 37, 5,2990,5029,3979, # 352
5030,5031, 39,2524,4517,2908,3208,2079, 55, 148, 74,4518, 545, 483,1474,1029, # 368
1665, 217,1870,1531,3138,1104,2655,4209, 24, 172,3562, 900,3980,3563,3564,4519, # 384
32,1408,2824,1312, 329, 487,2360,2251,2717, 784,2683, 4,3039,3351,1427,1789, # 400
188, 109, 499,5032,3686,1717,1790, 888,1217,3040,4520,5033,3565,5034,3352,1520, # 416
3687,3981, 196,1034, 775,5035,5036, 929,1816, 249, 439, 38,5037,1063,5038, 794, # 432
3982,1435,2301, 46, 178,3278,2066,5039,2381,5040, 214,1709,4521, 804, 35, 707, # 448
324,3688,1601,2554, 140, 459,4210,5041,5042,1365, 839, 272, 978,2262,2580,3456, # 464
2129,1363,3689,1423, 697, 100,3094, 48, 70,1231, 495,3139,2196,5043,1294,5044, # 480
2080, 462, 586,1042,3279, 853, 256, 988, 185,2382,3457,1698, 434,1084,5045,3458, # 496
314,2625,2788,4522,2335,2336, 569,2285, 637,1817,2525, 757,1162,1879,1616,3459, # 512
287,1577,2116, 768,4523,1671,2868,3566,2526,1321,3816, 909,2418,5046,4211, 933, # 528
3817,4212,2053,2361,1222,4524, 765,2419,1322, 786,4525,5047,1920,1462,1677,2909, # 544
1699,5048,4526,1424,2442,3140,3690,2600,3353,1775,1941,3460,3983,4213, 309,1369, # 560
1130,2825, 364,2234,1653,1299,3984,3567,3985,3986,2656, 525,1085,3041, 902,2001, # 576
1475, 964,4527, 421,1845,1415,1057,2286, 940,1364,3141, 376,4528,4529,1381, 7, # 592
2527, 983,2383, 336,1710,2684,1846, 321,3461, 559,1131,3042,2752,1809,1132,1313, # 608
265,1481,1858,5049, 352,1203,2826,3280, 167,1089, 420,2827, 776, 792,1724,3568, # 624
4214,2443,3281,5050,4215,5051, 446, 229, 333,2753, 901,3818,1200,1557,4530,2657, # 640
1921, 395,2754,2685,3819,4216,1836, 125, 916,3209,2626,4531,5052,5053,3820,5054, # 656
5055,5056,4532,3142,3691,1133,2555,1757,3462,1510,2318,1409,3569,5057,2146, 438, # 672
2601,2910,2384,3354,1068, 958,3043, 461, 311,2869,2686,4217,1916,3210,4218,1979, # 688
383, 750,2755,2627,4219, 274, 539, 385,1278,1442,5058,1154,1965, 384, 561, 210, # 704
98,1295,2556,3570,5059,1711,2420,1482,3463,3987,2911,1257, 129,5060,3821, 642, # 720
523,2789,2790,2658,5061, 141,2235,1333, 68, 176, 441, 876, 907,4220, 603,2602, # 736
710, 171,3464, 404, 549, 18,3143,2398,1410,3692,1666,5062,3571,4533,2912,4534, # 752
5063,2991, 368,5064, 146, 366, 99, 871,3693,1543, 748, 807,1586,1185, 22,2263, # 768
379,3822,3211,5065,3212, 505,1942,2628,1992,1382,2319,5066, 380,2362, 218, 702, # 784
1818,1248,3465,3044,3572,3355,3282,5067,2992,3694, 930,3283,3823,5068, 59,5069, # 800
585, 601,4221, 497,3466,1112,1314,4535,1802,5070,1223,1472,2177,5071, 749,1837, # 816
690,1900,3824,1773,3988,1476, 429,1043,1791,2236,2117, 917,4222, 447,1086,1629, # 832
5072, 556,5073,5074,2021,1654, 844,1090, 105, 550, 966,1758,2828,1008,1783, 686, # 848
1095,5075,2287, 793,1602,5076,3573,2603,4536,4223,2948,2302,4537,3825, 980,2503, # 864
544, 353, 527,4538, 908,2687,2913,5077, 381,2629,1943,1348,5078,1341,1252, 560, # 880
3095,5079,3467,2870,5080,2054, 973, 886,2081, 143,4539,5081,5082, 157,3989, 496, # 896
4224, 57, 840, 540,2039,4540,4541,3468,2118,1445, 970,2264,1748,1966,2082,4225, # 912
3144,1234,1776,3284,2829,3695, 773,1206,2130,1066,2040,1326,3990,1738,1725,4226, # 928
279,3145, 51,1544,2604, 423,1578,2131,2067, 173,4542,1880,5083,5084,1583, 264, # 944
610,3696,4543,2444, 280, 154,5085,5086,5087,1739, 338,1282,3096, 693,2871,1411, # 960
1074,3826,2445,5088,4544,5089,5090,1240, 952,2399,5091,2914,1538,2688, 685,1483, # 976
4227,2475,1436, 953,4228,2055,4545, 671,2400, 79,4229,2446,3285, 608, 567,2689, # 992
3469,4230,4231,1691, 393,1261,1792,2401,5092,4546,5093,5094,5095,5096,1383,1672, # 1008
3827,3213,1464, 522,1119, 661,1150, 216, 675,4547,3991,1432,3574, 609,4548,2690, # 1024
2402,5097,5098,5099,4232,3045, 0,5100,2476, 315, 231,2447, 301,3356,4549,2385, # 1040
5101, 233,4233,3697,1819,4550,4551,5102, 96,1777,1315,2083,5103, 257,5104,1810, # 1056
3698,2718,1139,1820,4234,2022,1124,2164,2791,1778,2659,5105,3097, 363,1655,3214, # 1072
5106,2993,5107,5108,5109,3992,1567,3993, 718, 103,3215, 849,1443, 341,3357,2949, # 1088
1484,5110,1712, 127, 67, 339,4235,2403, 679,1412, 821,5111,5112, 834, 738, 351, # 1104
2994,2147, 846, 235,1497,1881, 418,1993,3828,2719, 186,1100,2148,2756,3575,1545, # 1120
1355,2950,2872,1377, 583,3994,4236,2581,2995,5113,1298,3699,1078,2557,3700,2363, # 1136
78,3829,3830, 267,1289,2100,2002,1594,4237, 348, 369,1274,2197,2178,1838,4552, # 1152
1821,2830,3701,2757,2288,2003,4553,2951,2758, 144,3358, 882,4554,3995,2759,3470, # 1168
4555,2915,5114,4238,1726, 320,5115,3996,3046, 788,2996,5116,2831,1774,1327,2873, # 1184
3997,2832,5117,1306,4556,2004,1700,3831,3576,2364,2660, 787,2023, 506, 824,3702, # 1200
534, 323,4557,1044,3359,2024,1901, 946,3471,5118,1779,1500,1678,5119,1882,4558, # 1216
165, 243,4559,3703,2528, 123, 683,4239, 764,4560, 36,3998,1793, 589,2916, 816, # 1232
626,1667,3047,2237,1639,1555,1622,3832,3999,5120,4000,2874,1370,1228,1933, 891, # 1248
2084,2917, 304,4240,5121, 292,2997,2720,3577, 691,2101,4241,1115,4561, 118, 662, # 1264
5122, 611,1156, 854,2386,1316,2875, 2, 386, 515,2918,5123,5124,3286, 868,2238, # 1280
1486, 855,2661, 785,2216,3048,5125,1040,3216,3578,5126,3146, 448,5127,1525,5128, # 1296
2165,4562,5129,3833,5130,4242,2833,3579,3147, 503, 818,4001,3148,1568, 814, 676, # 1312
1444, 306,1749,5131,3834,1416,1030, 197,1428, 805,2834,1501,4563,5132,5133,5134, # 1328
1994,5135,4564,5136,5137,2198, 13,2792,3704,2998,3149,1229,1917,5138,3835,2132, # 1344
5139,4243,4565,2404,3580,5140,2217,1511,1727,1120,5141,5142, 646,3836,2448, 307, # 1360
5143,5144,1595,3217,5145,5146,5147,3705,1113,1356,4002,1465,2529,2530,5148, 519, # 1376
5149, 128,2133, 92,2289,1980,5150,4003,1512, 342,3150,2199,5151,2793,2218,1981, # 1392
3360,4244, 290,1656,1317, 789, 827,2365,5152,3837,4566, 562, 581,4004,5153, 401, # 1408
4567,2252, 94,4568,5154,1399,2794,5155,1463,2025,4569,3218,1944,5156, 828,1105, # 1424
4245,1262,1394,5157,4246, 605,4570,5158,1784,2876,5159,2835, 819,2102, 578,2200, # 1440
2952,5160,1502, 436,3287,4247,3288,2836,4005,2919,3472,3473,5161,2721,2320,5162, # 1456
5163,2337,2068, 23,4571, 193, 826,3838,2103, 699,1630,4248,3098, 390,1794,1064, # 1472
3581,5164,1579,3099,3100,1400,5165,4249,1839,1640,2877,5166,4572,4573, 137,4250, # 1488
598,3101,1967, 780, 104, 974,2953,5167, 278, 899, 253, 402, 572, 504, 493,1339, # 1504
5168,4006,1275,4574,2582,2558,5169,3706,3049,3102,2253, 565,1334,2722, 863, 41, # 1520
5170,5171,4575,5172,1657,2338, 19, 463,2760,4251, 606,5173,2999,3289,1087,2085, # 1536
1323,2662,3000,5174,1631,1623,1750,4252,2691,5175,2878, 791,2723,2663,2339, 232, # 1552
2421,5176,3001,1498,5177,2664,2630, 755,1366,3707,3290,3151,2026,1609, 119,1918, # 1568
3474, 862,1026,4253,5178,4007,3839,4576,4008,4577,2265,1952,2477,5179,1125, 817, # 1584
4254,4255,4009,1513,1766,2041,1487,4256,3050,3291,2837,3840,3152,5180,5181,1507, # 1600
5182,2692, 733, 40,1632,1106,2879, 345,4257, 841,2531, 230,4578,3002,1847,3292, # 1616
3475,5183,1263, 986,3476,5184, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562, # 1632
4010,4011,2954, 967,2761,2665,1349, 592,2134,1692,3361,3003,1995,4258,1679,4012, # 1648
1902,2188,5185, 739,3708,2724,1296,1290,5186,4259,2201,2202,1922,1563,2605,2559, # 1664
1871,2762,3004,5187, 435,5188, 343,1108, 596, 17,1751,4579,2239,3477,3709,5189, # 1680
4580, 294,3582,2955,1693, 477, 979, 281,2042,3583, 643,2043,3710,2631,2795,2266, # 1696
1031,2340,2135,2303,3584,4581, 367,1249,2560,5190,3585,5191,4582,1283,3362,2005, # 1712
240,1762,3363,4583,4584, 836,1069,3153, 474,5192,2149,2532, 268,3586,5193,3219, # 1728
1521,1284,5194,1658,1546,4260,5195,3587,3588,5196,4261,3364,2693,1685,4262, 961, # 1744
1673,2632, 190,2006,2203,3841,4585,4586,5197, 570,2504,3711,1490,5198,4587,2633, # 1760
3293,1957,4588, 584,1514, 396,1045,1945,5199,4589,1968,2449,5200,5201,4590,4013, # 1776
619,5202,3154,3294, 215,2007,2796,2561,3220,4591,3221,4592, 763,4263,3842,4593, # 1792
5203,5204,1958,1767,2956,3365,3712,1174, 452,1477,4594,3366,3155,5205,2838,1253, # 1808
2387,2189,1091,2290,4264, 492,5206, 638,1169,1825,2136,1752,4014, 648, 926,1021, # 1824
1324,4595, 520,4596, 997, 847,1007, 892,4597,3843,2267,1872,3713,2405,1785,4598, # 1840
1953,2957,3103,3222,1728,4265,2044,3714,4599,2008,1701,3156,1551, 30,2268,4266, # 1856
5207,2027,4600,3589,5208, 501,5209,4267, 594,3478,2166,1822,3590,3479,3591,3223, # 1872
829,2839,4268,5210,1680,3157,1225,4269,5211,3295,4601,4270,3158,2341,5212,4602, # 1888
4271,5213,4015,4016,5214,1848,2388,2606,3367,5215,4603, 374,4017, 652,4272,4273, # 1904
375,1140, 798,5216,5217,5218,2366,4604,2269, 546,1659, 138,3051,2450,4605,5219, # 1920
2254, 612,1849, 910, 796,3844,1740,1371, 825,3845,3846,5220,2920,2562,5221, 692, # 1936
444,3052,2634, 801,4606,4274,5222,1491, 244,1053,3053,4275,4276, 340,5223,4018, # 1952
1041,3005, 293,1168, 87,1357,5224,1539, 959,5225,2240, 721, 694,4277,3847, 219, # 1968
1478, 644,1417,3368,2666,1413,1401,1335,1389,4019,5226,5227,3006,2367,3159,1826, # 1984
730,1515, 184,2840, 66,4607,5228,1660,2958, 246,3369, 378,1457, 226,3480, 975, # 2000
4020,2959,1264,3592, 674, 696,5229, 163,5230,1141,2422,2167, 713,3593,3370,4608, # 2016
4021,5231,5232,1186, 15,5233,1079,1070,5234,1522,3224,3594, 276,1050,2725, 758, # 2032
1126, 653,2960,3296,5235,2342, 889,3595,4022,3104,3007, 903,1250,4609,4023,3481, # 2048
3596,1342,1681,1718, 766,3297, 286, 89,2961,3715,5236,1713,5237,2607,3371,3008, # 2064
5238,2962,2219,3225,2880,5239,4610,2505,2533, 181, 387,1075,4024, 731,2190,3372, # 2080
5240,3298, 310, 313,3482,2304, 770,4278, 54,3054, 189,4611,3105,3848,4025,5241, # 2096
1230,1617,1850, 355,3597,4279,4612,3373, 111,4280,3716,1350,3160,3483,3055,4281, # 2112
2150,3299,3598,5242,2797,4026,4027,3009, 722,2009,5243,1071, 247,1207,2343,2478, # 2128
1378,4613,2010, 864,1437,1214,4614, 373,3849,1142,2220, 667,4615, 442,2763,2563, # 2144
3850,4028,1969,4282,3300,1840, 837, 170,1107, 934,1336,1883,5244,5245,2119,4283, # 2160
2841, 743,1569,5246,4616,4284, 582,2389,1418,3484,5247,1803,5248, 357,1395,1729, # 2176
3717,3301,2423,1564,2241,5249,3106,3851,1633,4617,1114,2086,4285,1532,5250, 482, # 2192
2451,4618,5251,5252,1492, 833,1466,5253,2726,3599,1641,2842,5254,1526,1272,3718, # 2208
4286,1686,1795, 416,2564,1903,1954,1804,5255,3852,2798,3853,1159,2321,5256,2881, # 2224
4619,1610,1584,3056,2424,2764, 443,3302,1163,3161,5257,5258,4029,5259,4287,2506, # 2240
3057,4620,4030,3162,2104,1647,3600,2011,1873,4288,5260,4289, 431,3485,5261, 250, # 2256
97, 81,4290,5262,1648,1851,1558, 160, 848,5263, 866, 740,1694,5264,2204,2843, # 2272
3226,4291,4621,3719,1687, 950,2479, 426, 469,3227,3720,3721,4031,5265,5266,1188, # 2288
424,1996, 861,3601,4292,3854,2205,2694, 168,1235,3602,4293,5267,2087,1674,4622, # 2304
3374,3303, 220,2565,1009,5268,3855, 670,3010, 332,1208, 717,5269,5270,3603,2452, # 2320
4032,3375,5271, 513,5272,1209,2882,3376,3163,4623,1080,5273,5274,5275,5276,2534, # 2336
3722,3604, 815,1587,4033,4034,5277,3605,3486,3856,1254,4624,1328,3058,1390,4035, # 2352
1741,4036,3857,4037,5278, 236,3858,2453,3304,5279,5280,3723,3859,1273,3860,4625, # 2368
5281, 308,5282,4626, 245,4627,1852,2480,1307,2583, 430, 715,2137,2454,5283, 270, # 2384
199,2883,4038,5284,3606,2727,1753, 761,1754, 725,1661,1841,4628,3487,3724,5285, # 2400
5286, 587, 14,3305, 227,2608, 326, 480,2270, 943,2765,3607, 291, 650,1884,5287, # 2416
1702,1226, 102,1547, 62,3488, 904,4629,3489,1164,4294,5288,5289,1224,1548,2766, # 2432
391, 498,1493,5290,1386,1419,5291,2056,1177,4630, 813, 880,1081,2368, 566,1145, # 2448
4631,2291,1001,1035,2566,2609,2242, 394,1286,5292,5293,2069,5294, 86,1494,1730, # 2464
4039, 491,1588, 745, 897,2963, 843,3377,4040,2767,2884,3306,1768, 998,2221,2070, # 2480
397,1827,1195,1970,3725,3011,3378, 284,5295,3861,2507,2138,2120,1904,5296,4041, # 2496
2151,4042,4295,1036,3490,1905, 114,2567,4296, 209,1527,5297,5298,2964,2844,2635, # 2512
2390,2728,3164, 812,2568,5299,3307,5300,1559, 737,1885,3726,1210, 885, 28,2695, # 2528
3608,3862,5301,4297,1004,1780,4632,5302, 346,1982,2222,2696,4633,3863,1742, 797, # 2544
1642,4043,1934,1072,1384,2152, 896,4044,3308,3727,3228,2885,3609,5303,2569,1959, # 2560
4634,2455,1786,5304,5305,5306,4045,4298,1005,1308,3728,4299,2729,4635,4636,1528, # 2576
2610, 161,1178,4300,1983, 987,4637,1101,4301, 631,4046,1157,3229,2425,1343,1241, # 2592
1016,2243,2570, 372, 877,2344,2508,1160, 555,1935, 911,4047,5307, 466,1170, 169, # 2608
1051,2921,2697,3729,2481,3012,1182,2012,2571,1251,2636,5308, 992,2345,3491,1540, # 2624
2730,1201,2071,2406,1997,2482,5309,4638, 528,1923,2191,1503,1874,1570,2369,3379, # 2640
3309,5310, 557,1073,5311,1828,3492,2088,2271,3165,3059,3107, 767,3108,2799,4639, # 2656
1006,4302,4640,2346,1267,2179,3730,3230, 778,4048,3231,2731,1597,2667,5312,4641, # 2672
5313,3493,5314,5315,5316,3310,2698,1433,3311, 131, 95,1504,4049, 723,4303,3166, # 2688
1842,3610,2768,2192,4050,2028,2105,3731,5317,3013,4051,1218,5318,3380,3232,4052, # 2704
4304,2584, 248,1634,3864, 912,5319,2845,3732,3060,3865, 654, 53,5320,3014,5321, # 2720
1688,4642, 777,3494,1032,4053,1425,5322, 191, 820,2121,2846, 971,4643, 931,3233, # 2736
135, 664, 783,3866,1998, 772,2922,1936,4054,3867,4644,2923,3234, 282,2732, 640, # 2752
1372,3495,1127, 922, 325,3381,5323,5324, 711,2045,5325,5326,4055,2223,2800,1937, # 2768
4056,3382,2224,2255,3868,2305,5327,4645,3869,1258,3312,4057,3235,2139,2965,4058, # 2784
4059,5328,2225, 258,3236,4646, 101,1227,5329,3313,1755,5330,1391,3314,5331,2924, # 2800
2057, 893,5332,5333,5334,1402,4305,2347,5335,5336,3237,3611,5337,5338, 878,1325, # 2816
1781,2801,4647, 259,1385,2585, 744,1183,2272,4648,5339,4060,2509,5340, 684,1024, # 2832
4306,5341, 472,3612,3496,1165,3315,4061,4062, 322,2153, 881, 455,1695,1152,1340, # 2848
660, 554,2154,4649,1058,4650,4307, 830,1065,3383,4063,4651,1924,5342,1703,1919, # 2864
5343, 932,2273, 122,5344,4652, 947, 677,5345,3870,2637, 297,1906,1925,2274,4653, # 2880
2322,3316,5346,5347,4308,5348,4309, 84,4310, 112, 989,5349, 547,1059,4064, 701, # 2896
3613,1019,5350,4311,5351,3497, 942, 639, 457,2306,2456, 993,2966, 407, 851, 494, # 2912
4654,3384, 927,5352,1237,5353,2426,3385, 573,4312, 680, 921,2925,1279,1875, 285, # 2928
790,1448,1984, 719,2168,5354,5355,4655,4065,4066,1649,5356,1541, 563,5357,1077, # 2944
5358,3386,3061,3498, 511,3015,4067,4068,3733,4069,1268,2572,3387,3238,4656,4657, # 2960
5359, 535,1048,1276,1189,2926,2029,3167,1438,1373,2847,2967,1134,2013,5360,4313, # 2976
1238,2586,3109,1259,5361, 700,5362,2968,3168,3734,4314,5363,4315,1146,1876,1907, # 2992
4658,2611,4070, 781,2427, 132,1589, 203, 147, 273,2802,2407, 898,1787,2155,4071, # 3008
4072,5364,3871,2803,5365,5366,4659,4660,5367,3239,5368,1635,3872, 965,5369,1805, # 3024
2699,1516,3614,1121,1082,1329,3317,4073,1449,3873, 65,1128,2848,2927,2769,1590, # 3040
3874,5370,5371, 12,2668, 45, 976,2587,3169,4661, 517,2535,1013,1037,3240,5372, # 3056
3875,2849,5373,3876,5374,3499,5375,2612, 614,1999,2323,3877,3110,2733,2638,5376, # 3072
2588,4316, 599,1269,5377,1811,3735,5378,2700,3111, 759,1060, 489,1806,3388,3318, # 3088
1358,5379,5380,2391,1387,1215,2639,2256, 490,5381,5382,4317,1759,2392,2348,5383, # 3104
4662,3878,1908,4074,2640,1807,3241,4663,3500,3319,2770,2349, 874,5384,5385,3501, # 3120
3736,1859, 91,2928,3737,3062,3879,4664,5386,3170,4075,2669,5387,3502,1202,1403, # 3136
3880,2969,2536,1517,2510,4665,3503,2511,5388,4666,5389,2701,1886,1495,1731,4076, # 3152
2370,4667,5390,2030,5391,5392,4077,2702,1216, 237,2589,4318,2324,4078,3881,4668, # 3168
4669,2703,3615,3504, 445,4670,5393,5394,5395,5396,2771, 61,4079,3738,1823,4080, # 3184
5397, 687,2046, 935, 925, 405,2670, 703,1096,1860,2734,4671,4081,1877,1367,2704, # 3200
3389, 918,2106,1782,2483, 334,3320,1611,1093,4672, 564,3171,3505,3739,3390, 945, # 3216
2641,2058,4673,5398,1926, 872,4319,5399,3506,2705,3112, 349,4320,3740,4082,4674, # 3232
3882,4321,3741,2156,4083,4675,4676,4322,4677,2408,2047, 782,4084, 400, 251,4323, # 3248
1624,5400,5401, 277,3742, 299,1265, 476,1191,3883,2122,4324,4325,1109, 205,5402, # 3264
2590,1000,2157,3616,1861,5403,5404,5405,4678,5406,4679,2573, 107,2484,2158,4085, # 3280
3507,3172,5407,1533, 541,1301, 158, 753,4326,2886,3617,5408,1696, 370,1088,4327, # 3296
4680,3618, 579, 327, 440, 162,2244, 269,1938,1374,3508, 968,3063, 56,1396,3113, # 3312
2107,3321,3391,5409,1927,2159,4681,3016,5410,3619,5411,5412,3743,4682,2485,5413, # 3328
2804,5414,1650,4683,5415,2613,5416,5417,4086,2671,3392,1149,3393,4087,3884,4088, # 3344
5418,1076, 49,5419, 951,3242,3322,3323, 450,2850, 920,5420,1812,2805,2371,4328, # 3360
1909,1138,2372,3885,3509,5421,3243,4684,1910,1147,1518,2428,4685,3886,5422,4686, # 3376
2393,2614, 260,1796,3244,5423,5424,3887,3324, 708,5425,3620,1704,5426,3621,1351, # 3392
1618,3394,3017,1887, 944,4329,3395,4330,3064,3396,4331,5427,3744, 422, 413,1714, # 3408
3325, 500,2059,2350,4332,2486,5428,1344,1911, 954,5429,1668,5430,5431,4089,2409, # 3424
4333,3622,3888,4334,5432,2307,1318,2512,3114, 133,3115,2887,4687, 629, 31,2851, # 3440
2706,3889,4688, 850, 949,4689,4090,2970,1732,2089,4335,1496,1853,5433,4091, 620, # 3456
3245, 981,1242,3745,3397,1619,3746,1643,3326,2140,2457,1971,1719,3510,2169,5434, # 3472
3246,5435,5436,3398,1829,5437,1277,4690,1565,2048,5438,1636,3623,3116,5439, 869, # 3488
2852, 655,3890,3891,3117,4092,3018,3892,1310,3624,4691,5440,5441,5442,1733, 558, # 3504
4692,3747, 335,1549,3065,1756,4336,3748,1946,3511,1830,1291,1192, 470,2735,2108, # 3520
2806, 913,1054,4093,5443,1027,5444,3066,4094,4693, 982,2672,3399,3173,3512,3247, # 3536
3248,1947,2807,5445, 571,4694,5446,1831,5447,3625,2591,1523,2429,5448,2090, 984, # 3552
4695,3749,1960,5449,3750, 852, 923,2808,3513,3751, 969,1519, 999,2049,2325,1705, # 3568
5450,3118, 615,1662, 151, 597,4095,2410,2326,1049, 275,4696,3752,4337, 568,3753, # 3584
3626,2487,4338,3754,5451,2430,2275, 409,3249,5452,1566,2888,3514,1002, 769,2853, # 3600
194,2091,3174,3755,2226,3327,4339, 628,1505,5453,5454,1763,2180,3019,4096, 521, # 3616
1161,2592,1788,2206,2411,4697,4097,1625,4340,4341, 412, 42,3119, 464,5455,2642, # 3632
4698,3400,1760,1571,2889,3515,2537,1219,2207,3893,2643,2141,2373,4699,4700,3328, # 3648
1651,3401,3627,5456,5457,3628,2488,3516,5458,3756,5459,5460,2276,2092, 460,5461, # 3664
4701,5462,3020, 962, 588,3629, 289,3250,2644,1116, 52,5463,3067,1797,5464,5465, # 3680
5466,1467,5467,1598,1143,3757,4342,1985,1734,1067,4702,1280,3402, 465,4703,1572, # 3696
510,5468,1928,2245,1813,1644,3630,5469,4704,3758,5470,5471,2673,1573,1534,5472, # 3712
5473, 536,1808,1761,3517,3894,3175,2645,5474,5475,5476,4705,3518,2929,1912,2809, # 3728
5477,3329,1122, 377,3251,5478, 360,5479,5480,4343,1529, 551,5481,2060,3759,1769, # 3744
2431,5482,2930,4344,3330,3120,2327,2109,2031,4706,1404, 136,1468,1479, 672,1171, # 3760
3252,2308, 271,3176,5483,2772,5484,2050, 678,2736, 865,1948,4707,5485,2014,4098, # 3776
2971,5486,2737,2227,1397,3068,3760,4708,4709,1735,2931,3403,3631,5487,3895, 509, # 3792
2854,2458,2890,3896,5488,5489,3177,3178,4710,4345,2538,4711,2309,1166,1010, 552, # 3808
681,1888,5490,5491,2972,2973,4099,1287,1596,1862,3179, 358, 453, 736, 175, 478, # 3824
1117, 905,1167,1097,5492,1854,1530,5493,1706,5494,2181,3519,2292,3761,3520,3632, # 3840
4346,2093,4347,5495,3404,1193,2489,4348,1458,2193,2208,1863,1889,1421,3331,2932, # 3856
3069,2182,3521, 595,2123,5496,4100,5497,5498,4349,1707,2646, 223,3762,1359, 751, # 3872
3121, 183,3522,5499,2810,3021, 419,2374, 633, 704,3897,2394, 241,5500,5501,5502, # 3888
838,3022,3763,2277,2773,2459,3898,1939,2051,4101,1309,3122,2246,1181,5503,1136, # 3904
2209,3899,2375,1446,4350,2310,4712,5504,5505,4351,1055,2615, 484,3764,5506,4102, # 3920
625,4352,2278,3405,1499,4353,4103,5507,4104,4354,3253,2279,2280,3523,5508,5509, # 3936
2774, 808,2616,3765,3406,4105,4355,3123,2539, 526,3407,3900,4356, 955,5510,1620, # 3952
4357,2647,2432,5511,1429,3766,1669,1832, 994, 928,5512,3633,1260,5513,5514,5515, # 3968
1949,2293, 741,2933,1626,4358,2738,2460, 867,1184, 362,3408,1392,5516,5517,4106, # 3984
4359,1770,1736,3254,2934,4713,4714,1929,2707,1459,1158,5518,3070,3409,2891,1292, # 4000
1930,2513,2855,3767,1986,1187,2072,2015,2617,4360,5519,2574,2514,2170,3768,2490, # 4016
3332,5520,3769,4715,5521,5522, 666,1003,3023,1022,3634,4361,5523,4716,1814,2257, # 4032
574,3901,1603, 295,1535, 705,3902,4362, 283, 858, 417,5524,5525,3255,4717,4718, # 4048
3071,1220,1890,1046,2281,2461,4107,1393,1599, 689,2575, 388,4363,5526,2491, 802, # 4064
5527,2811,3903,2061,1405,2258,5528,4719,3904,2110,1052,1345,3256,1585,5529, 809, # 4080
5530,5531,5532, 575,2739,3524, 956,1552,1469,1144,2328,5533,2329,1560,2462,3635, # 4096
3257,4108, 616,2210,4364,3180,2183,2294,5534,1833,5535,3525,4720,5536,1319,3770, # 4112
3771,1211,3636,1023,3258,1293,2812,5537,5538,5539,3905, 607,2311,3906, 762,2892, # 4128
1439,4365,1360,4721,1485,3072,5540,4722,1038,4366,1450,2062,2648,4367,1379,4723, # 4144
2593,5541,5542,4368,1352,1414,2330,2935,1172,5543,5544,3907,3908,4724,1798,1451, # 4160
5545,5546,5547,5548,2936,4109,4110,2492,2351, 411,4111,4112,3637,3333,3124,4725, # 4176
1561,2674,1452,4113,1375,5549,5550, 47,2974, 316,5551,1406,1591,2937,3181,5552, # 4192
1025,2142,3125,3182, 354,2740, 884,2228,4369,2412, 508,3772, 726,3638, 996,2433, # 4208
3639, 729,5553, 392,2194,1453,4114,4726,3773,5554,5555,2463,3640,2618,1675,2813, # 4224
919,2352,2975,2353,1270,4727,4115, 73,5556,5557, 647,5558,3259,2856,2259,1550, # 4240
1346,3024,5559,1332, 883,3526,5560,5561,5562,5563,3334,2775,5564,1212, 831,1347, # 4256
4370,4728,2331,3909,1864,3073, 720,3910,4729,4730,3911,5565,4371,5566,5567,4731, # 4272
5568,5569,1799,4732,3774,2619,4733,3641,1645,2376,4734,5570,2938, 669,2211,2675, # 4288
2434,5571,2893,5572,5573,1028,3260,5574,4372,2413,5575,2260,1353,5576,5577,4735, # 4304
3183, 518,5578,4116,5579,4373,1961,5580,2143,4374,5581,5582,3025,2354,2355,3912, # 4320
516,1834,1454,4117,2708,4375,4736,2229,2620,1972,1129,3642,5583,2776,5584,2976, # 4336
1422, 577,1470,3026,1524,3410,5585,5586, 432,4376,3074,3527,5587,2594,1455,2515, # 4352
2230,1973,1175,5588,1020,2741,4118,3528,4737,5589,2742,5590,1743,1361,3075,3529, # 4368
2649,4119,4377,4738,2295, 895, 924,4378,2171, 331,2247,3076, 166,1627,3077,1098, # 4384
5591,1232,2894,2231,3411,4739, 657, 403,1196,2377, 542,3775,3412,1600,4379,3530, # 4400
5592,4740,2777,3261, 576, 530,1362,4741,4742,2540,2676,3776,4120,5593, 842,3913, # 4416
5594,2814,2032,1014,4121, 213,2709,3413, 665, 621,4380,5595,3777,2939,2435,5596, # 4432
2436,3335,3643,3414,4743,4381,2541,4382,4744,3644,1682,4383,3531,1380,5597, 724, # 4448
2282, 600,1670,5598,1337,1233,4745,3126,2248,5599,1621,4746,5600, 651,4384,5601, # 4464
1612,4385,2621,5602,2857,5603,2743,2312,3078,5604, 716,2464,3079, 174,1255,2710, # 4480
4122,3645, 548,1320,1398, 728,4123,1574,5605,1891,1197,3080,4124,5606,3081,3082, # 4496
3778,3646,3779, 747,5607, 635,4386,4747,5608,5609,5610,4387,5611,5612,4748,5613, # 4512
3415,4749,2437, 451,5614,3780,2542,2073,4388,2744,4389,4125,5615,1764,4750,5616, # 4528
4390, 350,4751,2283,2395,2493,5617,4391,4126,2249,1434,4127, 488,4752, 458,4392, # 4544
4128,3781, 771,1330,2396,3914,2576,3184,2160,2414,1553,2677,3185,4393,5618,2494, # 4560
2895,2622,1720,2711,4394,3416,4753,5619,2543,4395,5620,3262,4396,2778,5621,2016, # 4576
2745,5622,1155,1017,3782,3915,5623,3336,2313, 201,1865,4397,1430,5624,4129,5625, # 4592
5626,5627,5628,5629,4398,1604,5630, 414,1866, 371,2595,4754,4755,3532,2017,3127, # 4608
4756,1708, 960,4399, 887, 389,2172,1536,1663,1721,5631,2232,4130,2356,2940,1580, # 4624
5632,5633,1744,4757,2544,4758,4759,5634,4760,5635,2074,5636,4761,3647,3417,2896, # 4640
4400,5637,4401,2650,3418,2815, 673,2712,2465, 709,3533,4131,3648,4402,5638,1148, # 4656
502, 634,5639,5640,1204,4762,3649,1575,4763,2623,3783,5641,3784,3128, 948,3263, # 4672
121,1745,3916,1110,5642,4403,3083,2516,3027,4132,3785,1151,1771,3917,1488,4133, # 4688
1987,5643,2438,3534,5644,5645,2094,5646,4404,3918,1213,1407,2816, 531,2746,2545, # 4704
3264,1011,1537,4764,2779,4405,3129,1061,5647,3786,3787,1867,2897,5648,2018, 120, # 4720
4406,4407,2063,3650,3265,2314,3919,2678,3419,1955,4765,4134,5649,3535,1047,2713, # 4736
1266,5650,1368,4766,2858, 649,3420,3920,2546,2747,1102,2859,2679,5651,5652,2000, # 4752
5653,1111,3651,2977,5654,2495,3921,3652,2817,1855,3421,3788,5655,5656,3422,2415, # 4768
2898,3337,3266,3653,5657,2577,5658,3654,2818,4135,1460, 856,5659,3655,5660,2899, # 4784
2978,5661,2900,3922,5662,4408, 632,2517, 875,3923,1697,3924,2296,5663,5664,4767, # 4800
3028,1239, 580,4768,4409,5665, 914, 936,2075,1190,4136,1039,2124,5666,5667,5668, # 4816
5669,3423,1473,5670,1354,4410,3925,4769,2173,3084,4137, 915,3338,4411,4412,3339, # 4832
1605,1835,5671,2748, 398,3656,4413,3926,4138, 328,1913,2860,4139,3927,1331,4414, # 4848
3029, 937,4415,5672,3657,4140,4141,3424,2161,4770,3425, 524, 742, 538,3085,1012, # 4864
5673,5674,3928,2466,5675, 658,1103, 225,3929,5676,5677,4771,5678,4772,5679,3267, # 4880
1243,5680,4142, 963,2250,4773,5681,2714,3658,3186,5682,5683,2596,2332,5684,4774, # 4896
5685,5686,5687,3536, 957,3426,2547,2033,1931,2941,2467, 870,2019,3659,1746,2780, # 4912
2781,2439,2468,5688,3930,5689,3789,3130,3790,3537,3427,3791,5690,1179,3086,5691, # 4928
3187,2378,4416,3792,2548,3188,3131,2749,4143,5692,3428,1556,2549,2297, 977,2901, # 4944
2034,4144,1205,3429,5693,1765,3430,3189,2125,1271, 714,1689,4775,3538,5694,2333, # 4960
3931, 533,4417,3660,2184, 617,5695,2469,3340,3539,2315,5696,5697,3190,5698,5699, # 4976
3932,1988, 618, 427,2651,3540,3431,5700,5701,1244,1690,5702,2819,4418,4776,5703, # 4992
3541,4777,5704,2284,1576, 473,3661,4419,3432, 972,5705,3662,5706,3087,5707,5708, # 5008
4778,4779,5709,3793,4145,4146,5710, 153,4780, 356,5711,1892,2902,4420,2144, 408, # 5024
803,2357,5712,3933,5713,4421,1646,2578,2518,4781,4782,3934,5714,3935,4422,5715, # 5040
2416,3433, 752,5716,5717,1962,3341,2979,5718, 746,3030,2470,4783,4423,3794, 698, # 5056
4784,1893,4424,3663,2550,4785,3664,3936,5719,3191,3434,5720,1824,1302,4147,2715, # 5072
3937,1974,4425,5721,4426,3192, 823,1303,1288,1236,2861,3542,4148,3435, 774,3938, # 5088
5722,1581,4786,1304,2862,3939,4787,5723,2440,2162,1083,3268,4427,4149,4428, 344, # 5104
1173, 288,2316, 454,1683,5724,5725,1461,4788,4150,2597,5726,5727,4789, 985, 894, # 5120
5728,3436,3193,5729,1914,2942,3795,1989,5730,2111,1975,5731,4151,5732,2579,1194, # 5136
425,5733,4790,3194,1245,3796,4429,5734,5735,2863,5736, 636,4791,1856,3940, 760, # 5152
1800,5737,4430,2212,1508,4792,4152,1894,1684,2298,5738,5739,4793,4431,4432,2213, # 5168
479,5740,5741, 832,5742,4153,2496,5743,2980,2497,3797, 990,3132, 627,1815,2652, # 5184
4433,1582,4434,2126,2112,3543,4794,5744, 799,4435,3195,5745,4795,2113,1737,3031, # 5200
1018, 543, 754,4436,3342,1676,4796,4797,4154,4798,1489,5746,3544,5747,2624,2903, # 5216
4155,5748,5749,2981,5750,5751,5752,5753,3196,4799,4800,2185,1722,5754,3269,3270, # 5232
1843,3665,1715, 481, 365,1976,1857,5755,5756,1963,2498,4801,5757,2127,3666,3271, # 5248
433,1895,2064,2076,5758, 602,2750,5759,5760,5761,5762,5763,3032,1628,3437,5764, # 5264
3197,4802,4156,2904,4803,2519,5765,2551,2782,5766,5767,5768,3343,4804,2905,5769, # 5280
4805,5770,2864,4806,4807,1221,2982,4157,2520,5771,5772,5773,1868,1990,5774,5775, # 5296
5776,1896,5777,5778,4808,1897,4158, 318,5779,2095,4159,4437,5780,5781, 485,5782, # 5312
938,3941, 553,2680, 116,5783,3942,3667,5784,3545,2681,2783,3438,3344,2820,5785, # 5328
3668,2943,4160,1747,2944,2983,5786,5787, 207,5788,4809,5789,4810,2521,5790,3033, # 5344
890,3669,3943,5791,1878,3798,3439,5792,2186,2358,3440,1652,5793,5794,5795, 941, # 5360
2299, 208,3546,4161,2020, 330,4438,3944,2906,2499,3799,4439,4811,5796,5797,5798, # 5376
)
|
google/upvote
|
refs/heads/master
|
upvote/monitoring/metrics.py
|
1
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Monitoring metric constants to be used throughout Upvote."""
UPVOTE = 'upvote/'
class Metric(object):
def __init__(self, metric_name, display_name):
self.metric_name = metric_name
self.display_name = display_name
def __str__(self):
return self.metric_name
class Namespace(object):
"""A heirarchical namespace for metrics."""
def __init__(self, prefix, tuples):
self.metrics = []
for t in tuples:
metric = Metric(prefix + t[0], t[1])
setattr(self, t[0].upper(), metric)
self.metrics.append(metric)
setattr(self, 'ALL', self.metrics)
def __iter__(self):
for metric in self.metrics:
yield metric
class UpvoteNamespace(Namespace):
def __init__(self, prefix, tuples):
super(UpvoteNamespace, self).__init__(UPVOTE + prefix, tuples)
DATASTORE = UpvoteNamespace('datastore/', [
('backups', 'Datastore Backups'),
])
SANTA_API = UpvoteNamespace('santa_api/', [
('xsrf_requests', 'XSRF Requests'),
('client_validations', 'Client Validations'),
('preflight_requests', 'Preflight Requests'),
('event_upload_requests', 'Event Upload Requests'),
('rule_download_requests', 'Rule Download Requests'),
('postflight_requests', 'Postflight Requests')])
BIT9_API = UpvoteNamespace('bit9_api/', [
('events_to_pull', 'Events To Pull'),
('events_pulled', 'Events Pulled'),
('events_to_process', 'Events To Process'),
('events_processed', 'Events Processed'),
('events_skipped', 'Events Skipped'),
('pending_changes', 'Pending Changes'),
('bit9_logins', 'Bit9 Logins'),
('bit9_qps', 'Bit9 QPS'),
('bit9_requests', 'Bit9 Requests'),
('bit9_latency', 'Bit9 Latency'),
('file_instances_missing', 'File Instances Missing')])
UPVOTE_APP = UpvoteNamespace('upvote_app/', [
('blockable_requests', 'Blockable Requested'),
('event_requests', 'Event Requested'),
('host_requests', 'Host Requested'),
('lookup_requests', 'Lookup Requested'),
('report_requests', 'Report Requested'),
('rule_requests', 'Rule Requested'),
('setting_requests', 'Setting Requested'),
('user_requests', 'User Requested'),
('vote_requests', 'Vote Requested')])
ANALYSIS = UpvoteNamespace('analysis/', [
('virustotal_requests', 'VirusTotal Requests'),
('virustotal_new_lookups', 'VirusTotal Results for New Blockables')])
BIGQUERY = UpvoteNamespace('bigquery/', [
('row_insertions', 'Row Insertions')])
EXEMPTION = UpvoteNamespace('exemption/', [
('enforcement_errors', 'Enforcement Errors'),
('expired_exemptions', 'Expired Exemptions'),
('policy_check_outcomes', 'Policy Check Outcomes'),
('processing_errors', 'Processing Errors'),
('requested_exemptions', 'Requested Exemptions'),
('revocation_errors', 'Revocation Errors'),
('state_changes', 'State Changes')])
ROLES = UpvoteNamespace('roles/', [
('syncing_errors', 'Syncing Errors')])
RPC_SERVER = Namespace('/rpc/server/', [
('count', 'RPC Query Count'),
('error_count', 'RPC Error Count'),
('server_latency', 'RPC Query Latency')])
PRESENCE = Namespace('/presence/', [
('found', 'Presence')])
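# Usage sketch (added for illustration; not part of the original Upvote
# module -- the 'demo/' namespace below is hypothetical): Namespace turns
# each (name, display_name) tuple into a Metric attribute named after the
# upper-cased metric name, so callers reference e.g.
# SANTA_API.PREFLIGHT_REQUESTS, and iterating a Namespace yields its Metrics.
def _example_namespace_usage():
  demo = Namespace('demo/', [('requests', 'Demo Requests')])
  assert str(demo.REQUESTS) == 'demo/requests'  # Metric.__str__ -> metric_name
  assert demo.REQUESTS.display_name == 'Demo Requests'
  assert [str(m) for m in demo] == ['demo/requests']  # __iter__ over metrics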
|
mixman/djangodev
|
refs/heads/master
|
django/core/management/commands/startproject.py
|
2
|
from django.core.management.base import copy_helper, CommandError, LabelCommand
from django.utils.importlib import import_module
import os
import re
from random import choice
class Command(LabelCommand):
help = "Creates a Django project directory structure for the given project name in the current directory."
args = "[projectname]"
label = 'project name'
requires_model_validation = False
# Can't import settings during this command, because they haven't
# necessarily been created.
can_import_settings = False
def handle_label(self, project_name, **options):
directory = os.getcwd()
# Check that the project_name cannot be imported.
try:
import_module(project_name)
except ImportError:
pass
else:
raise CommandError("%r conflicts with the name of an existing Python module and cannot be used as a project name. Please try another name." % project_name)
copy_helper(self.style, 'project', project_name, directory)
# Create a random SECRET_KEY hash, and put it in the main settings.
main_settings_file = os.path.join(directory, project_name, project_name, 'settings.py')
settings_contents = open(main_settings_file, 'r').read()
fp = open(main_settings_file, 'w')
secret_key = ''.join([choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in range(50)])
settings_contents = re.sub(r"(?<=SECRET_KEY = ')'", secret_key + "'", settings_contents)
fp.write(settings_contents)
fp.close()
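# Added sketch (illustration only; the template string and key below are
# hypothetical): the lookbehind substitution above replaces just the
# closing quote of the empty SECRET_KEY assignment rendered by the project
# template, so "SECRET_KEY = ''" becomes "SECRET_KEY = '<random key>'".
def _secret_key_substitution_example():
    template = "SECRET_KEY = ''"
    # Matches the quote preceded by "SECRET_KEY = '" and prepends the key.
    return re.sub(r"(?<=SECRET_KEY = ')'", 'example-key' + "'", template)
    # -> "SECRET_KEY = 'example-key'"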
|
tanty/gnome-ostree
|
refs/heads/master
|
src/ostbuild/pyostbuild/ostbuildlog.py
|
3
|
#!/usr/bin/python
#
# Copyright (C) 2011 Colin Walters <walters@verbum.org>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
import sys
def log(msg, prefix=None):
if prefix is None:
prefix_target = ''
else:
prefix_target = prefix
fullmsg = '%s: %s%s\n' % (os.path.basename(sys.argv[0]), prefix_target, msg)
sys.stdout.write(fullmsg)
sys.stdout.flush()
def fatal(msg):
log(msg, prefix="FATAL: ")
sys.exit(1)
|
40423221/2017springcd_hw
|
refs/heads/gh-pages
|
plugin/summary/__init__.py
|
368
|
from .summary import *
|
socialsweethearts/django-allauth
|
refs/heads/master
|
allauth/socialaccount/providers/openid/south_migrations/0001_initial.py
|
82
|
# encoding: utf-8
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
depends_on = (('socialaccount', '0001_initial'),)
def forwards(self, orm):
# Adding model 'OpenIDAccount'
db.create_table('openid_openidaccount', (
('socialaccount_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['socialaccount.SocialAccount'], unique=True, primary_key=True)),
('identity', self.gf('django.db.models.fields.URLField')(unique=True, max_length=255)),
))
db.send_create_signal('openid', ['OpenIDAccount'])
# Adding model 'OpenIDStore'
db.create_table('openid_openidstore', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('server_url', self.gf('django.db.models.fields.CharField')(max_length=255)),
('handle', self.gf('django.db.models.fields.CharField')(max_length=255)),
('secret', self.gf('django.db.models.fields.TextField')()),
('issued', self.gf('django.db.models.fields.IntegerField')()),
('lifetime', self.gf('django.db.models.fields.IntegerField')()),
('assoc_type', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('openid', ['OpenIDStore'])
# Adding model 'OpenIDNonce'
db.create_table('openid_openidnonce', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('server_url', self.gf('django.db.models.fields.CharField')(max_length=255)),
('timestamp', self.gf('django.db.models.fields.IntegerField')()),
('salt', self.gf('django.db.models.fields.CharField')(max_length=255)),
('date_created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
))
db.send_create_signal('openid', ['OpenIDNonce'])
def backwards(self, orm):
# Deleting model 'OpenIDAccount'
db.delete_table('openid_openidaccount')
# Deleting model 'OpenIDStore'
db.delete_table('openid_openidstore')
# Deleting model 'OpenIDNonce'
db.delete_table('openid_openidnonce')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'openid.openidaccount': {
'Meta': {'object_name': 'OpenIDAccount', '_ormbases': ['socialaccount.SocialAccount']},
'identity': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'}),
'socialaccount_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['socialaccount.SocialAccount']", 'unique': 'True', 'primary_key': 'True'})
},
'openid.openidnonce': {
'Meta': {'object_name': 'OpenIDNonce'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'salt': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'server_url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'timestamp': ('django.db.models.fields.IntegerField', [], {})
},
'openid.openidstore': {
'Meta': {'object_name': 'OpenIDStore'},
'assoc_type': ('django.db.models.fields.TextField', [], {}),
'handle': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issued': ('django.db.models.fields.IntegerField', [], {}),
'lifetime': ('django.db.models.fields.IntegerField', [], {}),
'secret': ('django.db.models.fields.TextField', [], {}),
'server_url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'socialaccount.socialaccount': {
'Meta': {'object_name': 'SocialAccount'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['openid']
|
alistairlow/tensorflow
|
refs/heads/master
|
tensorflow/contrib/tensor_forest/hybrid/python/__init__.py
|
183
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Initialize tensor_forest/hybrid/python."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.hybrid.python import layers
from tensorflow.contrib.tensor_forest.hybrid.python import models
from tensorflow.contrib.tensor_forest.hybrid.python.ops import training_ops
|
27o/systemd
|
refs/heads/master
|
test/rule-syntax-check.py
|
4
|
#!/usr/bin/python3
# Simple udev rules syntax checker
#
# (C) 2010 Canonical Ltd.
# Author: Martin Pitt <martin.pitt@ubuntu.com>
#
# systemd is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
# systemd is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with systemd; If not, see <http://www.gnu.org/licenses/>.
import re
import sys
import os
from glob import glob
if len(sys.argv) > 1:
# explicit rule file list
rules_files = sys.argv[1:]
else:
# take them from the build dir
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
rules_dir = os.path.join(os.environ.get('top_srcdir', root_dir), 'rules')
if not os.path.isdir(rules_dir):
        sys.stderr.write('No rules files given, and %s does not exist, aborting\n' % rules_dir)
sys.exit(2)
rules_files = glob(os.path.join(rules_dir, '*.rules'))
no_args_tests = re.compile(r'(ACTION|DEVPATH|KERNELS?|NAME|SYMLINK|SUBSYSTEMS?|DRIVERS?|TAG|RESULT|TEST)\s*(?:=|!)=\s*"([^"]*)"$')
args_tests = re.compile(r'(ATTRS?|ENV|TEST){([a-zA-Z0-9/_.*%-]+)}\s*(?:=|!)=\s*"([^"]*)"$')
no_args_assign = re.compile(r'(NAME|SYMLINK|OWNER|GROUP|MODE|TAG|PROGRAM|RUN|LABEL|GOTO|OPTIONS|IMPORT)\s*(?:\+=|:=|=)\s*"([^"]*)"$')
args_assign = re.compile(r'(ATTR|ENV|IMPORT|RUN){([a-zA-Z0-9/_.*%-]+)}\s*(=|\+=)\s*"([^"]*)"$')
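# Quick self-checks (added sketch; the clauses below are hypothetical, not
# taken from the systemd rules files): a plain comparison matches the
# "no args" test pattern, a {key} argument needs the "args" pattern, and
# assignments are matched separately.
assert no_args_tests.match('SUBSYSTEM=="net"')
assert args_tests.match('ATTR{address}=="00:11:22:33:44:55"')
assert no_args_assign.match('MODE="0664"')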
result = 0
buffer = ''
for path in rules_files:
lineno = 0
for line in open(path):
lineno += 1
# handle line continuation
if line.endswith('\\\n'):
buffer += line[:-2]
continue
else:
line = buffer + line
buffer = ''
# filter out comments and empty lines
line = line.strip()
if not line or line.startswith('#'):
continue
for clause in line.split(','):
clause = clause.strip()
if not (no_args_tests.match(clause) or args_tests.match(clause) or
no_args_assign.match(clause) or args_assign.match(clause)):
print('Invalid line %s:%i: %s' % (path, lineno, line))
print(' clause: %s' % clause)
print('')
result = 1
break
sys.exit(result)
|
JoelMaatkamp/netvenom
|
refs/heads/master
|
netvenom/utils/checks.py
|
1
|
#! /usr/bin/python3
# -*- coding: utf-8 -*-
import os
import re
import sys
from utils.printer import *
PrinterThread().start()
def check_root():
"""
check for root/admin & exit if not
"""
if os.getuid() != 0:
if sys.platform == "win32":
print_error("you must be admin")
else:
print_error("you must be root")
return False
return True
def check_regex(pattern):
"""
check regex pattern & print error message
"""
try:
re.compile(pattern)
except re.error as e:
msg = "regex error at pos: {0}: {1}"
msg = msg.format(e.pos, e.msg)
print_error("invalid regex pattern:")
print_info(msg)
return False
else:
return True
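# Usage sketch (added for illustration; the patterns are hypothetical):
if __name__ == '__main__':
    assert check_regex(r'\d+') is True         # compiles cleanly
    assert check_regex(r'[unclosed') is False  # re.error -> details printed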
|
riteshshrv/django
|
refs/heads/master
|
django/contrib/auth/migrations/0002_alter_permission_name_max_length.py
|
586
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('auth', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='permission',
name='name',
field=models.CharField(max_length=255, verbose_name='name'),
),
]
|
yaruno/SpyCity
|
refs/heads/master
|
web/assets/bower_components/leaflet-plugins/build/deps.py
|
69
|
#!/usr/bin/env python
# vim: sts=4 sw=4 et
import os, sys
printed = set()
def includes(f):
d = os.path.dirname(f)
for l in open(f):
if l.startswith('//#include'):
yield os.path.join(d, l.strip().split(None, 1)[1].strip(""""'"""))
work = list(sys.argv[1:])
while work:
f = work.pop(0)
if f in printed:
continue
i = list(filter(lambda x: x not in printed, includes(f)))
if i:
work = i + [f] + work
continue
printed.add(f)
    print(f)
|
PyORBIT-Collaboration/py-orbit
|
refs/heads/master
|
py/orbit/py_linac/errors/__init__.py
|
2
|
## \namespace orbit::py_linac::errors
## Classes and packages of ORBIT Linac.
##
from ErrorNodesAndControllersLib import AccErrorNode
from ErrorNodesAndControllersLib import ErrorCoordDisplacementNode
from ErrorNodesAndControllersLib import BaseErrorController
from ErrorNodesAndControllersLib import ErrorCntrlCoordDisplacement
__all__ = []
#---- Error Controllers classes
__all__.append("BaseErrorController")
__all__.append("ErrorCntrlCoordDisplacement")
#---- Error nodes classes
__all__.append("AccErrorNode")
__all__.append("ErrorCoordDisplacementNode")
|
thispc/download-manager
|
refs/heads/master
|
module/plugins/hooks/DeleteFinished.py
|
8
|
# -*- coding: utf-8 -*-
from module.database import style
from ..internal.Addon import Addon
class DeleteFinished(Addon):
__name__ = "DeleteFinished"
__type__ = "hook"
__version__ = "1.19"
__status__ = "testing"
__config__ = [("activated", "bool", "Activated", False),
("interval", "int", "Check interval in hours", 72),
("deloffline", "bool", "Delete package with offline links", False)]
__description__ = """Automatically delete all finished packages from queue"""
__license__ = "GPLv3"
__authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
def periodical_task(self):
if not self.info['sleep']:
deloffline = self.config.get('deloffline')
mode = "0,1,4" if deloffline else "0,4"
msg = _(
'delete all finished packages in queue list (%s packages with offline links)')
self.log_info(
msg %
(_('including') if deloffline else _('excluding')))
self.delete_finished(mode)
self.info['sleep'] = True
self.add_event('package_finished', self.wakeup)
def deactivate(self):
self.manager.removeEvent('package_finished', self.wakeup)
def activate(self):
self.info['sleep'] = True
self.add_event('package_finished', self.wakeup)
self.periodical.start(self.config.get('interval') * 60 * 60)
## own methods ##
@style.queue
def delete_finished(self, mode):
self.c.execute(
'DELETE FROM packages WHERE NOT EXISTS(SELECT 1 FROM links WHERE package=packages.id AND status NOT IN (%s))' %
mode)
self.c.execute(
'DELETE FROM links WHERE NOT EXISTS(SELECT 1 FROM packages WHERE id=links.package)')
def wakeup(self, pypack):
self.manager.removeEvent('package_finished', self.wakeup)
self.info['sleep'] = False
## event managing ##
def add_event(self, event, func):
"""
Adds an event listener for event name
"""
if event in self.manager.events:
if func in self.manager.events[event]:
self.log_debug("Function already registered", func)
else:
self.manager.events[event].append(func)
else:
self.manager.events[event] = [func]
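    # Lifecycle sketch (added note; `self.manager` is pyLoad's hook manager,
    # assumed from the Addon base class): package_finished -> wakeup() clears
    # info['sleep'] and deregisters itself, the next periodical_task() runs
    # one cleanup pass, re-sets the sleep flag and re-registers wakeup, so
    # deletions only happen after new packages actually finish.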
|
Anonymous-X6/django
|
refs/heads/master
|
tests/migrations/test_migrations_squashed_complex/3_squashed_5.py
|
770
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
replaces = [
("migrations", "3_auto"),
("migrations", "4_auto"),
("migrations", "5_auto"),
]
dependencies = [("migrations", "2_auto")]
operations = [
migrations.RunPython(migrations.RunPython.noop)
]
|
trishume/MacRanger
|
refs/heads/master
|
doc/tools/print_colors.py
|
4
|
#!/usr/bin/env python
"""
You can use this tool to display all supported colors and their color number.
It will exit after a keypress.
"""
import curses
from curses import *
@wrapper
def main(win):
def print_all_colors(attr):
for c in range(-1, curses.COLORS):
try:
init_pair(c, c, 0)
except:
pass
else:
win.addstr(str(c) + ' ', color_pair(c) | attr)
start_color()
try:
use_default_colors()
except:
pass
win.addstr("available colors: %d\n\n" % curses.COLORS)
print_all_colors(0)
win.addstr("\n\n")
print_all_colors(A_BOLD)
win.refresh()
win.getch()
|
40023154/Finalexam_0627
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/_testcapi.py
|
742
|
CHAR_MAX = 127
CHAR_MIN = -128
DBL_MAX = 1.7976931348623157e+308
DBL_MIN = 2.2250738585072014e-308
FLT_MAX = 3.4028234663852886e+38
FLT_MIN = 1.1754943508222875e-38
INT_MAX = 2147483647
INT_MIN = -2147483648
LLONG_MAX = 9223372036854775807
LLONG_MIN = -9223372036854775808
LONG_MAX = 2147483647
LONG_MIN = -2147483648
PY_SSIZE_T_MAX = 2147483647
PY_SSIZE_T_MIN = -2147483648
SHRT_MAX = 32767
SHRT_MIN = -32768
SIZEOF_PYGC_HEAD = 16
UCHAR_MAX = 255
UINT_MAX = 4294967295
ULLONG_MAX = 18446744073709551615
ULONG_MAX = 4294967295
USHRT_MAX = 65535
__loader__ = "<_frozen_importlib.ExtensionFileLoader object at 0x00C98DD0>"
def _pending_threadfunc(*args,**kw):
pass
class _test_structmembersType(object):
pass
def _test_thread_state(*args,**kw):
pass
def argparsing(*args,**kw):
pass
def code_newempty(*args,**kw):
pass
def codec_incrementaldecoder(*args,**kw):
pass
def codec_incrementalencoder(*args,**kw):
pass
def crash_no_current_thread(*args,**kw):
pass
class error(Exception):
pass
def exception_print(*args,**kw):
pass
def getargs_B(*args,**kw):
pass
def getargs_H(*args,**kw):
pass
def getargs_I(*args,**kw):
pass
def getargs_K(*args,**kw):
pass
def getargs_L(*args,**kw):
pass
def getargs_Z(*args,**kw):
pass
def getargs_Z_hash(*args,**kw):
pass
def getargs_b(*args,**kw):
pass
def getargs_c(*args,**kw):
pass
def getargs_h(*args,**kw):
pass
def getargs_i(*args,**kw):
pass
def getargs_k(*args,**kw):
pass
def getargs_keyword_only(*args,**kw):
pass
def getargs_keywords(*args,**kw):
pass
def getargs_l(*args,**kw):
pass
def getargs_n(*args,**kw):
pass
def getargs_p(*args,**kw):
pass
def getargs_s(*args,**kw):
pass
def getargs_s_hash(*args,**kw):
pass
def getargs_s_star(*args,**kw):
pass
def getargs_tuple(*args,**kw):
pass
def getargs_u(*args,**kw):
pass
def getargs_u_hash(*args,**kw):
pass
def getargs_w_star(*args,**kw):
pass
def getargs_y(*args,**kw):
pass
def getargs_y_hash(*args,**kw):
pass
def getargs_y_star(*args,**kw):
pass
def getargs_z(*args,**kw):
pass
def getargs_z_hash(*args,**kw):
pass
def getargs_z_star(*args,**kw):
pass
class instancemethod(object):
pass
def make_exception_with_doc(*args,**kw):
pass
def make_memoryview_from_NULL_pointer(*args,**kw):
pass
def parse_tuple_and_keywords(*args,**kw):
pass
def pytime_object_to_time_t(*args,**kw):
pass
def pytime_object_to_timespec(*args,**kw):
pass
def pytime_object_to_timeval(*args,**kw):
pass
def raise_exception(*args,**kw):
pass
def raise_memoryerror(*args,**kw):
pass
def run_in_subinterp(*args,**kw):
pass
def set_exc_info(*args,**kw):
pass
def test_L_code(*args,**kw):
pass
def test_Z_code(*args,**kw):
pass
def test_capsule(*args,**kw):
pass
def test_config(*args,**kw):
pass
def test_datetime_capi(*args,**kw):
pass
def test_dict_iteration(*args,**kw):
pass
def test_empty_argparse(*args,**kw):
pass
def test_k_code(*args,**kw):
pass
def test_lazy_hash_inheritance(*args,**kw):
pass
def test_list_api(*args,**kw):
pass
def test_long_and_overflow(*args,**kw):
pass
def test_long_api(*args,**kw):
pass
def test_long_as_double(*args,**kw):
pass
def test_long_as_size_t(*args,**kw):
pass
def test_long_long_and_overflow(*args,**kw):
pass
def test_long_numbits(*args,**kw):
pass
def test_longlong_api(*args,**kw):
pass
def test_null_strings(*args,**kw):
pass
def test_s_code(*args,**kw):
pass
def test_string_from_format(*args,**kw):
pass
def test_string_to_double(*args,**kw):
pass
def test_u_code(*args,**kw):
pass
def test_unicode_compare_with_ascii(*args,**kw):
pass
def test_widechar(*args,**kw):
pass
def test_with_docstring(*args,**kw):
"""This is a pretty normal docstring."""
pass
def traceback_print(*args,**kw):
pass
def unicode_aswidechar(*args,**kw):
pass
def unicode_aswidecharstring(*args,**kw):
pass
def unicode_encodedecimal(*args,**kw):
pass
def unicode_transformdecimaltoascii(*args,**kw):
pass
|
ssharm21/google-python-exercises
|
refs/heads/master
|
basic/string1.py
|
1
|
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Basic string exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in string2.py.
# A. donuts
# Given an int count of a number of donuts, return a string
# of the form 'Number of donuts: <count>', where <count> is the number
# passed in. However, if the count is 10 or more, then use the word 'many'
# instead of the actual count.
# So donuts(5) returns 'Number of donuts: 5'
# and donuts(23) returns 'Number of donuts: many'
def donuts(count):
if count >= 10:
count = 'many'
return 'Number of donuts: '+str(count)
# B. both_ends
# Given a string s, return a string made of the first 2
# and the last 2 chars of the original string,
# so 'spring' yields 'spng'. However, if the string length
# is less than 2, return instead the empty string.
def both_ends(s):
slen = len(s)
if slen >= 2:
return s[0]+s[1]+s[-2]+s[-1]
else:
return ''
# C. fix_start
# Given a string s, return a string
# where all occurrences of its first char have
# been changed to '*', except do not change
# the first char itself.
# e.g. 'babble' yields 'ba**le'
# Assume that the string is length 1 or more.
# Hint: s.replace(stra, strb) returns a version of string s
# where all instances of stra have been replaced by strb.
def fix_start(s):
save = s[0]
s_modified = s.replace(s[0],'*')
return save+s_modified[1:]
# D. MixUp
# Given strings a and b, return a single string with a and b separated
# by a space '<a> <b>', except swap the first 2 chars of each string.
# e.g.
# 'mix', 'pod' -> 'pox mid'
# 'dog', 'dinner' -> 'dig donner'
# Assume a and b are length 2 or more.
def mix_up(a, b):
return b[0]+b[1]+a[2:]+" "+a[0]+a[1]+b[2:]
# Provided simple test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Provided main() calls the above functions with interesting inputs,
# using test() to check if each result is correct or not.
def main():
print 'donuts'
# Each line calls donuts, compares its result to the expected for that call.
test(donuts(4), 'Number of donuts: 4')
test(donuts(9), 'Number of donuts: 9')
test(donuts(10), 'Number of donuts: many')
test(donuts(99), 'Number of donuts: many')
print
print 'both_ends'
test(both_ends('spring'), 'spng')
test(both_ends('Hello'), 'Helo')
test(both_ends('a'), '')
test(both_ends('xyz'), 'xyyz')
print
print 'fix_start'
test(fix_start('babble'), 'ba**le')
test(fix_start('aardvark'), 'a*rdv*rk')
test(fix_start('google'), 'goo*le')
test(fix_start('donut'), 'donut')
print
print 'mix_up'
test(mix_up('mix', 'pod'), 'pox mid')
test(mix_up('dog', 'dinner'), 'dig donner')
test(mix_up('gnash', 'sport'), 'spash gnort')
test(mix_up('pezzy', 'firm'), 'fizzy perm')
# Standard boilerplate to call the main() function.
if __name__ == '__main__':
main()
|
github-account-because-they-want-it/django
|
refs/heads/master
|
django/contrib/postgres/lookups.py
|
199
|
from django.db.models import Lookup, Transform
class PostgresSimpleLookup(Lookup):
def as_sql(self, qn, connection):
lhs, lhs_params = self.process_lhs(qn, connection)
rhs, rhs_params = self.process_rhs(qn, connection)
params = lhs_params + rhs_params
return '%s %s %s' % (lhs, self.operator, rhs), params
class FunctionTransform(Transform):
def as_sql(self, qn, connection):
lhs, params = qn.compile(self.lhs)
return "%s(%s)" % (self.function, lhs), params
class DataContains(PostgresSimpleLookup):
lookup_name = 'contains'
operator = '@>'
class ContainedBy(PostgresSimpleLookup):
lookup_name = 'contained_by'
operator = '<@'
class Overlap(PostgresSimpleLookup):
lookup_name = 'overlap'
operator = '&&'
class HasKey(PostgresSimpleLookup):
lookup_name = 'has_key'
operator = '?'
class HasKeys(PostgresSimpleLookup):
lookup_name = 'has_keys'
operator = '?&'
class HasAnyKeys(PostgresSimpleLookup):
lookup_name = 'has_any_keys'
operator = '?|'
class Unaccent(FunctionTransform):
bilateral = True
lookup_name = 'unaccent'
function = 'UNACCENT'
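# Usage sketch (added; the model and field registration below are
# hypothetical): registering one of these lookup classes on a field exposes
# its lookup_name in queryset filters, which as_sql() compiles to the
# operator declared above, roughly:
#   ArrayField.register_lookup(DataContains)
#   Entry.objects.filter(tags__contains=['django'])
#   -> ... WHERE "entry"."tags" @> %s  (params: [['django']])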
|
advancedtelematic/docker-launcher
|
refs/heads/master
|
tests/test_attach.py
|
2
|
# encoding: utf-8
#
# Copyright © 2015 ATS Advanced Telematic Systems GmbH
#
# This file is part of Docker Launcher.
#
# Docker Launcher is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# Docker Launcher is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# Docker Launcher. If not, see <http://www.gnu.org/licenses/>.
#
from launcher.util.stack_config import StackConf
import pytest
@pytest.fixture(scope="function")
def valid_service():
return {
'services': [{
'name': 'test_service',
'repo': 'test/repo',
'attach': True
}]
}
def test_attach_is_valid(valid_service):
StackConf(valid_service)
|
alash3al/rethinkdb
|
refs/heads/next
|
test/performance/util.py
|
50
|
from __future__ import print_function
import os
import random
import time
import uuid
try:
xrange
except NameError:
xrange = range
def gen_doc(size_doc, i):
if size_doc == "small":
return {
"field0": str(i // 1000),
"field1": str(i),
}
elif size_doc == "big":
        # Size between 17k and 18k
return {
"field0": str(i // 1000),
"field1": str(i),
"string": str(uuid.uuid1()),
"int": i,
"float": i / 3.,
"boolean": (random.random() > 0.5),
"null": None,
"array_num": [int(random.random() * 10000) for i in xrange(int(random.random() * 100))],
"array_str": [str(uuid.uuid1()) for i in xrange(int(random.random() * 100))],
"obj": {
"nested0": str(uuid.uuid1()),
"nested1": str(uuid.uuid1()),
"nested2": str(uuid.uuid1())
},
"longstr1": "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Etiam tincidunt metus justo, in faucibus magna facilisis in. Sed adipiscing massa cursus, laoreet quam sed, dignissim urna. Nullam a pellentesque dolor. Aliquam nunc tortor, posuere ac tempus a, rhoncus non felis. Donec vel ante ornare, fermentum mauris quis, rhoncus nisi. Duis placerat nunc sit amet ipsum ultricies, eu euismod sapien fringilla. In id sapien ut arcu dignissim pellentesque sit amet non ante. Phasellus eget fermentum nunc, et condimentum libero. Quisque porttitor, erat eget gravida feugiat, odio purus congue dui, nec varius purus turpis eget urna. Fusce facilisis est libero. Proin vitae libero vitae urna laoreet vulputate. Duis commodo, quam congue sodales posuere, neque ligula rhoncus nulla, cursus tristique neque ante et nunc. Donec placerat suscipit nulla vel faucibus. Vestibulum vehicula id diam eget feugiat. Donec vel diam fermentum, rutrum lectus id, vulputate dui. Donec turpis risus, suscipit eu risus at, commodo suscipit massa. Quisque vel cursus leo, vitae tincidunt lacus. Vivamus fermentum tristique leo, vitae tempus diam faucibus eu. Nullam condimentum, est vitae vehicula facilisis, risus nulla viverra magna, quis elementum nunc nunc id mauris. Aliquam ante urna, volutpat accumsan lectus sit amet, scelerisque tristique orci. Sed sodales commodo purus ac ultrices. Mauris imperdiet ullamcorper luctus. Mauris faucibus metus a turpis blandit placerat. Donec interdum sem vitae quam convallis euismod. Donec a magna elit. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Ut blandit nisi augue, non porttitor dolor fringilla quis. Donec placerat a odio quis fringilla. Cras vitae aliquet nisl. Sed consequat dolor massa, et vulputate nibh dignissim eu. Donec dignissim cursus risus vel rutrum. Aliquam a faucibus nulla, sit amet blandit justo. Pellentesque id tortor sagittis, suscipit diam sed, imperdiet augue. Integer sit amet sem ac velit fermentum pharetra id a erat. In iaculis enim nec malesuada blandit. Aenean malesuada sem non felis bibendum, blandit rhoncus turpis faucibus. Nam interdum massa dolor. Phasellus scelerisque rhoncus orci. Nullam hendrerit leo eget sem rutrum, viverra ultricies tortor congue. Suspendisse venenatis, augue id scelerisque molestie, dui arcu vestibulum eros, vitae facilisis augue massa at lectus. Maecenas at pulvinar magna. Suspendisse consequat diam vel augue molestie vehicula. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Vivamus ac commodo eros. Donec sit amet magna eget nibh dictum congue. Cras sapien odio, aliquam quis ullamcorper ut, interdum sed lectus. Aliquam risus justo, pellentesque vel magna in, fringilla porttitor magna. Pellentesque eleifend a augue nec rutrum. Nullam et lectus eu diam placerat semper. Pellentesque eget aliquam dui. Nulla ultrices neque tincidunt, adipiscing leo eget, auctor augue. Sed ac metus convallis, consectetur eros eu, adipiscing lacus. Sed pellentesque ac sem nec tristique. Mauris imperdiet orci id nisl ullamcorper, non euismod erat tincidunt. Duis blandit facilisis dignissim. Quisque at tempus ligula. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Nullam tincidunt nibh felis, ut congue ligula lacinia nec. Sed ut ipsum vel elit tristique laoreet quis in diam. Etiam tempor erat eu aliquam tristique. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Nulla facilisi. 
Maecenas cursus elit at varius lacinia. Etiam feugiat arcu sodales felis feugiat, et lobortis quam varius. Fusce et libero vitae dolor tincidunt tempor id ac lectus. Nam mollis viverra cursus. Nullam ut commodo mi, sit amet pretium lorem. Etiam tempus, velit sit amet lacinia lobortis, metus tellus vulputate orci, eu adipiscing metus dui et mauris. Nunc egestas consectetur nisi ut porta. Donec nec vehicula ligula. Nulla volutpat mi ac ornare elementum. Nullam risus justo, fringilla id tincidunt sit amet, elementum at purus. Cras a ullamcorper tellus, ac congue mi. Etiam malesuada leo a dui convallis pulvinar. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Morbi at ullamcorper nulla. Curabitur eu molestie orci, porttitor feugiat quam. Pellentesque neque turpis, ullamcorper adipiscing scelerisque a, facilisis quis magna. Quisque nulla elit, luctus eget scelerisque non, scelerisque quis massa. Ut porttitor ante at mauris scelerisque adipiscing. Integer vel leo magna. Phasellus quam enim, malesuada et dignissim a, tempus id lorem. Nullam mattis tincidunt venenatis. Sed quam arcu, molestie sed ante vel, pulvinar fermentum mi. Nam malesuada id nibh sit amet dictum. Aliquam mi augue, mattis sit amet congue sed, dignissim ut odio. Mauris scelerisque libero eget metus venenatis, ut mollis eros consectetur. Duis metus augue, molestie eget tincidunt vitae, volutpat vel lacus. Mauris fringilla imperdiet fermentum. Sed sit amet diam ut risus vulputate feugiat. Nulla vitae adipiscing quam. Duis non libero urna. Aenean ut ligula sed erat dictum dignissim aliquet non libero. Praesent quis neque varius lorem porta pulvinar. Integer aliquet elit vitae pretium mattis. Ut egestas nunc quis molestie commodo. Cras augue quam, cursus tristique sollicitudin sed, sagittis non velit. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec malesuada in enim sed aliquam. Curabitur lobortis fermentum purus. Maecenas vitae nibh ut libero congue interdum. Donec viverra ligula quis nibh volutpat, non luctus est dignissim. Suspendisse molestie, enim tempor consectetur gravida, ante sem porta mauris, a blandit velit quam suscipit justo. Etiam placerat euismod enim a rutrum. Praesent a imperdiet urna. Morbi quis vehicula leo. Nullam dictum fermentum nulla. Mauris blandit pretium ultricies. Morbi ultrices est non sem suscipit mollis. Nam consequat ac ligula nec commodo. Ut mattis, tortor in laoreet tristique, quam dolor ornare massa, non luctus lacus ante eu massa. Nulla facilisi. Aliquam fringilla, felis non faucibus tempor, lorem sapien imperdiet mauris, rhoncus fermentum tellus nibh ut purus. Sed luctus risus quis mi interdum mollis. Duis sit amet nibh vel sem tincidunt vestibulum sed non eros. Duis laoreet orci dignissim est luctus, et pellentesque felis pulvinar. Nam interdum massa eros, eu fringilla augue condimentum quis. Vestibulum pharetra mi quis felis hendrerit, eget malesuada nisl sagittis. Aliquam sit amet urna eu mauris dictum pharetra. Sed dignissim dignissim metus et elementum. Maecenas gravida lobortis tincidunt. Nulla dignissim, risus eu aliquam eleifend, lacus mi lobortis neque, sed venenatis erat ante at purus. Aliquam erat volutpat. Nam eu eros a nisi mollis pretium vel vitae massa. Donec vulputate, ligula at fringilla ultrices, purus metus vehicula risus, ac sagittis purus metus sit amet libero. Curabitur eu dapibus urna, sed pharetra mauris. Mauris eget lacinia libero, vitae turpis duis.",
"longstr2": "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Etiam tincidunt metus justo, in faucibus magna facilisis in. Sed adipiscing massa cursus, laoreet quam sed, dignissim urna. Nullam a pellentesque dolor. Aliquam nunc tortor, posuere ac tempus a, rhoncus non felis. Donec vel ante ornare, fermentum mauris quis, rhoncus nisi. Duis placerat nunc sit amet ipsum ultricies, eu euismod sapien fringilla. In id sapien ut arcu dignissim pellentesque sit amet non ante. Phasellus eget fermentum nunc, et condimentum libero. Quisque porttitor, erat eget gravida feugiat, odio purus congue dui, nec varius purus turpis eget urna. Fusce facilisis est libero. Proin vitae libero vitae urna laoreet vulputate. Duis commodo, quam congue sodales posuere, neque ligula rhoncus nulla, cursus tristique neque ante et nunc. Donec placerat suscipit nulla vel faucibus. Vestibulum vehicula id diam eget feugiat. Donec vel diam fermentum, rutrum lectus id, vulputate dui. Donec turpis risus, suscipit eu risus at, commodo suscipit massa. Quisque vel cursus leo, vitae tincidunt lacus. Vivamus fermentum tristique leo, vitae tempus diam faucibus eu. Nullam condimentum, est vitae vehicula facilisis, risus nulla viverra magna, quis elementum nunc nunc id mauris. Aliquam ante urna, volutpat accumsan lectus sit amet, scelerisque tristique orci. Sed sodales commodo purus ac ultrices. Mauris imperdiet ullamcorper luctus. Mauris faucibus metus a turpis blandit placerat. Donec interdum sem vitae quam convallis euismod. Donec a magna elit. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Ut blandit nisi augue, non porttitor dolor fringilla quis. Donec placerat a odio quis fringilla. Cras vitae aliquet nisl. Sed consequat dolor massa, et vulputate nibh dignissim eu. Donec dignissim cursus risus vel rutrum. Aliquam a faucibus nulla, sit amet blandit justo. Pellentesque id tortor sagittis, suscipit diam sed, imperdiet augue. Integer sit amet sem ac velit fermentum pharetra id a erat. In iaculis enim nec malesuada blandit. Aenean malesuada sem non felis bibendum, blandit rhoncus turpis faucibus. Nam interdum massa dolor. Phasellus scelerisque rhoncus orci. Nullam hendrerit leo eget sem rutrum, viverra ultricies tortor congue. Suspendisse venenatis, augue id scelerisque molestie, dui arcu vestibulum eros, vitae facilisis augue massa at lectus. Maecenas at pulvinar magna. Suspendisse consequat diam vel augue molestie vehicula. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Vivamus ac commodo eros. Donec sit amet magna eget nibh dictum congue. Cras sapien odio, aliquam quis ullamcorper ut, interdum sed lectus. Aliquam risus justo, pellentesque vel magna in, fringilla porttitor magna. Pellentesque eleifend a augue nec rutrum. Nullam et lectus eu diam placerat semper. Pellentesque eget aliquam dui. Nulla ultrices neque tincidunt, adipiscing leo eget, auctor augue. Sed ac metus convallis, consectetur eros eu, adipiscing lacus. Sed pellentesque ac sem nec tristique. Mauris imperdiet orci id nisl ullamcorper, non euismod erat tincidunt. Duis blandit facilisis dignissim. Quisque at tempus ligula. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Nullam tincidunt nibh felis, ut congue ligula lacinia nec. Sed ut ipsum vel elit tristique laoreet quis in diam. Etiam tempor erat eu aliquam tristique. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Nulla facilisi. 
Maecenas cursus elit at varius lacinia. Etiam feugiat arcu sodales felis feugiat, et lobortis quam varius. Fusce et libero vitae dolor tincidunt tempor id ac lectus. Nam mollis viverra cursus. Nullam ut commodo mi, sit amet pretium lorem. Etiam tempus, velit sit amet lacinia lobortis, metus tellus vulputate orci, eu adipiscing metus dui et mauris. Nunc egestas consectetur nisi ut porta. Donec nec vehicula ligula. Nulla volutpat mi ac ornare elementum. Nullam risus justo, fringilla id tincidunt sit amet, elementum at purus. Cras a ullamcorper tellus, ac congue mi. Etiam malesuada leo a dui convallis pulvinar. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Morbi at ullamcorper nulla. Curabitur eu molestie orci, porttitor feugiat quam. Pellentesque neque turpis, ullamcorper adipiscing scelerisque a, facilisis quis magna. Quisque nulla elit, luctus eget scelerisque non, scelerisque quis massa. Ut porttitor ante at mauris scelerisque adipiscing. Integer vel leo magna. Phasellus quam enim, malesuada et dignissim a, tempus id lorem. Nullam mattis tincidunt venenatis. Sed quam arcu, molestie sed ante vel, pulvinar fermentum mi. Nam malesuada id nibh sit amet dictum. Aliquam mi augue, mattis sit amet congue sed, dignissim ut odio. Mauris scelerisque libero eget metus venenatis, ut mollis eros consectetur. Duis metus augue, molestie eget tincidunt vitae, volutpat vel lacus. Mauris fringilla imperdiet fermentum. Sed sit amet diam ut risus vulputate feugiat. Nulla vitae adipiscing quam. Duis non libero urna. Aenean ut ligula sed erat dictum dignissim aliquet non libero. Praesent quis neque varius lorem porta pulvinar. Integer aliquet elit vitae pretium mattis. Ut egestas nunc quis molestie commodo. Cras augue quam, cursus tristique sollicitudin sed, sagittis non velit. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec malesuada in enim sed aliquam. Curabitur lobortis fermentum purus. Maecenas vitae nibh ut libero congue interdum. Donec viverra ligula quis nibh volutpat, non luctus est dignissim. Suspendisse molestie, enim tempor consectetur gravida, ante sem porta mauris, a blandit velit quam suscipit justo. Etiam placerat euismod enim a rutrum. Praesent a imperdiet urna. Morbi quis vehicula leo. Nullam dictum fermentum nulla. Mauris blandit pretium ultricies. Morbi ultrices est non sem suscipit mollis. Nam consequat ac ligula nec commodo. Ut mattis, tortor in laoreet tristique, quam dolor ornare massa, non luctus lacus ante eu massa. Nulla facilisi. Aliquam fringilla, felis non faucibus tempor, lorem sapien imperdiet mauris, rhoncus fermentum tellus nibh ut purus. Sed luctus risus quis mi interdum mollis. Duis sit amet nibh vel sem tincidunt vestibulum sed non eros. Duis laoreet orci dignissim est luctus, et pellentesque felis pulvinar. Nam interdum massa eros, eu fringilla augue condimentum quis. Vestibulum pharetra mi quis felis hendrerit, eget malesuada nisl sagittis. Aliquam sit amet urna eu mauris dictum pharetra. Sed dignissim dignissim metus et elementum. Maecenas gravida lobortis tincidunt. Nulla dignissim, risus eu aliquam eleifend, lacus mi lobortis neque, sed venenatis erat ante at purus. Aliquam erat volutpat. Nam eu eros a nisi mollis pretium vel vitae massa. Donec vulputate, ligula at fringilla ultrices, purus metus vehicula risus, ac sagittis purus metus sit amet libero. Curabitur eu dapibus urna, sed pharetra mauris. Mauris eget lacinia libero, vitae turpis duis."
}
def gen_num_docs(size_doc):
if size_doc == "small":
        # 335,000 fits in memory for the table with the small cache
        # 21,000,000 fits in memory for the table with the big cache
return 1000000
else:
# 1000 fits in memory for the table with the small cache
# 58000 fits in memory for the table with the big cache
return 30000
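# Added self-check (illustration only): "small" documents bucket ids by
# thousands through field0, giving a low-cardinality value alongside the
# unique field1.
assert gen_doc("small", 1234) == {"field0": "1", "field1": "1234"}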
def compare(new_results, previous_results):
str_date = time.strftime("%y.%m.%d-%H:%M:%S")
if not os.path.exists('comparisons'):
os.mkdir('comparisons')
elif not os.path.isdir('comparisons'):
raise Exception('Unable to write the results as there is a non-folder named "comparisons"')
f = open("comparisons/comparison_" + str_date + ".html", "w")
f.write('''<html>
<head>
<style>
table {padding: 0px; margin: 0px;border-collapse:collapse;}
    th {cursor: pointer}
td, th {border: 1px solid #000; padding: 5px 8px; margin: 0px; text-align: right;}
</style>
<script type='text/javascript' src='jquery-latest.js'></script>
<script type='text/javascript' src='jquery.tablesorter.js'></script>
<script type='text/javascript' src='main.js'></script>
</head>
<body>
%(previous_hash)s
Current hash: %(current_hash)s</br>
</br>
<table>
<thead><tr>
<th>Query</th>
<th>Previous avg q/s</th>
<th>Avg q/s</th>
<th>Previous 1st centile q/s</th>
<th>1st centile q/s</th>
<th>Previous 99 centile q/s</th>
<th>99 centile q/s</th>
<th>Diff</th>
<th>Status</th>
</tr></thead>
<tbody>
''' % {
'previous_hash': "Previous hash: " + previous_results["hash"] + "<br/>" if "hash" in previous_results else '',
'current_hash': new_results["hash"]
})
for key in new_results:
if key != "hash":
reportValues = {
'key50': str(key)[:50], 'status_color':'gray', 'status': 'Unknown', 'diff': 'undefined',
'inverse_prev_average': 'Unknown', 'inverse_new_average': "%.2f" % (1 / new_results[key]["average"]),
'inverse_prev_first_centile': 'Unknown', 'inverse_new_first_centile': "%.2f" % (1 / new_results[key]["first_centile"]),
'inverse_prev_last_centile': 'Unknown', 'inverse_new_last_centile': "%.2f" % (1 / new_results[key]["last_centile"])
}
if key in previous_results:
if new_results[key]["average"] > 0:
reportValues['diff'] = 1.0 * (1 / previous_results[key]["average"] - 1 / new_results[key]["average"]) / (1 / new_results[key]["average"])
                # 'diff' stays the string 'undefined' when the new average is
                # not positive, which routes the row to the "Bug" status below.
                if isinstance(reportValues['diff'], float):
                    if reportValues['diff'] < 0.2:
reportValues['status'] = "Success"
reportValues['status_color'] = "green"
else:
reportValues['status'] = "Fail"
reportValues['status_color'] = "red"
reportValues['diff'] = "%.4f" % reportValues['diff']
else:
reportValues['status'] = "Bug"
reportValues['inverse_prev_average'] = "%.2f" % (1 / previous_results[key]["average"])
reportValues['inverse_prev_first_centile'] = "%.2f" % (1 / previous_results[key]["first_centile"])
reportValues['inverse_prev_last_centile'] = "%.2f" % (1 / previous_results[key]["last_centile"])
try:
f.write(''' <tr>
<td>%(key50)s</td>
<td>%(inverse_prev_average)s</td>
<td>%(inverse_new_average)s</td>
<td>%(inverse_prev_first_centile)s</td>
<td>%(inverse_new_first_centile)s</td>
<td>%(inverse_prev_last_centile)s</td>
<td>%(inverse_new_last_centile)s</td>
<td>%(diff)s</td>
<td style='background: %(status_color)s'>%(status)s</td>
</tr>
''' % reportValues)
except Exception as e:
print(key, str(e))
f.write(''' </tbody>
</table>
</body>
</html>
''')
f.close()
print("HTML file saved in comparisons/comparison_" + str_date + ".html")
|
ozburo/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/malltv.py
|
6
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
clean_html,
dict_get,
float_or_none,
int_or_none,
merge_dicts,
parse_duration,
try_get,
)
class MallTVIE(InfoExtractor):
_VALID_URL = r'https?://(?:(?:www|sk)\.)?mall\.tv/(?:[^/]+/)*(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'https://www.mall.tv/18-miliard-pro-neziskovky-opravdu-jsou-sportovci-nebo-clovek-v-tisni-pijavice',
'md5': '1c4a37f080e1f3023103a7b43458e518',
'info_dict': {
'id': 't0zzt0',
'display_id': '18-miliard-pro-neziskovky-opravdu-jsou-sportovci-nebo-clovek-v-tisni-pijavice',
'ext': 'mp4',
'title': '18 miliard pro neziskovky. Opravdu jsou sportovci nebo Člověk v tísni pijavice?',
'description': 'md5:db7d5744a4bd4043d9d98324aa72ab35',
'duration': 216,
'timestamp': 1538870400,
'upload_date': '20181007',
'view_count': int,
}
}, {
'url': 'https://www.mall.tv/kdo-to-plati/18-miliard-pro-neziskovky-opravdu-jsou-sportovci-nebo-clovek-v-tisni-pijavice',
'only_matching': True,
}, {
'url': 'https://sk.mall.tv/gejmhaus/reklamacia-nehreje-vyrobnik-tepla-alebo-spekacka',
'only_matching': True,
}]
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(
url, display_id, headers=self.geo_verification_headers())
video = self._parse_json(self._search_regex(
r'videoObject\s*=\s*JSON\.parse\(JSON\.stringify\(({.+?})\)\);',
webpage, 'video object'), display_id)
video_source = video['VideoSource']
video_id = self._search_regex(
r'/([\da-z]+)/index\b', video_source, 'video id')
formats = self._extract_m3u8_formats(
video_source + '.m3u8', video_id, 'mp4', 'm3u8_native')
self._sort_formats(formats)
subtitles = {}
for s in (video.get('Subtitles') or {}):
s_url = s.get('Url')
if not s_url:
continue
subtitles.setdefault(s.get('Language') or 'cz', []).append({
'url': s_url,
})
entity_counts = video.get('EntityCounts') or {}
def get_count(k):
v = entity_counts.get(k + 's') or {}
return int_or_none(dict_get(v, ('Count', 'StrCount')))
info = self._search_json_ld(webpage, video_id, default={})
return merge_dicts({
'id': video_id,
'display_id': display_id,
'title': video.get('Title'),
'description': clean_html(video.get('Description')),
'thumbnail': video.get('ThumbnailUrl'),
'formats': formats,
'subtitles': subtitles,
'duration': int_or_none(video.get('DurationSeconds')) or parse_duration(video.get('Duration')),
'view_count': get_count('View'),
'like_count': get_count('Like'),
'dislike_count': get_count('Dislike'),
'average_rating': float_or_none(try_get(video, lambda x: x['EntityRating']['AvarageRate'])),
'comment_count': get_count('Comment'),
}, info)
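# Illustrative sketch (added; not part of the extractor) of how the
# _VALID_URL pattern above captures the display id; the base-class helper
# self._match_id(url) performs the equivalent match.
def _demo_match_display_id():
    import re
    url = ('https://www.mall.tv/kdo-to-plati/'
           '18-miliard-pro-neziskovky-opravdu-jsou-sportovci-nebo-clovek-v-tisni-pijavice')
    # The greedy (?:[^/]+/)* consumes intermediate path segments, so the
    # named group always holds the final segment.
    return re.match(MallTVIE._VALID_URL, url).group('id')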
|
osvalr/odoo
|
refs/heads/8.0
|
addons/crm_partner_assign/crm_lead.py
|
221
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
from openerp.tools.safe_eval import safe_eval as eval
class crm_lead(osv.osv):
_inherit = 'crm.lead'
def get_interested_action(self, cr, uid, interested, context=None):
try:
model, action_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'crm_partner_assign', 'crm_lead_channel_interested_act')
except ValueError:
raise osv.except_osv(_('Error!'), _("The CRM Channel Interested Action is missing"))
action = self.pool[model].read(cr, uid, [action_id], context=context)[0]
action_context = eval(action['context'])
action_context['interested'] = interested
action['context'] = str(action_context)
return action
def case_interested(self, cr, uid, ids, context=None):
return self.get_interested_action(cr, uid, True, context=context)
def case_disinterested(self, cr, uid, ids, context=None):
return self.get_interested_action(cr, uid, False, context=context)
def assign_salesman_of_assigned_partner(self, cr, uid, ids, context=None):
salesmans_leads = {}
for lead in self.browse(cr, uid, ids, context=context):
if (lead.stage_id.probability > 0 and lead.stage_id.probability < 100) or lead.stage_id.sequence == 1:
if lead.partner_assigned_id and lead.partner_assigned_id.user_id and lead.partner_assigned_id.user_id != lead.user_id:
salesman_id = lead.partner_assigned_id.user_id.id
if salesmans_leads.get(salesman_id):
salesmans_leads[salesman_id].append(lead.id)
else:
salesmans_leads[salesman_id] = [lead.id]
for salesman_id, lead_ids in salesmans_leads.items():
salesteam_id = self.on_change_user(cr, uid, lead_ids, salesman_id, context=None)['value'].get('section_id')
self.write(cr, uid, lead_ids, {'user_id': salesman_id, 'section_id': salesteam_id}, context=context)
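# Illustrative sketch (added; not part of the original addon). The
# dict-of-lists accumulation in assign_salesman_of_assigned_partner is the
# classic grouping pattern; collections.defaultdict expresses it directly.
def _demo_group_by_salesman(pairs):
    # pairs: iterable of (salesman_id, lead_id) tuples
    from collections import defaultdict
    grouped = defaultdict(list)
    for salesman_id, lead_id in pairs:
        grouped[salesman_id].append(lead_id)
    return dict(grouped)
# _demo_group_by_salesman([(1, 10), (1, 11), (2, 12)]) -> {1: [10, 11], 2: [12]}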
|
markkerzner/nn_kove
|
refs/heads/master
|
hadoop/src/contrib/hod/hodlib/Common/xmlrpc.py
|
182
|
#Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import xmlrpclib, time, random, signal
from hodlib.Common.util import hodInterrupt, HodInterruptException
class hodXRClient(xmlrpclib.ServerProxy):
def __init__(self, uri, transport=None, encoding=None, verbose=0,
allow_none=0, installSignalHandlers=1, retryRequests=True, timeOut=15):
xmlrpclib.ServerProxy.__init__(self, uri, transport, encoding, verbose,
allow_none)
self.__retryRequests = retryRequests
self.__timeOut = timeOut
if (installSignalHandlers!=0):
self.__set_alarm()
def __set_alarm(self):
def alarm_handler(sigNum, sigHandler):
raise Exception("XML-RPC socket timeout.")
signal.signal(signal.SIGALRM, alarm_handler)
def __request(self, methodname, params):
response = None
retryWaitTime = 5 + random.randint(0, 5)
for i in range(0, 30):
signal.alarm(self.__timeOut)
try:
response = self._ServerProxy__request(methodname, params)
signal.alarm(0)
break
except Exception:
if self.__retryRequests:
if hodInterrupt.isSet():
raise HodInterruptException()
time.sleep(retryWaitTime)
else:
raise Exception("hodXRClientTimeout")
return response
def __getattr__(self, name):
# magic method dispatcher
return xmlrpclib._Method(self.__request, name)
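# Illustrative sketch (added; not part of the original module) of the
# alarm-based timeout pattern used by __request above, reduced to its
# essentials. Unix-only: signal.SIGALRM is unavailable on Windows.
def _demo_call_with_timeout(func, seconds):
    def _handler(signum, frame):
        raise Exception("timed out after %d seconds" % seconds)
    previous = signal.signal(signal.SIGALRM, _handler)
    signal.alarm(seconds)            # arm the one-shot timer
    try:
        return func()
    finally:
        signal.alarm(0)              # disarm on success or failure
        signal.signal(signal.SIGALRM, previous)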
|
ZiminGrigory/qreal
|
refs/heads/master
|
plugins/tools/visualInterpreter/examples/robotsCodeGeneration/reactionsStorage/InitialBlockGenerator.py
|
12
|
template = "#include \"kernel.h\"\n#include \"ecrobot_interface.h\"\n@@BALANCER@@\n@@VARIABLES@@\n\nvoid ecrobot_device_initialize(void)\n{\n@@INITHOOKS@@\n}\n\nvoid ecrobot_device_terminate(void)\n{\n@@TERMINATEHOOKS@@\n}\n\n/* nxtOSEK hook to be invoked from an ISR in category 2 */\nvoid user_1ms_isr_type2(void){ /* do nothing */ }\n\n@@CODE@@"
task_template = "TASK(OSEK_Task_Number_0)\n{\n@@CODE@@\n}"
template = template.replace("@@CODE@@", task_template)
number_of_ports = 4
port_values = [initBlock.port_1, initBlock.port_2, initBlock.port_3, initBlock.port_4]
for i in xrange(number_of_ports):
init_ecrobot_color_sensor_port_s = "ecrobot_init_nxtcolorsensor(NXT_PORT_S"
if port_values[i] == "Ультразвуковой сенсор":
init_code.append("ecrobot_init_sonar_sensor(NXT_PORT_S" + str(i + 1) + ");\n")
terminate_code.append("ecrobot_term_sonar_sensor(NXT_PORT_S" + str(i + 1) + ");\n")
port_values[i] = "ecrobot_get_sonar_sensor(NXT_PORT_S"
elif port_values[i] == "Сенсор цвета (все цвета)":
init_code.append(init_ecrobot_color_sensor_port_s + str(i + 1) +", NXT_LIGHTSENSOR_WHITE);\n")
terminate_code.append("ecrobot_term_nxtcolorsensor(NXT_PORT_S" + str(i + 1) + ");\n")
port_values[i] = "ecrobot_get_light_sensor(NXT_PORT_S"
elif port_values[i] == "Сенсор цвета (красный)":
init_code.append(init_ecrobot_color_sensor_port_s + str(i + 1) + ", NXT_LIGHTSENSOR_RED);\n")
terminate_code.append("ecrobot_term_nxtcolorsensor(NXT_PORT_S" + str(i + 1) + ");\n")
port_values[i] = "ecrobot_get_light_sensor(NXT_PORT_S"
elif port_values[i] == "Сенсор цвета (зеленый)":
init_code.append(init_ecrobot_color_sensor_port_s + str(i + 1) + ", NXT_LIGHTSENSOR_GREEN);\n")
terminate_code.append("ecrobot_term_nxtcolorsensor(NXT_PORT_S" + str(i + 1) + ");\n")
port_values[i] = "ecrobot_get_light_sensor(NXT_PORT_S"
elif port_values[i] == "Сенсор цвета (синий)":
init_code.append(init_ecrobot_color_sensor_port_s + str(i + 1) + ", NXT_LIGHTSENSOR_BLUE);\n")
terminate_code.append("ecrobot_term_nxtcolorsensor(NXT_PORT_S" + str(i + 1) + ");\n")
port_values[i] = "ecrobot_get_light_sensor(NXT_PORT_S"
elif port_values[i] == "Сенсор цвета (пассивный)":
init_code.append(init_ecrobot_color_sensor_port_s + str(i + 1) + ", NXT_COLORSENSOR);\n")
terminate_code.append("ecrobot_term_nxtcolorsensor(NXT_PORT_S" + str(i + 1) + ");\n")
port_values[i] = "ecrobot_get_light_sensor(NXT_PORT_S"
else:
port_values[i] = "ecrobot_get_touch_sensor(NXT_PORT_S"
initBlock.id = max_used_id
cur_node_is_processed = True
|
Teamxrtc/webrtc-streaming-node
|
refs/heads/master
|
third_party/webrtc/src/chromium/src/third_party/pycoverage/coverage/__init__.py
|
208
|
"""Code coverage measurement for Python.
Ned Batchelder
http://nedbatchelder.com/code/coverage
"""
from coverage.version import __version__, __url__
from coverage.control import coverage, process_startup
from coverage.data import CoverageData
from coverage.cmdline import main, CoverageScript
from coverage.misc import CoverageException
# Module-level functions. The original API to this module was based on
# functions defined directly in the module, with a singleton of the coverage()
# class. That design hampered programmability, so the current api uses
# explicitly-created coverage objects. But for backward compatibility, here we
# define the top-level functions to create the singleton when they are first
# called.
# Singleton object for use with module-level functions. The singleton is
# created as needed when one of the module-level functions is called.
_the_coverage = None
def _singleton_method(name):
"""Return a function to the `name` method on a singleton `coverage` object.
The singleton object is created the first time one of these functions is
called.
"""
# Disable pylint msg W0612, because a bunch of variables look unused, but
# they're accessed via locals().
# pylint: disable=W0612
def wrapper(*args, **kwargs):
"""Singleton wrapper around a coverage method."""
global _the_coverage
if not _the_coverage:
_the_coverage = coverage(auto_data=True)
return getattr(_the_coverage, name)(*args, **kwargs)
import inspect
meth = getattr(coverage, name)
args, varargs, kw, defaults = inspect.getargspec(meth)
argspec = inspect.formatargspec(args[1:], varargs, kw, defaults)
docstring = meth.__doc__
wrapper.__doc__ = ("""\
A first-use-singleton wrapper around coverage.%(name)s.
This wrapper is provided for backward compatibility with legacy code.
New code should use coverage.%(name)s directly.
%(name)s%(argspec)s:
%(docstring)s
""" % locals()
)
return wrapper
# Define the module-level functions.
use_cache = _singleton_method('use_cache')
start = _singleton_method('start')
stop = _singleton_method('stop')
erase = _singleton_method('erase')
exclude = _singleton_method('exclude')
analysis = _singleton_method('analysis')
analysis2 = _singleton_method('analysis2')
report = _singleton_method('report')
annotate = _singleton_method('annotate')
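# Hypothetical usage sketch (added) contrasting the two APIs described above;
# run_my_code is a placeholder, and nothing here executes at import time.
def _demo_apis():
    # Legacy module-level API -- the singleton is created on first call:
    #     import coverage
    #     coverage.start(); run_my_code(); coverage.stop(); coverage.report()
    # Preferred explicit-object API:
    #     cov = coverage(auto_data=True)
    #     cov.start(); run_my_code(); cov.stop(); cov.report()
    pass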
# On Windows, we encode and decode deep enough that something goes wrong and
# the encodings.utf_8 module is loaded and then unloaded, I don't know why.
# Adding a reference here prevents it from being unloaded. Yuk.
import encodings.utf_8
# Because of the "from coverage.control import fooey" lines at the top of the
# file, there's an entry for coverage.coverage in sys.modules, mapped to None.
# This makes some inspection tools (like pydoc) unable to find the class
# coverage.coverage. So remove that entry.
import sys
try:
del sys.modules['coverage.coverage']
except KeyError:
pass
# COPYRIGHT AND LICENSE
#
# Copyright 2001 Gareth Rees. All rights reserved.
# Copyright 2004-2013 Ned Batchelder. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
|
seann1/portfolio5
|
refs/heads/master
|
.meteor/dev_bundle/python/Lib/distutils/command/bdist_dumb.py
|
151
|
"""distutils.command.bdist_dumb
Implements the Distutils 'bdist_dumb' command (create a "dumb" built
distribution -- i.e., just an archive to be unpacked under $prefix or
$exec_prefix)."""
__revision__ = "$Id$"
import os
from sysconfig import get_python_version
from distutils.util import get_platform
from distutils.core import Command
from distutils.dir_util import remove_tree, ensure_relative
from distutils.errors import DistutilsPlatformError
from distutils import log
class bdist_dumb (Command):
description = 'create a "dumb" built distribution'
user_options = [('bdist-dir=', 'd',
"temporary directory for creating the distribution"),
('plat-name=', 'p',
"platform name to embed in generated filenames "
"(default: %s)" % get_platform()),
('format=', 'f',
"archive format to create (tar, ztar, gztar, zip)"),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
('relative', None,
"build the archive using relative paths"
"(default: false)"),
('owner=', 'u',
"Owner name used when creating a tar file"
" [default: current user]"),
('group=', 'g',
"Group name used when creating a tar file"
" [default: current group]"),
]
boolean_options = ['keep-temp', 'skip-build', 'relative']
default_format = { 'posix': 'gztar',
'nt': 'zip',
'os2': 'zip' }
def initialize_options (self):
self.bdist_dir = None
self.plat_name = None
self.format = None
self.keep_temp = 0
self.dist_dir = None
self.skip_build = None
self.relative = 0
self.owner = None
self.group = None
def finalize_options(self):
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'dumb')
if self.format is None:
try:
self.format = self.default_format[os.name]
except KeyError:
raise DistutilsPlatformError, \
("don't know how to create dumb built distributions " +
"on platform %s") % os.name
self.set_undefined_options('bdist',
('dist_dir', 'dist_dir'),
('plat_name', 'plat_name'),
('skip_build', 'skip_build'))
def run(self):
if not self.skip_build:
self.run_command('build')
install = self.reinitialize_command('install', reinit_subcommands=1)
install.root = self.bdist_dir
install.skip_build = self.skip_build
install.warn_dir = 0
log.info("installing to %s" % self.bdist_dir)
self.run_command('install')
# And make an archive relative to the root of the
# pseudo-installation tree.
archive_basename = "%s.%s" % (self.distribution.get_fullname(),
self.plat_name)
# OS/2 objects to any ":" characters in a filename (such as when
# a timestamp is used in a version) so change them to hyphens.
if os.name == "os2":
archive_basename = archive_basename.replace(":", "-")
pseudoinstall_root = os.path.join(self.dist_dir, archive_basename)
if not self.relative:
archive_root = self.bdist_dir
else:
if (self.distribution.has_ext_modules() and
(install.install_base != install.install_platbase)):
raise DistutilsPlatformError, \
("can't make a dumb built distribution where "
"base and platbase are different (%s, %s)"
% (repr(install.install_base),
repr(install.install_platbase)))
else:
archive_root = os.path.join(self.bdist_dir,
ensure_relative(install.install_base))
# Make the archive
filename = self.make_archive(pseudoinstall_root,
self.format, root_dir=archive_root,
owner=self.owner, group=self.group)
if self.distribution.has_ext_modules():
pyversion = get_python_version()
else:
pyversion = 'any'
self.distribution.dist_files.append(('bdist_dumb', pyversion,
filename))
if not self.keep_temp:
remove_tree(self.bdist_dir, dry_run=self.dry_run)
|
blademainer/intellij-community
|
refs/heads/master
|
python/testData/refactoring/move/moveNamespacePackage1/before/src/b.py
|
237
|
import nspkg.nssubpkg.a
print(nspkg.nssubpkg.a.VAR)
|
ltilve/ChromiumGStreamerBackend
|
refs/heads/master
|
third_party/mojo/src/mojo/public/tools/gn/last_commit_timestamp.py
|
21
|
#!/usr/bin/env python
#
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Outputs the timestamp of the last commit in a Git repository."""
import argparse
import subprocess
import sys
def get_timestamp(directory):
return subprocess.check_output(["git", "log", "-1", "--pretty=format:%ct"],
cwd=directory)
def main():
parser = argparse.ArgumentParser(description="Prints the timestamp of the "
"last commit in a git repository")
parser.add_argument("--directory", nargs='?',
help="Directory of the git repository", default=".")
parser.add_argument("--output", nargs='?',
help="Output file, or stdout if omitted")
args = parser.parse_args()
output_file = sys.stdout
if args.output:
output_file = open(args.output, 'w')
with output_file:
# Print without newline so GN can read it.
output_file.write(get_timestamp(args.directory))
if __name__ == '__main__':
sys.exit(main())
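# Illustrative note (added; not part of the original tool): the output is a
# raw Unix timestamp string; if a readable time is ever needed it converts
# like this:
#     import datetime
#     datetime.datetime.utcfromtimestamp(int("0"))   # 1970-01-01 00:00:00 UTC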
|
crosswalk-project/chromium-crosswalk-efl
|
refs/heads/efl/crosswalk-10/39.0.2171.19
|
third_party/closure_linter/closure_linter/gjslint.py
|
95
|
#!/usr/bin/env python
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Checks JavaScript files for common style guide violations.
gjslint.py is designed to be used as a PRESUBMIT script to check for javascript
style guide violations. As of now, it checks for the following violations:
* Missing and extra spaces
* Lines longer than 80 characters
* Missing newline at end of file
* Missing semicolon after function declaration
* Valid JsDoc including parameter matching
Someday it will validate to the best of its ability against the entirety of the
JavaScript style guide.
This file is a front end that parses arguments and flags. The core of the code
is in tokenizer.py and checker.py.
"""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)',
'nnaze@google.com (Nathan Naze)',)
import errno
import itertools
import os
import platform
import re
import sys
import time
import gflags as flags
from closure_linter import errorrecord
from closure_linter import runner
from closure_linter.common import erroraccumulator
from closure_linter.common import simplefileflags as fileflags
# Attempt import of multiprocessing (should be available in Python 2.6 and up).
try:
# pylint: disable=g-import-not-at-top
import multiprocessing
except ImportError:
multiprocessing = None
FLAGS = flags.FLAGS
flags.DEFINE_boolean('unix_mode', False,
'Whether to emit warnings in standard unix format.')
flags.DEFINE_boolean('beep', True, 'Whether to beep when errors are found.')
flags.DEFINE_boolean('time', False, 'Whether to emit timing statistics.')
flags.DEFINE_boolean('quiet', False, 'Whether to minimize logged messages. '
'Most useful for per-file linting, such as that performed '
'by the presubmit linter service.')
flags.DEFINE_boolean('check_html', False,
'Whether to check javascript in html files.')
flags.DEFINE_boolean('summary', False,
'Whether to show an error count summary.')
flags.DEFINE_list('additional_extensions', None, 'List of additional file '
'extensions (not js) that should be treated as '
'JavaScript files.')
flags.DEFINE_boolean('multiprocess',
                     platform.system() == 'Linux' and bool(multiprocessing),
'Whether to attempt parallelized linting using the '
'multiprocessing module. Enabled by default on Linux '
'if the multiprocessing module is present (Python 2.6+). '
'Otherwise disabled by default. '
'Disabling may make debugging easier.')
flags.ADOPT_module_key_flags(fileflags)
flags.ADOPT_module_key_flags(runner)
GJSLINT_ONLY_FLAGS = ['--unix_mode', '--beep', '--nobeep', '--time',
'--check_html', '--summary', '--quiet']
def _MultiprocessCheckPaths(paths):
"""Run _CheckPath over mutltiple processes.
Tokenization, passes, and checks are expensive operations. Running in a
single process, they can only run on one CPU/core. Instead,
shard out linting over all CPUs with multiprocessing to parallelize.
Args:
paths: paths to check.
Yields:
errorrecord.ErrorRecords for any found errors.
"""
pool = multiprocessing.Pool()
path_results = pool.imap(_CheckPath, paths)
for results in path_results:
for result in results:
yield result
# Force destruct before returning, as this can sometimes raise spurious
# "interrupted system call" (EINTR), which we can ignore.
try:
pool.close()
pool.join()
del pool
except OSError as err:
    if err.errno != errno.EINTR:
      raise
def _CheckPaths(paths):
"""Run _CheckPath on all paths in one thread.
Args:
paths: paths to check.
Yields:
errorrecord.ErrorRecords for any found errors.
"""
for path in paths:
results = _CheckPath(path)
for record in results:
yield record
def _CheckPath(path):
"""Check a path and return any errors.
Args:
path: paths to check.
Returns:
A list of errorrecord.ErrorRecords for any found errors.
"""
error_handler = erroraccumulator.ErrorAccumulator()
runner.Run(path, error_handler)
make_error_record = lambda err: errorrecord.MakeErrorRecord(path, err)
return map(make_error_record, error_handler.GetErrors())
def _GetFilePaths(argv):
suffixes = ['.js']
if FLAGS.additional_extensions:
suffixes += ['.%s' % ext for ext in FLAGS.additional_extensions]
if FLAGS.check_html:
suffixes += ['.html', '.htm']
return fileflags.GetFileList(argv, 'JavaScript', suffixes)
# Error printing functions
def _PrintFileSummary(paths, records):
"""Print a detailed summary of the number of errors in each file."""
paths = list(paths)
paths.sort()
for path in paths:
path_errors = [e for e in records if e.path == path]
print '%s: %d' % (path, len(path_errors))
def _PrintFileSeparator(path):
print '----- FILE : %s -----' % path
def _PrintSummary(paths, error_records):
"""Print a summary of the number of errors and files."""
error_count = len(error_records)
all_paths = set(paths)
all_paths_count = len(all_paths)
  if error_count == 0:
print '%d files checked, no errors found.' % all_paths_count
new_error_count = len([e for e in error_records if e.new_error])
error_paths = set([e.path for e in error_records])
error_paths_count = len(error_paths)
no_error_paths_count = all_paths_count - error_paths_count
if (error_count or new_error_count) and not FLAGS.quiet:
error_noun = 'error' if error_count == 1 else 'errors'
new_error_noun = 'error' if new_error_count == 1 else 'errors'
error_file_noun = 'file' if error_paths_count == 1 else 'files'
ok_file_noun = 'file' if no_error_paths_count == 1 else 'files'
print ('Found %d %s, including %d new %s, in %d %s (%d %s OK).' %
(error_count,
error_noun,
new_error_count,
new_error_noun,
error_paths_count,
error_file_noun,
no_error_paths_count,
ok_file_noun))
def _PrintErrorRecords(error_records):
"""Print error records strings in the expected format."""
current_path = None
for record in error_records:
if current_path != record.path:
current_path = record.path
if not FLAGS.unix_mode:
_PrintFileSeparator(current_path)
print record.error_string
def _FormatTime(t):
"""Formats a duration as a human-readable string.
Args:
t: A duration in seconds.
Returns:
A formatted duration string.
"""
if t < 1:
return '%dms' % round(t * 1000)
else:
return '%.2fs' % t
def main(argv=None):
"""Main function.
Args:
argv: Sequence of command line arguments.
"""
if argv is None:
argv = flags.FLAGS(sys.argv)
if FLAGS.time:
start_time = time.time()
suffixes = ['.js']
if FLAGS.additional_extensions:
suffixes += ['.%s' % ext for ext in FLAGS.additional_extensions]
if FLAGS.check_html:
suffixes += ['.html', '.htm']
paths = fileflags.GetFileList(argv, 'JavaScript', suffixes)
if FLAGS.multiprocess:
records_iter = _MultiprocessCheckPaths(paths)
else:
records_iter = _CheckPaths(paths)
records_iter, records_iter_copy = itertools.tee(records_iter, 2)
_PrintErrorRecords(records_iter_copy)
error_records = list(records_iter)
_PrintSummary(paths, error_records)
exit_code = 0
# If there are any errors
if error_records:
exit_code += 1
# If there are any new errors
if [r for r in error_records if r.new_error]:
exit_code += 2
if exit_code:
if FLAGS.summary:
_PrintFileSummary(paths, error_records)
if FLAGS.beep:
# Make a beep noise.
sys.stdout.write(chr(7))
# Write out instructions for using fixjsstyle script to fix some of the
# reported errors.
fix_args = []
for flag in sys.argv[1:]:
for f in GJSLINT_ONLY_FLAGS:
if flag.startswith(f):
break
else:
fix_args.append(flag)
if not FLAGS.quiet:
print """
Some of the errors reported by GJsLint may be auto-fixable using the script
fixjsstyle. Please double check any changes it makes and report any bugs. The
script can be run by executing:
fixjsstyle %s """ % ' '.join(fix_args)
if FLAGS.time:
print 'Done in %s.' % _FormatTime(time.time() - start_time)
sys.exit(exit_code)
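# Illustrative helper (added; not part of gjslint). The exit status built
# above is a two-bit mask, so a caller could decode it like this hypothetical
# function does:
def _DecodeExitCode(code):
  return {
      'had_errors': bool(code & 1),      # bit 0: some errors were found
      'had_new_errors': bool(code & 2),  # bit 1: some of them were new
  }
# _DecodeExitCode(3) -> {'had_errors': True, 'had_new_errors': True}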
if __name__ == '__main__':
main()
|
partofthething/home-assistant
|
refs/heads/dev
|
homeassistant/components/home_connect/light.py
|
7
|
"""Provides a light for Home Connect."""
import logging
from math import ceil
from homeconnect.api import HomeConnectError
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_HS_COLOR,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
LightEntity,
)
from homeassistant.const import CONF_ENTITIES
import homeassistant.util.color as color_util
from .const import (
ATTR_VALUE,
BSH_AMBIENT_LIGHT_BRIGHTNESS,
BSH_AMBIENT_LIGHT_COLOR,
BSH_AMBIENT_LIGHT_COLOR_CUSTOM_COLOR,
BSH_AMBIENT_LIGHT_CUSTOM_COLOR,
BSH_AMBIENT_LIGHT_ENABLED,
COOKING_LIGHTING,
COOKING_LIGHTING_BRIGHTNESS,
DOMAIN,
)
from .entity import HomeConnectEntity
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Home Connect light."""
def get_entities():
"""Get a list of entities."""
entities = []
hc_api = hass.data[DOMAIN][config_entry.entry_id]
for device_dict in hc_api.devices:
entity_dicts = device_dict.get(CONF_ENTITIES, {}).get("light", [])
entity_list = [HomeConnectLight(**d) for d in entity_dicts]
entities += entity_list
return entities
async_add_entities(await hass.async_add_executor_job(get_entities), True)
class HomeConnectLight(HomeConnectEntity, LightEntity):
"""Light for Home Connect."""
def __init__(self, device, desc, ambient):
"""Initialize the entity."""
super().__init__(device, desc)
self._state = None
self._brightness = None
self._hs_color = None
self._ambient = ambient
if self._ambient:
self._brightness_key = BSH_AMBIENT_LIGHT_BRIGHTNESS
self._key = BSH_AMBIENT_LIGHT_ENABLED
self._custom_color_key = BSH_AMBIENT_LIGHT_CUSTOM_COLOR
self._color_key = BSH_AMBIENT_LIGHT_COLOR
else:
self._brightness_key = COOKING_LIGHTING_BRIGHTNESS
self._key = COOKING_LIGHTING
self._custom_color_key = None
self._color_key = None
@property
def is_on(self):
"""Return true if the light is on."""
return bool(self._state)
@property
def brightness(self):
"""Return the brightness of the light."""
return self._brightness
@property
def hs_color(self):
"""Return the color property."""
return self._hs_color
@property
def supported_features(self):
"""Flag supported features."""
if self._ambient:
return SUPPORT_BRIGHTNESS | SUPPORT_COLOR
return SUPPORT_BRIGHTNESS
async def async_turn_on(self, **kwargs):
"""Switch the light on, change brightness, change color."""
if self._ambient:
_LOGGER.debug("Switching ambient light on for: %s", self.name)
try:
await self.hass.async_add_executor_job(
self.device.appliance.set_setting, self._key, True
)
except HomeConnectError as err:
_LOGGER.error("Error while trying to turn on ambient light: %s", err)
return
if ATTR_BRIGHTNESS in kwargs or ATTR_HS_COLOR in kwargs:
try:
await self.hass.async_add_executor_job(
self.device.appliance.set_setting,
self._color_key,
BSH_AMBIENT_LIGHT_COLOR_CUSTOM_COLOR,
)
except HomeConnectError as err:
_LOGGER.error("Error while trying selecting customcolor: %s", err)
if self._brightness is not None:
brightness = 10 + ceil(self._brightness / 255 * 90)
if ATTR_BRIGHTNESS in kwargs:
brightness = 10 + ceil(kwargs[ATTR_BRIGHTNESS] / 255 * 90)
hs_color = kwargs.get(ATTR_HS_COLOR, self._hs_color)
if hs_color is not None:
rgb = color_util.color_hsv_to_RGB(*hs_color, brightness)
hex_val = color_util.color_rgb_to_hex(rgb[0], rgb[1], rgb[2])
try:
await self.hass.async_add_executor_job(
self.device.appliance.set_setting,
self._custom_color_key,
f"#{hex_val}",
)
except HomeConnectError as err:
                        _LOGGER.error(
                            "Error while setting the color: %s", err
                        )
elif ATTR_BRIGHTNESS in kwargs:
_LOGGER.debug("Changing brightness for: %s", self.name)
brightness = 10 + ceil(kwargs[ATTR_BRIGHTNESS] / 255 * 90)
try:
await self.hass.async_add_executor_job(
self.device.appliance.set_setting, self._brightness_key, brightness
)
except HomeConnectError as err:
_LOGGER.error("Error while trying set the brightness: %s", err)
else:
_LOGGER.debug("Switching light on for: %s", self.name)
try:
await self.hass.async_add_executor_job(
self.device.appliance.set_setting, self._key, True
)
except HomeConnectError as err:
_LOGGER.error("Error while trying to turn on light: %s", err)
self.async_entity_update()
async def async_turn_off(self, **kwargs):
"""Switch the light off."""
_LOGGER.debug("Switching light off for: %s", self.name)
try:
await self.hass.async_add_executor_job(
self.device.appliance.set_setting, self._key, False
)
except HomeConnectError as err:
_LOGGER.error("Error while trying to turn off light: %s", err)
self.async_entity_update()
async def async_update(self):
"""Update the light's status."""
if self.device.appliance.status.get(self._key, {}).get(ATTR_VALUE) is True:
self._state = True
elif self.device.appliance.status.get(self._key, {}).get(ATTR_VALUE) is False:
self._state = False
else:
self._state = None
_LOGGER.debug("Updated, new light state: %s", self._state)
if self._ambient:
color = self.device.appliance.status.get(self._custom_color_key, {})
if not color:
self._hs_color = None
self._brightness = None
else:
colorvalue = color.get(ATTR_VALUE)[1:]
rgb = color_util.rgb_hex_to_rgb_list(colorvalue)
hsv = color_util.color_RGB_to_hsv(rgb[0], rgb[1], rgb[2])
self._hs_color = [hsv[0], hsv[1]]
self._brightness = ceil((hsv[2] - 10) * 255 / 90)
_LOGGER.debug("Updated, new brightness: %s", self._brightness)
else:
brightness = self.device.appliance.status.get(self._brightness_key, {})
            if not brightness:
self._brightness = None
else:
self._brightness = ceil((brightness.get(ATTR_VALUE) - 10) * 255 / 90)
_LOGGER.debug("Updated, new brightness: %s", self._brightness)
|
maxive/erp
|
refs/heads/master
|
addons/sms/models/res_partner.py
|
25
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models
class ResPartner(models.Model):
_inherit = 'res.partner'
def _get_default_sms_recipients(self):
""" Override of mail.thread method.
SMS recipients on partners are the partners themselves.
"""
return self
|
saurabh6790/omnitech-libs
|
refs/heads/master
|
webnotes/utils/file_lock.py
|
33
|
import os
from time import time
from webnotes.utils import get_site_path
class LockTimeoutError(Exception):
pass
def create_lock(name):
lock_path = get_lock_path(name)
if not check_lock(lock_path):
return touch_file(lock_path)
else:
return False
def touch_file(path):
with open(path, 'a'):
os.utime(path, None)
return True
def check_lock(path):
if not os.path.exists(path):
return False
if time() - os.path.getmtime(path) > 600:
raise LockTimeoutError(path)
return True
def delete_lock(name):
lock_path = get_lock_path(name)
try:
os.remove(lock_path)
except OSError:
pass
return True
def get_lock_path(name):
name = name.lower()
lock_path = get_site_path(name + '.lock')
return lock_path
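# Hypothetical demo (added; not part of the original module) of the lock
# lifecycle above, shown against a temp directory instead of the site
# directory returned by get_site_path:
def _demo_lock_lifecycle():
    import tempfile
    path = os.path.join(tempfile.gettempdir(), 'demo.lock')
    touch_file(path)          # acquire: create or refresh the file
    fresh = check_lock(path)  # True while younger than 600s, else LockTimeoutError
    os.remove(path)           # release
    return fresh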
|
jjmleiro/hue
|
refs/heads/master
|
desktop/core/ext-py/pycrypto-2.6.1/lib/Crypto/Random/__init__.py
|
126
|
# -*- coding: utf-8 -*-
#
# Random/__init__.py : PyCrypto random number generation
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
__revision__ = "$Id$"
__all__ = ['new']
from Crypto.Random import OSRNG
from Crypto.Random import _UserFriendlyRNG
def new(*args, **kwargs):
"""Return a file-like object that outputs cryptographically random bytes."""
return _UserFriendlyRNG.new(*args, **kwargs)
def atfork():
"""Call this whenever you call os.fork()"""
_UserFriendlyRNG.reinit()
def get_random_bytes(n):
"""Return the specified number of cryptographically-strong random bytes."""
return _UserFriendlyRNG.get_random_bytes(n)
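# Hypothetical usage sketch (added; assumes PyCrypto is installed). Both
# paths below yield cryptographically strong bytes; new() wraps them in a
# file-like object with a read() method.
def _demo_usage():
    rng = new()                        # file-like RNG
    from_stream = rng.read(16)         # read 16 random bytes
    direct = get_random_bytes(16)      # one-shot helper
    return from_stream, direct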
# vim:set ts=4 sw=4 sts=4 expandtab:
|
elainenaomi/sciwonc-dataflow-examples
|
refs/heads/master
|
dissertation2017/Experiment 1B/instances/10_2_workflow_full_10files_secondary_wmj_3sh_3rs_with_annot_with_proj_3s_range_old/work/ubuntu/pegasus/example_workflow/20170106T133456+0000/ConfigDB_TaskEvent_6.py
|
11
|
HOST = "ip-172-31-29-102.us-west-2.compute.internal:27017,ip-172-31-29-103.us-west-2.compute.internal:27017,ip-172-31-29-104.us-west-2.compute.internal:27017,ip-172-31-29-105.us-west-2.compute.internal:27017,ip-172-31-29-101.us-west-2.compute.internal:27017,ip-172-31-29-106.us-west-2.compute.internal:27017,ip-172-31-29-107.us-west-2.compute.internal:27017,ip-172-31-29-108.us-west-2.compute.internal:27017,ip-172-31-29-109.us-west-2.compute.internal:27017"
PORT = ""
USER = ""
PASSWORD = ""
DATABASE = "googler"
READ_PREFERENCE = "secondary"
WRITE_CONCERN = "majority"
COLLECTION_INPUT = "task_events"
COLLECTION_OUTPUT = "task_events_info"
PREFIX_COLUMN = "g_"
ATTRIBUTES = ["event type", "CPU request", "memory request"]
SORT = ["_id.filepath", "_id.numline"]
OPERATION_TYPE = "GROUP_BY_COLUMN"
COLUMN = "event type"
VALUE = ["6"]
INPUT_FILE = "task_events.dat"
OUTPUT_FILE = "task_events_info_6.dat"
|
mollstam/UnrealPy
|
refs/heads/master
|
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/pip-7.1.0/pip/_vendor/html5lib/filters/optionaltags.py
|
1727
|
from __future__ import absolute_import, division, unicode_literals
from . import _base
class Filter(_base.Filter):
def slider(self):
previous1 = previous2 = None
for token in self.source:
if previous1 is not None:
yield previous2, previous1, token
previous2 = previous1
previous1 = token
yield previous2, previous1, None
def __iter__(self):
for previous, token, next in self.slider():
type = token["type"]
if type == "StartTag":
if (token["data"] or
not self.is_optional_start(token["name"], previous, next)):
yield token
elif type == "EndTag":
if not self.is_optional_end(token["name"], next):
yield token
else:
yield token
def is_optional_start(self, tagname, previous, next):
type = next and next["type"] or None
        if tagname == 'html':
# An html element's start tag may be omitted if the first thing
# inside the html element is not a space character or a comment.
return type not in ("Comment", "SpaceCharacters")
elif tagname == 'head':
# A head element's start tag may be omitted if the first thing
# inside the head element is an element.
# XXX: we also omit the start tag if the head element is empty
if type in ("StartTag", "EmptyTag"):
return True
elif type == "EndTag":
return next["name"] == "head"
elif tagname == 'body':
# A body element's start tag may be omitted if the first thing
# inside the body element is not a space character or a comment,
# except if the first thing inside the body element is a script
# or style element and the node immediately preceding the body
# element is a head element whose end tag has been omitted.
if type in ("Comment", "SpaceCharacters"):
return False
elif type == "StartTag":
# XXX: we do not look at the preceding event, so we never omit
# the body element's start tag if it's followed by a script or
# a style element.
return next["name"] not in ('script', 'style')
else:
return True
elif tagname == 'colgroup':
# A colgroup element's start tag may be omitted if the first thing
# inside the colgroup element is a col element, and if the element
            # is not immediately preceded by another colgroup element whose
# end tag has been omitted.
if type in ("StartTag", "EmptyTag"):
# XXX: we do not look at the preceding event, so instead we never
# omit the colgroup element's end tag when it is immediately
# followed by another colgroup element. See is_optional_end.
return next["name"] == "col"
else:
return False
elif tagname == 'tbody':
# A tbody element's start tag may be omitted if the first thing
# inside the tbody element is a tr element, and if the element is
            # not immediately preceded by a tbody, thead, or tfoot element
# whose end tag has been omitted.
if type == "StartTag":
# omit the thead and tfoot elements' end tag when they are
# immediately followed by a tbody element. See is_optional_end.
if previous and previous['type'] == 'EndTag' and \
previous['name'] in ('tbody', 'thead', 'tfoot'):
return False
return next["name"] == 'tr'
else:
return False
return False
def is_optional_end(self, tagname, next):
type = next and next["type"] or None
if tagname in ('html', 'head', 'body'):
# An html element's end tag may be omitted if the html element
# is not immediately followed by a space character or a comment.
return type not in ("Comment", "SpaceCharacters")
elif tagname in ('li', 'optgroup', 'tr'):
# A li element's end tag may be omitted if the li element is
# immediately followed by another li element or if there is
# no more content in the parent element.
# An optgroup element's end tag may be omitted if the optgroup
# element is immediately followed by another optgroup element,
# or if there is no more content in the parent element.
# A tr element's end tag may be omitted if the tr element is
# immediately followed by another tr element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] == tagname
else:
return type == "EndTag" or type is None
elif tagname in ('dt', 'dd'):
# A dt element's end tag may be omitted if the dt element is
# immediately followed by another dt element or a dd element.
# A dd element's end tag may be omitted if the dd element is
# immediately followed by another dd element or a dt element,
# or if there is no more content in the parent element.
if type == "StartTag":
return next["name"] in ('dt', 'dd')
elif tagname == 'dd':
return type == "EndTag" or type is None
else:
return False
elif tagname == 'p':
# A p element's end tag may be omitted if the p element is
# immediately followed by an address, article, aside,
# blockquote, datagrid, dialog, dir, div, dl, fieldset,
# footer, form, h1, h2, h3, h4, h5, h6, header, hr, menu,
# nav, ol, p, pre, section, table, or ul, element, or if
# there is no more content in the parent element.
if type in ("StartTag", "EmptyTag"):
return next["name"] in ('address', 'article', 'aside',
'blockquote', 'datagrid', 'dialog',
'dir', 'div', 'dl', 'fieldset', 'footer',
'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
'header', 'hr', 'menu', 'nav', 'ol',
'p', 'pre', 'section', 'table', 'ul')
else:
return type == "EndTag" or type is None
elif tagname == 'option':
# An option element's end tag may be omitted if the option
# element is immediately followed by another option element,
            # or if it is immediately followed by an optgroup
# element, or if there is no more content in the parent
# element.
if type == "StartTag":
return next["name"] in ('option', 'optgroup')
else:
return type == "EndTag" or type is None
elif tagname in ('rt', 'rp'):
# An rt element's end tag may be omitted if the rt element is
# immediately followed by an rt or rp element, or if there is
# no more content in the parent element.
# An rp element's end tag may be omitted if the rp element is
# immediately followed by an rt or rp element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] in ('rt', 'rp')
else:
return type == "EndTag" or type is None
elif tagname == 'colgroup':
# A colgroup element's end tag may be omitted if the colgroup
# element is not immediately followed by a space character or
# a comment.
if type in ("Comment", "SpaceCharacters"):
return False
elif type == "StartTag":
# XXX: we also look for an immediately following colgroup
# element. See is_optional_start.
return next["name"] != 'colgroup'
else:
return True
elif tagname in ('thead', 'tbody'):
# A thead element's end tag may be omitted if the thead element
# is immediately followed by a tbody or tfoot element.
# A tbody element's end tag may be omitted if the tbody element
# is immediately followed by a tbody or tfoot element, or if
# there is no more content in the parent element.
# A tfoot element's end tag may be omitted if the tfoot element
# is immediately followed by a tbody element, or if there is no
# more content in the parent element.
# XXX: we never omit the end tag when the following element is
# a tbody. See is_optional_start.
if type == "StartTag":
return next["name"] in ['tbody', 'tfoot']
elif tagname == 'tbody':
return type == "EndTag" or type is None
else:
return False
elif tagname == 'tfoot':
# A tfoot element's end tag may be omitted if the tfoot element
# is immediately followed by a tbody element, or if there is no
# more content in the parent element.
# XXX: we never omit the end tag when the following element is
# a tbody. See is_optional_start.
if type == "StartTag":
return next["name"] == 'tbody'
else:
return type == "EndTag" or type is None
elif tagname in ('td', 'th'):
# A td element's end tag may be omitted if the td element is
# immediately followed by a td or th element, or if there is
# no more content in the parent element.
# A th element's end tag may be omitted if the th element is
# immediately followed by a td or th element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] in ('td', 'th')
else:
return type == "EndTag" or type is None
return False
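# Standalone sketch (added; illustrative names) of the windowing idea behind
# Filter.slider above: yield (previous, current, next) for every item,
# padding both ends with None.
def _demo_slider(items):
    previous = current = None
    for item in items:
        if current is not None:
            yield previous, current, item
        previous, current = current, item
    yield previous, current, None
# list(_demo_slider('abc'))
# -> [(None, 'a', 'b'), ('a', 'b', 'c'), ('b', 'c', None)]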
|
jcchin/Hyperloop_v2
|
refs/heads/master
|
src/hyperloop/Python/tests/test_tube_power.py
|
4
|
from openmdao.api import Group, Problem, Component
import numpy as np
from hyperloop.Python.tube import tube_power
def create_problem(component):
root = Group()
prob = Problem(root)
prob.root.add('comp',component)
return prob
class TestTubePower(object):
#Test for TubePower component
def test_case1(self):
comp = tube_power.TubePower()
prob = create_problem(comp)
prob.setup()
prob['comp.vac_power'] = 40.0
prob['comp.vac_energy_day'] = 40.0*24.0*60.0*60.0
prob['comp.prop_power'] = 300000.0
prob['comp.num_thrust'] = 5.0
prob['comp.time_thrust'] = 1.5
prob['comp.tube_temp'] = 320.0
prob['comp.elec_price'] = 0.13
prob.run()
assert np.isclose(prob['comp.tot_power'], 1540.00, rtol=0.1)
assert np.isclose(prob['comp.tot_energy'], 144000.625, rtol=0.1)
assert np.isclose(prob['comp.cost_pwr'],200.200, rtol=.01)
|
lmorchard/django
|
refs/heads/master
|
tests/max_lengths/models.py
|
438
|
from django.db import models
class PersonWithDefaultMaxLengths(models.Model):
email = models.EmailField()
vcard = models.FileField(upload_to='/tmp')
homepage = models.URLField()
avatar = models.FilePathField()
class PersonWithCustomMaxLengths(models.Model):
email = models.EmailField(max_length=250)
vcard = models.FileField(upload_to='/tmp', max_length=250)
homepage = models.URLField(max_length=250)
avatar = models.FilePathField(max_length=250)
|
mvsaha/blahb
|
refs/heads/master
|
blahb/test/test_timsort.py
|
1
|
import numpy as np
from numpy.testing import assert_array_equal as AAE
from ..timsort import timsort_
from .utils import N_TESTS
def test_timsort_rand():
for i in range(N_TESTS * 5):
n = np.random.randint(1, 1000)
labels = np.random.randint(-10, 10, size=n)
result = timsort_(labels.copy())
ref = np.argsort(labels, kind="mergesort")
AAE(result, ref)
|
fusionbox/django-shop
|
refs/heads/master
|
shop/views/__init__.py
|
15
|
# -*- coding: utf-8 -*-
from django import VERSION as django_version
from django.views.generic import (TemplateView, ListView, DetailView, View)
from django.views.generic.base import TemplateResponseMixin
class ShopTemplateView(TemplateView):
"""
    A class-based view for use within the shop (this allows us to keep the above
import magic in only one place)
As defined by
http://docs.djangoproject.com/en/dev/topics/class-based-views/
Stuff defined here (A.K.A this is a documentation proxy for the above
link):
---------------------------------------------------------------------
self.template_name : Name of the template to use for rendering
self.get_context_data(): Returns the context {} to render the template with
self.get(request, *args, **kwargs): called for GET methods
"""
class ShopListView(ListView):
"""
    This is just to abstract the "Django version switching" magic happening up
there
"""
class ShopDetailView(DetailView):
"""
    This is just to abstract the "Django version switching" magic happening up
there
"""
class ShopView(View):
"""
An abstraction of the basic view
"""
class ShopTemplateResponseMixin(TemplateResponseMixin):
"""
An abstraction to solve the import problem for the template response mixin
"""
|
sergei-maertens/django
|
refs/heads/master
|
django/db/models/__init__.py
|
9
|
from functools import wraps
from django.core.exceptions import ObjectDoesNotExist # NOQA
from django.db.models import signals # NOQA
from django.db.models.aggregates import * # NOQA
from django.db.models.deletion import ( # NOQA
CASCADE, DO_NOTHING, PROTECT, SET, SET_DEFAULT, SET_NULL, ProtectedError,
)
from django.db.models.expressions import ( # NOQA
Case, Expression, ExpressionWrapper, F, Func, Value, When,
)
from django.db.models.fields import * # NOQA
from django.db.models.fields.files import FileField, ImageField # NOQA
from django.db.models.fields.proxy import OrderWrt # NOQA
from django.db.models.indexes import * # NOQA
from django.db.models.lookups import Lookup, Transform # NOQA
from django.db.models.manager import Manager # NOQA
from django.db.models.query import ( # NOQA
Prefetch, Q, QuerySet, prefetch_related_objects,
)
# Imports that would create circular imports if sorted
from django.db.models.base import DEFERRED, Model # NOQA isort:skip
from django.db.models.fields.related import ( # NOQA isort:skip
ForeignKey, ForeignObject, OneToOneField, ManyToManyField,
ManyToOneRel, ManyToManyRel, OneToOneRel,
)
def permalink(func):
"""
Decorator that calls urls.reverse() to return a URL using parameters
returned by the decorated function "func".
"func" should be a function that returns a tuple in one of the
following formats:
(viewname, viewargs)
(viewname, viewargs, viewkwargs)
"""
from django.urls import reverse
@wraps(func)
def inner(*args, **kwargs):
bits = func(*args, **kwargs)
return reverse(bits[0], None, *bits[1:3])
return inner
|
cosmopod/MIT_6001
|
refs/heads/master
|
QUIZ_PROBLEM8.py
|
1
|
# -*- coding: utf-8 -*-
'''
Successive approximation is a general method in which, on each iteration of an algorithm,
we find a closer estimate of the answer we are seeking.
One class of successive approximation algorithms uses the idea of a fixed point.
If f(x) is a mathematical function, then finding the x such that f(x) = x gives us the fixed point of f.
'''
def fixedPoint(f, epsilon):
"""
f: a function of one argument that returns a float
epsilon: a small float
returns the best guess when that guess is less than epsilon
away from f(guess) or after 100 trials, whichever comes first.
"""
guess = 1.0
for i in range(100):
if abs(f(guess) - guess) < epsilon:
return guess
else:
guess = f(guess)
return guess
'''
Assuming you have corrected the implementation of the fixedPoint function,
we can use it to compute other useful things such as square roots.
'''
def sqrt(a):
def tryit(x):
return 0.5 * (a/x + x)
return fixedPoint(tryit, 0.0001)
'''
This code has a bug in it. You can fix this by correcting exactly one
line of the definition. Please do so in the box below.
'''
def babylon(a):
def test(x):
return 0.5 * ((a / x) + x)
return test
def sqrt(a):
return fixedPoint(babylon(a), 0.0001)
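'''
Usage demo (added for illustration): the Babylonian update test(x) = 0.5 * (a/x + x)
has sqrt(a) as its fixed point, so fixedPoint drives the guess to the root.
'''
if __name__ == '__main__':
    print(sqrt(2.0))   # ~1.4142 (within epsilon of the true root)
    print(sqrt(9.0))   # ~3.0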
|
tima/ansible
|
refs/heads/devel
|
lib/ansible/playbook/attribute.py
|
39
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from copy import deepcopy
class Attribute:
def __init__(
self,
isa=None,
private=False,
default=None,
required=False,
listof=None,
priority=0,
class_type=None,
always_post_validate=False,
inherit=True,
alias=None,
extend=False,
prepend=False,
):
"""
:class:`Attribute` specifies constraints for attributes of objects which
derive from playbook data. The attributes of the object are basically
a schema for the yaml playbook.
:kwarg isa: The type of the attribute. Allowable values are a string
representation of any yaml basic datatype, python class, or percent.
(Enforced at post-validation time).
:kwarg private: (not used)
:kwarg default: Default value if unspecified in the YAML document.
:kwarg required: Whether or not the YAML document must contain this field.
If the attribute is None when post-validated, an error will be raised.
:kwarg listof: If isa is set to "list", this can optionally be set to
ensure that all elements in the list are of the given type. Valid
values here are the same as those for isa.
:kwarg priority: The order in which the fields should be parsed. Generally
this does not need to be set, it is for rare situations where another
field depends on the fact that another field was parsed first.
:kwarg class_type: If isa is set to "class", this can be optionally set to
a class (not a string name). The YAML data for this field will be
passed to the __init__ method of that class during post validation and
the field will be an instance of that class.
:kwarg always_post_validate: Controls whether a field should be post
            validated or not (default: False).
:kwarg inherit: A boolean value, which controls whether the object
containing this field should attempt to inherit the value from its
parent object if the local value is None.
:kwarg alias: An alias to use for the attribute name, for situations where
the attribute name may conflict with a Python reserved word.
"""
self.isa = isa
self.private = private
self.default = default
self.required = required
self.listof = listof
self.priority = priority
self.class_type = class_type
self.always_post_validate = always_post_validate
self.inherit = inherit
self.alias = alias
self.extend = extend
self.prepend = prepend
if default is not None and self.isa in ('list', 'dict', 'set'):
self.default = deepcopy(default)
else:
self.default = default
def __eq__(self, other):
return other.priority == self.priority
def __ne__(self, other):
return other.priority != self.priority
# NB: higher priority numbers sort first
def __lt__(self, other):
return other.priority < self.priority
def __gt__(self, other):
return other.priority > self.priority
def __le__(self, other):
return other.priority <= self.priority
def __ge__(self, other):
return other.priority >= self.priority
class FieldAttribute(Attribute):
pass
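# Illustrative sketch (not part of the original module): attributes sort by
# priority, higher numbers first (see the intentionally reversed comparison
# operators above), so fields that others depend on can be parsed earlier.
if __name__ == '__main__':
    name = FieldAttribute(isa='string', priority=0)
    deps = FieldAttribute(isa='list', listof=str, priority=100)
    # sorted() relies on __lt__, which is inverted on purpose
    assert sorted([name, deps])[0] is deps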
|
pombredanne/redis-py
|
refs/heads/master
|
tests/test_pubsub.py
|
43
|
from __future__ import with_statement
import pytest
import time
import redis
from redis.exceptions import ConnectionError
from redis._compat import basestring, u, unichr
from .conftest import r as _redis_client
def wait_for_message(pubsub, timeout=0.1, ignore_subscribe_messages=False):
now = time.time()
timeout = now + timeout
while now < timeout:
message = pubsub.get_message(
ignore_subscribe_messages=ignore_subscribe_messages)
if message is not None:
return message
time.sleep(0.01)
now = time.time()
return None
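# Usage sketch (illustrative, not one of the original tests): the helper polls
# PubSub.get_message() in 10ms steps until a message arrives or the timeout
# elapses, e.g.
#   p = r.pubsub()
#   p.subscribe('foo')
#   wait_for_message(p)  # -> {'type': 'subscribe', 'channel': b'foo', ...}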
def make_message(type, channel, data, pattern=None):
return {
'type': type,
'pattern': pattern and pattern.encode('utf-8') or None,
'channel': channel.encode('utf-8'),
'data': data.encode('utf-8') if isinstance(data, basestring) else data
}
def make_subscribe_test_data(pubsub, type):
if type == 'channel':
return {
'p': pubsub,
'sub_type': 'subscribe',
'unsub_type': 'unsubscribe',
'sub_func': pubsub.subscribe,
'unsub_func': pubsub.unsubscribe,
'keys': ['foo', 'bar', u('uni') + unichr(4456) + u('code')]
}
elif type == 'pattern':
return {
'p': pubsub,
'sub_type': 'psubscribe',
'unsub_type': 'punsubscribe',
'sub_func': pubsub.psubscribe,
'unsub_func': pubsub.punsubscribe,
'keys': ['f*', 'b*', u('uni') + unichr(4456) + u('*')]
}
assert False, 'invalid subscribe type: %s' % type
class TestPubSubSubscribeUnsubscribe(object):
def _test_subscribe_unsubscribe(self, p, sub_type, unsub_type, sub_func,
unsub_func, keys):
for key in keys:
assert sub_func(key) is None
# should be a message for each channel/pattern we just subscribed to
for i, key in enumerate(keys):
assert wait_for_message(p) == make_message(sub_type, key, i + 1)
for key in keys:
assert unsub_func(key) is None
# should be a message for each channel/pattern we just unsubscribed
# from
for i, key in enumerate(keys):
i = len(keys) - 1 - i
assert wait_for_message(p) == make_message(unsub_type, key, i)
def test_channel_subscribe_unsubscribe(self, r):
kwargs = make_subscribe_test_data(r.pubsub(), 'channel')
self._test_subscribe_unsubscribe(**kwargs)
def test_pattern_subscribe_unsubscribe(self, r):
kwargs = make_subscribe_test_data(r.pubsub(), 'pattern')
self._test_subscribe_unsubscribe(**kwargs)
def _test_resubscribe_on_reconnection(self, p, sub_type, unsub_type,
sub_func, unsub_func, keys):
for key in keys:
assert sub_func(key) is None
# should be a message for each channel/pattern we just subscribed to
for i, key in enumerate(keys):
assert wait_for_message(p) == make_message(sub_type, key, i + 1)
# manually disconnect
p.connection.disconnect()
# calling get_message again reconnects and resubscribes
# note, we may not re-subscribe to channels in exactly the same order
# so we have to do some extra checks to make sure we got them all
messages = []
for i in range(len(keys)):
messages.append(wait_for_message(p))
unique_channels = set()
assert len(messages) == len(keys)
for i, message in enumerate(messages):
assert message['type'] == sub_type
assert message['data'] == i + 1
assert isinstance(message['channel'], bytes)
channel = message['channel'].decode('utf-8')
unique_channels.add(channel)
assert len(unique_channels) == len(keys)
for channel in unique_channels:
assert channel in keys
def test_resubscribe_to_channels_on_reconnection(self, r):
kwargs = make_subscribe_test_data(r.pubsub(), 'channel')
self._test_resubscribe_on_reconnection(**kwargs)
def test_resubscribe_to_patterns_on_reconnection(self, r):
kwargs = make_subscribe_test_data(r.pubsub(), 'pattern')
self._test_resubscribe_on_reconnection(**kwargs)
def _test_subscribed_property(self, p, sub_type, unsub_type, sub_func,
unsub_func, keys):
assert p.subscribed is False
sub_func(keys[0])
# we're now subscribed even though we haven't processed the
# reply from the server just yet
assert p.subscribed is True
assert wait_for_message(p) == make_message(sub_type, keys[0], 1)
# we're still subscribed
assert p.subscribed is True
# unsubscribe from all channels
unsub_func()
# we're still technically subscribed until we process the
# response messages from the server
assert p.subscribed is True
assert wait_for_message(p) == make_message(unsub_type, keys[0], 0)
# now we're no longer subscribed as no more messages can be delivered
# to any channels we were listening to
assert p.subscribed is False
# subscribing again flips the flag back
sub_func(keys[0])
assert p.subscribed is True
assert wait_for_message(p) == make_message(sub_type, keys[0], 1)
# unsubscribe again
unsub_func()
assert p.subscribed is True
# subscribe to another channel before reading the unsubscribe response
sub_func(keys[1])
assert p.subscribed is True
# read the unsubscribe for key1
assert wait_for_message(p) == make_message(unsub_type, keys[0], 0)
# we're still subscribed to key2, so subscribed should still be True
assert p.subscribed is True
# read the key2 subscribe message
assert wait_for_message(p) == make_message(sub_type, keys[1], 1)
unsub_func()
# haven't read the message yet, so we're still subscribed
assert p.subscribed is True
assert wait_for_message(p) == make_message(unsub_type, keys[1], 0)
# now we're finally unsubscribed
assert p.subscribed is False
def test_subscribe_property_with_channels(self, r):
kwargs = make_subscribe_test_data(r.pubsub(), 'channel')
self._test_subscribed_property(**kwargs)
def test_subscribe_property_with_patterns(self, r):
kwargs = make_subscribe_test_data(r.pubsub(), 'pattern')
self._test_subscribed_property(**kwargs)
def test_ignore_all_subscribe_messages(self, r):
p = r.pubsub(ignore_subscribe_messages=True)
checks = (
(p.subscribe, 'foo'),
(p.unsubscribe, 'foo'),
(p.psubscribe, 'f*'),
(p.punsubscribe, 'f*'),
)
assert p.subscribed is False
for func, channel in checks:
assert func(channel) is None
assert p.subscribed is True
assert wait_for_message(p) is None
assert p.subscribed is False
def test_ignore_individual_subscribe_messages(self, r):
p = r.pubsub()
checks = (
(p.subscribe, 'foo'),
(p.unsubscribe, 'foo'),
(p.psubscribe, 'f*'),
(p.punsubscribe, 'f*'),
)
assert p.subscribed is False
for func, channel in checks:
assert func(channel) is None
assert p.subscribed is True
message = wait_for_message(p, ignore_subscribe_messages=True)
assert message is None
assert p.subscribed is False
class TestPubSubMessages(object):
def setup_method(self, method):
self.message = None
def message_handler(self, message):
self.message = message
def test_published_message_to_channel(self, r):
p = r.pubsub(ignore_subscribe_messages=True)
p.subscribe('foo')
assert r.publish('foo', 'test message') == 1
message = wait_for_message(p)
assert isinstance(message, dict)
assert message == make_message('message', 'foo', 'test message')
def test_published_message_to_pattern(self, r):
p = r.pubsub(ignore_subscribe_messages=True)
p.subscribe('foo')
p.psubscribe('f*')
# 1 to pattern, 1 to channel
assert r.publish('foo', 'test message') == 2
message1 = wait_for_message(p)
message2 = wait_for_message(p)
assert isinstance(message1, dict)
assert isinstance(message2, dict)
expected = [
make_message('message', 'foo', 'test message'),
make_message('pmessage', 'foo', 'test message', pattern='f*')
]
assert message1 in expected
assert message2 in expected
assert message1 != message2
def test_channel_message_handler(self, r):
p = r.pubsub(ignore_subscribe_messages=True)
p.subscribe(foo=self.message_handler)
assert r.publish('foo', 'test message') == 1
assert wait_for_message(p) is None
assert self.message == make_message('message', 'foo', 'test message')
def test_pattern_message_handler(self, r):
p = r.pubsub(ignore_subscribe_messages=True)
p.psubscribe(**{'f*': self.message_handler})
assert r.publish('foo', 'test message') == 1
assert wait_for_message(p) is None
assert self.message == make_message('pmessage', 'foo', 'test message',
pattern='f*')
def test_unicode_channel_message_handler(self, r):
p = r.pubsub(ignore_subscribe_messages=True)
channel = u('uni') + unichr(4456) + u('code')
channels = {channel: self.message_handler}
p.subscribe(**channels)
assert r.publish(channel, 'test message') == 1
assert wait_for_message(p) is None
assert self.message == make_message('message', channel, 'test message')
def test_unicode_pattern_message_handler(self, r):
p = r.pubsub(ignore_subscribe_messages=True)
pattern = u('uni') + unichr(4456) + u('*')
channel = u('uni') + unichr(4456) + u('code')
p.psubscribe(**{pattern: self.message_handler})
assert r.publish(channel, 'test message') == 1
assert wait_for_message(p) is None
assert self.message == make_message('pmessage', channel,
'test message', pattern=pattern)
class TestPubSubAutoDecoding(object):
"These tests only validate that we get unicode values back"
channel = u('uni') + unichr(4456) + u('code')
pattern = u('uni') + unichr(4456) + u('*')
data = u('abc') + unichr(4458) + u('123')
def make_message(self, type, channel, data, pattern=None):
return {
'type': type,
'channel': channel,
'pattern': pattern,
'data': data
}
def setup_method(self, method):
self.message = None
def message_handler(self, message):
self.message = message
@pytest.fixture()
def r(self, request):
return _redis_client(request=request, decode_responses=True)
def test_channel_subscribe_unsubscribe(self, r):
p = r.pubsub()
p.subscribe(self.channel)
assert wait_for_message(p) == self.make_message('subscribe',
self.channel, 1)
p.unsubscribe(self.channel)
assert wait_for_message(p) == self.make_message('unsubscribe',
self.channel, 0)
def test_pattern_subscribe_unsubscribe(self, r):
p = r.pubsub()
p.psubscribe(self.pattern)
assert wait_for_message(p) == self.make_message('psubscribe',
self.pattern, 1)
p.punsubscribe(self.pattern)
assert wait_for_message(p) == self.make_message('punsubscribe',
self.pattern, 0)
def test_channel_publish(self, r):
p = r.pubsub(ignore_subscribe_messages=True)
p.subscribe(self.channel)
r.publish(self.channel, self.data)
assert wait_for_message(p) == self.make_message('message',
self.channel,
self.data)
def test_pattern_publish(self, r):
p = r.pubsub(ignore_subscribe_messages=True)
p.psubscribe(self.pattern)
r.publish(self.channel, self.data)
assert wait_for_message(p) == self.make_message('pmessage',
self.channel,
self.data,
pattern=self.pattern)
def test_channel_message_handler(self, r):
p = r.pubsub(ignore_subscribe_messages=True)
p.subscribe(**{self.channel: self.message_handler})
r.publish(self.channel, self.data)
assert wait_for_message(p) is None
assert self.message == self.make_message('message', self.channel,
self.data)
# test that we reconnected to the correct channel
p.connection.disconnect()
assert wait_for_message(p) is None # should reconnect
new_data = self.data + u('new data')
r.publish(self.channel, new_data)
assert wait_for_message(p) is None
assert self.message == self.make_message('message', self.channel,
new_data)
def test_pattern_message_handler(self, r):
p = r.pubsub(ignore_subscribe_messages=True)
p.psubscribe(**{self.pattern: self.message_handler})
r.publish(self.channel, self.data)
assert wait_for_message(p) is None
assert self.message == self.make_message('pmessage', self.channel,
self.data,
pattern=self.pattern)
# test that we reconnected to the correct pattern
p.connection.disconnect()
assert wait_for_message(p) is None # should reconnect
new_data = self.data + u('new data')
r.publish(self.channel, new_data)
assert wait_for_message(p) is None
assert self.message == self.make_message('pmessage', self.channel,
new_data,
pattern=self.pattern)
class TestPubSubRedisDown(object):
def test_channel_subscribe(self, r):
r = redis.Redis(host='localhost', port=6390)
p = r.pubsub()
with pytest.raises(ConnectionError):
p.subscribe('foo')
|
phihag/adhocracy
|
refs/heads/develop
|
src/adhocracy/lib/sentry.py
|
4
|
from raven import Client
from raven.conf import setup_logging
from raven.handlers.logging import SentryHandler
from raven.middleware import Sentry
from adhocracy import config as aconfig
from adhocracy.lib import version
class SentryMiddleware(Sentry):
"""
As raven.middleware.Sentry doesn't really do what we need, we build our
own. It merely extends Sentry in order to reuse the get_http_context
method.
"""
def __init__(self, app, config):
self.app = app
dsn = aconfig.get('adhocracy.sentry.dsn', config=config)
if not dsn:
raise Exception(
'Sentry misconfigured. Please add adhocracy.sentry.dsn '
'to your adhocracy config.')
self.client = Client(dsn)
handler = SentryHandler(
self.client, level=aconfig.get('adhocracy.sentry.loglevel'))
setup_logging(handler)
def __call__(self, environ, start_response):
self.client.tags_context({'version': version.get_version()})
self.client.http_context(self.get_http_context(environ))
return self.app(environ, start_response)
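# Wiring sketch (assumption, not from the original module): the middleware
# wraps the WSGI app wherever the adhocracy stack is assembled, e.g.
#   app = SentryMiddleware(app, config)
# which requires 'adhocracy.sentry.dsn' to be set in the config, and tags
# every captured event with the running adhocracy version.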
|
suutari/shoop
|
refs/heads/master
|
shuup/admin/modules/service_providers/views/_delete.py
|
1
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from django.core.urlresolvers import reverse_lazy
from django.views.generic import DeleteView
from shuup.core.models import ServiceProvider
class ServiceProviderDeleteView(DeleteView):
model = ServiceProvider
success_url = reverse_lazy("shuup_admin:service_provider.list")
|
matrixise/odoo
|
refs/heads/8.0
|
addons/account/account_bank.py
|
258
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tools.translate import _
from openerp.osv import fields, osv
class bank(osv.osv):
_inherit = "res.partner.bank"
_columns = {
'journal_id': fields.many2one('account.journal', 'Account Journal', help="This journal will be created automatically for this bank account when you save the record"),
'currency_id': fields.related('journal_id', 'currency', type="many2one", relation='res.currency', readonly=True,
string="Currency", help="Currency of the related account journal."),
}
def create(self, cr, uid, data, context=None):
result = super(bank, self).create(cr, uid, data, context=context)
self.post_write(cr, uid, [result], context=context)
return result
def write(self, cr, uid, ids, data, context=None):
result = super(bank, self).write(cr, uid, ids, data, context=context)
self.post_write(cr, uid, ids, context=context)
return result
def _prepare_name(self, bank):
"Return the name to use when creating a bank journal"
return (bank.bank_name or '') + ' ' + (bank.acc_number or '')
def _prepare_name_get(self, cr, uid, bank_dicts, context=None):
"""Add ability to have %(currency_name)s in the format_layout of res.partner.bank.type"""
currency_ids = list(set(data['currency_id'][0] for data in bank_dicts if data.get('currency_id')))
currencies = self.pool.get('res.currency').browse(cr, uid, currency_ids, context=context)
currency_name = dict((currency.id, currency.name) for currency in currencies)
for data in bank_dicts:
data['currency_name'] = data.get('currency_id') and currency_name[data['currency_id'][0]] or ''
return super(bank, self)._prepare_name_get(cr, uid, bank_dicts, context=context)
def post_write(self, cr, uid, ids, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
obj_acc = self.pool.get('account.account')
obj_data = self.pool.get('ir.model.data')
for bank in self.browse(cr, uid, ids, context):
if bank.company_id and not bank.journal_id:
# Find the code and parent of the bank account to create
dig = 6
current_num = 1
ids = obj_acc.search(cr, uid, [('type','=','liquidity'), ('company_id', '=', bank.company_id.id), ('parent_id', '!=', False)], context=context)
# No liquidity account exists, no template available
if not ids: continue
ref_acc_bank = obj_acc.browse(cr, uid, ids[0], context=context).parent_id
while True:
new_code = str(ref_acc_bank.code.ljust(dig-len(str(current_num)), '0')) + str(current_num)
ids = obj_acc.search(cr, uid, [('code', '=', new_code), ('company_id', '=', bank.company_id.id)])
if not ids:
break
current_num += 1
name = self._prepare_name(bank)
acc = {
'name': name,
'code': new_code,
'type': 'liquidity',
'user_type': ref_acc_bank.user_type.id,
'reconcile': False,
'parent_id': ref_acc_bank.id,
'company_id': bank.company_id.id,
}
acc_bank_id = obj_acc.create(cr,uid,acc,context=context)
jour_obj = self.pool.get('account.journal')
new_code = 1
while True:
code = _('BNK')+str(new_code)
ids = jour_obj.search(cr, uid, [('code','=',code)], context=context)
if not ids:
break
new_code += 1
#create the bank journal
vals_journal = {
'name': name,
'code': code,
'type': 'bank',
'company_id': bank.company_id.id,
'analytic_journal_id': False,
'default_credit_account_id': acc_bank_id,
'default_debit_account_id': acc_bank_id,
}
journal_id = jour_obj.create(cr, uid, vals_journal, context=context)
self.write(cr, uid, [bank.id], {'journal_id': journal_id}, context=context)
return True
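# Worked example (illustrative): with a parent liquidity account coded '5100'
# and dig = 6, the loop above probes '510001', '510002', ... until an unused
# account code is found; the journal code is generated the same way from
# 'BNK1', 'BNK2', ...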
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
jaxkodex/odoo
|
refs/heads/8.0
|
addons/account_analytic_default/account_analytic_default.py
|
256
|
# -*- coding: utf-8 -*-
###############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class account_analytic_default(osv.osv):
_name = "account.analytic.default"
_description = "Analytic Distribution"
_rec_name = "analytic_id"
_order = "sequence"
_columns = {
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of analytic distribution"),
'analytic_id': fields.many2one('account.analytic.account', 'Analytic Account'),
'product_id': fields.many2one('product.product', 'Product', ondelete='cascade', help="Select a product which will use analytic account specified in analytic default (e.g. create new customer invoice or Sales order if we select this product, it will automatically take this as an analytic account)"),
'partner_id': fields.many2one('res.partner', 'Partner', ondelete='cascade', help="Select a partner which will use analytic account specified in analytic default (e.g. create new customer invoice or Sales order if we select this partner, it will automatically take this as an analytic account)"),
'user_id': fields.many2one('res.users', 'User', ondelete='cascade', help="Select a user which will use analytic account specified in analytic default."),
'company_id': fields.many2one('res.company', 'Company', ondelete='cascade', help="Select a company which will use analytic account specified in analytic default (e.g. create new customer invoice or Sales order if we select this company, it will automatically take this as an analytic account)"),
'date_start': fields.date('Start Date', help="Default start date for this Analytic Account."),
'date_stop': fields.date('End Date', help="Default end date for this Analytic Account."),
}
def account_get(self, cr, uid, product_id=None, partner_id=None, user_id=None, date=None, company_id=None, context=None):
domain = []
if product_id:
domain += ['|', ('product_id', '=', product_id)]
domain += [('product_id','=', False)]
if partner_id:
domain += ['|', ('partner_id', '=', partner_id)]
domain += [('partner_id', '=', False)]
if company_id:
domain += ['|', ('company_id', '=', company_id)]
domain += [('company_id', '=', False)]
if user_id:
domain += ['|',('user_id', '=', user_id)]
domain += [('user_id','=', False)]
if date:
domain += ['|', ('date_start', '<=', date), ('date_start', '=', False)]
domain += ['|', ('date_stop', '>=', date), ('date_stop', '=', False)]
best_index = -1
res = False
for rec in self.browse(cr, uid, self.search(cr, uid, domain, context=context), context=context):
index = 0
if rec.product_id: index += 1
if rec.partner_id: index += 1
if rec.company_id: index += 1
if rec.user_id: index += 1
if rec.date_start: index += 1
if rec.date_stop: index += 1
if index > best_index:
res = rec
best_index = index
return res
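# Selection note (illustrative): the domain matches rules whose criteria are
# either equal to the given value or unset, and the rule with the most
# criteria actually filled in wins - e.g. a rule with both product_id and
# partner_id set beats a product-only rule for the same pair.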
class account_invoice_line(osv.osv):
_inherit = "account.invoice.line"
_description = "Invoice Line"
def product_id_change(self, cr, uid, ids, product, uom_id, qty=0, name='', type='out_invoice', partner_id=False, fposition_id=False, price_unit=False, currency_id=False, company_id=None, context=None):
res_prod = super(account_invoice_line, self).product_id_change(cr, uid, ids, product, uom_id, qty, name, type, partner_id, fposition_id, price_unit, currency_id=currency_id, company_id=company_id, context=context)
rec = self.pool.get('account.analytic.default').account_get(cr, uid, product, partner_id, uid, time.strftime('%Y-%m-%d'), company_id=company_id, context=context)
if rec:
res_prod['value'].update({'account_analytic_id': rec.analytic_id.id})
else:
res_prod['value'].update({'account_analytic_id': False})
return res_prod
class stock_picking(osv.osv):
_inherit = "stock.picking"
def _get_account_analytic_invoice(self, cursor, user, picking, move_line):
partner_id = picking.partner_id and picking.partner_id.id or False
rec = self.pool.get('account.analytic.default').account_get(cursor, user, move_line.product_id.id, partner_id, user, time.strftime('%Y-%m-%d'))
if rec:
return rec.analytic_id.id
return super(stock_picking, self)._get_account_analytic_invoice(cursor, user, picking, move_line)
class sale_order_line(osv.osv):
_inherit = "sale.order.line"
# Method overridden to set the analytic account by default on criterion match
def invoice_line_create(self, cr, uid, ids, context=None):
create_ids = super(sale_order_line, self).invoice_line_create(cr, uid, ids, context=context)
if not ids:
return create_ids
sale_line = self.browse(cr, uid, ids[0], context=context)
inv_line_obj = self.pool.get('account.invoice.line')
anal_def_obj = self.pool.get('account.analytic.default')
for line in inv_line_obj.browse(cr, uid, create_ids, context=context):
rec = anal_def_obj.account_get(cr, uid, line.product_id.id, sale_line.order_id.partner_id.id, sale_line.order_id.user_id.id, time.strftime('%Y-%m-%d'), context=context)
if rec:
inv_line_obj.write(cr, uid, [line.id], {'account_analytic_id': rec.analytic_id.id}, context=context)
return create_ids
class product_product(osv.Model):
_inherit = 'product.product'
def _rules_count(self, cr, uid, ids, field_name, arg, context=None):
Analytic = self.pool['account.analytic.default']
return {
product_id: Analytic.search_count(cr, uid, [('product_id', '=', product_id)], context=context)
for product_id in ids
}
_columns = {
'rules_count': fields.function(_rules_count, string='# Analytic Rules', type='integer'),
}
class product_template(osv.Model):
_inherit = 'product.template'
def _rules_count(self, cr, uid, ids, field_name, arg, context=None):
Analytic = self.pool['account.analytic.default']
res = {}
for product_tmpl_id in self.browse(cr, uid, ids, context=context):
res[product_tmpl_id.id] = sum([p.rules_count for p in product_tmpl_id.product_variant_ids])
return res
_columns = {
'rules_count': fields.function(_rules_count, string='# Analytic Rules', type='integer'),
}
def action_view_rules(self, cr, uid, ids, context=None):
products = self._get_products(cr, uid, ids, context=context)
result = self._get_act_window_dict(cr, uid, 'account_analytic_default.action_product_default_list', context=context)
result['domain'] = "[('product_id','in',[" + ','.join(map(str, products)) + "])]"
# Remove context so it is not going to filter on product_id with active_id of template
result['context'] = "{}"
return result
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
SpheMakh/Stimela
|
refs/heads/master
|
stimela/dismissable.py
|
1
|
# -*- coding: future_fstrings -*-
class dismissable:
'''
Wrapper for optional parameters to stimela.
Initialize with val=None to make stimela skip
parsing the parameter.
'''
def __init__(self, val=None):
self.__val = val
def __call__(self):
return self.__val
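# Minimal usage sketch (illustrative, not part of the original module):
if __name__ == "__main__":
    assert dismissable()() is None                # parameter is skipped
    assert dismissable("3arcsec")() == "3arcsec"  # value is passed through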
|
last-g/qoala
|
refs/heads/master
|
lib/qserver/quest/__init__.py
|
2
|
#!/usr/bin/env python2
from .quest import QuestDescriptor
from .xmlquest import XMLQuestProvider
from .scriptquest import ScriptQuestProvider
|
bklang/GO2
|
refs/heads/master
|
babel/messages/plurals.py
|
151
|
# -*- coding: utf-8 -*-
"""
babel.messages.plurals
~~~~~~~~~~~~~~~~~~~~~~
Plural form definitions.
:copyright: (c) 2013 by the Babel Team.
:license: BSD, see LICENSE for more details.
"""
from babel.core import default_locale, Locale
from operator import itemgetter
# XXX: remove this file, duplication with babel.plural
LC_CTYPE = default_locale('LC_CTYPE')
PLURALS = {
# Afar
# 'aa': (),
# Abkhazian
# 'ab': (),
# Avestan
# 'ae': (),
# Afrikaans - From Pootle's PO's
'af': (2, '(n != 1)'),
# Akan
# 'ak': (),
# Amharic
# 'am': (),
# Aragonese
# 'an': (),
# Arabic - From Pootle's PO's
'ar': (6, '(n==0 ? 0 : n==1 ? 1 : n==2 ? 2 : n>=3 && n<=10 ? 3 : n>=11 && n<=99 ? 4 : 5)'),
# Assamese
# 'as': (),
# Avaric
# 'av': (),
# Aymara
# 'ay': (),
# Azerbaijani
# 'az': (),
# Bashkir
# 'ba': (),
# Belarusian
# 'be': (),
# Bulgarian - From Pootle's PO's
'bg': (2, '(n != 1)'),
# Bihari
# 'bh': (),
# Bislama
# 'bi': (),
# Bambara
# 'bm': (),
# Bengali - From Pootle's PO's
'bn': (2, '(n != 1)'),
# Tibetan - as discussed in private with Andrew West
'bo': (1, '0'),
# Breton
# 'br': (),
# Bosnian
# 'bs': (),
# Catalan - From Pootle's PO's
'ca': (2, '(n != 1)'),
# Chechen
# 'ce': (),
# Chamorro
# 'ch': (),
# Corsican
# 'co': (),
# Cree
# 'cr': (),
# Czech
'cs': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
# Church Slavic
# 'cu': (),
# Chuvash
'cv': (1, '0'),
# Welsh
'cy': (5, '(n==1 ? 1 : n==2 ? 2 : n==3 ? 3 : n==6 ? 4 : 0)'),
# Danish
'da': (2, '(n != 1)'),
# German
'de': (2, '(n != 1)'),
# Divehi
# 'dv': (),
# Dzongkha
'dz': (1, '0'),
# Greek
'el': (2, '(n != 1)'),
# English
'en': (2, '(n != 1)'),
# Esperanto
'eo': (2, '(n != 1)'),
# Spanish
'es': (2, '(n != 1)'),
# Estonian
'et': (2, '(n != 1)'),
# Basque - From Pootle's PO's
'eu': (2, '(n != 1)'),
# Persian - From Pootle's PO's
'fa': (1, '0'),
# Finnish
'fi': (2, '(n != 1)'),
# French
'fr': (2, '(n > 1)'),
# Friulian - From Pootle's PO's
'fur': (2, '(n > 1)'),
# Irish
'ga': (3, '(n==1 ? 0 : n==2 ? 1 : 2)'),
# Galician - From Pootle's PO's
'gl': (2, '(n != 1)'),
# Hausa - From Pootle's PO's
'ha': (2, '(n != 1)'),
# Hebrew
'he': (2, '(n != 1)'),
# Hindi - From Pootle's PO's
'hi': (2, '(n != 1)'),
# Croatian
'hr': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
# Hungarian
'hu': (1, '0'),
# Armenian - From Pootle's PO's
'hy': (1, '0'),
# Icelandic - From Pootle's PO's
'is': (2, '(n != 1)'),
# Italian
'it': (2, '(n != 1)'),
# Japanese
'ja': (1, '0'),
# Georgian - From Pootle's PO's
'ka': (1, '0'),
# Kongo - From Pootle's PO's
'kg': (2, '(n != 1)'),
# Khmer - From Pootle's PO's
'km': (1, '0'),
# Korean
'ko': (1, '0'),
# Kurdish - From Pootle's PO's
'ku': (2, '(n != 1)'),
# Lao - Another member of the Tai language family, like Thai.
'lo': (1, '0'),
# Lithuanian
'lt': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && (n%100<10 || n%100>=20) ? 1 : 2)'),
# Latvian
'lv': (3, '(n%10==1 && n%100!=11 ? 0 : n != 0 ? 1 : 2)'),
# Maltese - From Pootle's PO's
'mt': (4, '(n==1 ? 0 : n==0 || ( n%100>1 && n%100<11) ? 1 : (n%100>10 && n%100<20 ) ? 2 : 3)'),
# Norwegian Bokmål
'nb': (2, '(n != 1)'),
# Dutch
'nl': (2, '(n != 1)'),
# Norwegian Nynorsk
'nn': (2, '(n != 1)'),
# Norwegian
'no': (2, '(n != 1)'),
# Punjabi - From Pootle's PO's
'pa': (2, '(n != 1)'),
# Polish
'pl': (3, '(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
# Portuguese
'pt': (2, '(n != 1)'),
# Brazilian
'pt_BR': (2, '(n > 1)'),
# Romanian - From Pootle's PO's
'ro': (3, '(n==1 ? 0 : (n==0 || (n%100 > 0 && n%100 < 20)) ? 1 : 2)'),
# Russian
'ru': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
# Slovak
'sk': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
# Slovenian
'sl': (4, '(n%100==1 ? 0 : n%100==2 ? 1 : n%100==3 || n%100==4 ? 2 : 3)'),
# Serbian - From Pootle's PO's
'sr': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
# Southern Sotho - From Pootle's PO's
'st': (2, '(n != 1)'),
# Swedish
'sv': (2, '(n != 1)'),
# Thai
'th': (1, '0'),
# Turkish
'tr': (1, '0'),
# Ukrainian
'uk': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
# Venda - From Pootle's PO's
've': (2, '(n != 1)'),
# Vietnamese - From Pootle's PO's
'vi': (1, '0'),
# Xhosa - From Pootle's PO's
'xh': (2, '(n != 1)'),
# Chinese - From Pootle's PO's
'zh_CN': (1, '0'),
'zh_HK': (1, '0'),
'zh_TW': (1, '0'),
}
DEFAULT_PLURAL = (2, '(n != 1)')
class _PluralTuple(tuple):
"""A tuple with plural information."""
__slots__ = ()
num_plurals = property(itemgetter(0), doc="""
The number of plurals used by the locale.""")
plural_expr = property(itemgetter(1), doc="""
The plural expression used by the locale.""")
plural_forms = property(lambda x: 'nplurals=%s; plural=%s' % x, doc="""
The plural expression used by the catalog or locale.""")
def __str__(self):
return self.plural_forms
def get_plural(locale=LC_CTYPE):
"""A tuple with the information catalogs need to perform proper
pluralization. The first item of the tuple is the number of plural
forms, the second the plural expression.
>>> get_plural(locale='en')
(2, '(n != 1)')
>>> get_plural(locale='ga')
(3, '(n==1 ? 0 : n==2 ? 1 : 2)')
The object returned is a special tuple with additional members:
>>> tup = get_plural("ja")
>>> tup.num_plurals
1
>>> tup.plural_expr
'0'
>>> tup.plural_forms
'nplurals=1; plural=0'
Converting the tuple into a string prints the plural forms for a
gettext catalog:
>>> str(tup)
'nplurals=1; plural=0'
"""
locale = Locale.parse(locale)
try:
tup = PLURALS[str(locale)]
except KeyError:
try:
tup = PLURALS[locale.language]
except KeyError:
tup = DEFAULT_PLURAL
return _PluralTuple(tup)
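# Note (illustrative): str(get_plural(...)) renders the value of the gettext
# 'Plural-Forms' catalog header, e.g. str(get_plural('de')) gives
# 'nplurals=2; plural=(n != 1)'.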
|
toshywoshy/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/onyx/onyx_aaa.py
|
8
|
#!/usr/bin/python
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: onyx_aaa
version_added: "2.10"
author: "Sara Touqan (@sarato)"
short_description: Configures AAA parameters
description:
- This module provides declarative management of AAA protocol params
on Mellanox ONYX network devices.
options:
tacacs_accounting_enabled:
description:
- Configures accounting settings.
type: bool
auth_default_user:
description:
- Sets local user default mapping.
type: str
choices: ['admin', 'monitor']
auth_order:
description:
- Sets the order on how to handle remote to local user mappings.
type: str
choices: ['local-only', 'remote-first', 'remote-only']
auth_fallback_enabled:
description:
- Enables/Disables fallback server-err option.
type: bool
"""
EXAMPLES = """
- name: configures aaa
onyx_aaa:
tacacs_accounting_enabled: yes
auth_default_user: monitor
auth_order: local-only
auth_fallback_enabled: false
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device.
returned: always
type: list
sample:
- aaa accounting changes default stop-only tacacs+
- no aaa accounting changes default stop-only tacacs+
- aaa authorization map default-user <user>
- aaa authorization map order <order>
- aaa authorization map fallback server-err
- no aaa authorization map fallback server-err
"""
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.onyx.onyx import show_cmd
from ansible.module_utils.network.onyx.onyx import BaseOnyxModule
class OnyxAAAModule(BaseOnyxModule):
def init_module(self):
""" initialize module
"""
element_spec = dict(
tacacs_accounting_enabled=dict(type='bool'),
auth_default_user=dict(type='str', choices=['admin', 'monitor']),
auth_order=dict(type='str', choices=['local-only', 'remote-first', 'remote-only']),
auth_fallback_enabled=dict(type='bool')
)
argument_spec = dict()
argument_spec.update(element_spec)
self._module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True)
def get_required_config(self):
module_params = self._module.params
self._required_config = dict(module_params)
self.validate_param_values(self._required_config)
def _set_aaa_config(self, all_aaa_config):
aaa_config = all_aaa_config[0]
self._current_config['auth_default_user'] = aaa_config.get("Default User")
self._current_config['auth_order'] = aaa_config.get("Map Order")
auth_fallback_enabled = aaa_config.get("Fallback on server-err")
if auth_fallback_enabled == "yes":
self._current_config['auth_fallback_enabled'] = True
else:
self._current_config['auth_fallback_enabled'] = False
aaa_config_2 = all_aaa_config[2]
accounting_message = aaa_config_2.get("message")
if accounting_message == "No accounting methods configured.":
self._current_config['tacacs_accounting_enabled'] = False
else:
self._current_config['tacacs_accounting_enabled'] = True
def _show_aaa_config(self):
cmd = "show aaa"
return show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False)
def load_current_config(self):
self._current_config = dict()
aaa_config = self._show_aaa_config()
if aaa_config:
self._set_aaa_config(aaa_config)
def generate_commands(self):
tacacs_accounting_enabled = self._required_config.get("tacacs_accounting_enabled")
if tacacs_accounting_enabled is not None:
current_accounting_enabled = self._current_config.get("tacacs_accounting_enabled")
if current_accounting_enabled != tacacs_accounting_enabled:
if tacacs_accounting_enabled is True:
self._commands.append('aaa accounting changes default stop-only tacacs+')
else:
self._commands.append('no aaa accounting changes default stop-only tacacs+')
auth_default_user = self._required_config.get("auth_default_user")
if auth_default_user is not None:
current_user = self._current_config.get("auth_default_user")
if current_user != auth_default_user:
self._commands.append('aaa authorization map default-user {0}' .format(auth_default_user))
auth_order = self._required_config.get("auth_order")
if auth_order is not None:
current_order = self._current_config.get("auth_order")
if current_order != auth_order:
self._commands.append('aaa authorization map order {0}' .format(auth_order))
auth_fallback_enabled = self._required_config.get("auth_fallback_enabled")
if auth_fallback_enabled is not None:
current_fallback = self._current_config.get("auth_fallback_enabled")
if current_fallback != auth_fallback_enabled:
if auth_fallback_enabled is True:
self._commands.append('aaa authorization map fallback server-err')
else:
self._commands.append('no aaa authorization map fallback server-err')
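# Note (illustrative): each command above is emitted only when the requested
# value differs from the state parsed out of 'show aaa', so re-running the
# module with the same parameters is idempotent and reports no change.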
def main():
""" main entry point for module execution
"""
OnyxAAAModule.main()
if __name__ == '__main__':
main()
|
MechanisM/ajenti
|
refs/heads/master
|
plugins/nginx/main_single.py
|
17
|
from ajenti.api import *
from ajenti.com import *
from ajenti.utils import *
from ajenti import apis
import os
import re
import glob
import sys
class NginxSingleConfigBackend(Plugin):
implements(IConfigurable)
platform = ['freebsd', 'nginx', 'arch']
config_file = ''
name = 'nginx'
id = 'nginx'
def __init__(self):
self.config_file = self.app.get_config(self).cfg_file
if not os.path.exists(self.config_file):
raise ConfigurationError('Can\'t find config file')
def list_files(self):
return [self.config_file]
def read(self):
return ConfManager.get().load('nginx', self.config_file)
def save(self, t):
ConfManager.get().save('nginx', self.config_file, t)
ConfManager.get().commit('nginx')
def get_hosts(self):
res = {}
text = self.read()
pat = re.compile('server\s*\{\s*', re.S)
last = 0
for m in pat.finditer(text):
item = apis.webserver.VirtualHost()
t = m.start()
if t <= last:
continue
while(text[t] != '\n' and t > 0 and text[t] != '#'):
t = t - 1
enabled = text[t] != '#'
item.start = t
beg = m.start()
open = 1
if enabled:
pat_brack = re.compile('[\{\}]', re.S)
for bracket in pat_brack.finditer(text, m.end() + 1):
if bracket.group() == '{':
open = open + 1
else:
open = open - 1
last = bracket.start()
if open == 0:
break
if open != 0:
continue
item.end = last + 2
item.config = text[beg:last + 1]
else:
pat_brack = re.compile('\s*#([\{\}])', re.S)
_last = 0
for bracket in pat_brack.finditer(text, m.end() + 1):
if bracket.group(1) == '{':
open = open + 1
else:
open = open - 1
_last = bracket.end()
if open == 0:
break
if open != 0:
continue
config = text[m.end():_last - 1]
lines = config.split('\n')
bad = False
for line in lines:
line = line.strip()
if(line != '' and line[0] != '#'):
bad = True
break
if bad:
continue
config = text[beg:_last]
last = item.end = _last + 1
item.config = re.sub('\ *#\s*', '', config)
pat_name = re.compile('listen\s*(.+?);', re.S)
name = pat_name.search(item.config)
pat_name = re.compile('server_name\s*(.+?);', re.S)
servername = pat_name.search(item.config)
if(not name or not servername):
continue
item.name = name.group(1) + " " + servername.group(1)
item.enabled = enabled
res[item.name] = item
return res
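# Parsing sketch (illustrative): each 'server {' match is expanded by counting
# raw '{'/'}' pairs until they balance; disabled hosts are detected by a '#'
# in front of the block and re-parsed by balancing the commented-out braces,
# then stripping the leading '#' from every line of the recovered config.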
def delete_host(self, id):
text = self.read()
try:
host = self.get_hosts()[id]
except KeyError:
return
text = text[:host.start] + text[host.end:]
self.save(text)
def save_host(self, host):
text = self.read()
try:
oldhost = self.get_hosts()[host.name]
text = text[:oldhost.start] + "\n" + host.config + text[oldhost.end:]
except KeyError:
text = text + "\n" + host.config
self.save(text)
def disable_host(self, id):
text = self.read()
try:
host = self.get_hosts()[id]
except KeyError:
return
if not host.enabled:
return
config = text[host.start:host.end].replace('\n', '\n#')
text = text[:host.start] + config[:-1] + text[host.end:]
self.save(text)
def enable_host(self, id):
text = self.read()
try:
host = self.get_hosts()[id]
except KeyError:
return
if host.enabled:
return
config = text[host.start:host.end].replace('\n#', '\n')
text = text[:host.start] + config[1:] + text[host.end:]
self.save(text)
host_template = """
server {
listen 80;
server_name %s;
access_log /var/log/nginx/localhost.access_log main;
error_log /var/log/nginx/localhost.error_log info;
root /var/www/localhost/htdocs;
}
"""
class NginxSCPPlugin(apis.webserver.WebserverPlugin):
platform = ['freebsd', 'arch', 'gentoo', 'centos', 'mandriva']
text = 'nginx'
icon = '/dl/nginx/icon.png'
folder = 'servers'
ws_service = 'nginx'
ws_name = 'nginx'
ws_icon = '/dl/nginx/icon.png'
ws_title = 'nginx'
ws_backend = NginxSingleConfigBackend
|
vaginessa/yowlayer-store
|
refs/heads/master
|
yowsup_ext/layers/store/models/conversation.py
|
1
|
from yowsup_ext.layers.store import db
import peewee
import datetime
from contact import Contact
from group import Group
from broadcast import Broadcast
TYPE_CONTACT = "contact"
TYPE_GROUP = "group"
TYPE_BROADCAST = "broadcast"
class Conversation(db.get_base_model()):
contact = peewee.ForeignKeyField(Contact, null=True)
group = peewee.ForeignKeyField(Group, null = True)
broadcast = peewee.ForeignKeyField(Broadcast, null=True)
created = peewee.DateTimeField(default=datetime.datetime.now)  # pass the callable, not now(), so each row gets its own timestamp
def getType(self):
if self.contact:
return TYPE_CONTACT
if self.group:
return TYPE_GROUP
if self.broadcast:
return TYPE_BROADCAST
def toDict(self):
return {
"contact": self.contact.toDict() if self.contact else None,
"group": self.group.toDict() if self.group else None,
"broadcast": self.broadcast.toDict() if self.broadcast else None,
"type": self.getType(),
"created": self.created
}
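# Usage sketch (illustrative): a one-to-one chat maps to a Conversation with
# only 'contact' set, so getType() returns TYPE_CONTACT and toDict() leaves
# 'group' and 'broadcast' as None.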
|
XiaosongWei/crosswalk-test-suite
|
refs/heads/master
|
webapi/tct-csp-w3c-tests/csp-py/csp_default-src_self_script.py
|
25
|
def main(request, response):
import simplejson as json
f = file('config.json')
source = f.read()
s = json.JSONDecoder().decode(source)
url1 = "http://" + s['host'] + ":" + str(s['ports']['http'][1])
response.headers.set(
"Content-Security-Policy",
"default-src 'self' 'unsafe-inline'")
response.headers.set(
"X-Content-Security-Policy",
"default-src 'self' 'unsafe-inline'")
response.headers.set("X-WebKit-CSP", "default-src 'self' 'unsafe-inline'")
return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of works must retain the original copyright notice, this list
of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this work without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
Hao, Yunfei <yunfeix.hao@intel.com>
-->
<html>
<head>
<title>CSP Test: csp_default-src_self_script</title>
<link rel="author" title="Intel" href="http://www.intel.com"/>
<link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#default-src"/>
<meta name="flags" content=""/>
<meta name="assert" content="default-src 'self' 'unsafe-inline'"/>
<meta charset="utf-8"/>
<script src="../resources/testharness.js"></script>
<script src="../resources/testharnessreport.js"></script>
</head>
<body>
<div id="log"></div>
<script src="support/csp.js"></script>
<script src='""" + url1 + """/tests/csp/support/test.js'></script>
<script>
test(function() {
assert_true(typeof X == "number", "attribute defined internal");
}, document.title + "_allowed");
test(function() {
assert_true(typeof getVideoURI != "function", "Function getVideoURI is defined");
}, document.title + "_blocked");
</script>
</body>
</html> """
|
pieleric/dacontrol-a3
|
refs/heads/master
|
andorcam3.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 6 Mar 2012
@author: Éric Piel
Copyright © 2012 Éric Piel, Delmic
This file is part of Delmic Acquisition Software.
Delmic Acquisition Software is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 2 of the License, or (at your option) any later version.
Delmic Acquisition Software is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with Delmic Acquisition Software. If not, see http://www.gnu.org/licenses/.
'''
from ctypes import *
import numpy
import os
import threading
import time
# Neo encodings (selectable depending on gain selection):
#0 Mono12
#1 Mono12Packed
#2 Mono16
#3 RGB8Packed
#4 Mono12Coded
#5 Mono12CodedPacked
#6 Mono22Parallel
#7 Mono22PackedParallel
#8 Mono8 -> error code 19
#9 Mono32
class ATError(Exception):
pass
class ATDLL(CDLL):
"""
Subclass of CDLL specific to atcore library, which handles error codes for
all the functions automatically.
It works by setting a default _FuncPtr.errcheck.
"""
# various defines from atcore.h
HANDLE_SYSTEM = 1
INFINITE = 0xFFFFFFFF # "infinite" time
@staticmethod
def at_errcheck(result, func, args):
"""
Analyse the return value of a call and raise an exception in case of
error.
Follows the ctypes.errcheck callback convention
"""
if result != 0:
if result in ATDLL.err_code:
raise ATError("Call to %s failed with error code %d: %s" %
(str(func.__name__), result, ATDLL.err_code[result]))
else:
raise ATError("Call to %s failed with unknown error code %d" %
(str(func.__name__), result))
return result
def __getitem__(self, name):
func = CDLL.__getitem__(self, name)
func.__name__ = name
func.errcheck = self.at_errcheck
return func
err_code = {
1: """AT_ERR_NONINITIALISED
Function called with an uninitialised handle""",
2: """AT_ERR_NOTIMPLEMENTED
Feature has not been implemented for the chosen camera""",
3: """AT_ERR_READONLY
Feature is read only""",
4: """AT_ERR_NOTREADABLE
Feature is currently not readable""",
5: """AT_ERR_NOTWRITABLE
Feature is currently not writable""",
6: """AT_ERR_OUTOFRANGE
Value is outside the maximum and minimum limits""",
7: """AT_ERR_INDEXNOTAVAILABLE
Index is currently not available""",
8: """AT_ERR_INDEXNOTIMPLEMENTED
Index is not implemented for the chosen camera""",
9: """AT_ERR_#EXCEEDEDMAXSTRINGLENGTH
String value provided exceeds the maximum allowed length""",
10: """AT_ERR_CONNECTION
Error connecting to or disconnecting from hardware""",
11: """AT_ERR_NODATA""",
12: """AT_ERR_INVALIDHANDLE""",
13: """AT_ERR_TIMEDOUT
The AT_WaitBuffer function timed out while waiting for data to arrive in the output
queue""",
14: """AT_ERR_BUFFERFULL
The input queue has reached its capacity""",
15: """AT_ERR_INVALIDSIZE
The size of a queued buffer did not match the frame size""",
16: """AT_ERR_INVALIDALIGNMENT
A queued buffer was not aligned on an 8-byte boundary""",
17: """AT_ERR_COMM
An error has occurred while communicating with hardware""",
18: """AT_ERR_STRINGNOTAVAILABLE
Index / String is not available""",
19: """AT_ERR_STRINGNOTIMPLEMENTED
Index / String is not implemented for the chosen camera""",
20: """AT_ERR_NULL_FEATURE""",
21: """AT_ERR_NULL_HANDLE
Null device handle passed to function""",
# All kind of null pointer passed
38: """AT_ERR_DEVICEINUSE
Function failed to connect to a device because it is already being used""",
100: """AT_ERR_HARDWARE_OVERFLOW
The software was not able to retrieve data from the card or camera fast enough
to avoid the internal hardware buffer bursting.""",
}
class AndorCam3(object):
"""
Represents one Andor camera and provides all the basic interfaces typical of
a CCD/CMOS camera.
This implementation is for the Andor SDK v3.
It offers mostly two main high level methods: acquire() and acquireFlow(),
which respectively offer the possibility to get one and several images from
the camera.
It also provides low-level methods corresponding to the SDK functions.
"""
def __init__(self, device=None):
"""
Initialises the device
device (None or int): number of the device to open, as defined by Andor, cf. scan()
if None, uses the system handle, which allows very limited access to some information
Raise an exception if the device cannot be opened.
"""
if os.name == "nt":
# That's not gonna fly... need to put this into ATDLL
self.atcore = windll.LoadLibrary('libatcore.dll') # TODO check it works
else:
# Global so that its sub-libraries can access it
self.atcore = ATDLL("libatcore.so", RTLD_GLOBAL) # libatcore.so.3
self.InitialiseLibrary()
self.handle = self.Open(device)
if device is None:
# nothing else to initialise
return
# Maximum cooling for lowest (image) noise
self.setTargetTemperature(-100) # very low (automatically adjusted)
self.setFanSpeed(1.0)
self.is_acquiring = False
self.acquire_must_stop = False
self.acquire_thread = None
# low level methods, wrapper to the actual SDK functions
# TODO: not _everything_ is implemented, just what we need
def InitialiseLibrary(self):
self.atcore.AT_InitialiseLibrary()
def FinaliseLibrary(self):
self.atcore.AT_FinaliseLibrary()
def Open(self, device):
"""
device (None or int): number of the device to open, as defined by Andor, cf. scan()
if None, uses the system handle, which allows very limited access to some information
return a c_int, the handle
"""
if device is None:
return c_int(ATDLL.HANDLE_SYSTEM)
handle = c_int()
self.atcore.AT_Open(device, byref(handle))
return handle
def Close(self):
self.atcore.AT_Close(self.handle)
def Command(self, command):
self.atcore.AT_Command(self.handle, command)
def QueueBuffer(self, cbuffer):
"""
cbuffer (ctypes.array): the buffer to queue
"""
self.atcore.AT_QueueBuffer(self.handle, cbuffer, sizeof(cbuffer))
def WaitBuffer(self, timeout=None):
"""
timeout (float or None): maximum time to wait in second (None for infinite)
return (ctypes.POINTER(c_byte), int): pointer to buffer, size of buffer
"""
pbuffer = POINTER(c_byte)() # null pointer to c_bytes
buffersize = c_int()
if timeout is None:
timeout_ms = ATDLL.INFINITE
else:
timeout_ms = c_uint(int(round(timeout * 1e3))) # ms
self.atcore.AT_WaitBuffer(self.handle, byref(pbuffer),
byref(buffersize), timeout_ms)
return pbuffer, buffersize.value
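# Usage sketch (illustrative): the typical SDK v3 acquisition cycle queues a
# buffer, starts acquiring, then blocks until the frame lands in it:
#   cam.QueueBuffer(cbuffer)
#   cam.Command(u"AcquisitionStart")
#   pbuffer, size = cam.WaitBuffer(timeout=1.0)
#   cam.Command(u"AcquisitionStop")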
def Flush(self):
self.atcore.AT_Flush(self.handle)
def GetString(self, prop):
"""
Return a unicode string corresponding to the given property
"""
len_str = c_int()
self.atcore.AT_GetStringMaxLength(self.handle, prop, byref(len_str))
string = create_unicode_buffer(len_str.value)
self.atcore.AT_GetString(self.handle, prop, string, len_str)
return string.value
def SetInt(self, prop, value):
self.atcore.AT_SetInt(self.handle, prop, c_longlong(value))
def GetInt(self, prop):
result = c_longlong()
self.atcore.AT_GetInt(self.handle, prop, byref(result))
return result.value
def GetEnumIndex(self, prop):
result = c_longlong()
self.atcore.AT_GetEnumIndex(self.handle, prop, byref(result))
return result.value
def GetIntMax(self, prop):
"""
Return the max of an integer property.
Return (int)
"""
result = c_longlong()
self.atcore.AT_GetIntMax(self.handle, prop, byref(result))
return result.value
def GetIntRanges(self, prop):
"""
Return the (min, max) of an integer property.
Return (2-tuple int)
"""
result = (c_longlong(), c_longlong())
self.atcore.AT_GetIntMin(self.handle, prop, byref(result[0]))
self.atcore.AT_GetIntMax(self.handle, prop, byref(result[1]))
return (result[0].value, result[1].value)
def SetFloat(self, prop, value):
self.atcore.AT_SetFloat(self.handle, prop, c_double(value))
def GetFloat(self, prop):
result = c_double()
self.atcore.AT_GetFloat(self.handle, prop, byref(result))
return result.value
def GetFloatRanges(self, prop):
"""
Return the (min, max) of a float property.
Return (2-tuple float)
"""
result = (c_double(), c_double())
self.atcore.AT_GetFloatMin(self.handle, prop, byref(result[0]))
self.atcore.AT_GetFloatMax(self.handle, prop, byref(result[1]))
return (result[0].value, result[1].value)
def SetBool(self, prop, value):
if value:
int_val = c_int(1)
else:
int_val = c_int(0)
self.atcore.AT_SetBool(self.handle, prop, int_val)
def GetBool(self, prop):
result = c_int()
self.atcore.AT_GetBool(self.handle, prop, byref(result))
return (result.value != 0)
def isImplemented(self, prop):
"""
return bool
"""
implemented = c_int()
self.atcore.AT_IsImplemented(self.handle, prop, byref(implemented))
return (implemented.value != 0)
def isWritable(self, prop):
"""
return bool
"""
writable = c_int()
self.atcore.AT_IsWritable(self.handle, prop, byref(writable))
return (writable.value != 0)
def SetEnumString(self, prop, value):
"""
Set a unicode string corresponding for the given property
"""
self.atcore.AT_SetEnumString(self.handle, prop, value)
def GetEnumStringByIndex(self, prop, index):
"""
Return a unicode string corresponding to the given property and index
"""
string = create_unicode_buffer(128) # no way to know the max size
self.atcore.AT_GetEnumStringByIndex(self.handle, prop, index, string, len(string))
return string.value
def GetEnumStringAvailable(self, prop):
"""
Return in a list the strings corresponding of each possible value of an enum
"""
num_values = c_int()
self.atcore.AT_GetEnumCount(self.handle, prop, byref(num_values))
result = []
for i in range(num_values.value):
result.append(self.GetEnumStringByIndex(prop, i))
return result
# High level methods
def getSensorResolution(self):
"""
return (2-tuple int): size of the sensor (width, height) in pixel
"""
return (self.GetInt(u"SensorWidth"), self.GetInt(u"SensorHeight"))
def setTargetTemperature(self, temp):
"""
Change the targeted temperature of the CCD.
The cooler the sensor, the lower the dark noise. Not every temperature is
possible, but it will accommodate by targeting the closest one available.
temp (-400 < float < 100): temperature in C
"""
assert((-400 <= temp) and (temp <= 100))
# TODO apparently the Neo also has a "Temperature Control" which might be
# better to use
ranges = self.GetFloatRanges(u"TargetSensorTemperature")
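# Clamp temp into the allowed range: sorting the 3-tuple (min, max, temp)
# and taking the middle element yields the closest permitted value.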
temp = sorted(ranges + (temp,))[1]
self.SetFloat(u"TargetSensorTemperature", temp)
if temp > 20:
self.SetBool(u"SensorCooling", False)
else:
self.SetBool(u"SensorCooling", True)
# TODO: a more generic function which set up the fan to the right speed
# according to the target temperature?
def setFanSpeed(self, speed):
"""
Change the fan speed. Will accommodate to whichever speed is possible.
speed (0<=float<= 1): ratio of full speed -> 0 is slowest, 1.0 is fastest
"""
assert((0 <= speed) and (speed <= 1))
if not self.isImplemented(u"FanSpeed"):
return
# Let's assume it's linearly distributed in speed... at least it's true
# for the Neo and the SimCam. Looks like this for Neo:
# [u"Off", u"Low", u"On"]
values = self.GetEnumStringAvailable(u"FanSpeed")
val = values[int(round(speed * (len(values) - 1)))]
self.SetEnumString(u"FanSpeed", val)
@staticmethod
def find_closest(val, l):
"""
finds in a list the closest existing value from a given value
"""
return min(l, key=lambda x:abs(x - val))
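# e.g. (illustrative): find_closest(270, [100, 200, 280, 550]) returns 280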
def setReadoutRate(self, frequency):
"""
frequency (100*1e6, 200*1e6, 280*1e6, 550*1e6): the pixel readout rate in Hz
return (int): actual readout rate in Hz
"""
assert((0 <= frequency))
# returns strings like u"550 MHz"
rates = self.GetEnumStringAvailable(u"PixelReadoutRate")
values = (int(r.rstrip(u" MHz")) for r in rates)
closest = self.find_closest(frequency / 1e6, values)
self.SetEnumString(u"PixelReadoutRate", u"%d MHz" % closest)
return closest * 1e6
def _setBinning(self, binning):
"""
binning (int 1, 2, 3, 4, or 8): how many pixels horizontally and vertically
are combined to create "super pixels"
Note: super pixels are always square
return (tuple): metadata corresponding to the setup
"""
values = [1, 2, 3, 4, 8]
assert(binning in values)
# Nicely the API is different depending on cameras...
if self.isImplemented(u"AOIBinning"):
# Typically for the Neo
binning_str = u"%dx%d" % (binning, binning)
self.SetEnumString(u"AOIBinning", binning_str)
elif self.isImplemented(u"AOIHBin"):
if self.isWritable(u"AOIHBin"):
self.SetInt(u"AOIHBin", binning)
self.SetInt(u"AOIVBin", binning)
else:
# Typically for the simcam
act_binning = (self.GetInt(u"AOIHBin"), self.GetInt(u"AOIVBin"))
if act_binning != (binning, binning):
raise IOError("AndorCam3: Requested binning " +
str((binning, binning)) +
" does not match fixed binning " +
str(act_binning))
metadata = {}
metadata["Camera binning"] = "%dx%d" % (binning, binning)
return metadata
def getCameraMetadata(self):
"""
return the metadata corresponding to the camera in general (common to
many pictures)
return (dict : string -> string): the metadata
"""
metadata = {}
model = "Andor " + self.GetString(u"CameraModel")
metadata["Camera name"] = model
# TODO there seems to be a bug in SimCam v3.1: => check v3.3
# self.atcore.isImplemented(self.handle, u"SerialNumber") return true
        # but self.atcore.GetInt(self.handle, u"SerialNumber") fails with error code 2 = AT_ERR_NOTIMPLEMENTED
try:
serial = self.GetInt(u"SerialNumber")
metadata["Camera serial"] = str(serial)
except ATError:
pass # unknown value
try:
# Doesn't work on the normal camera, need to access the "System"
system = AndorCam3()
sdk = system.GetString(u"SoftwareVersion")
except ATError:
sdk = "unknown"
try:
firmware = self.GetString(u"FirmwareVersion")
except ATError:
firmware = "unknown" # Simcam has no firmware
metadata["Camera version"] = "firmware: '%s', driver:'%s'" % (firmware, sdk)
try:
psize = (self.GetFloat(u"PixelWidth"),
self.GetFloat(u"PixelHeight"))
metadata["Captor pixel width"] = psize[0] * 1e-6 # m
metadata["Captor pixel height"] = psize[1] * 1e-6 # m
except ATError:
pass # unknown value
return metadata
def _setSize(self, size):
"""
        Change the acquired image size (and position).
        size (2-tuple int): Width and height of the image. It will be centred
        on the sensor. It depends on the binning, so the same region has a
        size twice smaller if the binning is 2 instead of 1. It must be an
        allowed resolution.
"""
# TODO how to pass information on what is allowed?
resolution = (self.GetInt(u"SensorWidth"),
self.GetInt(u"SensorHeight"))
assert((1 <= size[0]) and (size[0] <= resolution[0]) and
(1 <= size[1]) and (size[1] <= resolution[1]))
# If the camera doesn't support Area of Interest, then it has to be the
# size of the sensor
if (not self.isImplemented(u"AOIWidth") or
not self.isWritable(u"AOIWidth")):
if size != resolution:
raise IOError("AndorCam3: Requested image size " + str(size) +
" does not match sensor resolution " + str(resolution))
return
# AOI
ranges = (self.GetIntRanges("AOIWidth"),
self.GetIntRanges("AOIHeight"))
assert((ranges[0][0] <= size[0]) and (size[0] <= ranges[0][1]) and
(ranges[1][0] <= size[1]) and (size[1] <= ranges[1][1]))
# TODO the Neo docs says "Sub images are all mid-point centered."
# So it might require specific computation for the left/top ?
# TODO check whether on Neo ranges[0][1] is 2592 or 2560, if 2592, it should be + 16
lt = ((ranges[0][1] - size[0]) / 2 + 1,
(ranges[1][1] - size[1]) / 2 + 1)
self.SetInt(u"AOIWidth", c_uint64(size[0]))
self.SetInt(u"AOILeft", c_uint64(lt[0]))
self.SetInt(u"AOIHeight", c_uint64(size[1]))
self.SetInt(u"AOITop", c_uint64(lt[1]))
def _setExposureTime(self, exp):
"""
Set the exposure time. It's automatically adapted to a working one.
exp (0<float): exposure time in seconds
return (tuple): metadata corresponding to the setup
"""
assert(0.0 < exp)
self.SetFloat(u"ExposureTime", exp)
metadata = {}
actual_exp = self.GetFloat(u"ExposureTime")
metadata["Exposure time"] = actual_exp # s
return metadata
def _setupBestQuality(self):
"""
Select parameters for the camera for the best quality
return (tuple): metadata corresponding to the setup
"""
metadata = {}
        # we are not in a hurry, so we can set up the slowest and least noisy
        # parameters:
# slow read out
# rolling shutter (global avoids tearing but it's unlikely to happen)
# 16 bit - Gain 1+4 (maximum)
# SpuriousNoiseFilter On (this is actually a software based method)
rate = self.setReadoutRate(100)
metadata["Pixel readout rate"] = rate # Hz
# print self.atcore.GetEnumStringAvailable(self.handle, u"ElectronicShutteringMode")
self.SetEnumString(u"ElectronicShutteringMode", u"Rolling")
#print self.atcore.GetEnumStringAvailable(self.handle, u"PreAmpGainControl")
if self.isImplemented(u"PreAmpGainControl"):
# If not, we are on a SimCam so it doesn't matter
self.SetEnumString(u"PreAmpGainControl", u"Gain 1 Gain 4 (16 bit)")
# self.SetEnumString(u"PreAmpGainSelector", u"Low")
# self.SetEnumString(u"PreAmpGain", u"x1")
# self.SetEnumString(u"PreAmpGainSelector", u"High")
# self.SetEnumString(u"PreAmpGain", u"x30")
# self.SetEnumString(u"PreAmpGainChannel", u"Low")
        # Allowed values of PixelEncoding depend on Gain: "Both" => Mono12Coded
try:
self.SetEnumString(u"PixelEncoding", u"Mono16")
metadata['Bits per pixel'] = 16
except ATError:
# Fallback to 12 bits (represented on 16 bits)
try:
self.SetEnumString(u"PixelEncoding", u"Mono12")
metadata['Bits per pixel'] = 12
except ATError:
self.SetEnumString(u"PixelEncoding", u"Mono12Coded")
metadata['Bits per pixel'] = 12
if self.isImplemented(u"SpuriousNoiseFilter"):
self.SetBool(u"SpuriousNoiseFilter", True)
metadata['Filter'] = "Spurious noise filter"
# Software is much slower than Internal (0.05 instead of 0.015 s)
self.SetEnumString(u"TriggerMode", u"Internal")
return metadata
def _allocate_buffer(self, size):
"""
returns a cbuffer of the right size for an image
"""
image_size_bytes = self.GetInt(u"ImageSizeBytes")
# The buffer might be bigger than AOIStride * AOIHeight if there is metadata
assert image_size_bytes >= (size[0] * size[1] * 2)
# allocating directly a numpy array doesn't work if there is metadata:
# ndbuffer = numpy.empty(shape=(stride / 2, size[1]), dtype="uint16")
# cbuffer = numpy.ctypeslib.as_ctypes(ndbuffer)
cbuffer = (c_byte * image_size_bytes)() # empty array
assert(addressof(cbuffer) % 8 == 0) # the SDK wants it aligned
return cbuffer
def _buffer_as_array(self, cbuffer, size):
"""
        Converts the buffer allocated for the image into an ndarray (zero-copy).
        return (numpy.ndarray)
"""
# actual size of a line in bytes (not pixel)
        try:
            stride = self.GetInt(u"AOIStride")
        except ATError:
            # SimCam doesn't support stride
            stride = self.GetInt(u"AOIWidth") * 2
        p = cast(cbuffer, POINTER(c_uint16))
        ndbuffer = numpy.ctypeslib.as_array(p, (stride // 2, size[1]))
        # crop the array in case of stride (should not cause copy)
        return ndbuffer[:size[0], :]
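    # Worked example (hedged, sizes illustrative): for a 16-bit image with
    # size == (100, 80) and AOIStride == 256 bytes, the array is first viewed
    # as (128, 80) and then cropped to its first 100 rows.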
def acquire(self, size, exp, binning=1):
"""
Set up the camera and acquire one image at the best quality for the given
parameters.
size (2-tuple int): Width and height of the image. It will be centred
        on the sensor. It depends on the binning, so the same region has a size
        twice smaller if the binning is 2 instead of 1. It must be an allowed
resolution.
exp (float): exposure time in second
binning (int 1, 2, 3, 4, or 8): how many pixels horizontally and vertically
are combined to create "super pixels"
return (2-tuple: numpy.ndarray, metadata): an array containing the image,
and a dict (string -> base types) containing the metadata
"""
assert not self.is_acquiring
assert not self.GetBool(u"CameraAcquiring")
self.is_acquiring = True
metadata = self.getCameraMetadata()
metadata.update(self._setupBestQuality())
# Binning affects max size, so change first
metadata.update(self._setBinning(binning))
self._setSize(size)
metadata.update(self._setExposureTime(exp))
cbuffer = self._allocate_buffer(size)
self.QueueBuffer(cbuffer)
# Acquire the image
self.Command(u"AcquisitionStart")
exposure_time = metadata["Exposure time"]
readout_time = size[0] * size[1] / metadata["Pixel readout rate"] # s
metadata["Acquisition date"] = time.time() # time at the beginning
pbuffer, buffersize = self.WaitBuffer(exposure_time + readout_time + 1)
metadata["Camera temperature"] = self.GetFloat(u"SensorTemperature")
# Cannot directly use pbuffer because we'd lose the reference to the
# memory allocation... and it'd get free'd at the end of the method
# So rely on the assumption cbuffer is used as is
assert(addressof(pbuffer.contents) == addressof(cbuffer))
array = self._buffer_as_array(cbuffer, size)
self.Command(u"AcquisitionStop")
self.Flush()
self.is_acquiring = False
return array, metadata
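    # Example (hedged sketch, not part of the driver): acquiring one
    # full-frame image. Opening the device follows the pattern used in
    # scan() below; the exposure value is arbitrary.
    #
    #   cam = AndorCam3()
    #   cam.handle = cam.Open(0)
    #   width, height = cam.getSensorResolution()
    #   image, md = cam.acquire((width, height), exp=0.1)
    #   print(md["Exposure time"], image.shape)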
def acquireFlow(self, callback, size, exp, binning=1, num=None):
"""
Set up the camera and acquire a flow of images at the best quality for the given
        parameters. Should not be called if a flow is already being acquired.
callback (callable (camera, numpy.ndarray, dict (string -> base types)) no return):
function called for each image acquired
size (2-tuple int): Width and height of the image. It will be centred
        on the sensor. It depends on the binning, so the same region has a size
        twice smaller if the binning is 2 instead of 1. It must be an allowed
resolution.
exp (float): exposure time in second
binning (int 1, 2, 3, 4, or 8): how many pixels horizontally and vertically
are combined to create "super pixels"
num (None or int): number of images to acquire, or infinite if None
        Returns immediately. To stop the acquisition, call stopAcquireFlow().
"""
assert not self.is_acquiring
assert not self.GetBool(u"CameraAcquiring")
self.is_acquiring = True
metadata = self.getCameraMetadata()
metadata.update(self._setupBestQuality())
# Binning affects max size, so change first
metadata.update(self._setBinning(binning))
self._setSize(size)
metadata.update(self._setExposureTime(exp))
exposure_time = metadata["Exposure time"]
# Set up thread
self.acquire_thread = threading.Thread(target=self._acquire_thread_run,
name="andorcam acquire flow thread",
args=(callback, size, exposure_time, metadata, num))
self.acquire_thread.start()
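    # Example (hedged sketch): grabbing ten frames asynchronously; the
    # callback signature follows the docstring above, names are illustrative.
    #
    #   def on_image(camera, array, metadata):
    #       print(metadata["Acquisition date"], array.shape)
    #
    #   cam.acquireFlow(on_image, (width, height), exp=0.05, num=10)
    #   cam.waitAcquireFlow()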
def _acquire_thread_run(self, callback, size, exp, metadata, num=None):
"""
The core of the acquisition thread. Runs until it has acquired enough
images or acquire_must_stop is True.
"""
assert (self.isImplemented(u"CycleMode") and
self.isWritable(u"CycleMode"))
self.SetEnumString(u"CycleMode", u"Continuous")
        # We don't use the framecount feature as it's not always present, and
        # it's easy to do in software.
        # Allocates a pipeline of two buffers, so that while we are
        # processing one buffer, the driver can already acquire the next image.
buffers = []
nbuffers = 2
for i in range(nbuffers):
cbuffer = self._allocate_buffer(size)
self.QueueBuffer(cbuffer)
buffers.append(cbuffer)
readout_time = size[0] * size[1] / metadata["Pixel readout rate"] # s
# Acquire the images
self.Command(u"AcquisitionStart")
while (not self.acquire_must_stop and (num is None or num > 0)):
metadata["Acquisition date"] = time.time() # time at the beginning
pbuffer, buffersize = self.WaitBuffer(exp + readout_time + 1)
# Cannot directly use pbuffer because we'd lose the reference to the
# memory allocation... and it'd get free'd at the end of the method
# So rely on the assumption cbuffer is used as is
cbuffer = buffers.pop(0)
assert(addressof(pbuffer.contents) == addressof(cbuffer))
array = self._buffer_as_array(cbuffer, size)
# next buffer
cbuffer = self._allocate_buffer(size)
self.QueueBuffer(cbuffer)
buffers.append(cbuffer)
callback(self, array, metadata)
if num is not None:
num -= 1
self.Command(u"AcquisitionStop")
self.Flush()
self.is_acquiring = False
def stopAcquireFlow(self, sync=False):
"""
Stop the acquisition of a flow of images.
        sync (boolean): if True, wait until the acquisition is finished before returning.
Calling with this flag activated from the acquisition callback is not
permitted (it would cause a dead-lock).
"""
self.acquire_must_stop = True
if sync:
self.waitAcquireFlow()
def waitAcquireFlow(self):
"""
        Waits until the acquisition of a flow of images has ended. Calling from the
acquisition callback is not permitted (it would cause a dead-lock).
"""
# "while" is mostly to not wait if it's already finished
while self.is_acquiring:
# join() already checks that we are not the current_thread()
#assert threading.current_thread() != self.acquire_thread
self.acquire_thread.join() # XXX timeout for safety?
def __del__(self):
self.Close()
self.FinaliseLibrary()
def selfTest(self):
"""
Check whether the connection to the camera works.
return (boolean): False if it detects any problem
"""
try:
model = self.GetString(u"CameraModel")
        except Exception as err:
print("Failed to read camera model: " + str(err))
return False
# Try to get an image with the default resolution
try:
resolution = (self.GetInt(u"SensorWidth"),
self.GetInt(u"SensorHeight"))
        except Exception as err:
print("Failed to read camera resolution: " + str(err))
return False
try:
im, metadata = self.acquire(resolution, 0.01)
        except Exception as err:
print("Failed to acquire an image: " + str(err))
return False
return True
@staticmethod
def scan():
"""
List all the available cameras.
Note: it's not recommended to call this method when cameras are being used
return (set of 3-tuple: device number (int), name (string), max resolution (2-tuple int))
"""
camera = AndorCam3() # system
dc = camera.GetInt(u"Device Count")
# print "found %d devices." % dc
# we reuse the same object to avoid init/del all the time
system_handle = camera.handle
cameras = set()
for i in range(dc):
camera.handle = camera.Open(i)
model = "Andor " + camera.GetString(u"CameraModel")
resolution = (camera.GetInt(u"SensorWidth"),
camera.GetInt(u"SensorHeight"))
cameras.add((i, model, resolution))
camera.Close()
camera.handle = system_handle # for the del() to work fine
return cameras
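    # Example (hedged sketch): enumerating the available cameras.
    #
    #   for num, model, resolution in AndorCam3.scan():
    #       print("%d: %s %s" % (num, model, resolution))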
# vim:tabstop=4:shiftwidth=4:expandtab:spelllang=en_gb:spell:
|
brandond/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/ovirt/ovirt_template_facts.py
|
55
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_template_facts
short_description: Retrieve facts about one or more oVirt/RHV templates
author: "Ondra Machacek (@machacekondra)"
version_added: "2.3"
description:
- "Retrieve facts about one or more oVirt/RHV templates."
notes:
- "This module creates a new top-level C(ovirt_templates) fact, which
contains a list of templates."
options:
pattern:
description:
- "Search term which is accepted by oVirt/RHV search backend."
- "For example to search template X from datacenter Y use following pattern:
name=X and datacenter=Y"
extends_documentation_fragment: ovirt_facts
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Gather facts about all templates whose names start with C(centos) and
# belong to data center C(west):
- ovirt_template_facts:
pattern: name=centos* and datacenter=west
- debug:
var: ovirt_templates
'''
RETURN = '''
ovirt_templates:
description: "List of dictionaries describing the templates. Template attributes are mapped to dictionary keys,
                  all template attributes can be found at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/template."
returned: On success.
type: list
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_facts_full_argument_spec,
)
def main():
argument_spec = ovirt_facts_full_argument_spec(
pattern=dict(default='', required=False),
)
module = AnsibleModule(argument_spec)
check_sdk(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
templates_service = connection.system_service().templates_service()
templates = templates_service.list(search=module.params['pattern'])
module.exit_json(
changed=False,
ansible_facts=dict(
ovirt_templates=[
get_dict_of_struct(
struct=c,
connection=connection,
fetch_nested=module.params.get('fetch_nested'),
attributes=module.params.get('nested_attributes'),
) for c in templates
],
),
)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == '__main__':
main()
|
tensorflow/profiler
|
refs/heads/master
|
plugin/tensorboard_plugin_profile/convert/tf_data_stats_proto_to_gviz.py
|
1
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""For conversion of CombinedTfDataStats protos to GViz DataTables.
Usage:
gviz_data_tables = generate_all_chart_tables(combined_tf_data_stats)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gviz_api
from tensorboard_plugin_profile.protobuf import tf_data_stats_pb2
def format_iterator_stat(iterator_metadata, iterator_stat):
return ("<div style='padding: 1px;'><b>{}</b><br/><div style='text-align: "
"left;'>Start Time: {} us<br/>Total Duration: {} us<br/>Self "
"Duration: {} us<br/># Calls: {}</div></div>").format(
iterator_metadata.name,
              int(iterator_stat.start_time_ps / 1_000_000),
              int(iterator_stat.duration_ps / 1_000_000),
              int(iterator_stat.self_time_ps / 1_000_000),
iterator_stat.num_calls)
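# Worked example (hedged): the divisions above convert picoseconds to
# microseconds, e.g. a duration_ps of 2_500_000 becomes
# int(2_500_000 / 1_000_000) == 2 us.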
def get_graph_table_args(combined_tf_data_stats):
"""Creates a gviz DataTable object from a CombinedTfDataStats proto.
Args:
combined_tf_data_stats: A tf_data_stats_pb2.CombinedTfDataStats.
Returns:
Returns a gviz_api.DataTable
"""
table_description = [
("host", "string", "Host"),
("input_pipeline", "string", "Input Pipeline"),
("rank", "number", "Rank"),
("name", "string", "Name"),
("parent", "string", "Parent"),
("type", "number", "Type"),
]
data = []
for host in combined_tf_data_stats.tf_data_stats:
iterator_metadata_map = combined_tf_data_stats.tf_data_stats[
host].iterator_metadata
input_pipelines = combined_tf_data_stats.tf_data_stats[host].input_pipelines
for input_pipeline_id in input_pipelines:
input_pipeline_stats = input_pipelines[input_pipeline_id]
rank = 0
for input_pipeline_stat in input_pipeline_stats.stats:
for iterator_id in sorted(input_pipeline_stat.iterator_stats):
iterator_stat = input_pipeline_stat.iterator_stats[iterator_id]
iterator_metadata = iterator_metadata_map[iterator_id]
blocking_type = 0
if iterator_stat.is_blocking:
if iterator_id == input_pipeline_stat.bottleneck_iterator_id:
blocking_type = 2
else:
blocking_type = 1
row = [
host,
input_pipeline_stats.metadata.name,
rank,
(str(iterator_id),
format_iterator_stat(iterator_metadata, iterator_stat)),
str(iterator_metadata.parent_id)
if iterator_metadata.parent_id else "",
blocking_type,
]
data.append(row)
rank += 1
return (table_description, data, {})
def generate_graph_table(combined_tf_data_stats):
(table_description, data,
custom_properties) = get_graph_table_args(combined_tf_data_stats)
return gviz_api.DataTable(table_description, data, custom_properties)
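# Example (hedged sketch): rendering the graph table for the frontend, using
# the same ToJSon() call as to_json() below.
#
#   table = generate_graph_table(combined_tf_data_stats)
#   json_str = table.ToJSon()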
def get_summary_table_args(combined_tf_data_stats):
"""Creates a gviz DataTable object from a CombinedTfDataStats proto.
Args:
combined_tf_data_stats: A tf_data_stats_pb2.CombinedTfDataStats.
Returns:
Returns a gviz_api.DataTable
"""
table_description = [
("host", "string", "Host"),
("input_pipeline", "string", "Input Pipeline"),
("min", "number", "Min (us)"),
("mean", "number", "Avg (us)"),
("max", "number", "Max (us)"),
("num_calls", "number", "# calls"),
("num_slow_calls", "number", "# slow calls"),
]
data = []
for host in combined_tf_data_stats.tf_data_stats:
input_pipelines = combined_tf_data_stats.tf_data_stats[host].input_pipelines
for input_pipeline_id in input_pipelines:
input_pipeline_stats = input_pipelines[input_pipeline_id]
row = [
host,
input_pipeline_stats.metadata.name,
          int(input_pipeline_stats.min_latency_ps / 1_000_000),
          int(input_pipeline_stats.avg_latency_ps / 1_000_000),
          int(input_pipeline_stats.max_latency_ps / 1_000_000),
len(input_pipeline_stats.stats),
input_pipeline_stats.num_slow_calls,
]
data.append(row)
return (table_description, data, {})
def generate_summary_table(combined_tf_data_stats):
(table_description, data,
custom_properties) = get_summary_table_args(combined_tf_data_stats)
return gviz_api.DataTable(table_description, data, custom_properties)
def format_bottleneck(iterator_name, iterator_long_name, iterator_latency_ps):
return ("<u>Iterator Type</u>: <b>{}</b><br/><u>Long Name</u>: "
"{}<br/><u>Latency</u>: {:,} us").format(
iterator_name, iterator_long_name,
              int(iterator_latency_ps / 1_000_000))
def get_bottleneck_analysis_table_args(combined_tf_data_stats):
"""Creates a gviz DataTable object from a CombinedTfDataStats proto.
Args:
combined_tf_data_stats: A tf_data_stats_pb2.CombinedTfDataStats.
Returns:
Returns a gviz_api.DataTable
"""
table_description = [
("host", "string", "Host"),
("input_pipeline", "string", "Input Pipeline"),
("max_latency", "number", "Max Latency (us)"),
("bottleneck", "string", "Bottleneck"),
("suggestion", "string", "Suggestion"),
]
data = []
for bottleneck_analysis in combined_tf_data_stats.bottleneck_analysis:
row = [
bottleneck_analysis.host,
bottleneck_analysis.input_pipeline,
        int(bottleneck_analysis.max_latency_ps / 1_000_000),
format_bottleneck(bottleneck_analysis.iterator_name,
bottleneck_analysis.iterator_long_name,
bottleneck_analysis.iterator_latency_ps),
bottleneck_analysis.suggestion,
]
data.append(row)
custom_properties = {
"is_input_bound":
"true" if combined_tf_data_stats.is_input_bound else "false",
"summary_message":
combined_tf_data_stats.summary,
}
return (table_description, data, custom_properties)
def generate_bottleneck_analysis_table(combined_tf_data_stats):
  (table_description, data,
   custom_properties) = get_bottleneck_analysis_table_args(combined_tf_data_stats)
return gviz_api.DataTable(table_description, data, custom_properties)
def generate_all_chart_tables(combined_tf_data_stats):
"""Converts a CombinedTfDataStats proto to gviz DataTables."""
return [
generate_graph_table(combined_tf_data_stats),
generate_summary_table(combined_tf_data_stats),
generate_bottleneck_analysis_table(combined_tf_data_stats)
]
def to_json(raw_data):
"""Converts a serialized CombinedTfDataStats string to json."""
combined_tf_data_stats = tf_data_stats_pb2.CombinedTfDataStats()
combined_tf_data_stats.ParseFromString(raw_data)
all_chart_tables = generate_all_chart_tables(combined_tf_data_stats)
json_join = ",".join(x.ToJSon() if x else "{}" for x in all_chart_tables)
return "[" + json_join + "]"
|
budach/seqan
|
refs/heads/master
|
util/py_lib/seqan/dox/test/test_proc_doc.py
|
9
|
#!/usr/bin/env python
"""Tests for the proc_doc module."""
import sys
import os.path
import unittest
import seqan.dox.lexer as lexer
import seqan.dox.dox_tokens as dox_tokens
import seqan.dox.dox_parser as dox_parser
import seqan.dox.proc_doc as proc_doc
import seqan.dox.raw_doc as raw_doc
import seqan.dox.pure as pure
class TextNodeTest(unittest.TestCase):
def testRenderSimple(self):
parent = proc_doc.TextNode(text='This is some text.')
self.assertEqual(parent.toHtmlLike(), 'This is some text.')
def testRenderNested(self):
parent = proc_doc.TextNode(
type='a', attrs={'href': 'http://www.example.com'})
parent.addChild(proc_doc.TextNode(text='A word'))
parent.addChild(proc_doc.TextNode(text=' does not make a '))
parent.addChild(proc_doc.TextNode(text='sentence'))
self.assertEqual(parent.toHtmlLike(),
'<a href="http://www.example.com">A word does '
'not make a sentence</a>')
class TestTextNodeConversion(unittest.TestCase):
def setUp(self):
base_dir = os.path.dirname(os.path.realpath(__file__))
self.lexer = lexer.Lexer(dox_tokens.LEXER_TOKENS, skip_whitespace=False)
self.doc_proc = proc_doc.DocProcessor(include_dirs=[base_dir],
expected_tags=pure.EXPECTED_TAGS)
self.conv = proc_doc.RawTextToTextNodeConverter(doc_proc=self.doc_proc,
expected_tags=pure.EXPECTED_TAGS)
def strToTokens(self, s):
self.lexer.input(s)
tokens = [t for t in self.lexer.tokens()]
return tokens[:-1]
def testConversionPlain(self):
r = raw_doc.RawText(self.strToTokens('This is some example.'))
n = self.conv.run(r)
self.assertEqual(n.toHtmlLike(), '<div>This is some example.</div>')
def testConversionOneLevel(self):
r = raw_doc.RawText(self.strToTokens('This <b>is</b> some example.'))
n = self.conv.run(r)
self.assertEqual(n.toHtmlLike(), '<div>This <b>is</b> some example.</div>')
    def testConversionTextWithList(self):
txt = 'This is a list: <ul><li>foo</li><li>ba</li></ul>'
r = raw_doc.RawText(self.strToTokens(txt))
n = self.conv.run(r)
self.assertEqual(n.toHtmlLike(), '<div>This is a list: <ul><li>foo</li><li>ba</li></ul></div>')
def testConversionList(self):
txt = '<ul><li>Lists</li><li>Lists again!</li></ul>'
r = raw_doc.RawText(self.strToTokens(txt))
n = self.conv.run(r)
self.assertEqual(n.toHtmlLike(), '<div>%s</div>' % txt)
class TestConverterBase(unittest.TestCase):
def createDocProcessor(self):
base_dir = os.path.dirname(os.path.realpath(__file__))
return proc_doc.DocProcessor(include_dirs=[base_dir],
expected_tags=pure.EXPECTED_TAGS)
def createLexer(self, text):
"""Create a lexer.Lexer object with the given text."""
lex = lexer.Lexer(dox_tokens.LEXER_TOKENS, skip_whitespace=False)
lex.input(text)
return lex
def parseText(self, text):
"""Create a dox parser and let it parse the given text.
        Return the resulting documentation.
"""
parser = dox_parser.Parser()
parser.parse(self.createLexer(text))
return parser.documentation
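    # Hedged sketch of how the helpers above combine in the subclasses below
    # (self.conv is set up in each subclass's setUp):
    #
    #   doc = self.parseText('@page Page Page Title')  # raw documentation
    #   raw_page = doc.entries[0]                      # first raw entry
    #   proc_page = self.conv.process(raw_page)        # processed entry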
class TestConvertPageWithIncludes(TestConverterBase):
"""Tests for @page with @include and @snippet commands."""
def setUp(self):
self.proc = self.createDocProcessor()
self.conv = self.proc.converters['page']
def testInclude(self):
txt = ('@page Page Page Title\n'
'@section Example\n'
'@include example.cpp')
raw_page = self.parseText(txt).entries[0]
proc_page = self.conv.process(raw_page)
txt = ('<div><h1>Example</h1><dox:code source="include" type=".cpp" path="example.cpp">#include <iostream>\n'
'\n'
'int main(int arg, char const ** argv)\n'
'{\n'
' //![Print to stdout]\n'
' std::cout << "This is an example.\\n";\n'
' //![Print to stdout]\n'
' return 0;\n'
'}\n'
'</dox:code></div>')
self.assertMultiLineEqual(proc_page.body.toHtmlLike(), txt)
def testSnippet(self):
txt = ('@page Page Page Title\n'
'@section Example\n'
'@snippet example.cpp Print to stdout')
raw_page = self.parseText(txt).entries[0]
proc_page = self.conv.process(raw_page)
txt = ('<div><h1>Example</h1><dox:code source="snippet" type=".cpp" path="example.cpp">'
' std::cout << "This is an example.\\n";'
'</dox:code></div>')
self.assertMultiLineEqual(proc_page.body.toHtmlLike(), txt)
class TestConvertPageWithLink(TestConverterBase):
"""Tests for @page with @link command."""
def setUp(self):
base_dir = os.path.dirname(os.path.realpath(__file__))
self.proc = self.createDocProcessor()
self.conv = self.proc.converters['page']
def testLinkWithTitle(self):
txt = ('@page Page Page Title\n'
'\n'
'A link with @link OtherPage a title @endlink.\n')
raw_page = self.parseText(txt).entries[0]
proc_page = self.conv.process(raw_page)
txt = ('<div><p>A link with <a href="seqan:OtherPage">a title</a>.'
'</p></div>')
self.assertMultiLineEqual(proc_page.body.toHtmlLike(), txt)
def testLinkWithoutTitle(self):
txt = ('@page Page Page Title\n'
'\n'
'And a link without a title: @link OtherPage @endlink.\n')
raw_page = self.parseText(txt).entries[0]
proc_page = self.conv.process(raw_page)
txt = ('<div><p>And a link without a title: <a href="seqan:OtherPage">OtherPage</a>'
'.</p></div>')
self.assertMultiLineEqual(proc_page.body.toHtmlLike(), txt)
class TestConvertPageWithImage(TestConverterBase):
"""Tests for @page with <img> tag."""
def setUp(self):
self.proc = self.createDocProcessor()
self.conv = self.proc.converters['page']
    def testImageWithTitle(self):
txt = ('@page Page Page Title\n'
'\n'
'Here is an image: <img src="img.png" title="My image" />.\n')
raw_page = self.parseText(txt).entries[0]
proc_page = self.conv.process(raw_page)
txt = ('<div><p>Here is an image: <img src="img.png" '
'title="My image" />.</p></div>')
self.assertMultiLineEqual(proc_page.body.toHtmlLike(), txt)
def testLinkWithoutTitle(self):
txt = ('@page Page Page Title\n'
'\n'
'And a link without a title: @link OtherPage @endlink.\n')
raw_page = self.parseText(txt).entries[0]
proc_page = self.conv.process(raw_page)
txt = ('<div><p>And a link without a title: <a href="seqan:OtherPage">'
'OtherPage</a>.</p></div>')
self.assertMultiLineEqual(proc_page.body.toHtmlLike(), txt)
class TestConvertPage(TestConverterBase):
def setUp(self):
self.proc = self.createDocProcessor()
self.conv = self.proc.converters['page']
def testConvertMinimal(self):
txt = '@page Page Page Title <i>italic</i>.'
raw_page = self.parseText(txt).entries[0]
proc_page = self.conv.process(raw_page)
self.assertEqual(proc_page.name, 'Page')
self.assertEqual(proc_page.kind, 'page')
self.assertEqual(proc_page.title, 'Page Title <i>italic</i>.')
def testConvertFull(self):
txt = ('@page Page Page Title\n'
'@brief This is the <i>very important</i> page brief.\n'
'\n'
'This is the first paragraph.\n'
'@section First <em>heading</em>\n'
'\n'
'Second paragraph\n'
'@see Link Target\n')
raw_page = self.parseText(txt).entries[0]
proc_page = self.conv.process(raw_page)
self.assertEqual(proc_page.name, 'Page')
self.assertEqual(proc_page.kind, 'page')
self.assertEqual(proc_page.title, 'Page Title')
txt = '<div>This is the <i>very important</i> page brief.</div>'
self.assertEqual(proc_page.brief.toHtmlLike(), txt)
txt = ('<div>'
'<p>This is the first paragraph.</p>'
'<h1>First <em>heading</em></h1>'
'<p>Second paragraph</p>'
'</div>'
)
self.assertEqual(proc_page.body.toHtmlLike(), txt)
txt = '<a href="seqan:Link Target">Link Target</a>'
self.assertEqual(len(proc_page.sees), 1)
self.assertEqual(proc_page.sees[0].toHtmlLike(), txt)
def testConvertWithCode(self):
txt = ('@page Page Page Title\n'
'@code{.cpp}\n'
'int main(int argc, char const ** argv) {\n return 0;\n}\n'
'@endcode\n')
raw_page = self.parseText(txt).entries[0]
proc_page = self.conv.process(raw_page)
txt = ('<div>'
'<dox:code type=".cpp">'
'int main(int argc, char const ** argv) {\n return 0;\n}'
'</dox:code>'
'</div>')
self.assertEqual(proc_page.body.toHtmlLike(), txt)
class TestConvertGroup(TestConverterBase):
def setUp(self):
self.proc = self.createDocProcessor()
self.conv = self.proc.converters['defgroup']
def testConvertMinimal(self):
txt = '@defgroup Group Group Title <i>italic</i>.'
raw_group = self.parseText(txt).entries[0]
proc_group = self.conv.process(raw_group)
self.assertEqual(proc_group.name, 'Group')
self.assertEqual(proc_group.kind, 'group')
self.assertEqual(proc_group.title, 'Group Title <i>italic</i>.')
def testConvertFull(self):
txt = ('@defgroup Group Group Title\n'
'@brief This is the <i>very important</i> group brief.\n'
'\n'
'This is the first paragraph.\n'
'@section First <em>heading</em>\n'
'\n'
'Second paragraph\n'
'@see Link Target\n')
raw_group = self.parseText(txt).entries[0]
proc_group = self.conv.process(raw_group)
self.assertEqual(proc_group.name, 'Group')
self.assertEqual(proc_group.kind, 'group')
self.assertEqual(proc_group.title, 'Group Title')
txt = '<div>This is the <i>very important</i> group brief.</div>'
self.assertEqual(proc_group.brief.toHtmlLike(), txt)
txt = ('<div>'
'<p>This is the first paragraph.</p>'
'<h1>First <em>heading</em></h1>'
'<p>Second paragraph</p>'
'</div>'
)
self.assertEqual(proc_group.body.toHtmlLike(), txt)
txt = '<a href="seqan:Link Target">Link Target</a>'
self.assertEqual(len(proc_group.sees), 1)
self.assertEqual(proc_group.sees[0].toHtmlLike(), txt)
def testConvertWithCode(self):
txt = ('@defgroup Group Group Title\n'
'@code{.cpp}\n'
'int main(int argc, char const ** argv) {\n return 0;\n}\n'
'@endcode\n')
raw_group = self.parseText(txt).entries[0]
proc_group = self.conv.process(raw_group)
txt = ('<div>'
'<dox:code type=".cpp">'
'int main(int argc, char const ** argv) {\n return 0;\n}'
'</dox:code>'
'</div>')
self.assertEqual(proc_group.body.toHtmlLike(), txt)
class TestConvertEnum(TestConverterBase):
def setUp(self):
self.proc = self.createDocProcessor()
self.conv = self.proc.converters['enum']
def testConvertMinimal(self):
txt = '@enum MyEnum My Enum'
raw_enum = self.parseText(txt).entries[0]
proc_enum = self.conv.process(raw_enum)
self.assertEqual(proc_enum.name, 'MyEnum')
self.assertEqual(proc_enum.title, 'My Enum')
def testConvertFull(self):
txt = ('@enum EnumName Enum Name\n'
'@headerfile <seqan/header.h>\n'
'@headerfile <seqan/header2.h>\n'
'@brief This is the <i>very important</i> enum brief.\n'
'@deprecated Deprecation msg.\n'
'@signature Enum Name;\n'
'\n'
'This is the first paragraph.\n'
'@section First <em>heading</em>\n'
'\n'
'Second paragraph\n'
'@see Link Target\n')
raw_enum = self.parseText(txt).entries[0]
proc_enum = self.conv.process(raw_enum)
self.assertEqual(proc_enum.name, 'EnumName')
self.assertEqual(proc_enum.title, 'Enum Name')
self.assertEqual(proc_enum.kind, 'enum')
self.assertEqual(proc_enum.headerfiles, ['<seqan/header.h>', '<seqan/header2.h>'])
self.assertEqual(len(proc_enum.signatures), 1)
self.assertEqual(proc_enum.signatures[0].toHtmlLike(), '<div>Enum Name;</div>')
txt = '<div>This is the <i>very important</i> enum brief.</div>'
self.assertEqual(proc_enum.brief.toHtmlLike(), txt)
self.assertEqual(len(proc_enum.deprecation_msgs), 1)
self.assertEqual(proc_enum.deprecation_msgs[0].toHtmlLike(), '<div>Deprecation msg.</div>')
txt = ('<div>'
'<p>This is the first paragraph.</p>'
'<h1>First <em>heading</em></h1>'
'<p>Second paragraph</p>'
'</div>'
)
self.assertEqual(proc_enum.body.toHtmlLike(), txt)
txt = '<a href="seqan:Link Target">Link Target</a>'
self.assertEqual(len(proc_enum.sees), 1)
self.assertEqual(proc_enum.sees[0].toHtmlLike(), txt)
class TestConvertAdaption(TestConverterBase):
def setUp(self):
self.proc = self.createDocProcessor()
self.conv = self.proc.converters['adaption']
def testConvertMinimal(self):
txt = '@adaption MyAdaption My Adaption'
raw_adaption = self.parseText(txt).entries[0]
proc_adaption = self.conv.process(raw_adaption)
self.assertEqual(proc_adaption.name, 'MyAdaption')
self.assertEqual(proc_adaption.title, 'My Adaption')
def testConvertFull(self):
txt = ('@adaption AdaptionName Adaption Name\n'
'@headerfile <seqan/header.h>\n'
'@headerfile <seqan/header2.h>\n'
'@brief This is the <i>very important</i> adaption brief.\n'
'@deprecated Deprecation msg.\n'
'@signature Adaption Name;\n'
'\n'
'This is the first paragraph.\n'
'@section First <em>heading</em>\n'
'\n'
'Second paragraph\n'
'@see Link Target\n')
raw_adaption = self.parseText(txt).entries[0]
proc_adaption = self.conv.process(raw_adaption)
self.assertEqual(proc_adaption.name, 'AdaptionName')
self.assertEqual(proc_adaption.title, 'Adaption Name')
self.assertEqual(proc_adaption.kind, 'adaption')
self.assertEqual(proc_adaption.headerfiles, ['<seqan/header.h>', '<seqan/header2.h>'])
self.assertEqual(len(proc_adaption.signatures), 1)
self.assertEqual(proc_adaption.signatures[0].toHtmlLike(), '<div>Adaption Name;</div>')
txt = '<div>This is the <i>very important</i> adaption brief.</div>'
self.assertEqual(proc_adaption.brief.toHtmlLike(), txt)
self.assertEqual(len(proc_adaption.deprecation_msgs), 1)
self.assertEqual(proc_adaption.deprecation_msgs[0].toHtmlLike(), '<div>Deprecation msg.</div>')
txt = ('<div>'
'<p>This is the first paragraph.</p>'
'<h1>First <em>heading</em></h1>'
'<p>Second paragraph</p>'
'</div>'
)
self.assertEqual(proc_adaption.body.toHtmlLike(), txt)
txt = '<a href="seqan:Link Target">Link Target</a>'
self.assertEqual(len(proc_adaption.sees), 1)
self.assertEqual(proc_adaption.sees[0].toHtmlLike(), txt)
class TestConvertTypedef(TestConverterBase):
def setUp(self):
self.proc = self.createDocProcessor()
self.conv = self.proc.converters['global_typedef']
def testConvertMinimal(self):
txt = '@typedef MyTypedef My Typedef'
raw_typedef = self.parseText(txt).entries[0]
proc_typedef = self.conv.process(raw_typedef)
self.assertEqual(proc_typedef.name, 'MyTypedef')
self.assertEqual(proc_typedef.title, 'My Typedef')
def testConvertFull(self):
txt = ('@typedef TypedefName Typedef Name\n'
'@headerfile <seqan/header.h>\n'
'@headerfile <seqan/header2.h>\n'
'@brief This is the <i>very important</i> typedef brief.\n'
'@deprecated Deprecation msg.\n'
'@signature typedef int Name;\n'
'\n'
'This is the first paragraph.\n'
'@section First <em>heading</em>\n'
'\n'
'Second paragraph\n'
'@see Link Target\n')
raw_typedef = self.parseText(txt).entries[0]
proc_typedef = self.conv.process(raw_typedef)
self.assertEqual(proc_typedef.name, 'TypedefName')
self.assertEqual(proc_typedef.title, 'Typedef Name')
self.assertEqual(proc_typedef.kind, 'global_typedef')
self.assertEqual(proc_typedef.headerfiles, ['<seqan/header.h>', '<seqan/header2.h>'])
self.assertEqual(len(proc_typedef.signatures), 1)
self.assertEqual(proc_typedef.signatures[0].toHtmlLike(), '<div>typedef int Name;</div>')
txt = '<div>This is the <i>very important</i> typedef brief.</div>'
self.assertEqual(proc_typedef.brief.toHtmlLike(), txt)
self.assertEqual(len(proc_typedef.deprecation_msgs), 1)
self.assertEqual(proc_typedef.deprecation_msgs[0].toHtmlLike(), '<div>Deprecation msg.</div>')
txt = ('<div>'
'<p>This is the first paragraph.</p>'
'<h1>First <em>heading</em></h1>'
'<p>Second paragraph</p>'
'</div>'
)
self.assertEqual(proc_typedef.body.toHtmlLike(), txt)
txt = '<a href="seqan:Link Target">Link Target</a>'
self.assertEqual(len(proc_typedef.sees), 1)
self.assertEqual(proc_typedef.sees[0].toHtmlLike(), txt)
class TestConvertConcept(TestConverterBase):
def setUp(self):
self.proc = self.createDocProcessor()
self.conv = self.proc.converters['concept']
def testConvertMinimal(self):
txt = '@concept MyConcept My Concept'
raw_concept = self.parseText(txt).entries[0]
proc_concept = self.conv.process(raw_concept)
self.assertEqual(proc_concept.name, 'MyConcept')
self.assertEqual(proc_concept.title, 'My Concept')
self.assertEqual(proc_concept.kind, 'concept')
def testConvertFull(self):
txt = ('@concept ConceptName Concept Name\n'
'@headerfile <seqan/header.h>\n'
'@headerfile <seqan/header2.h>\n'
'@extends Other Concept\n'
'@brief This is the <i>very important</i> concept brief.\n'
'@deprecated Deprecation msg.\n'
'@signature concept Name;\n'
'\n'
'This is the first paragraph.\n'
'@section First <em>heading</em>\n'
'\n'
'Second paragraph\n'
'@see Link Target\n')
raw_concept = self.parseText(txt).entries[0]
proc_concept = self.conv.process(raw_concept)
self.assertEqual(proc_concept.name, 'ConceptName')
self.assertEqual(proc_concept.title, 'Concept Name')
self.assertEqual(proc_concept.kind, 'concept')
self.assertEqual(proc_concept.headerfiles, ['<seqan/header.h>', '<seqan/header2.h>'])
self.assertEqual(len(proc_concept.signatures), 1)
self.assertEqual(proc_concept.signatures[0].toHtmlLike(), '<div>concept Name;</div>')
self.assertEqual(len(proc_concept.extends), 1)
self.assertEqual(proc_concept.extends[0], 'Other Concept')
txt = '<div>This is the <i>very important</i> concept brief.</div>'
self.assertEqual(proc_concept.brief.toHtmlLike(), txt)
self.assertEqual(len(proc_concept.deprecation_msgs), 1)
self.assertEqual(proc_concept.deprecation_msgs[0].toHtmlLike(), '<div>Deprecation msg.</div>')
txt = ('<div>'
'<p>This is the first paragraph.</p>'
'<h1>First <em>heading</em></h1>'
'<p>Second paragraph</p>'
'</div>'
)
self.assertEqual(proc_concept.body.toHtmlLike(), txt)
txt = '<a href="seqan:Link Target">Link Target</a>'
self.assertEqual(len(proc_concept.sees), 1)
self.assertEqual(proc_concept.sees[0].toHtmlLike(), txt)
class TestConvertClass(TestConverterBase):
def setUp(self):
self.proc = self.createDocProcessor()
self.conv = self.proc.converters['class']
def testConvertMinimal(self):
txt = '@class MyClass My Class'
raw_class = self.parseText(txt).entries[0]
proc_class = self.conv.process(raw_class)
self.assertEqual(proc_class.name, 'MyClass')
self.assertEqual(proc_class.title, 'My Class')
self.assertEqual(proc_class.kind, 'class')
def testConvertFull(self):
txt = ('@class ClassName Class Name\n'
'@headerfile <seqan/header.h>\n'
'@headerfile <seqan/header2.h>\n'
'@implements A Class\n'
'@extends Other Class\n'
'@brief This is the <i>very important</i> class brief.\n'
'@deprecated Deprecation msg.\n'
'@signature template <typename T>\n'
' class Name;\n'
'\n'
'This is the first paragraph.\n'
'@section First <em>heading</em>\n'
'\n'
'Second paragraph\n'
'@see Link Target\n')
raw_class = self.parseText(txt).entries[0]
proc_class = self.conv.process(raw_class)
self.assertEqual(proc_class.name, 'ClassName')
self.assertEqual(proc_class.title, 'Class Name')
self.assertEqual(proc_class.kind, 'specialization')
self.assertEqual(proc_class.headerfiles, ['<seqan/header.h>', '<seqan/header2.h>'])
self.assertEqual(len(proc_class.signatures), 1)
self.assertEqual(proc_class.signatures[0].toHtmlLike(), '<div>template <typename T>\nclass Name;</div>')
self.assertEqual(len(proc_class.extends), 1)
self.assertEqual(proc_class.extends[0], 'Other Class')
self.assertEqual(len(proc_class.implements), 1)
self.assertEqual(proc_class.implements[0], 'A Class')
txt = '<div>This is the <i>very important</i> class brief.</div>'
self.assertEqual(proc_class.brief.toHtmlLike(), txt)
self.assertEqual(len(proc_class.deprecation_msgs), 1)
self.assertEqual(proc_class.deprecation_msgs[0].toHtmlLike(), '<div>Deprecation msg.</div>')
txt = ('<div>'
'<p>This is the first paragraph.</p>'
'<h1>First <em>heading</em></h1>'
'<p>Second paragraph</p>'
'</div>'
)
self.assertEqual(proc_class.body.toHtmlLike(), txt)
txt = '<a href="seqan:Link Target">Link Target</a>'
self.assertEqual(len(proc_class.sees), 1)
self.assertEqual(proc_class.sees[0].toHtmlLike(), txt)
class TestConvertTag(TestConverterBase):
def setUp(self):
self.proc = self.createDocProcessor()
self.conv = self.proc.converters['tag']
def testConvertMinimal(self):
txt = '@tag MyTag My Tag'
raw_tag = self.parseText(txt).entries[0]
proc_tag = self.conv.process(raw_tag)
self.assertEqual(proc_tag.name, 'MyTag')
self.assertEqual(proc_tag.title, 'My Tag')
self.assertEqual(proc_tag.kind, 'tag')
def testConvertFull(self):
txt = ('@tag TagName Tag Name\n'
'@headerfile <seqan/header.h>\n'
'@headerfile <seqan/header2.h>\n'
'@brief This is the <i>very important</i> tag brief.\n'
'@deprecated Deprecation msg.\n'
'@signature typedef Tag<TagName_> TagName;\n'
'\n'
'This is the first paragraph.\n'
'@section First <em>heading</em>\n'
'\n'
'Second paragraph\n'
'@see Link Target\n')
raw_tag = self.parseText(txt).entries[0]
proc_tag = self.conv.process(raw_tag)
self.assertEqual(proc_tag.name, 'TagName')
self.assertEqual(proc_tag.title, 'Tag Name')
self.assertEqual(proc_tag.kind, 'tag')
self.assertEqual(proc_tag.headerfiles, ['<seqan/header.h>', '<seqan/header2.h>'])
self.assertEqual(len(proc_tag.signatures), 1)
self.assertEqual(proc_tag.signatures[0].toHtmlLike(), '<div>typedef Tag<TagName_> TagName;</div>')
txt = '<div>This is the <i>very important</i> tag brief.</div>'
self.assertEqual(proc_tag.brief.toHtmlLike(), txt)
self.assertEqual(len(proc_tag.deprecation_msgs), 1)
self.assertEqual(proc_tag.deprecation_msgs[0].toHtmlLike(), '<div>Deprecation msg.</div>')
txt = ('<div>'
'<p>This is the first paragraph.</p>'
'<h1>First <em>heading</em></h1>'
'<p>Second paragraph</p>'
'</div>'
)
self.assertEqual(proc_tag.body.toHtmlLike(), txt)
txt = '<a href="seqan:Link Target">Link Target</a>'
self.assertEqual(len(proc_tag.sees), 1)
self.assertEqual(proc_tag.sees[0].toHtmlLike(), txt)
def testConvertFullGrouped(self):
txt = ('@tag Group#TagName Tag Name\n'
'@headerfile <seqan/header.h>\n'
'@headerfile <seqan/header2.h>\n'
'@brief This is the <i>very important</i> tag brief.\n'
'@deprecated Deprecation msg.\n'
'@signature typedef Tag<TagName_> TagName;\n'
'\n'
'This is the first paragraph.\n'
'@section First <em>heading</em>\n'
'\n'
'Second paragraph\n'
'@see Link Target\n')
raw_tag = self.parseText(txt).entries[0]
proc_tag = self.conv.process(raw_tag)
self.assertEqual(proc_tag.name, 'Group#TagName')
self.assertEqual(proc_tag.title, 'Tag Name')
self.assertEqual(proc_tag.kind, 'grouped_tag')
self.assertEqual(proc_tag.headerfiles, ['<seqan/header.h>', '<seqan/header2.h>'])
self.assertEqual(len(proc_tag.signatures), 1)
self.assertEqual(proc_tag.signatures[0].toHtmlLike(), '<div>typedef Tag<TagName_> TagName;</div>')
txt = '<div>This is the <i>very important</i> tag brief.</div>'
self.assertEqual(proc_tag.brief.toHtmlLike(), txt)
self.assertEqual(len(proc_tag.deprecation_msgs), 1)
self.assertEqual(proc_tag.deprecation_msgs[0].toHtmlLike(), '<div>Deprecation msg.</div>')
txt = ('<div>'
'<p>This is the first paragraph.</p>'
'<h1>First <em>heading</em></h1>'
'<p>Second paragraph</p>'
'</div>'
)
self.assertEqual(proc_tag.body.toHtmlLike(), txt)
txt = '<a href="seqan:Link Target">Link Target</a>'
self.assertEqual(len(proc_tag.sees), 1)
self.assertEqual(proc_tag.sees[0].toHtmlLike(), txt)
class TestConvertFunction(TestConverterBase):
def setUp(self):
self.proc = self.createDocProcessor()
self.conv = self.proc.converters['global_function']
def testConvertMinimalGlobal(self):
txt = '@fn myFunction my Function'
raw_function = self.parseText(txt).entries[0]
proc_function = self.conv.process(raw_function)
self.assertEqual(proc_function.name, 'myFunction')
self.assertEqual(proc_function.title, 'my Function')
self.assertEqual(proc_function.kind, 'global_function')
def testConvertMinimalInterface(self):
txt = '@fn Klass#myFunction my Function'
raw_function = self.parseText(txt).entries[0]
proc_function = self.conv.process(raw_function)
self.assertEqual(proc_function.name, 'Klass#myFunction')
self.assertEqual(proc_function.title, 'my Function')
self.assertEqual(proc_function.kind, 'interface_function')
def testConvertMinimalMember(self):
txt = '@fn Klass::myFunction my Function'
raw_function = self.parseText(txt).entries[0]
proc_function = self.conv.process(raw_function)
self.assertEqual(proc_function.name, 'Klass::myFunction')
self.assertEqual(proc_function.title, 'my Function')
self.assertEqual(proc_function.kind, 'member_function')
def testConvertFullGlobal(self):
txt = ('@fn myFunction my Function\n'
'@headerfile <seqan/header.h>\n'
'@headerfile <seqan/header2.h>\n'
'@brief This is the <i>very important</i> class brief.\n'
'@deprecated Deprecation msg.\n'
'@signature template <typename T1>\n'
' TReturn foo(T1 x);\n'
'@param[in] x The parameter\n'
'@tparam T1 The type of the first template parameter.\n'
'@return TReturn The return value.\n'
'@throw Exception The exception type.\n'
'\n'
'This is the first paragraph.\n'
'@section First <em>heading</em>\n'
'\n'
'Second paragraph\n'
'@see Link Target\n')
raw_function = self.parseText(txt).entries[0]
proc_function = self.conv.process(raw_function)
self.assertEqual(proc_function.name, 'myFunction')
self.assertEqual(proc_function.title, 'my Function')
self.assertEqual(proc_function.kind, 'global_function')
self.assertEqual(proc_function.headerfiles, ['<seqan/header.h>', '<seqan/header2.h>'])
# params
self.assertEqual(len(proc_function.params), 1)
self.assertEqual(proc_function.params[0].name, 'x')
txt = '<div>The parameter</div>'
self.assertEqual(proc_function.params[0].desc.toHtmlLike(), txt)
self.assertEqual(proc_function.params[0].in_out, proc_doc.ProcParam.IN)
# tparams
self.assertEqual(len(proc_function.tparams), 1)
self.assertEqual(proc_function.tparams[0].type, 'T1')
txt = '<div>The type of the first template parameter.</div>'
self.assertEqual(proc_function.tparams[0].desc.toHtmlLike(), txt)
# returns
self.assertEqual(len(proc_function.returns), 1)
txt = '<div>The return value.</div>'
self.assertEqual(proc_function.returns[0].desc.toHtmlLike(), txt)
# throws
self.assertEqual(len(proc_function.throws), 1)
txt = '<div>The exception type.</div>'
self.assertEqual(proc_function.throws[0].desc.toHtmlLike(), txt)
# brief
txt = '<div>This is the <i>very important</i> class brief.</div>'
self.assertEqual(proc_function.brief.toHtmlLike(), txt)
self.assertEqual(len(proc_function.deprecation_msgs), 1)
self.assertEqual(proc_function.deprecation_msgs[0].toHtmlLike(), '<div>Deprecation msg.</div>')
txt = ('<div>'
'<p>This is the first paragraph.</p>'
'<h1>First <em>heading</em></h1>'
'<p>Second paragraph</p>'
'</div>'
)
self.assertEqual(proc_function.body.toHtmlLike(), txt)
txt = '<a href="seqan:Link Target">Link Target</a>'
self.assertEqual(len(proc_function.sees), 1)
self.assertEqual(proc_function.sees[0].toHtmlLike(), txt)
def testConvertFullInterface(self):
txt = ('@fn Klass#myFunction my Function\n'
'@headerfile <seqan/header.h>\n'
'@headerfile <seqan/header2.h>\n'
'@brief This is the <i>very important</i> class brief.\n'
'@deprecated Deprecation msg.\n'
'@signature template <typename T1>\n'
' TReturn foo(T1 x);\n'
'@param[in] x The parameter\n'
'@tparam T1 The type of the first template parameter.\n'
'@return TReturn The return value.\n'
               '@throw Exception The exception type.\n'
'\n'
'This is the first paragraph.\n'
'@section First <em>heading</em>\n'
'\n'
'Second paragraph\n'
'@see Link Target\n')
raw_function = self.parseText(txt).entries[0]
proc_function = self.conv.process(raw_function)
self.assertEqual(proc_function.name, 'Klass#myFunction')
self.assertEqual(proc_function.title, 'my Function')
self.assertEqual(proc_function.kind, 'interface_function')
self.assertEqual(proc_function.headerfiles, ['<seqan/header.h>', '<seqan/header2.h>'])
# params
self.assertEqual(len(proc_function.params), 1)
self.assertEqual(proc_function.params[0].name, 'x')
txt = '<div>The parameter</div>'
self.assertEqual(proc_function.params[0].desc.toHtmlLike(), txt)
self.assertEqual(proc_function.params[0].in_out, proc_doc.ProcParam.IN)
# tparams
self.assertEqual(len(proc_function.tparams), 1)
self.assertEqual(proc_function.tparams[0].type, 'T1')
txt = '<div>The type of the first template parameter.</div>'
self.assertEqual(proc_function.tparams[0].desc.toHtmlLike(), txt)
# returns
self.assertEqual(len(proc_function.returns), 1)
txt = '<div>The return value.</div>'
self.assertEqual(proc_function.returns[0].desc.toHtmlLike(), txt)
# throws
self.assertEqual(len(proc_function.throws), 1)
txt = '<div>The exception type.</div>'
self.assertEqual(proc_function.throws[0].desc.toHtmlLike(), txt)
# brief
txt = '<div>This is the <i>very important</i> class brief.</div>'
self.assertEqual(proc_function.brief.toHtmlLike(), txt)
self.assertEqual(len(proc_function.deprecation_msgs), 1)
self.assertEqual(proc_function.deprecation_msgs[0].toHtmlLike(), '<div>Deprecation msg.</div>')
txt = ('<div>'
'<p>This is the first paragraph.</p>'
'<h1>First <em>heading</em></h1>'
'<p>Second paragraph</p>'
'</div>'
)
self.assertEqual(proc_function.body.toHtmlLike(), txt)
txt = '<a href="seqan:Link Target">Link Target</a>'
self.assertEqual(len(proc_function.sees), 1)
self.assertEqual(proc_function.sees[0].toHtmlLike(), txt)
def testConvertFullMember(self):
txt = ('@fn Klass::myFunction my Function\n'
'@headerfile <seqan/header.h>\n'
'@headerfile <seqan/header2.h>\n'
'@brief This is the <i>very important</i> class brief.\n'
'@deprecated Deprecation msg.\n'
'@signature template <typename T1>\n'
' TReturn foo(T1 x);\n'
'@param[in] x The parameter\n'
'@tparam T1 The type of the first template parameter.\n'
'@return TReturn The return value.\n'
               '@throw Exception The exception type.\n'
'\n'
'This is the first paragraph.\n'
'@section First <em>heading</em>\n'
'\n'
'Second paragraph\n'
'@see Link Target\n')
raw_function = self.parseText(txt).entries[0]
proc_function = self.conv.process(raw_function)
self.assertEqual(proc_function.name, 'Klass::myFunction')
self.assertEqual(proc_function.title, 'my Function')
self.assertEqual(proc_function.kind, 'member_function')
self.assertEqual(proc_function.headerfiles, ['<seqan/header.h>', '<seqan/header2.h>'])
# params
self.assertEqual(len(proc_function.params), 1)
self.assertEqual(proc_function.params[0].name, 'x')
txt = '<div>The parameter</div>'
self.assertEqual(proc_function.params[0].desc.toHtmlLike(), txt)
self.assertEqual(proc_function.params[0].in_out, proc_doc.ProcParam.IN)
# tparams
self.assertEqual(len(proc_function.tparams), 1)
self.assertEqual(proc_function.tparams[0].type, 'T1')
txt = '<div>The type of the first template parameter.</div>'
self.assertEqual(proc_function.tparams[0].desc.toHtmlLike(), txt)
# returns
self.assertEqual(len(proc_function.returns), 1)
txt = '<div>The return value.</div>'
self.assertEqual(proc_function.returns[0].desc.toHtmlLike(), txt)
# throws
self.assertEqual(len(proc_function.throws), 1)
txt = '<div>The exception type.</div>'
self.assertEqual(proc_function.throws[0].desc.toHtmlLike(), txt)
# brief
txt = '<div>This is the <i>very important</i> class brief.</div>'
self.assertEqual(proc_function.brief.toHtmlLike(), txt)
self.assertEqual(len(proc_function.deprecation_msgs), 1)
self.assertEqual(proc_function.deprecation_msgs[0].toHtmlLike(), '<div>Deprecation msg.</div>')
txt = ('<div>'
'<p>This is the first paragraph.</p>'
'<h1>First <em>heading</em></h1>'
'<p>Second paragraph</p>'
'</div>'
)
self.assertEqual(proc_function.body.toHtmlLike(), txt)
txt = '<a href="seqan:Link Target">Link Target</a>'
self.assertEqual(len(proc_function.sees), 1)
self.assertEqual(proc_function.sees[0].toHtmlLike(), txt)
class TestConvertMacro(TestConverterBase):
def setUp(self):
self.proc = self.createDocProcessor()
self.conv = self.proc.converters['macro']
def testConvertMinimalGlobal(self):
txt = '@macro MACRO macro title'
raw_macro = self.parseText(txt).entries[0]
proc_macro = self.conv.process(raw_macro)
self.assertEqual(proc_macro.name, 'MACRO')
self.assertEqual(proc_macro.title, 'macro title')
self.assertEqual(proc_macro.kind, 'macro')
def testConvertMinimalGrouped(self):
        txt = '@macro Group#MACRO macro title'
raw_macro = self.parseText(txt).entries[0]
proc_macro = self.conv.process(raw_macro)
self.assertEqual(proc_macro.name, 'Group#MACRO')
self.assertEqual(proc_macro.title, 'macro title')
self.assertEqual(proc_macro.kind, 'grouped_macro')
def testConvertFullGlobal(self):
txt = ('@macro MACRO macro title\n'
'@headerfile <seqan/header.h>\n'
'@headerfile <seqan/header2.h>\n'
'@brief This is the <i>very important</i> macro brief.\n'
'@deprecated Deprecation msg.\n'
'@signature MACRO(param, param2)\n'
'@param param The parameter.\n'
'@param param2 The second parameter.\n'
'@return TReturn The return value.\n'
'@throw Exception The exception type.\n'
'\n'
'This is the first paragraph.\n'
'@section First <em>heading</em>\n'
'\n'
'Second paragraph\n'
'@see Link Target\n')
raw_macro = self.parseText(txt).entries[0]
proc_macro = self.conv.process(raw_macro)
self.assertEqual(proc_macro.name, 'MACRO')
self.assertEqual(proc_macro.title, 'macro title')
self.assertEqual(proc_macro.kind, 'macro')
self.assertEqual(proc_macro.headerfiles, ['<seqan/header.h>', '<seqan/header2.h>'])
# params
self.assertEqual(len(proc_macro.params), 2)
self.assertEqual(proc_macro.params[0].name, 'param')
txt = '<div>The parameter.</div>'
self.assertEqual(proc_macro.params[0].desc.toHtmlLike(), txt)
self.assertEqual(proc_macro.params[0].in_out, None)
self.assertEqual(proc_macro.params[1].name, 'param2')
txt = '<div>The second parameter.</div>'
self.assertEqual(proc_macro.params[1].desc.toHtmlLike(), txt)
self.assertEqual(proc_macro.params[1].in_out, None)
# returns
self.assertEqual(len(proc_macro.returns), 1)
txt = '<div>The return value.</div>'
self.assertEqual(proc_macro.returns[0].desc.toHtmlLike(), txt)
# throws
self.assertEqual(len(proc_macro.throws), 1)
txt = '<div>The exception type.</div>'
self.assertEqual(proc_macro.throws[0].desc.toHtmlLike(), txt)
# brief
txt = '<div>This is the <i>very important</i> macro brief.</div>'
self.assertEqual(proc_macro.brief.toHtmlLike(), txt)
self.assertEqual(len(proc_macro.deprecation_msgs), 1)
self.assertEqual(proc_macro.deprecation_msgs[0].toHtmlLike(), '<div>Deprecation msg.</div>')
txt = ('<div>'
'<p>This is the first paragraph.</p>'
'<h1>First <em>heading</em></h1>'
'<p>Second paragraph</p>'
'</div>'
)
self.assertEqual(proc_macro.body.toHtmlLike(), txt)
txt = '<a href="seqan:Link Target">Link Target</a>'
self.assertEqual(len(proc_macro.sees), 1)
self.assertEqual(proc_macro.sees[0].toHtmlLike(), txt)
def testConvertFullGrouped(self):
txt = ('@macro Group#MACRO macro title\n'
'@headerfile <seqan/header.h>\n'
'@headerfile <seqan/header2.h>\n'
'@brief This is the <i>very important</i> class brief.\n'
'@deprecated Deprecation msg.\n'
'@signature MACRO(param)\n'
'@param param The parameter\n'
'@return TReturn The return value.\n'
'@throw Exception The exception type.\n'
'\n'
'This is the first paragraph.\n'
'@section First <em>heading</em>\n'
'\n'
'Second paragraph\n'
'@see Link Target\n')
raw_macro = self.parseText(txt).entries[0]
proc_macro = self.conv.process(raw_macro)
self.assertEqual(proc_macro.name, 'Group#MACRO')
self.assertEqual(proc_macro.title, 'macro title')
self.assertEqual(proc_macro.kind, 'grouped_macro')
self.assertEqual(proc_macro.headerfiles, ['<seqan/header.h>', '<seqan/header2.h>'])
# params
self.assertEqual(len(proc_macro.params), 1)
self.assertEqual(proc_macro.params[0].name, 'param')
txt = '<div>The parameter</div>'
self.assertEqual(proc_macro.params[0].desc.toHtmlLike(), txt)
self.assertEqual(proc_macro.params[0].in_out, None)
# returns
self.assertEqual(len(proc_macro.returns), 1)
txt = '<div>The return value.</div>'
self.assertEqual(proc_macro.returns[0].desc.toHtmlLike(), txt)
# throws
self.assertEqual(len(proc_macro.throws), 1)
txt = '<div>The exception type.</div>'
self.assertEqual(proc_macro.throws[0].desc.toHtmlLike(), txt)
# brief
txt = '<div>This is the <i>very important</i> class brief.</div>'
self.assertEqual(proc_macro.brief.toHtmlLike(), txt)
self.assertEqual(len(proc_macro.deprecation_msgs), 1)
self.assertEqual(proc_macro.deprecation_msgs[0].toHtmlLike(), '<div>Deprecation msg.</div>')
txt = ('<div>'
'<p>This is the first paragraph.</p>'
'<h1>First <em>heading</em></h1>'
'<p>Second paragraph</p>'
'</div>'
)
self.assertEqual(proc_macro.body.toHtmlLike(), txt)
txt = '<a href="seqan:Link Target">Link Target</a>'
self.assertEqual(len(proc_macro.sees), 1)
self.assertEqual(proc_macro.sees[0].toHtmlLike(), txt)
class TestConvertMetafunction(TestConverterBase):
def setUp(self):
self.proc = self.createDocProcessor()
self.conv = self.proc.converters['global_metafunction']
def testConvertMinimalGlobal(self):
txt = '@mfn Metafunktion metafunction title'
raw_function = self.parseText(txt).entries[0]
proc_function = self.conv.process(raw_function)
self.assertEqual(proc_function.name, 'Metafunktion')
self.assertEqual(proc_function.title, 'metafunction title')
self.assertEqual(proc_function.kind, 'global_metafunction')
def testConvertMinimalInterface(self):
txt = '@mfn Klass#Metafunktion metafunction title'
raw_function = self.parseText(txt).entries[0]
proc_function = self.conv.process(raw_function)
self.assertEqual(proc_function.name, 'Klass#Metafunktion')
self.assertEqual(proc_function.title, 'metafunction title')
self.assertEqual(proc_function.kind, 'interface_metafunction')
def testConvertFullGlobal(self):
txt = ('@mfn Metafunktion metafunction title\n'
'@headerfile <seqan/header.h>\n'
'@headerfile <seqan/header2.h>\n'
'@brief This is the <i>very important</i> class brief.\n'
'@deprecated Deprecation msg.\n'
'@signature template <typename T1>\n'
' Metafunktion<T1>::Type;\n'
'@tparam T1 The type of the first template parameter.\n'
'@return Type The return type.\n'
'\n'
'This is the first paragraph.\n'
'@section First <em>heading</em>\n'
'\n'
'Second paragraph\n'
'@see Link Target\n')
raw_metafunction = self.parseText(txt).entries[0]
proc_metafunction = self.conv.process(raw_metafunction)
self.assertEqual(proc_metafunction.name, 'Metafunktion')
self.assertEqual(proc_metafunction.title, 'metafunction title')
self.assertEqual(proc_metafunction.kind, 'global_metafunction')
self.assertEqual(proc_metafunction.headerfiles, ['<seqan/header.h>', '<seqan/header2.h>'])
# tparams
self.assertEqual(len(proc_metafunction.tparams), 1)
self.assertEqual(proc_metafunction.tparams[0].type, 'T1')
txt = '<div>The type of the first template parameter.</div>'
self.assertEqual(proc_metafunction.tparams[0].desc.toHtmlLike(), txt)
# returns
self.assertEqual(len(proc_metafunction.returns), 1)
txt = '<div>The return type.</div>'
self.assertEqual(proc_metafunction.returns[0].desc.toHtmlLike(), txt)
# brief
txt = '<div>This is the <i>very important</i> class brief.</div>'
self.assertEqual(proc_metafunction.brief.toHtmlLike(), txt)
self.assertEqual(len(proc_metafunction.deprecation_msgs), 1)
self.assertEqual(proc_metafunction.deprecation_msgs[0].toHtmlLike(), '<div>Deprecation msg.</div>')
txt = ('<div>'
'<p>This is the first paragraph.</p>'
'<h1>First <em>heading</em></h1>'
'<p>Second paragraph</p>'
'</div>'
)
self.assertEqual(proc_metafunction.body.toHtmlLike(), txt)
txt = '<a href="seqan:Link Target">Link Target</a>'
self.assertEqual(len(proc_metafunction.sees), 1)
self.assertEqual(proc_metafunction.sees[0].toHtmlLike(), txt)
def testConvertFullInterface(self):
txt = ('@mfn Klass#Metafunktion metafunction title\n'
'@headerfile <seqan/header.h>\n'
'@headerfile <seqan/header2.h>\n'
'@brief This is the <i>very important</i> class brief.\n'
'@deprecated Deprecation msg.\n'
'@signature template <typename T1>\n'
' Metafunktion<T1>::Type;\n'
'@tparam T1 The type of the first template parameter.\n'
'@return Type The return type.\n'
'\n'
'This is the first paragraph.\n'
'@section First <em>heading</em>\n'
'\n'
'Second paragraph\n'
'@see Link Target\n')
raw_metafunction = self.parseText(txt).entries[0]
proc_metafunction = self.conv.process(raw_metafunction)
self.assertEqual(proc_metafunction.name, 'Klass#Metafunktion')
self.assertEqual(proc_metafunction.title, 'metafunction title')
self.assertEqual(proc_metafunction.kind, 'interface_metafunction')
# tparams
self.assertEqual(len(proc_metafunction.tparams), 1)
self.assertEqual(proc_metafunction.tparams[0].type, 'T1')
txt = '<div>The type of the first template parameter.</div>'
self.assertEqual(proc_metafunction.tparams[0].desc.toHtmlLike(), txt)
# returns
self.assertEqual(len(proc_metafunction.returns), 1)
txt = '<div>The return type.</div>'
self.assertEqual(proc_metafunction.returns[0].desc.toHtmlLike(), txt)
# brief
txt = '<div>This is the <i>very important</i> class brief.</div>'
self.assertEqual(proc_metafunction.brief.toHtmlLike(), txt)
self.assertEqual(len(proc_metafunction.deprecation_msgs), 1)
self.assertEqual(proc_metafunction.deprecation_msgs[0].toHtmlLike(), '<div>Deprecation msg.</div>')
txt = ('<div>'
'<p>This is the first paragraph.</p>'
'<h1>First <em>heading</em></h1>'
'<p>Second paragraph</p>'
'</div>'
)
self.assertEqual(proc_metafunction.body.toHtmlLike(), txt)
txt = '<a href="seqan:Link Target">Link Target</a>'
self.assertEqual(len(proc_metafunction.sees), 1)
self.assertEqual(proc_metafunction.sees[0].toHtmlLike(), txt)
class TestConvertVariable(TestConverterBase):
def setUp(self):
self.proc = self.createDocProcessor()
self.conv = self.proc.converters['variable']
def testConvertMinimalGlobal(self):
txt = '@var Type myVar'
raw_function = self.parseText(txt).entries[0]
proc_function = self.conv.process(raw_function)
self.assertEqual(proc_function.name, 'myVar')
self.assertEqual(proc_function.title, 'myVar')
self.assertEqual(proc_function.type, 'Type')
self.assertEqual(proc_function.kind, 'variable')
def testConvertMinimalMember(self):
txt = '@var Type Klass::myVar'
raw_function = self.parseText(txt).entries[0]
proc_function = self.conv.process(raw_function)
self.assertEqual(proc_function.name, 'Klass::myVar')
self.assertEqual(proc_function.title, 'Klass::myVar')
self.assertEqual(proc_function.type, 'Type')
self.assertEqual(proc_function.kind, 'member_variable')
def testConvertFullGlobal(self):
txt = ('@var Type myVar\n'
'@headerfile <seqan/header.h>\n'
'@headerfile <seqan/header2.h>\n'
'@brief This is the <i>very important</i> variable brief.\n'
'@signature Type myVar;\n'
'\n'
'This is the first paragraph.\n'
'@section First <em>heading</em>\n'
'\n'
'Second paragraph\n'
'@see Link Target\n')
raw_variable = self.parseText(txt).entries[0]
proc_variable = self.conv.process(raw_variable)
self.assertEqual(proc_variable.name, 'myVar')
self.assertEqual(proc_variable.title, 'myVar')
self.assertEqual(proc_variable.type, 'Type')
self.assertEqual(proc_variable.kind, 'variable')
# brief
txt = '<div>This is the <i>very important</i> variable brief.</div>'
self.assertEqual(proc_variable.brief.toHtmlLike(), txt)
txt = ('<div>'
'<p>This is the first paragraph.</p>'
'<h1>First <em>heading</em></h1>'
'<p>Second paragraph</p>'
'</div>'
)
self.assertEqual(proc_variable.body.toHtmlLike(), txt)
txt = '<a href="seqan:Link Target">Link Target</a>'
self.assertEqual(len(proc_variable.sees), 1)
self.assertEqual(proc_variable.sees[0].toHtmlLike(), txt)
def testConvertFullMember(self):
txt = ('@var Type Klass::myVar\n'
'@headerfile <seqan/header.h>\n'
'@headerfile <seqan/header2.h>\n'
'@brief This is the <i>very important</i> variable brief.\n'
'@deprecated Deprecation msg.\n'
'@signature Type myVar;\n'
'\n'
'This is the first paragraph.\n'
'@section First <em>heading</em>\n'
'\n'
'Second paragraph\n'
'@see Link Target\n')
raw_variable = self.parseText(txt).entries[0]
proc_variable = self.conv.process(raw_variable)
self.assertEqual(proc_variable.name, 'Klass::myVar')
self.assertEqual(proc_variable.title, 'Klass::myVar')
self.assertEqual(proc_variable.type, 'Type')
self.assertEqual(proc_variable.kind, 'member_variable')
self.assertEqual(proc_variable.headerfiles, ['<seqan/header.h>', '<seqan/header2.h>'])
# brief
txt = '<div>This is the <i>very important</i> variable brief.</div>'
self.assertEqual(proc_variable.brief.toHtmlLike(), txt)
self.assertEqual(len(proc_variable.deprecation_msgs), 1)
self.assertEqual(proc_variable.deprecation_msgs[0].toHtmlLike(), '<div>Deprecation msg.</div>')
txt = ('<div>'
'<p>This is the first paragraph.</p>'
'<h1>First <em>heading</em></h1>'
'<p>Second paragraph</p>'
'</div>'
)
self.assertEqual(proc_variable.body.toHtmlLike(), txt)
txt = '<a href="seqan:Link Target">Link Target</a>'
self.assertEqual(len(proc_variable.sees), 1)
self.assertEqual(proc_variable.sees[0].toHtmlLike(), txt)
def testConvertFullEnumValue(self):
txt = ('@var Enum CONSTANT\n'
'@headerfile <seqan/header.h>\n'
'@headerfile <seqan/header2.h>\n'
'@brief This is the <i>very important</i> value brief.\n'
'@deprecated Deprecation msg.\n'
'\n'
'This is the first paragraph.\n'
'@section First <em>heading</em>\n'
'\n'
'Second paragraph\n'
'@see Link Target\n')
raw_variable = self.parseText(txt).entries[0]
proc_variable = self.conv.process(raw_variable)
self.assertEqual(proc_variable.name, 'CONSTANT')
self.assertEqual(proc_variable.title, 'CONSTANT')
self.assertEqual(proc_variable.type, 'Enum')
self.assertEqual(proc_variable.kind, 'variable')
self.assertEqual(proc_variable.headerfiles, ['<seqan/header.h>', '<seqan/header2.h>'])
# brief
txt = '<div>This is the <i>very important</i> value brief.</div>'
self.assertEqual(proc_variable.brief.toHtmlLike(), txt)
self.assertEqual(len(proc_variable.deprecation_msgs), 1)
self.assertEqual(proc_variable.deprecation_msgs[0].toHtmlLike(), '<div>Deprecation msg.</div>')
txt = ('<div>'
'<p>This is the first paragraph.</p>'
'<h1>First <em>heading</em></h1>'
'<p>Second paragraph</p>'
'</div>'
)
self.assertEqual(proc_variable.body.toHtmlLike(), txt)
txt = '<a href="seqan:Link Target">Link Target</a>'
self.assertEqual(len(proc_variable.sees), 1)
self.assertEqual(proc_variable.sees[0].toHtmlLike(), txt)
def testConvertFullMemberEnumValue(self):
txt = ('@var Klass::Enum Klass::CONSTANT\n'
'@headerfile <seqan/header.h>\n'
'@headerfile <seqan/header2.h>\n'
'@brief This is the <i>very important</i> value brief.\n'
'@deprecated Deprecation msg.\n'
'\n'
'This is the first paragraph.\n'
'@section First <em>heading</em>\n'
'\n'
'Second paragraph\n'
'@see Link Target\n')
raw_variable = self.parseText(txt).entries[0]
proc_variable = self.conv.process(raw_variable)
self.assertEqual(proc_variable.name, 'Klass::CONSTANT')
self.assertEqual(proc_variable.title, 'Klass::CONSTANT')
self.assertEqual(proc_variable.type, 'Klass::Enum')
self.assertEqual(proc_variable.kind, 'member_variable')
self.assertEqual(proc_variable.headerfiles, ['<seqan/header.h>', '<seqan/header2.h>'])
# brief
txt = '<div>This is the <i>very important</i> value brief.</div>'
self.assertEqual(proc_variable.brief.toHtmlLike(), txt)
self.assertEqual(len(proc_variable.deprecation_msgs), 1)
self.assertEqual(proc_variable.deprecation_msgs[0].toHtmlLike(), '<div>Deprecation msg.</div>')
txt = ('<div>'
'<p>This is the first paragraph.</p>'
'<h1>First <em>heading</em></h1>'
'<p>Second paragraph</p>'
'</div>'
)
self.assertEqual(proc_variable.body.toHtmlLike(), txt)
txt = '<a href="seqan:Link Target">Link Target</a>'
self.assertEqual(len(proc_variable.sees), 1)
self.assertEqual(proc_variable.sees[0].toHtmlLike(), txt)
class TestDocProcessorInheritance(TestConverterBase):
def setUp(self):
self.proc = self.createDocProcessor()
def testConceptInheritance(self):
txt = ('@concept ConceptA1\n'
'@brief Concept A1\n'
'\n'
'@concept ConceptA2\n'
'@brief Concept A2\n'
'\n'
'@concept ConceptB\n'
'@brief Concept B\n'
'@extends ConceptA1\n'
'@extends ConceptA2\n'
'\n'
'@concept ConceptC\n'
'@brief Concept C\n'
'@extends ConceptB\n')
raw_doc = self.parseText(txt)
proc_doc = self.proc.run(raw_doc)
concept_a1 = proc_doc.top_level_entries['ConceptA1']
self.assertEqual(concept_a1.all_extended, set())
self.assertEqual(concept_a1.all_extending, set(['ConceptB', 'ConceptC']))
concept_a2 = proc_doc.top_level_entries['ConceptA2']
self.assertEqual(concept_a2.all_extended, set())
self.assertEqual(concept_a2.all_extending, set(['ConceptB', 'ConceptC']))
concept_b = proc_doc.top_level_entries['ConceptB']
self.assertEqual(concept_b.all_extended, set(['ConceptA1', 'ConceptA2']))
self.assertEqual(concept_b.all_extending, set(['ConceptC']))
concept_c = proc_doc.top_level_entries['ConceptC']
self.assertEqual(concept_c.all_extended, set(['ConceptA1', 'ConceptA2', 'ConceptB']))
self.assertEqual(concept_c.all_extending, set([]))
def testClassInheritance(self):
txt = ('@class ClassA\n'
'@brief Brief A\n'
'@signature class A\n'
'\n'
'@class ClassB\n'
'@brief Brief B\n'
'@signature class B\n'
'@extends ClassA\n'
'\n'
'@class ClassC\n'
'@brief Brief C\n'
'@signature class C\n'
'@extends ClassB\n')
raw_doc = self.parseText(txt)
proc_doc = self.proc.run(raw_doc)
class_a = proc_doc.top_level_entries['ClassA']
self.assertEqual(class_a.all_extended, set())
self.assertEqual(class_a.all_extending, set(['ClassB', 'ClassC']))
class_b = proc_doc.top_level_entries['ClassB']
self.assertEqual(class_b.all_extended, set(['ClassA']))
self.assertEqual(class_b.all_extending, set(['ClassC']))
class_c = proc_doc.top_level_entries['ClassC']
self.assertEqual(class_c.all_extended, set(['ClassA', 'ClassB']))
def testConceptClassInheritance(self):
txt = ('@concept ConceptA\n'
'@brief Concept A\n'
'@signature concept A;\n'
'\n'
'@concept ConceptB\n'
'@brief Concept B\n'
'@signature concept B;\n'
'@extends ConceptA\n'
'\n'
'@class ClassA\n'
'@brief Class A\n'
'@signature class A\n'
'@implements ConceptB\n'
'\n'
'@class ClassB\n'
'@brief Class B\n'
'@signature class B\n'
'@extends ClassA\n'
'\n'
'@class ClassC\n'
'@brief Class C\n'
'@signature class C\n'
'@extends ClassB\n')
raw_doc = self.parseText(txt)
proc_doc = self.proc.run(raw_doc)
concept_a = proc_doc.top_level_entries['ConceptA']
self.assertEqual(concept_a.all_extended, set())
self.assertEqual(concept_a.all_extending, set(['ConceptB']))
self.assertEqual(concept_a.all_implementing, set(['ClassA', 'ClassB', 'ClassC']))
concept_b = proc_doc.top_level_entries['ConceptB']
self.assertEqual(concept_b.all_extended, set(['ConceptA']))
self.assertEqual(concept_b.all_extending, set([]))
self.assertEqual(concept_b.all_implementing, set(['ClassA', 'ClassB', 'ClassC']))
class_a = proc_doc.top_level_entries['ClassA']
self.assertEqual(class_a.all_extended, set())
self.assertEqual(class_a.all_extending, set(['ClassB', 'ClassC']))
self.assertEqual(class_a.all_implemented, set(['ConceptA', 'ConceptB']))
class_b = proc_doc.top_level_entries['ClassB']
self.assertEqual(class_b.all_extended, set(['ClassA']))
self.assertEqual(class_b.all_extending, set(['ClassC']))
self.assertEqual(class_b.all_implemented, set(['ConceptA', 'ConceptB']))
class_c = proc_doc.top_level_entries['ClassC']
self.assertEqual(class_c.all_extended, set(['ClassA', 'ClassB']))
self.assertEqual(class_c.all_extending, set([]))
self.assertEqual(class_c.all_implemented, set(['ConceptA', 'ConceptB']))
if __name__ == '__main__':
unittest.main()
|
cainmatt/django
|
refs/heads/master
|
tests/template_tests/syntax_tests/test_list_index.py
|
521
|
from django.test import SimpleTestCase
from ..utils import setup
class ListIndexTests(SimpleTestCase):
@setup({'list-index01': '{{ var.1 }}'})
def test_list_index01(self):
"""
List-index syntax allows a template to access a certain item of a
subscriptable object.
"""
output = self.engine.render_to_string('list-index01', {'var': ['first item', 'second item']})
self.assertEqual(output, 'second item')
@setup({'list-index02': '{{ var.5 }}'})
def test_list_index02(self):
"""
Fail silently when the list index is out of range.
"""
output = self.engine.render_to_string('list-index02', {'var': ['first item', 'second item']})
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
@setup({'list-index03': '{{ var.1 }}'})
def test_list_index03(self):
"""
Fail silently when the list index is out of range.
"""
output = self.engine.render_to_string('list-index03', {'var': None})
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
@setup({'list-index04': '{{ var.1 }}'})
def test_list_index04(self):
"""
Fail silently when variable is a dict without the specified key.
"""
output = self.engine.render_to_string('list-index04', {'var': {}})
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
@setup({'list-index05': '{{ var.1 }}'})
def test_list_index05(self):
"""
Dictionary lookup wins out when the dict's key is a string.
"""
output = self.engine.render_to_string('list-index05', {'var': {'1': "hello"}})
self.assertEqual(output, 'hello')
@setup({'list-index06': '{{ var.1 }}'})
def test_list_index06(self):
"""
But list-index lookup wins out when the dict's key is an int; behind
the scenes this is really a dictionary lookup after the key has been
converted to an int.
"""
output = self.engine.render_to_string('list-index06', {"var": {1: "hello"}})
self.assertEqual(output, 'hello')
@setup({'list-index07': '{{ var.1 }}'})
def test_list_index07(self):
"""
Dictionary lookup wins out when there are both string and int
versions of the key.
"""
output = self.engine.render_to_string('list-index07', {"var": {'1': "hello", 1: "world"}})
self.assertEqual(output, 'hello')
|
WhisperSystems/TextSecureKit
|
refs/heads/master
|
Utilities/precommit.py
|
2
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import subprocess
import datetime
import argparse
import commands
git_repo_path = os.path.abspath(subprocess.check_output(['git', 'rev-parse', '--show-toplevel']).strip())
def splitall(path):
allparts = []
while 1:
parts = os.path.split(path)
if parts[0] == path: # sentinel for absolute paths
allparts.insert(0, parts[0])
break
elif parts[1] == path: # sentinel for relative paths
allparts.insert(0, parts[1])
break
else:
path = parts[0]
allparts.insert(0, parts[1])
return allparts
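# Illustrative behaviour (examples added for clarity, not in the original):
#     splitall('/usr/local/bin') -> ['/', 'usr', 'local', 'bin']
#     splitall('a/b/c')          -> ['a', 'b', 'c']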
def process(filepath):
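# For Swift files this runs swiftlint autocorrect and lint; for every
# processed file it then replaces any leading '//' comment block with a
# current-year copyright header, rewriting the file only on change.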
short_filepath = filepath[len(git_repo_path):]
if short_filepath.startswith(os.sep):
short_filepath = short_filepath[len(os.sep):]
filename = os.path.basename(filepath)
if filename.startswith('.'):
return
file_ext = os.path.splitext(filename)[1]
if file_ext in ('.swift',):  # tuple, not a bare string, so 'in' is a membership test
env_copy = os.environ.copy()
env_copy["SCRIPT_INPUT_FILE_COUNT"] = "1"
env_copy["SCRIPT_INPUT_FILE_0"] = '%s' % ( short_filepath, )
lint_output = subprocess.check_output(['swiftlint', 'autocorrect', '--use-script-input-files'], env=env_copy)
print lint_output
try:
lint_output = subprocess.check_output(['swiftlint', 'lint', '--use-script-input-files'], env=env_copy)
except subprocess.CalledProcessError, e:
lint_output = e.output
print lint_output
with open(filepath, 'rt') as f:
text = f.read()
original_text = text
lines = text.split('\n')
while lines and lines[0].startswith('//'):
lines = lines[1:]
text = '\n'.join(lines)
text = text.strip()
header = '''//
// Copyright (c) %s Open Whisper Systems. All rights reserved.
//
''' % (
datetime.datetime.now().year,
)
text = header + text + '\n'
if original_text == text:
return
print 'Updating:', short_filepath
with open(filepath, 'wt') as f:
f.write(text)
def should_ignore_path(path):
ignore_paths = [
os.path.join(git_repo_path, '.git')
]
for ignore_path in ignore_paths:
if path.startswith(ignore_path):
return True
for component in splitall(path):
if component.startswith('.'):
return True
if component.endswith('.framework'):
return True
if component in ('Pods', 'ThirdParty', 'Carthage',):
return True
return False
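# Illustrative behaviour (hypothetical paths): anything under .git, inside a
# dot-directory, inside a *.framework bundle, or under Pods/ThirdParty/
# Carthage is skipped; ordinary source paths return False and get processed.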
def process_if_appropriate(filepath):
filename = os.path.basename(filepath)
if filename.startswith('.'):
return
file_ext = os.path.splitext(filename)[1]
if file_ext not in ('.h', '.hpp', '.cpp', '.m', '.mm', '.pch', '.swift'):
return
if should_ignore_path(filepath):
return
process(filepath)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Precommit script.')
parser.add_argument('--all', action='store_true', help='process all files in or below current dir')
args = parser.parse_args()
if args.all:
for rootdir, dirnames, filenames in os.walk(git_repo_path):
for filename in filenames:
file_path = os.path.abspath(os.path.join(rootdir, filename))
process_if_appropriate(file_path)
else:
filepaths = []
# Staging
output = commands.getoutput('git diff --cached --name-only --diff-filter=ACMR')
filepaths.extend([line.strip() for line in output.split('\n')])
# Working
output = commands.getoutput('git diff --name-only --diff-filter=ACMR')
filepaths.extend([line.strip() for line in output.split('\n')])
# Only process each path once.
filepaths = sorted(set(filepaths))
for filepath in filepaths:
filepath = os.path.abspath(os.path.join(git_repo_path, filepath))
process_if_appropriate(filepath)
print 'git clang-format...'
print commands.getoutput('git clang-format')
|
toolmacher/micropython
|
refs/heads/master
|
tests/basics/for_break.py
|
117
|
# Testcase for break in a for loop [within a bunch of other code]
# https://github.com/micropython/micropython/issues/635
def foo():
seq = [1, 2, 3]
v = 100
i = 5
while i > 0:
print(i)
for a in seq:
if a == 2:
break
i -= 1
foo()
# break from within nested for loop
def bar():
l = [1, 2, 3]
for e1 in l:
print(e1)
for e2 in l:
print(e1, e2)
if e2 == 2:
break
bar()
|
geekboxzone/mmallow_prebuilts_gcc_darwin-x86_x86_x86_64-linux-android-4.9
|
refs/heads/geekbox
|
share/gdb/python/gdb/command/prompt.py
|
120
|
# Extended prompt.
# Copyright (C) 2011-2014 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""GDB command for working with extended prompts."""
import gdb
import gdb.prompt
class _ExtendedPrompt(gdb.Parameter):
"""Set the extended prompt.
Usage: set extended-prompt VALUE
Substitutions are applied to VALUE to compute the real prompt.
The currently defined substitutions are:
"""
# Add the prompt library's dynamically generated help to the
# __doc__ string.
__doc__ = __doc__ + gdb.prompt.prompt_help()
set_doc = "Set the extended prompt."
show_doc = "Show the extended prompt."
def __init__(self):
super(_ExtendedPrompt, self).__init__("extended-prompt",
gdb.COMMAND_SUPPORT,
gdb.PARAM_STRING_NOESCAPE)
self.value = ''
self.hook_set = False
def get_show_string(self, pvalue):
if self.value != '':
return "The extended prompt is: " + self.value
else:
return "The extended prompt is not set."
def get_set_string(self):
if not self.hook_set:
gdb.prompt_hook = self.before_prompt_hook
self.hook_set = True
return ""
def before_prompt_hook(self, current):
if self.value != '':
newprompt = gdb.prompt.substitute_prompt(self.value)
return newprompt.replace('\\', '\\\\')
else:
return None
_ExtendedPrompt()
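# Illustrative gdb session (assuming gdb.prompt supports the \w
# working-directory substitution):
#     (gdb) set extended-prompt \w >
#     (gdb) show extended-prompt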
|
jfunez/opac
|
refs/heads/master
|
opac/webapp/admin/custom_filters.py
|
3
|
# coding: utf-8
from flask_admin.contrib.mongoengine.filters import (
FilterEqual, FilterNotEqual, FilterLike, FilterNotLike,
FilterEmpty, FilterInList, FilterNotInList, FilterConverter)
from flask_admin.contrib.mongoengine.tools import parse_like_term
from flask_admin.model import filters
from flask_admin.contrib import sqla
from mongoengine import ReferenceField, EmbeddedDocumentField, ListField, StringField
from mongoengine.queryset import Q
from opac_schema.v1.models import Journal, Issue
from webapp import models
def get_flt(column=None, value=None, term=''):
flt = None
search_fields = {
'journal': ['title', 'title_iso', 'short_title', 'acronym', 'print_issn', 'eletronic_issn'],
'issue': ['label'],
'use_licenses': ['license_code', 'reference_url', 'disclaimer']
}
if isinstance(column, ReferenceField):
criteria = None
reference_values = None
for field in search_fields[column.name]:
flt = {'%s__%s' % (field, term): value}
q = Q(**flt)
if criteria is None:
criteria = q
elif term in ['ne', 'not__contains', 'nin']:
criteria &= q
else:
criteria |= q
if isinstance(column.document_type_obj(), Journal):
reference_values = Journal.objects.filter(criteria)
if isinstance(column.document_type_obj(), Issue):
reference_values = Issue.objects.filter(criteria)
flt = {'%s__in' % column.name: reference_values}
elif isinstance(column, EmbeddedDocumentField):
criteria = None
for field in search_fields[column.name]:
flt = {'%s__%s__%s' % (column.name, field, term): value}
q = Q(**flt)
if criteria is None:
criteria = q
elif term in ['ne', 'not__contains', 'nin']:
criteria &= q
else:
criteria |= q
return criteria
elif isinstance(column, ListField) and isinstance(column.field, StringField):
flt = {'%s__%s' % (column.name, term): value if value else []}
else:
flt = {'%s__%s' % (column.name, term): value}
return Q(**flt)
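# Illustrative usage (hypothetical column and value): for a plain
# StringField column named 'title',
#     get_flt(column, 'Heart', 'icontains')
# returns Q(title__icontains='Heart'); for a ReferenceField the configured
# search_fields are OR-ed and the result is a Q over matching references.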
class CustomFilterEqual(FilterEqual):
def apply(self, query, value):
flt = get_flt(self.column, value)
return query.filter(flt)
class CustomFilterNotEqual(FilterNotEqual):
def apply(self, query, value):
flt = get_flt(self.column, value, 'ne')
return query.filter(flt)
class CustomFilterLike(FilterLike):
def apply(self, query, value):
term, data = parse_like_term(value)
flt = get_flt(self.column, data, term)
return query.filter(flt)
class CustomFilterNotLike(FilterNotLike):
def apply(self, query, value):
term, data = parse_like_term(value)
flt = get_flt(self.column, data, 'not__%s' % term)
return query.filter(flt)
class CustomFilterEmpty(FilterEmpty):
def apply(self, query, value):
if value == '1':
flt = get_flt(self.column, None)
else:
flt = get_flt(self.column, None, 'ne')
return query.filter(flt)
class CustomFilterInList(FilterInList):
def apply(self, query, value):
flt = get_flt(self.column, value, 'in')
return query.filter(flt)
class CustomFilterNotInList(FilterNotInList):
def apply(self, query, value):
flt = get_flt(self.column, value, 'nin')
return query.filter(flt)
class CustomFilterConverter(FilterConverter):
# Fields inside ReferenceField, EmbeddedDocumentField and ListField
# filters must be of type StringField
reference_filters = (
CustomFilterLike, CustomFilterNotLike, CustomFilterEqual,
CustomFilterNotEqual, CustomFilterInList, CustomFilterNotInList)
embedded_filters = (
CustomFilterLike, CustomFilterNotLike, CustomFilterEqual,
CustomFilterNotEqual, CustomFilterEmpty, CustomFilterInList,
CustomFilterNotInList)
list_filters = (
CustomFilterLike, CustomFilterNotLike, CustomFilterEmpty)
@filters.convert('ReferenceField')
def conv_reference(self, column, name):
return [f(column, name) for f in self.reference_filters]
@filters.convert('EmbeddedDocumentField')
def conv_embedded(self, column, name):
return [f(column, name) for f in self.embedded_filters]
@filters.convert('ListField')
def conv_list(self, column, name):
return [f(column, name) for f in self.list_filters]
class CustomFilterConverterSqla(sqla.filters.FilterConverter):
choice_filters = (sqla.filters.FilterEqual, sqla.filters.FilterNotEqual,
sqla.filters.FilterEmpty, sqla.filters.FilterInList,
sqla.filters.FilterNotInList)
choices = {
'language': models.LANGUAGES_CHOICES,
}
@filters.convert('ChoiceType')
def conv_choice(self, column, name, options):
if not options:
options = self.choices[column.name]
return [f(column, name, options) for f in self.choice_filters]
|
imsplitbit/nova
|
refs/heads/master
|
nova/tests/integrated/v3/test_keypairs.py
|
4
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from nova.tests.integrated.v3 import api_sample_base
class KeyPairsSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
sample_dir = "keypairs"
def generalize_subs(self, subs, vanilla_regexes):
subs['keypair_name'] = 'keypair-[0-9a-f-]+'
return subs
def test_keypairs_post(self, public_key=None):
"""Get api sample of key pairs post request."""
key_name = 'keypair-' + str(uuid.uuid4())
response = self._do_post('keypairs', 'keypairs-post-req',
{'keypair_name': key_name})
subs = self._get_regexes()
subs['keypair_name'] = '(%s)' % key_name
self._verify_response('keypairs-post-resp', subs, response, 201)
# NOTE(maurosr): returning the key_name is necessary because the
# verification returns the label of the last compared information in
# the response, not necessarily the key name.
return key_name
def test_keypairs_import_key_post(self):
# Get api sample of key pairs post to import user's key.
key_name = 'keypair-' + str(uuid.uuid4())
subs = {
'keypair_name': key_name,
'public_key': "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGg"
"B4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0l"
"RE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv"
"9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYc"
"pSxsIbECHw== Generated by Nova"
}
response = self._do_post('keypairs', 'keypairs-import-post-req',
subs)
subs = self._get_regexes()
subs['keypair_name'] = '(%s)' % key_name
self._verify_response('keypairs-import-post-resp', subs, response, 201)
def test_keypairs_get(self):
# Get api sample of key pairs get request.
key_name = self.test_keypairs_post()
response = self._do_get('keypairs')
subs = self._get_regexes()
subs['keypair_name'] = '(%s)' % key_name
self._verify_response('keypairs-get-resp', subs, response, 200)
class KeyPairsSampleXmlTest(KeyPairsSampleJsonTest):
ctype = 'xml'
|
kernevil/samba
|
refs/heads/master
|
source4/dsdb/tests/python/tombstone_reanimation.py
|
1
|
#!/usr/bin/env python3
#
# Tombstone reanimation tests
#
# Copyright (C) Kamen Mazdrashki <kamenim@samba.org> 2014
# Copyright (C) Nadezhda Ivanova <nivanova@symas.com> 2014
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import sys
import unittest
sys.path.insert(0, "bin/python")
import samba
from samba.ndr import ndr_unpack, ndr_print
from samba.dcerpc import misc
from samba.dcerpc import security
from samba.dcerpc import drsblobs
from samba.dcerpc.drsuapi import *
from samba.tests.password_test import PasswordCommon
from samba.common import get_string
import samba.tests
from ldb import (SCOPE_BASE, FLAG_MOD_ADD, FLAG_MOD_DELETE, FLAG_MOD_REPLACE, Dn, Message,
MessageElement, LdbError,
ERR_ATTRIBUTE_OR_VALUE_EXISTS, ERR_NO_SUCH_OBJECT, ERR_ENTRY_ALREADY_EXISTS,
ERR_OPERATIONS_ERROR, ERR_UNWILLING_TO_PERFORM)
class RestoredObjectAttributesBaseTestCase(samba.tests.TestCase):
""" verify Samba restores required attributes when
user restores a Deleted object
"""
def setUp(self):
super(RestoredObjectAttributesBaseTestCase, self).setUp()
self.samdb = samba.tests.connect_samdb_env("TEST_SERVER", "TEST_USERNAME", "TEST_PASSWORD")
self.base_dn = self.samdb.domain_dn()
self.schema_dn = self.samdb.get_schema_basedn().get_linearized()
self.configuration_dn = self.samdb.get_config_basedn().get_linearized()
# permit password changes during this test
PasswordCommon.allow_password_changes(self, self.samdb)
def tearDown(self):
super(RestoredObjectAttributesBaseTestCase, self).tearDown()
def GUID_string(self, guid):
return get_string(self.samdb.schema_format_value("objectGUID", guid))
def search_guid(self, guid, attrs=["*"]):
res = self.samdb.search(base="<GUID=%s>" % self.GUID_string(guid),
scope=SCOPE_BASE, attrs=attrs,
controls=["show_deleted:1"])
self.assertEqual(len(res), 1)
return res[0]
def search_dn(self, dn):
res = self.samdb.search(expression="(objectClass=*)",
base=dn,
scope=SCOPE_BASE,
controls=["show_recycled:1"])
self.assertEqual(len(res), 1)
return res[0]
def _create_object(self, msg):
""":param msg: dict with dn and attributes to create an object from"""
# delete an object if leftover from previous test
samba.tests.delete_force(self.samdb, msg['dn'])
self.samdb.add(msg)
return self.search_dn(msg['dn'])
def assertNamesEqual(self, attrs_expected, attrs_extra):
self.assertEqual(attrs_expected, attrs_extra,
"Actual object does not have expected attributes, missing from expected (%s), extra (%s)"
% (str(attrs_expected.difference(attrs_extra)), str(attrs_extra.difference(attrs_expected))))
def assertAttributesEqual(self, obj_orig, attrs_orig, obj_restored, attrs_rest):
self.assertNamesEqual(attrs_orig, attrs_rest)
# remove volatile attributes, they can't be equal
attrs_orig -= set(["uSNChanged", "dSCorePropagationData", "whenChanged"])
for attr in attrs_orig:
# convert original attr value to ldif
orig_val = obj_orig.get(attr)
if orig_val is None:
continue
if not isinstance(orig_val, MessageElement):
orig_val = MessageElement(str(orig_val), 0, attr)
m = Message()
m.add(orig_val)
orig_ldif = self.samdb.write_ldif(m, 0)
# convert restored attr value to ldif
rest_val = obj_restored.get(attr)
self.assertFalse(rest_val is None)
m = Message()
if not isinstance(rest_val, MessageElement):
rest_val = MessageElement(str(rest_val), 0, attr)
m.add(rest_val)
rest_ldif = self.samdb.write_ldif(m, 0)
# compare generated ldif's
self.assertEqual(orig_ldif, rest_ldif)
def assertAttributesExists(self, attr_expected, obj_msg):
"""Check object contains at least expected attrbigutes
:param attr_expected: dict of expected attributes with values. ** is any value
:param obj_msg: Ldb.Message for the object under test
"""
actual_names = set(obj_msg.keys())
# Samba does not use 'dSCorePropagationData', so skip it
actual_names -= set(['dSCorePropagationData'])
expected_names = set(attr_expected.keys())
self.assertNamesEqual(expected_names, actual_names)
for name in attr_expected.keys():
expected_val = attr_expected[name]
actual_val = obj_msg.get(name)
self.assertFalse(actual_val is None, "No value for attribute '%s'" % name)
if expected_val == "**":
# "**" values means "any"
continue
# if expected_val is e.g. ldb.bytes we can't depend on
# str(actual_value) working; we may just get a decoding
# error. Better to compare raw values.
if not isinstance(expected_val, str):
actual_val = actual_val[0]
else:
actual_val = str(actual_val)
self.assertEqual(expected_val, actual_val,
"Unexpected value (%s) for '%s', expected (%s)" % (
repr(actual_val), name, repr(expected_val)))
def _check_metadata(self, metadata, expected):
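# Compares the (attid, version) pairs in the unpacked replPropertyMetaData
# blob against 'expected': first as sets (membership), then in order,
# checking attid and, where a version is given, the version too.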
repl = ndr_unpack(drsblobs.replPropertyMetaDataBlob, metadata[0])
repl_array = []
for o in repl.ctr.array:
repl_array.append((o.attid, o.version))
repl_set = set(repl_array)
expected_set = set(expected)
self.assertEqual(len(repl_set), len(expected),
"Unexpected metadata, missing from expected (%s), extra (%s)), repl: \n%s" % (
str(expected_set.difference(repl_set)),
str(repl_set.difference(expected_set)),
ndr_print(repl)))
i = 0
for o in repl.ctr.array:
e = expected[i]
(attid, version) = e
self.assertEqual(attid, o.attid,
"(LDAP) Wrong attid "
"for expected value %d, wanted 0x%08x got 0x%08x, "
"repl: \n%s"
% (i, attid, o.attid, ndr_print(repl)))
# Allow version to be skipped when it does not matter
if version is not None:
self.assertEqual(o.version, version,
"(LDAP) Wrong version for expected value %d, "
"attid 0x%08x, "
"wanted %d got %d, repl: \n%s"
% (i, o.attid,
version, o.version, ndr_print(repl)))
i = i + 1
@staticmethod
def restore_deleted_object(samdb, del_dn, new_dn, new_attrs=None):
"""Restores a deleted object
:param samdb: SamDB connection to SAM
:param del_dn: str Deleted object DN
:param new_dn: str Where to restore the object
:param new_attrs: dict Additional attributes to set
"""
msg = Message()
msg.dn = Dn(samdb, str(del_dn))
msg["isDeleted"] = MessageElement([], FLAG_MOD_DELETE, "isDeleted")
msg["distinguishedName"] = MessageElement([str(new_dn)], FLAG_MOD_REPLACE, "distinguishedName")
if new_attrs is not None:
assert isinstance(new_attrs, dict)
for attr in new_attrs:
msg[attr] = MessageElement(new_attrs[attr], FLAG_MOD_REPLACE, attr)
samdb.modify(msg, ["show_deleted:1"])
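# Illustrative call (hypothetical DNs), mirroring the tests below:
#     restore_deleted_object(samdb, obj_deleted.dn,
#                            "CN=testuser,CN=Users,DC=example,DC=com",
#                            {"url": "www.samba.org"})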
class BaseRestoreObjectTestCase(RestoredObjectAttributesBaseTestCase):
def setUp(self):
super(BaseRestoreObjectTestCase, self).setUp()
def enable_recycle_bin(self):
msg = Message()
msg.dn = Dn(self.samdb, "")
msg["enableOptionalFeature"] = MessageElement(
"CN=Partitions," + self.configuration_dn + ":766ddcd8-acd0-445e-f3b9-a7f9b6744f2a",
FLAG_MOD_ADD, "enableOptionalFeature")
try:
self.samdb.modify(msg)
except LdbError as e:
(num, _) = e.args
self.assertEqual(num, ERR_ATTRIBUTE_OR_VALUE_EXISTS)
def test_undelete(self):
print("Testing standard undelete operation")
usr1 = "cn=testuser,cn=users," + self.base_dn
samba.tests.delete_force(self.samdb, usr1)
self.samdb.add({
"dn": usr1,
"objectclass": "user",
"description": "test user description",
"samaccountname": "testuser"})
objLive1 = self.search_dn(usr1)
guid1 = objLive1["objectGUID"][0]
self.samdb.delete(usr1)
objDeleted1 = self.search_guid(guid1)
self.restore_deleted_object(self.samdb, objDeleted1.dn, usr1)
objLive2 = self.search_dn(usr1)
self.assertEqual(str(objLive2.dn).lower(), str(objLive1.dn).lower())
samba.tests.delete_force(self.samdb, usr1)
def test_rename(self):
print("Testing attempt to rename deleted object")
usr1 = "cn=testuser,cn=users," + self.base_dn
self.samdb.add({
"dn": usr1,
"objectclass": "user",
"description": "test user description",
"samaccountname": "testuser"})
objLive1 = self.search_dn(usr1)
guid1 = objLive1["objectGUID"][0]
self.samdb.delete(usr1)
objDeleted1 = self.search_guid(guid1)
# make sure we get the correct error if the show_deleted control is missing
try:
self.samdb.rename(str(objDeleted1.dn), usr1)
self.fail()
except LdbError as e1:
(num, _) = e1.args
self.assertEqual(num, ERR_NO_SUCH_OBJECT)
try:
self.samdb.rename(str(objDeleted1.dn), usr1, ["show_deleted:1"])
self.fail()
except LdbError as e2:
(num, _) = e2.args
self.assertEqual(num, ERR_UNWILLING_TO_PERFORM)
def test_undelete_with_mod(self):
print("Testing standard undelete operation with modification of additional attributes")
usr1 = "cn=testuser,cn=users," + self.base_dn
self.samdb.add({
"dn": usr1,
"objectclass": "user",
"description": "test user description",
"samaccountname": "testuser"})
objLive1 = self.search_dn(usr1)
guid1 = objLive1["objectGUID"][0]
self.samdb.delete(usr1)
objDeleted1 = self.search_guid(guid1)
self.restore_deleted_object(self.samdb, objDeleted1.dn, usr1, {"url": "www.samba.org"})
objLive2 = self.search_dn(usr1)
self.assertEqual(str(objLive2["url"][0]), "www.samba.org")
samba.tests.delete_force(self.samdb, usr1)
def test_undelete_newuser(self):
print("Testing undelete user with a different dn")
usr1 = "cn=testuser,cn=users," + self.base_dn
usr2 = "cn=testuser2,cn=users," + self.base_dn
samba.tests.delete_force(self.samdb, usr1)
self.samdb.add({
"dn": usr1,
"objectclass": "user",
"description": "test user description",
"samaccountname": "testuser"})
objLive1 = self.search_dn(usr1)
guid1 = objLive1["objectGUID"][0]
self.samdb.delete(usr1)
objDeleted1 = self.search_guid(guid1)
self.restore_deleted_object(self.samdb, objDeleted1.dn, usr2)
objLive2 = self.search_dn(usr2)
samba.tests.delete_force(self.samdb, usr1)
samba.tests.delete_force(self.samdb, usr2)
def test_undelete_existing(self):
print("Testing undelete user after a user with the same dn has been created")
usr1 = "cn=testuser,cn=users," + self.base_dn
self.samdb.add({
"dn": usr1,
"objectclass": "user",
"description": "test user description",
"samaccountname": "testuser"})
objLive1 = self.search_dn(usr1)
guid1 = objLive1["objectGUID"][0]
self.samdb.delete(usr1)
self.samdb.add({
"dn": usr1,
"objectclass": "user",
"description": "test user description",
"samaccountname": "testuser"})
objDeleted1 = self.search_guid(guid1)
try:
self.restore_deleted_object(self.samdb, objDeleted1.dn, usr1)
self.fail()
except LdbError as e3:
(num, _) = e3.args
self.assertEqual(num, ERR_ENTRY_ALREADY_EXISTS)
def test_undelete_cross_nc(self):
print("Cross NC undelete")
c1 = "cn=ldaptestcontainer," + self.base_dn
c2 = "cn=ldaptestcontainer2," + self.configuration_dn
c3 = "cn=ldaptestcontainer," + self.configuration_dn
c4 = "cn=ldaptestcontainer2," + self.base_dn
samba.tests.delete_force(self.samdb, c1)
samba.tests.delete_force(self.samdb, c2)
samba.tests.delete_force(self.samdb, c3)
samba.tests.delete_force(self.samdb, c4)
self.samdb.add({
"dn": c1,
"objectclass": "container"})
self.samdb.add({
"dn": c2,
"objectclass": "container"})
objLive1 = self.search_dn(c1)
objLive2 = self.search_dn(c2)
guid1 = objLive1["objectGUID"][0]
guid2 = objLive2["objectGUID"][0]
self.samdb.delete(c1)
self.samdb.delete(c2)
objDeleted1 = self.search_guid(guid1)
objDeleted2 = self.search_guid(guid2)
# try to undelete from base dn to config
try:
self.restore_deleted_object(self.samdb, objDeleted1.dn, c3)
self.fail()
except LdbError as e4:
(num, _) = e4.args
self.assertEqual(num, ERR_OPERATIONS_ERROR)
# try to undelete from config to base dn
try:
self.restore_deleted_object(self.samdb, objDeleted2.dn, c4)
self.fail()
except LdbError as e5:
(num, _) = e5.args
self.assertEqual(num, ERR_OPERATIONS_ERROR)
# assert undeletion will work in same nc
self.restore_deleted_object(self.samdb, objDeleted1.dn, c4)
self.restore_deleted_object(self.samdb, objDeleted2.dn, c3)
class RestoreUserObjectTestCase(RestoredObjectAttributesBaseTestCase):
"""Test cases for delete/reanimate user objects"""
def _expected_user_add_attributes(self, username, user_dn, category):
return {'dn': user_dn,
'objectClass': '**',
'cn': username,
'distinguishedName': user_dn,
'instanceType': '4',
'whenCreated': '**',
'whenChanged': '**',
'uSNCreated': '**',
'uSNChanged': '**',
'name': username,
'objectGUID': '**',
'userAccountControl': '546',
'badPwdCount': '0',
'badPasswordTime': '0',
'codePage': '0',
'countryCode': '0',
'lastLogon': '0',
'lastLogoff': '0',
'pwdLastSet': '0',
'primaryGroupID': '513',
'objectSid': '**',
'accountExpires': '9223372036854775807',
'logonCount': '0',
'sAMAccountName': username,
'sAMAccountType': '805306368',
'objectCategory': 'CN=%s,%s' % (category, self.schema_dn)
}
def _expected_user_add_metadata(self):
return [
(DRSUAPI_ATTID_objectClass, 1),
(DRSUAPI_ATTID_cn, 1),
(DRSUAPI_ATTID_instanceType, 1),
(DRSUAPI_ATTID_whenCreated, 1),
(DRSUAPI_ATTID_ntSecurityDescriptor, 1),
(DRSUAPI_ATTID_name, 1),
(DRSUAPI_ATTID_userAccountControl, None),
(DRSUAPI_ATTID_codePage, 1),
(DRSUAPI_ATTID_countryCode, 1),
(DRSUAPI_ATTID_dBCSPwd, 1),
(DRSUAPI_ATTID_logonHours, 1),
(DRSUAPI_ATTID_unicodePwd, 1),
(DRSUAPI_ATTID_ntPwdHistory, 1),
(DRSUAPI_ATTID_pwdLastSet, 1),
(DRSUAPI_ATTID_primaryGroupID, 1),
(DRSUAPI_ATTID_objectSid, 1),
(DRSUAPI_ATTID_accountExpires, 1),
(DRSUAPI_ATTID_lmPwdHistory, 1),
(DRSUAPI_ATTID_sAMAccountName, 1),
(DRSUAPI_ATTID_sAMAccountType, 1),
(DRSUAPI_ATTID_objectCategory, 1)]
def _expected_user_del_attributes(self, username, _guid, _sid):
guid = ndr_unpack(misc.GUID, _guid)
dn = "CN=%s\\0ADEL:%s,CN=Deleted Objects,%s" % (username, guid, self.base_dn)
cn = "%s\nDEL:%s" % (username, guid)
return {'dn': dn,
'objectClass': '**',
'cn': cn,
'distinguishedName': dn,
'isDeleted': 'TRUE',
'isRecycled': 'TRUE',
'instanceType': '4',
'whenCreated': '**',
'whenChanged': '**',
'uSNCreated': '**',
'uSNChanged': '**',
'name': cn,
'objectGUID': _guid,
'userAccountControl': '546',
'objectSid': _sid,
'sAMAccountName': username,
'lastKnownParent': 'CN=Users,%s' % self.base_dn,
}
def _expected_user_del_metadata(self):
return [
(DRSUAPI_ATTID_objectClass, 1),
(DRSUAPI_ATTID_cn, 2),
(DRSUAPI_ATTID_instanceType, 1),
(DRSUAPI_ATTID_whenCreated, 1),
(DRSUAPI_ATTID_isDeleted, 1),
(DRSUAPI_ATTID_ntSecurityDescriptor, 1),
(DRSUAPI_ATTID_name, 2),
(DRSUAPI_ATTID_userAccountControl, None),
(DRSUAPI_ATTID_codePage, 2),
(DRSUAPI_ATTID_countryCode, 2),
(DRSUAPI_ATTID_dBCSPwd, 1),
(DRSUAPI_ATTID_logonHours, 1),
(DRSUAPI_ATTID_unicodePwd, 1),
(DRSUAPI_ATTID_ntPwdHistory, 1),
(DRSUAPI_ATTID_pwdLastSet, 2),
(DRSUAPI_ATTID_primaryGroupID, 2),
(DRSUAPI_ATTID_objectSid, 1),
(DRSUAPI_ATTID_accountExpires, 2),
(DRSUAPI_ATTID_lmPwdHistory, 1),
(DRSUAPI_ATTID_sAMAccountName, 1),
(DRSUAPI_ATTID_sAMAccountType, 2),
(DRSUAPI_ATTID_lastKnownParent, 1),
(DRSUAPI_ATTID_objectCategory, 2),
(DRSUAPI_ATTID_isRecycled, 1)]
def _expected_user_restore_attributes(self, username, guid, sid, user_dn, category):
return {'dn': user_dn,
'objectClass': '**',
'cn': username,
'distinguishedName': user_dn,
'instanceType': '4',
'whenCreated': '**',
'whenChanged': '**',
'uSNCreated': '**',
'uSNChanged': '**',
'name': username,
'objectGUID': guid,
'userAccountControl': '546',
'badPwdCount': '0',
'badPasswordTime': '0',
'codePage': '0',
'countryCode': '0',
'lastLogon': '0',
'lastLogoff': '0',
'pwdLastSet': '0',
'primaryGroupID': '513',
'operatorCount': '0',
'objectSid': sid,
'adminCount': '0',
'accountExpires': '0',
'logonCount': '0',
'sAMAccountName': username,
'sAMAccountType': '805306368',
'lastKnownParent': 'CN=Users,%s' % self.base_dn,
'objectCategory': 'CN=%s,%s' % (category, self.schema_dn)
}
def _expected_user_restore_metadata(self):
return [
(DRSUAPI_ATTID_objectClass, 1),
(DRSUAPI_ATTID_cn, 3),
(DRSUAPI_ATTID_instanceType, 1),
(DRSUAPI_ATTID_whenCreated, 1),
(DRSUAPI_ATTID_isDeleted, 2),
(DRSUAPI_ATTID_ntSecurityDescriptor, 1),
(DRSUAPI_ATTID_name, 3),
(DRSUAPI_ATTID_userAccountControl, None),
(DRSUAPI_ATTID_codePage, 3),
(DRSUAPI_ATTID_countryCode, 3),
(DRSUAPI_ATTID_dBCSPwd, 1),
(DRSUAPI_ATTID_logonHours, 1),
(DRSUAPI_ATTID_unicodePwd, 1),
(DRSUAPI_ATTID_ntPwdHistory, 1),
(DRSUAPI_ATTID_pwdLastSet, 3),
(DRSUAPI_ATTID_primaryGroupID, 3),
(DRSUAPI_ATTID_operatorCount, 1),
(DRSUAPI_ATTID_objectSid, 1),
(DRSUAPI_ATTID_adminCount, 1),
(DRSUAPI_ATTID_accountExpires, 3),
(DRSUAPI_ATTID_lmPwdHistory, 1),
(DRSUAPI_ATTID_sAMAccountName, 1),
(DRSUAPI_ATTID_sAMAccountType, 3),
(DRSUAPI_ATTID_lastKnownParent, 1),
(DRSUAPI_ATTID_objectCategory, 3),
(DRSUAPI_ATTID_isRecycled, 2)]
def test_restore_user(self):
print("Test restored user attributes")
username = "restore_user"
usr_dn = "CN=%s,CN=Users,%s" % (username, self.base_dn)
samba.tests.delete_force(self.samdb, usr_dn)
self.samdb.add({
"dn": usr_dn,
"objectClass": "user",
"sAMAccountName": username})
obj = self.search_dn(usr_dn)
guid = obj["objectGUID"][0]
sid = obj["objectSID"][0]
obj_rmd = self.search_guid(guid, attrs=["replPropertyMetaData"])
self.assertAttributesExists(self._expected_user_add_attributes(username, usr_dn, "Person"), obj)
self._check_metadata(obj_rmd["replPropertyMetaData"],
self._expected_user_add_metadata())
self.samdb.delete(usr_dn)
obj_del = self.search_guid(guid)
obj_del_rmd = self.search_guid(guid, attrs=["replPropertyMetaData"])
orig_attrs = set(obj.keys())
del_attrs = set(obj_del.keys())
self.assertAttributesExists(self._expected_user_del_attributes(username, guid, sid), obj_del)
self._check_metadata(obj_del_rmd["replPropertyMetaData"],
self._expected_user_del_metadata())
# restore the user and fetch what's restored
self.restore_deleted_object(self.samdb, obj_del.dn, usr_dn)
obj_restore = self.search_guid(guid)
obj_restore_rmd = self.search_guid(guid, attrs=["replPropertyMetaData"])
# check that the original and restored attributes are the same
orig_attrs = set(obj.keys())
# Windows restores more attributes than we originally had
orig_attrs.update(['adminCount', 'operatorCount', 'lastKnownParent'])
rest_attrs = set(obj_restore.keys())
self.assertAttributesExists(self._expected_user_restore_attributes(username, guid, sid, usr_dn, "Person"), obj_restore)
self._check_metadata(obj_restore_rmd["replPropertyMetaData"],
self._expected_user_restore_metadata())
class RestoreUserPwdObjectTestCase(RestoredObjectAttributesBaseTestCase):
"""Test cases for delete/reanimate user objects with password"""
def _expected_userpw_add_attributes(self, username, user_dn, category):
return {'dn': user_dn,
'objectClass': '**',
'cn': username,
'distinguishedName': user_dn,
'instanceType': '4',
'whenCreated': '**',
'whenChanged': '**',
'uSNCreated': '**',
'uSNChanged': '**',
'name': username,
'objectGUID': '**',
'userAccountControl': '546',
'badPwdCount': '0',
'badPasswordTime': '0',
'codePage': '0',
'countryCode': '0',
'lastLogon': '0',
'lastLogoff': '0',
'pwdLastSet': '**',
'primaryGroupID': '513',
'objectSid': '**',
'accountExpires': '9223372036854775807',
'logonCount': '0',
'sAMAccountName': username,
'sAMAccountType': '805306368',
'objectCategory': 'CN=%s,%s' % (category, self.schema_dn)
}
def _expected_userpw_add_metadata(self):
return [
(DRSUAPI_ATTID_objectClass, 1),
(DRSUAPI_ATTID_cn, 1),
(DRSUAPI_ATTID_instanceType, 1),
(DRSUAPI_ATTID_whenCreated, 1),
(DRSUAPI_ATTID_ntSecurityDescriptor, 1),
(DRSUAPI_ATTID_name, 1),
(DRSUAPI_ATTID_userAccountControl, None),
(DRSUAPI_ATTID_codePage, 1),
(DRSUAPI_ATTID_countryCode, 1),
(DRSUAPI_ATTID_dBCSPwd, 1),
(DRSUAPI_ATTID_logonHours, 1),
(DRSUAPI_ATTID_unicodePwd, 1),
(DRSUAPI_ATTID_ntPwdHistory, 1),
(DRSUAPI_ATTID_pwdLastSet, 1),
(DRSUAPI_ATTID_primaryGroupID, 1),
(DRSUAPI_ATTID_supplementalCredentials, 1),
(DRSUAPI_ATTID_objectSid, 1),
(DRSUAPI_ATTID_accountExpires, 1),
(DRSUAPI_ATTID_lmPwdHistory, 1),
(DRSUAPI_ATTID_sAMAccountName, 1),
(DRSUAPI_ATTID_sAMAccountType, 1),
(DRSUAPI_ATTID_objectCategory, 1)]
def _expected_userpw_del_attributes(self, username, _guid, _sid):
guid = ndr_unpack(misc.GUID, _guid)
dn = "CN=%s\\0ADEL:%s,CN=Deleted Objects,%s" % (username, guid, self.base_dn)
cn = "%s\nDEL:%s" % (username, guid)
return {'dn': dn,
'objectClass': '**',
'cn': cn,
'distinguishedName': dn,
'isDeleted': 'TRUE',
'isRecycled': 'TRUE',
'instanceType': '4',
'whenCreated': '**',
'whenChanged': '**',
'uSNCreated': '**',
'uSNChanged': '**',
'name': cn,
'objectGUID': _guid,
'userAccountControl': '546',
'objectSid': _sid,
'sAMAccountName': username,
'lastKnownParent': 'CN=Users,%s' % self.base_dn,
}
def _expected_userpw_del_metadata(self):
return [
(DRSUAPI_ATTID_objectClass, 1),
(DRSUAPI_ATTID_cn, 2),
(DRSUAPI_ATTID_instanceType, 1),
(DRSUAPI_ATTID_whenCreated, 1),
(DRSUAPI_ATTID_isDeleted, 1),
(DRSUAPI_ATTID_ntSecurityDescriptor, 1),
(DRSUAPI_ATTID_name, 2),
(DRSUAPI_ATTID_userAccountControl, None),
(DRSUAPI_ATTID_codePage, 2),
(DRSUAPI_ATTID_countryCode, 2),
(DRSUAPI_ATTID_dBCSPwd, 1),
(DRSUAPI_ATTID_logonHours, 1),
(DRSUAPI_ATTID_unicodePwd, 2),
(DRSUAPI_ATTID_ntPwdHistory, 2),
(DRSUAPI_ATTID_pwdLastSet, 2),
(DRSUAPI_ATTID_primaryGroupID, 2),
(DRSUAPI_ATTID_supplementalCredentials, 2),
(DRSUAPI_ATTID_objectSid, 1),
(DRSUAPI_ATTID_accountExpires, 2),
(DRSUAPI_ATTID_lmPwdHistory, 2),
(DRSUAPI_ATTID_sAMAccountName, 1),
(DRSUAPI_ATTID_sAMAccountType, 2),
(DRSUAPI_ATTID_lastKnownParent, 1),
(DRSUAPI_ATTID_objectCategory, 2),
(DRSUAPI_ATTID_isRecycled, 1)]
def _expected_userpw_restore_attributes(self, username, guid, sid, user_dn, category):
return {'dn': user_dn,
'objectClass': '**',
'cn': username,
'distinguishedName': user_dn,
'instanceType': '4',
'whenCreated': '**',
'whenChanged': '**',
'uSNCreated': '**',
'uSNChanged': '**',
'name': username,
'objectGUID': guid,
'userAccountControl': '546',
'badPwdCount': '0',
'badPasswordTime': '0',
'codePage': '0',
'countryCode': '0',
'lastLogon': '0',
'lastLogoff': '0',
'pwdLastSet': '**',
'primaryGroupID': '513',
'operatorCount': '0',
'objectSid': sid,
'adminCount': '0',
'accountExpires': '0',
'logonCount': '0',
'sAMAccountName': username,
'sAMAccountType': '805306368',
'lastKnownParent': 'CN=Users,%s' % self.base_dn,
'objectCategory': 'CN=%s,%s' % (category, self.schema_dn)
}
def _expected_userpw_restore_metadata(self):
return [
(DRSUAPI_ATTID_objectClass, 1),
(DRSUAPI_ATTID_cn, 3),
(DRSUAPI_ATTID_instanceType, 1),
(DRSUAPI_ATTID_whenCreated, 1),
(DRSUAPI_ATTID_isDeleted, 2),
(DRSUAPI_ATTID_ntSecurityDescriptor, 1),
(DRSUAPI_ATTID_name, 3),
(DRSUAPI_ATTID_userAccountControl, None),
(DRSUAPI_ATTID_codePage, 3),
(DRSUAPI_ATTID_countryCode, 3),
(DRSUAPI_ATTID_dBCSPwd, 2),
(DRSUAPI_ATTID_logonHours, 1),
(DRSUAPI_ATTID_unicodePwd, 3),
(DRSUAPI_ATTID_ntPwdHistory, 3),
(DRSUAPI_ATTID_pwdLastSet, 4),
(DRSUAPI_ATTID_primaryGroupID, 3),
(DRSUAPI_ATTID_supplementalCredentials, 3),
(DRSUAPI_ATTID_operatorCount, 1),
(DRSUAPI_ATTID_objectSid, 1),
(DRSUAPI_ATTID_adminCount, 1),
(DRSUAPI_ATTID_accountExpires, 3),
(DRSUAPI_ATTID_lmPwdHistory, 3),
(DRSUAPI_ATTID_sAMAccountName, 1),
(DRSUAPI_ATTID_sAMAccountType, 3),
(DRSUAPI_ATTID_lastKnownParent, 1),
(DRSUAPI_ATTID_objectCategory, 3),
(DRSUAPI_ATTID_isRecycled, 2)]
def test_restorepw_user(self):
print("Test restored user attributes")
username = "restorepw_user"
usr_dn = "CN=%s,CN=Users,%s" % (username, self.base_dn)
samba.tests.delete_force(self.samdb, usr_dn)
self.samdb.add({
"dn": usr_dn,
"objectClass": "user",
"userPassword": "thatsAcomplPASS0",
"sAMAccountName": username})
obj = self.search_dn(usr_dn)
guid = obj["objectGUID"][0]
sid = obj["objectSID"][0]
obj_rmd = self.search_guid(guid, attrs=["replPropertyMetaData"])
self.assertAttributesExists(self._expected_userpw_add_attributes(username, usr_dn, "Person"), obj)
self._check_metadata(obj_rmd["replPropertyMetaData"],
self._expected_userpw_add_metadata())
self.samdb.delete(usr_dn)
obj_del = self.search_guid(guid)
obj_del_rmd = self.search_guid(guid, attrs=["replPropertyMetaData"])
orig_attrs = set(obj.keys())
del_attrs = set(obj_del.keys())
self.assertAttributesExists(self._expected_userpw_del_attributes(username, guid, sid), obj_del)
self._check_metadata(obj_del_rmd["replPropertyMetaData"],
self._expected_userpw_del_metadata())
# restore the user and fetch what's restored
self.restore_deleted_object(self.samdb, obj_del.dn, usr_dn, {"userPassword": ["thatsAcomplPASS1"]})
obj_restore = self.search_guid(guid)
obj_restore_rmd = self.search_guid(guid, attrs=["replPropertyMetaData"])
# check that the original and restored attributes are the same
orig_attrs = set(obj.keys())
# Windows restores more attributes than we originally had
orig_attrs.update(['adminCount', 'operatorCount', 'lastKnownParent'])
rest_attrs = set(obj_restore.keys())
self.assertAttributesExists(self._expected_userpw_restore_attributes(username, guid, sid, usr_dn, "Person"), obj_restore)
self._check_metadata(obj_restore_rmd["replPropertyMetaData"],
self._expected_userpw_restore_metadata())
class RestoreGroupObjectTestCase(RestoredObjectAttributesBaseTestCase):
"""Test different scenarios for delete/reanimate group objects"""
def _make_object_dn(self, name):
return "CN=%s,CN=Users,%s" % (name, self.base_dn)
def _create_test_user(self, user_name):
user_dn = self._make_object_dn(user_name)
ldif = {
"dn": user_dn,
"objectClass": "user",
"sAMAccountName": user_name,
}
        # delete the object if it is left over from a previous test
        samba.tests.delete_force(self.samdb, user_dn)
        # finally, create the user
self.samdb.add(ldif)
return self.search_dn(user_dn)
def _create_test_group(self, group_name, members=None):
group_dn = self._make_object_dn(group_name)
ldif = {
"dn": group_dn,
"objectClass": "group",
"sAMAccountName": group_name,
}
try:
ldif["member"] = [str(usr_dn) for usr_dn in members]
except TypeError:
pass
        # delete the object if it is left over from a previous test
samba.tests.delete_force(self.samdb, group_dn)
# finally, create the group
self.samdb.add(ldif)
return self.search_dn(group_dn)
def _expected_group_attributes(self, groupname, group_dn, category):
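        # groupType -2147483646 is 0x80000002, i.e. a security-enabled global
        # group; sAMAccountType 268435456 is 0x10000000 (SAM_GROUP_OBJECT)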
return {'dn': group_dn,
'groupType': '-2147483646',
'distinguishedName': group_dn,
'sAMAccountName': groupname,
'name': groupname,
'objectCategory': 'CN=%s,%s' % (category, self.schema_dn),
'objectClass': '**',
'objectGUID': '**',
'lastKnownParent': 'CN=Users,%s' % self.base_dn,
'whenChanged': '**',
'sAMAccountType': '268435456',
'objectSid': '**',
'whenCreated': '**',
'uSNCreated': '**',
'operatorCount': '0',
'uSNChanged': '**',
'instanceType': '4',
'adminCount': '0',
'cn': groupname}
def test_plain_group(self):
print("Test restored Group attributes")
# create test group
obj = self._create_test_group("r_group")
guid = obj["objectGUID"][0]
# delete the group
self.samdb.delete(str(obj.dn))
obj_del = self.search_guid(guid)
# restore the Group and fetch what's restored
self.restore_deleted_object(self.samdb, obj_del.dn, obj.dn)
obj_restore = self.search_guid(guid)
        # check that the original attributes and the restored ones are the same
        attr_orig = set(obj.keys())
        # Windows restores more attributes than we originally had
        attr_orig.update(['adminCount', 'operatorCount', 'lastKnownParent'])
attr_rest = set(obj_restore.keys())
self.assertAttributesEqual(obj, attr_orig, obj_restore, attr_rest)
self.assertAttributesExists(self._expected_group_attributes("r_group", str(obj.dn), "Group"), obj_restore)
def test_group_with_members(self):
print("Test restored Group with members attributes")
        # create test users and a group containing them
usr1 = self._create_test_user("r_user_1")
usr2 = self._create_test_user("r_user_2")
obj = self._create_test_group("r_group", [usr1.dn, usr2.dn])
guid = obj["objectGUID"][0]
# delete the group
self.samdb.delete(str(obj.dn))
obj_del = self.search_guid(guid)
# restore the Group and fetch what's restored
self.restore_deleted_object(self.samdb, obj_del.dn, obj.dn)
obj_restore = self.search_guid(guid)
        # check that the original attributes and the restored ones are the same
        attr_orig = set(obj.keys())
        # Windows restores more attributes than we originally had
        attr_orig.update(['adminCount', 'operatorCount', 'lastKnownParent'])
        # and does not restore the following attributes
        attr_orig.remove("member")
attr_rest = set(obj_restore.keys())
self.assertAttributesEqual(obj, attr_orig, obj_restore, attr_rest)
self.assertAttributesExists(self._expected_group_attributes("r_group", str(obj.dn), "Group"), obj_restore)
class RestoreContainerObjectTestCase(RestoredObjectAttributesBaseTestCase):
"""Test different scenarios for delete/reanimate OU/container objects"""
def _expected_container_attributes(self, rdn, name, dn, category):
if rdn == 'OU':
lastKnownParent = '%s' % self.base_dn
else:
lastKnownParent = 'CN=Users,%s' % self.base_dn
return {'dn': dn,
'distinguishedName': dn,
'name': name,
'objectCategory': 'CN=%s,%s' % (category, self.schema_dn),
'objectClass': '**',
'objectGUID': '**',
'lastKnownParent': lastKnownParent,
'whenChanged': '**',
'whenCreated': '**',
'uSNCreated': '**',
'uSNChanged': '**',
'instanceType': '4',
rdn.lower(): name}
def _create_test_ou(self, rdn, name=None, description=None):
ou_dn = "OU=%s,%s" % (rdn, self.base_dn)
        # delete the object if it is left over from a previous test
samba.tests.delete_force(self.samdb, ou_dn)
# create ou and return created object
self.samdb.create_ou(ou_dn, name=name, description=description)
return self.search_dn(ou_dn)
def test_ou_with_name_description(self):
print("Test OU reanimation")
# create OU to test with
obj = self._create_test_ou(rdn="r_ou",
name="r_ou name",
description="r_ou description")
guid = obj["objectGUID"][0]
# delete the object
self.samdb.delete(str(obj.dn))
obj_del = self.search_guid(guid)
# restore the Object and fetch what's restored
self.restore_deleted_object(self.samdb, obj_del.dn, obj.dn)
obj_restore = self.search_guid(guid)
        # check that the original attributes and the restored ones are the same
        attr_orig = set(obj.keys())
        attr_rest = set(obj_restore.keys())
        # Windows restores more attributes than we originally had
        attr_orig.update(["lastKnownParent"])
        # and does not restore the following attributes
        attr_orig -= set(["description"])
self.assertAttributesEqual(obj, attr_orig, obj_restore, attr_rest)
expected_attrs = self._expected_container_attributes("OU", "r_ou", str(obj.dn), "Organizational-Unit")
self.assertAttributesExists(expected_attrs, obj_restore)
def test_container(self):
print("Test Container reanimation")
# create test Container
obj = self._create_object({
"dn": "CN=r_container,CN=Users,%s" % self.base_dn,
"objectClass": "container"
})
guid = obj["objectGUID"][0]
# delete the object
self.samdb.delete(str(obj.dn))
obj_del = self.search_guid(guid)
# restore the Object and fetch what's restored
self.restore_deleted_object(self.samdb, obj_del.dn, obj.dn)
obj_restore = self.search_guid(guid)
        # check that the original attributes and the restored ones are the same
        attr_orig = set(obj.keys())
        attr_rest = set(obj_restore.keys())
        # Windows restores more attributes than we originally had
        attr_orig.update(["lastKnownParent"])
        # and does not restore the following attributes
        attr_orig -= set(["showInAdvancedViewOnly"])
self.assertAttributesEqual(obj, attr_orig, obj_restore, attr_rest)
expected_attrs = self._expected_container_attributes("CN", "r_container",
str(obj.dn), "Container")
self.assertAttributesExists(expected_attrs, obj_restore)
if __name__ == '__main__':
unittest.main()
|
pwoodworth/intellij-community
|
refs/heads/master
|
python/testData/refactoring/extractsuperclass/importNotBroken.before.py
|
80
|
from shared import SharedClass
class Source(SharedClass):
pass
|
ojengwa/sympy
|
refs/heads/master
|
sympy/functions/special/tests/test_hyper.py
|
6
|
from sympy import (hyper, meijerg, S, Tuple, pi, I, exp, log,
cos, sqrt, symbols, oo, Derivative, gamma, O)
from sympy.series.limits import limit
from sympy.abc import x, z, k
from sympy.utilities.pytest import raises
from sympy.utilities.randtest import (
random_complex_number as randcplx,
test_numerically as tn,
test_derivative_numerically as td)
def test_TupleParametersBase():
# test that our implementation of the chain rule works
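    # hyper with no parameters is exp, so the chain rule gives p.diff(z) == 2*z*p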
p = hyper((), (), z**2)
assert p.diff(z) == p*2*z
def test_hyper():
raises(TypeError, lambda: hyper(1, 2, z))
assert hyper((1, 2), (1,), z) == hyper(Tuple(1, 2), Tuple(1), z)
h = hyper((1, 2), (3, 4, 5), z)
assert h.ap == Tuple(1, 2)
assert h.bq == Tuple(3, 4, 5)
assert h.argument == z
assert h.is_commutative is True
# just a few checks to make sure that all arguments go where they should
assert tn(hyper(Tuple(), Tuple(), z), exp(z), z)
assert tn(z*hyper((1, 1), Tuple(2), -z), log(1 + z), z)
# differentiation
h = hyper(
(randcplx(), randcplx(), randcplx()), (randcplx(), randcplx()), z)
assert td(h, z)
a1, a2, b1, b2, b3 = symbols('a1:3, b1:4')
assert hyper((a1, a2), (b1, b2, b3), z).diff(z) == \
a1*a2/(b1*b2*b3) * hyper((a1 + 1, a2 + 1), (b1 + 1, b2 + 1, b3 + 1), z)
# differentiation wrt parameters is not supported
assert hyper([z], [], z).diff(z) == Derivative(hyper([z], [], z), z)
# hyper is unbranched wrt parameters
from sympy import polar_lift
assert hyper([polar_lift(z)], [polar_lift(k)], polar_lift(x)) == \
hyper([z], [k], polar_lift(x))
def test_expand_func():
# evaluation at 1 of Gauss' hypergeometric function:
from sympy.abc import a, b, c
from sympy import gamma, expand_func
a1, b1, c1 = randcplx(), randcplx(), randcplx() + 5
assert expand_func(hyper([a, b], [c], 1)) == \
gamma(c)*gamma(-a - b + c)/(gamma(-a + c)*gamma(-b + c))
assert abs(expand_func(hyper([a1, b1], [c1], 1)).n()
- hyper([a1, b1], [c1], 1).n()) < 1e-10
# hyperexpand wrapper for hyper:
assert expand_func(hyper([], [], z)) == exp(z)
assert expand_func(hyper([1, 2, 3], [], z)) == hyper([1, 2, 3], [], z)
assert expand_func(meijerg([[1, 1], []], [[1], [0]], z)) == log(z + 1)
assert expand_func(meijerg([[1, 1], []], [[], []], z)) == \
meijerg([[1, 1], []], [[], []], z)
def replace_dummy(expr, sym):
from sympy import Dummy
dum = expr.atoms(Dummy)
if not dum:
return expr
assert len(dum) == 1
return expr.xreplace({dum.pop(): sym})
def test_hyper_rewrite_sum():
from sympy import RisingFactorial, factorial, Dummy, Sum
_k = Dummy("k")
assert replace_dummy(hyper((1, 2), (1, 3), x).rewrite(Sum), _k) == \
Sum(x**_k / factorial(_k) * RisingFactorial(2, _k) /
RisingFactorial(3, _k), (_k, 0, oo))
assert hyper((1, 2, 3), (-1, 3), z).rewrite(Sum) == \
hyper((1, 2, 3), (-1, 3), z)
def test_radius_of_convergence():
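    # for a pFq series the radius is oo if p < q + 1, 1 if p == q + 1 and 0 if
    # p > q + 1; non-positive integer parameters can terminate or break the
    # series and override these defaults, as the cases below exercise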
assert hyper((1, 2), [3], z).radius_of_convergence == 1
assert hyper((1, 2), [3, 4], z).radius_of_convergence == oo
assert hyper((1, 2, 3), [4], z).radius_of_convergence == 0
assert hyper((0, 1, 2), [4], z).radius_of_convergence == oo
assert hyper((-1, 1, 2), [-4], z).radius_of_convergence == 0
assert hyper((-1, -2, 2), [-1], z).radius_of_convergence == oo
assert hyper((-1, 2), [-1, -2], z).radius_of_convergence == 0
assert hyper([-1, 1, 3], [-2, 2], z).radius_of_convergence == 1
assert hyper([-1, 1], [-2, 2], z).radius_of_convergence == oo
assert hyper([-1, 1, 3], [-2], z).radius_of_convergence == 0
assert hyper((-1, 2, 3, 4), [], z).radius_of_convergence == oo
assert hyper([1, 1], [3], 1).convergence_statement == True
assert hyper([1, 1], [2], 1).convergence_statement == False
assert hyper([1, 1], [2], -1).convergence_statement == True
assert hyper([1, 1], [1], -1).convergence_statement == False
def test_meijer():
raises(TypeError, lambda: meijerg(1, z))
raises(TypeError, lambda: meijerg(((1,), (2,)), (3,), (4,), z))
assert meijerg(((1, 2), (3,)), ((4,), (5,)), z) == \
meijerg(Tuple(1, 2), Tuple(3), Tuple(4), Tuple(5), z)
g = meijerg((1, 2), (3, 4, 5), (6, 7, 8, 9), (10, 11, 12, 13, 14), z)
assert g.an == Tuple(1, 2)
assert g.ap == Tuple(1, 2, 3, 4, 5)
assert g.aother == Tuple(3, 4, 5)
assert g.bm == Tuple(6, 7, 8, 9)
assert g.bq == Tuple(6, 7, 8, 9, 10, 11, 12, 13, 14)
assert g.bother == Tuple(10, 11, 12, 13, 14)
assert g.argument == z
assert g.nu == 75
assert g.delta == -1
assert g.is_commutative is True
assert meijerg([1, 2], [3], [4], [5], z).delta == S(1)/2
# just a few checks to make sure that all arguments go where they should
assert tn(meijerg(Tuple(), Tuple(), Tuple(0), Tuple(), -z), exp(z), z)
assert tn(sqrt(pi)*meijerg(Tuple(), Tuple(),
Tuple(0), Tuple(S(1)/2), z**2/4), cos(z), z)
assert tn(meijerg(Tuple(1, 1), Tuple(), Tuple(1), Tuple(0), z),
log(1 + z), z)
# differentiation
g = meijerg((randcplx(),), (randcplx() + 2*I,), Tuple(),
(randcplx(), randcplx()), z)
assert td(g, z)
g = meijerg(Tuple(), (randcplx(),), Tuple(),
(randcplx(), randcplx()), z)
assert td(g, z)
g = meijerg(Tuple(), Tuple(), Tuple(randcplx()),
Tuple(randcplx(), randcplx()), z)
assert td(g, z)
a1, a2, b1, b2, c1, c2, d1, d2 = symbols('a1:3, b1:3, c1:3, d1:3')
assert meijerg((a1, a2), (b1, b2), (c1, c2), (d1, d2), z).diff(z) == \
(meijerg((a1 - 1, a2), (b1, b2), (c1, c2), (d1, d2), z)
+ (a1 - 1)*meijerg((a1, a2), (b1, b2), (c1, c2), (d1, d2), z))/z
assert meijerg([z, z], [], [], [], z).diff(z) == \
Derivative(meijerg([z, z], [], [], [], z), z)
# meijerg is unbranched wrt parameters
from sympy import polar_lift as pl
assert meijerg([pl(a1)], [pl(a2)], [pl(b1)], [pl(b2)], pl(z)) == \
meijerg([a1], [a2], [b1], [b2], pl(z))
# integrand
from sympy.abc import a, b, c, d, s
assert meijerg([a], [b], [c], [d], z).integrand(s) == \
z**s*gamma(c - s)*gamma(-a + s + 1)/(gamma(b - s)*gamma(-d + s + 1))
def test_meijerg_derivative():
assert meijerg([], [1, 1], [0, 0, x], [], z).diff(x) == \
log(z)*meijerg([], [1, 1], [0, 0, x], [], z) \
+ 2*meijerg([], [1, 1, 1], [0, 0, x, 0], [], z)
y = randcplx()
a = 5 # mpmath chokes with non-real numbers, and Mod1 with floats
assert td(meijerg([x], [], [], [], y), x)
assert td(meijerg([x**2], [], [], [], y), x)
assert td(meijerg([], [x], [], [], y), x)
assert td(meijerg([], [], [x], [], y), x)
assert td(meijerg([], [], [], [x], y), x)
assert td(meijerg([x], [a], [a + 1], [], y), x)
assert td(meijerg([x], [a + 1], [a], [], y), x)
assert td(meijerg([x, a], [], [], [a + 1], y), x)
assert td(meijerg([x, a + 1], [], [], [a], y), x)
b = S(3)/2
assert td(meijerg([a + 2], [b], [b - 3, x], [a], y), x)
def test_meijerg_period():
assert meijerg([], [1], [0], [], x).get_period() == 2*pi
assert meijerg([1], [], [], [0], x).get_period() == 2*pi
assert meijerg([], [], [0], [], x).get_period() == 2*pi # exp(x)
assert meijerg(
[], [], [0], [S(1)/2], x).get_period() == 2*pi # cos(sqrt(x))
assert meijerg(
[], [], [S(1)/2], [0], x).get_period() == 4*pi # sin(sqrt(x))
assert meijerg([1, 1], [], [1], [0], x).get_period() == oo # log(1 + x)
def test_hyper_unpolarify():
from sympy import exp_polar
a = exp_polar(2*pi*I)*x
b = x
assert hyper([], [], a).argument == b
assert hyper([0], [], a).argument == a
assert hyper([0], [0], a).argument == b
assert hyper([0, 1], [0], a).argument == a
def test_hyperrep():
from sympy.functions.special.hyper import (HyperRep, HyperRep_atanh,
HyperRep_power1, HyperRep_power2, HyperRep_log1, HyperRep_asin1,
HyperRep_asin2, HyperRep_sqrts1, HyperRep_sqrts2, HyperRep_log2,
HyperRep_cosasin, HyperRep_sinasin)
# First test the base class works.
from sympy import Piecewise, exp_polar
a, b, c, d, z = symbols('a b c d z')
class myrep(HyperRep):
@classmethod
def _expr_small(cls, x):
return a
@classmethod
def _expr_small_minus(cls, x):
return b
@classmethod
def _expr_big(cls, x, n):
return c*n
@classmethod
def _expr_big_minus(cls, x, n):
return d*n
assert myrep(z).rewrite('nonrep') == Piecewise((0, abs(z) > 1), (a, True))
assert myrep(exp_polar(I*pi)*z).rewrite('nonrep') == \
Piecewise((0, abs(z) > 1), (b, True))
assert myrep(exp_polar(2*I*pi)*z).rewrite('nonrep') == \
Piecewise((c, abs(z) > 1), (a, True))
assert myrep(exp_polar(3*I*pi)*z).rewrite('nonrep') == \
Piecewise((d, abs(z) > 1), (b, True))
assert myrep(exp_polar(4*I*pi)*z).rewrite('nonrep') == \
Piecewise((2*c, abs(z) > 1), (a, True))
assert myrep(exp_polar(5*I*pi)*z).rewrite('nonrep') == \
Piecewise((2*d, abs(z) > 1), (b, True))
assert myrep(z).rewrite('nonrepsmall') == a
assert myrep(exp_polar(I*pi)*z).rewrite('nonrepsmall') == b
def t(func, hyp, z):
""" Test that func is a valid representation of hyp. """
# First test that func agrees with hyp for small z
if not tn(func.rewrite('nonrepsmall'), hyp, z,
a=S(-1)/2, b=S(-1)/2, c=S(1)/2, d=S(1)/2):
return False
# Next check that the two small representations agree.
if not tn(
func.rewrite('nonrepsmall').subs(
z, exp_polar(I*pi)*z).replace(exp_polar, exp),
func.subs(z, exp_polar(I*pi)*z).rewrite('nonrepsmall'),
z, a=S(-1)/2, b=S(-1)/2, c=S(1)/2, d=S(1)/2):
return False
# Next check continuity along exp_polar(I*pi)*t
expr = func.subs(z, exp_polar(I*pi)*z).rewrite('nonrep')
if abs(expr.subs(z, 1 + 1e-15).n() - expr.subs(z, 1 - 1e-15).n()) > 1e-10:
return False
# Finally check continuity of the big reps.
def dosubs(func, a, b):
rv = func.subs(z, exp_polar(a)*z).rewrite('nonrep')
return rv.subs(z, exp_polar(b)*z).replace(exp_polar, exp)
for n in [0, 1, 2, 3, 4, -1, -2, -3, -4]:
expr1 = dosubs(func, 2*I*pi*n, I*pi/2)
expr2 = dosubs(func, 2*I*pi*n + I*pi, -I*pi/2)
if not tn(expr1, expr2, z):
return False
expr1 = dosubs(func, 2*I*pi*(n + 1), -I*pi/2)
expr2 = dosubs(func, 2*I*pi*n + I*pi, I*pi/2)
if not tn(expr1, expr2, z):
return False
return True
# Now test the various representatives.
a = S(1)/3
assert t(HyperRep_atanh(z), hyper([S(1)/2, 1], [S(3)/2], z), z)
assert t(HyperRep_power1(a, z), hyper([-a], [], z), z)
assert t(HyperRep_power2(a, z), hyper([a, a - S(1)/2], [2*a], z), z)
assert t(HyperRep_log1(z), -z*hyper([1, 1], [2], z), z)
assert t(HyperRep_asin1(z), hyper([S(1)/2, S(1)/2], [S(3)/2], z), z)
assert t(HyperRep_asin2(z), hyper([1, 1], [S(3)/2], z), z)
assert t(HyperRep_sqrts1(a, z), hyper([-a, S(1)/2 - a], [S(1)/2], z), z)
assert t(HyperRep_sqrts2(a, z),
-2*z/(2*a + 1)*hyper([-a - S(1)/2, -a], [S(1)/2], z).diff(z), z)
assert t(HyperRep_log2(z), -z/4*hyper([S(3)/2, 1, 1], [2, 2], z), z)
assert t(HyperRep_cosasin(a, z), hyper([-a, a], [S(1)/2], z), z)
assert t(HyperRep_sinasin(a, z), 2*a*z*hyper([1 - a, 1 + a], [S(3)/2], z), z)
def test_meijerg_eval():
from sympy import besseli, exp_polar
from sympy.abc import l
a = randcplx()
arg = x*exp_polar(k*pi*I)
expr1 = pi*meijerg([[], [(a + 1)/2]], [[a/2], [-a/2, (a + 1)/2]], arg**2/4)
expr2 = besseli(a, arg)
# Test that the two expressions agree for all arguments.
for x_ in [0.5, 1.5]:
for k_ in [0.0, 0.1, 0.3, 0.5, 0.8, 1, 5.751, 15.3]:
assert abs((expr1 - expr2).n(subs={x: x_, k: k_})) < 1e-10
assert abs((expr1 - expr2).n(subs={x: x_, k: -k_})) < 1e-10
# Test continuity independently
eps = 1e-13
expr2 = expr1.subs(k, l)
for x_ in [0.5, 1.5]:
for k_ in [0.5, S(1)/3, 0.25, 0.75, S(2)/3, 1.0, 1.5]:
assert abs((expr1 - expr2).n(
subs={x: x_, k: k_ + eps, l: k_ - eps})) < 1e-10
assert abs((expr1 - expr2).n(
subs={x: x_, k: -k_ + eps, l: -k_ - eps})) < 1e-10
expr = (meijerg(((0.5,), ()), ((0.5, 0, 0.5), ()), exp_polar(-I*pi)/4)
+ meijerg(((0.5,), ()), ((0.5, 0, 0.5), ()), exp_polar(I*pi)/4)) \
/(2*sqrt(pi))
assert (expr - pi/exp(1)).n(chop=True) == 0
def test_limits():
k, x = symbols('k, x')
assert hyper((1,), (S(4)/3, S(5)/3), k**2).series(k) == \
hyper((1,), (S(4)/3, S(5)/3), 0) + \
9*k**2*hyper((2,), (S(7)/3, S(8)/3), 0)/20 + \
81*k**4*hyper((3,), (S(10)/3, S(11)/3), 0)/1120 + \
O(k**6) # issue 6350
assert limit(meijerg((), (), (1,), (0,), -x), x, 0) == \
meijerg(((), ()), ((1,), (0,)), 0) # issue 6052
|
nolanliou/tensorflow
|
refs/heads/master
|
tensorflow/contrib/eager/python/examples/linear_regression/linear_regression_test.py
|
7
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for linear regression example under TensorFlow eager execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import shutil
import tempfile
import time
import tensorflow as tf
import tensorflow.contrib.eager as tfe
from tensorflow.contrib.eager.python.examples.linear_regression import linear_regression
def device():
return "/device:GPU:0" if tfe.num_gpus() > 0 else "/device:CPU:0"
class LinearRegressionTest(tf.test.TestCase):
def setUp(self):
super(LinearRegressionTest, self).setUp()
self._tmp_logdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self._tmp_logdir)
super(LinearRegressionTest, self).tearDown()
def testSyntheticDataset(self):
true_w = tf.random_uniform([3, 1])
true_b = [1.0]
batch_size = 10
num_batches = 2
noise_level = 0.
dataset = linear_regression.synthetic_dataset(true_w, true_b, noise_level,
batch_size, num_batches)
it = tfe.Iterator(dataset)
for _ in range(2):
(xs, ys) = it.next()
self.assertEqual((batch_size, 3), xs.shape)
self.assertEqual((batch_size, 1), ys.shape)
self.assertEqual(tf.float32, xs.dtype)
self.assertEqual(tf.float32, ys.dtype)
with self.assertRaises(StopIteration):
it.next()
def testLinearRegression(self):
true_w = [[1.0], [-0.5], [2.0]]
true_b = [1.0]
model = linear_regression.LinearModel()
dataset = linear_regression.synthetic_dataset(
true_w, true_b, noise_level=0., batch_size=64, num_batches=40)
with tf.device(device()):
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
linear_regression.fit(model, dataset, optimizer, logdir=self._tmp_logdir)
self.assertAllClose(true_w, model.variables[0].numpy(), rtol=1e-2)
self.assertAllClose(true_b, model.variables[1].numpy(), rtol=1e-2)
self.assertTrue(glob.glob(os.path.join(self._tmp_logdir, "events.out.*")))
class EagerLinearRegressionBenchmark(tf.test.Benchmark):
def benchmarkEagerLinearRegression(self):
num_epochs = 10
num_batches = 200
batch_size = 64
dataset = linear_regression.synthetic_dataset(
w=tf.random_uniform([3, 1]),
b=tf.random_uniform([1]),
noise_level=0.01,
batch_size=batch_size,
num_batches=num_batches)
burn_in_dataset = dataset.take(10)
model = linear_regression.LinearModel()
with tf.device(device()):
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
# Perform burn-in.
linear_regression.fit(model, burn_in_dataset, optimizer)
start_time = time.time()
for _ in range(num_epochs):
linear_regression.fit(model, dataset, optimizer)
wall_time = time.time() - start_time
examples_per_sec = num_epochs * num_batches * batch_size / wall_time
self.report_benchmark(
name="eager_train_%s" %
("gpu" if tfe.num_gpus() > 0 else "cpu"),
iters=num_epochs * num_batches,
extras={"examples_per_sec": examples_per_sec},
wall_time=wall_time)
if __name__ == "__main__":
tfe.enable_eager_execution()
tf.test.main()
|
tommo/gii
|
refs/heads/master
|
lib/mock/asset/PrefabAsset.py
|
1
|
import os.path
import json
from gii.core import *
class PrefabAssetManager(AssetManager):
def getName(self):
return 'asset_manager.prefab'
def acceptAssetFile( self, filePath ):
if not os.path.isfile(filePath): return False
name,ext = os.path.splitext(filePath)
if not ext in ['.prefab']: return False
return True
# data = jsonHelper.tryLoadJSON( filePath )
# return data and data.get( '_assetType', None ) == 'prefab'
def importAsset( self, node, reload = False ):
node.assetType = 'prefab'
node.setObjectFile( 'def', node.getFilePath() )
return True
# def editAsset( self, node ):
# editor = app.getModule( 'scenegraph_editor' )
# if not editor:
# return alertMessage( 'Editor not load', 'prefab Editor not found!' )
# editor.openScene( node )
##----------------------------------------------------------------##
class PrefabCreator(AssetCreator):
def getAssetType( self ):
return 'prefab'
def getLabel( self ):
return 'prefab'
def createAsset( self, name, contextNode, assetType ):
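		# place the new asset next to the context node, or inside it when the
		# context node is a folder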
ext = '.prefab'
filename = name + ext
if contextNode.isType('folder'):
nodepath = contextNode.getChildPath( filename )
else:
nodepath = contextNode.getSiblingPath( filename )
fullpath = AssetLibrary.get().getAbsPath( nodepath )
data={
'_assetType' : 'prefab', #checksum
'map' : False ,
'body' : False #empty
}
if os.path.exists(fullpath):
			raise Exception('File already exists: %s' % fullpath)
fp = open(fullpath,'w')
json.dump( data, fp, sort_keys=True, indent=2 )
fp.close()
return nodepath
##----------------------------------------------------------------##
PrefabAssetManager().register()
PrefabCreator().register()
AssetLibrary.get().setAssetIcon( 'prefab', 'prefab' )
|
cyberphox/MissionPlanner
|
refs/heads/master
|
Lib/site-packages/numpy/doc/performance.py
|
100
|
"""
===========
Performance
===========
Placeholder for Improving Performance documentation.
"""
|
TheProjecter/zedt
|
refs/heads/master
|
build.py
|
2
|
#!/usr/bin/env python
# Parchment build script
#
# Copyright (c) 2008-2010 The Parchment Contributors
# Licenced under the GPL v2
# http://code.google.com/p/parchment
# Lists of files to combine together
includes = (
('.build/parchment.js', (
'src/parchment/intro.js',
'src/lib/class.js',
'src/lib/iff.js',
'src/plugins/querystring.js',
'src/plugins/remedial.js',
'src/parchment/error-handling.js',
'src/parchment/file-chrome.js',
'src/parchment/ui.js',
'src/parchment/library.js',
'src/parchment/outro.js',
)),
('.build/zmachine.js', (
'src/zmachine/zui.js',
'src/plugins/quetzal.js',
'src/zmachine/runner.js',
'src/zmachine/console.js',
)),
)
# List of files to compress (with debug code removed)
compress = (
('src/gnusto/gnusto-engine.js', 'lib/gnusto.min.js'),
('.build/parchment.js', 'lib/parchment.min.js'),
('.build/zmachine.js', 'lib/zmachine.min.js'),
)
import datetime
import os
import re
# Today's date
today = str(datetime.date.today())
# regex for debug lines
debug = re.compile(';;;.+$', re.M)
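# e.g. everything from ';;;' to the end of a line like "foo(); ;;; trace()" is stripped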
# Create .build directory if needed
if not os.path.isdir('.build'):
os.makedirs('.build')
# Combine source files together to make 'packages'
for package in includes:
print 'Building package: ' + package[0]
output = open(package[0], 'w')
for include in package[1]:
data = file(include).read()
output.write(data)
output.close()
# Compress these files, requires the YUI Compressor. Icky Java
for package in compress:
print 'Compressing file: ' + package[1]
	# Strip out debug code marked with ;;;
data = file(package[0]).read()
data = debug.sub('', data)
# Set the date
data = data.replace('BUILDDATE', today)
# Write to a temp file
output = open('.build/temp', 'w')
output.write(data)
output.close()
# Compress!
command = 'java -jar tools/yuicompressor-2.4.2.jar --type js .build/temp -o %s' % package[1]
os.system(command)
|
gauribhoite/personfinder
|
refs/heads/master
|
env/google_appengine/lib/django-1.5/django/core/cache/backends/dummy.py
|
209
|
"Dummy cache backend"
from django.core.cache.backends.base import BaseCache
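# A no-op backend: set() and delete() do nothing, get() always returns the
# default and has_key() is always False. It is enabled with the standard
# cache setting, e.g.:
#   CACHES = {'default': {'BACKEND': 'django.core.cache.backends.dummy.DummyCache'}}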
class DummyCache(BaseCache):
def __init__(self, host, *args, **kwargs):
BaseCache.__init__(self, *args, **kwargs)
def add(self, key, value, timeout=None, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
return True
def get(self, key, default=None, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
return default
def set(self, key, value, timeout=None, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
def delete(self, key, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
def get_many(self, keys, version=None):
return {}
def has_key(self, key, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
return False
def set_many(self, data, timeout=0, version=None):
pass
def delete_many(self, keys, version=None):
pass
def clear(self):
pass
# For backwards compatibility
class CacheClass(DummyCache):
pass
|
johnkeepmoving/oss-ftp
|
refs/heads/master
|
python27/unix/lib/urllib3/contrib/ntlmpool.py
|
199
|
"""
NTLM authenticating pool, contributed by erikcederstran
Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
"""
from __future__ import absolute_import
try:
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
from logging import getLogger
from ntlm import ntlm
from urllib3 import HTTPSConnectionPool
log = getLogger(__name__)
class NTLMConnectionPool(HTTPSConnectionPool):
"""
Implements an NTLM authentication version of an urllib3 connection pool
"""
scheme = 'https'
def __init__(self, user, pw, authurl, *args, **kwargs):
"""
authurl is a random URL on the server that is protected by NTLM.
user is the Windows user, probably in the DOMAIN\\username format.
pw is the password for the user.
"""
super(NTLMConnectionPool, self).__init__(*args, **kwargs)
self.authurl = authurl
self.rawuser = user
user_parts = user.split('\\', 1)
self.domain = user_parts[0].upper()
self.user = user_parts[1]
self.pw = pw
def _new_conn(self):
# Performs the NTLM handshake that secures the connection. The socket
# must be kept open while requests are performed.
self.num_connections += 1
log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s' %
(self.num_connections, self.host, self.authurl))
headers = {}
headers['Connection'] = 'Keep-Alive'
req_header = 'Authorization'
resp_header = 'www-authenticate'
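        # Three-step NTLM handshake over one kept-alive socket: send NEGOTIATE,
        # read the CHALLENGE from the www-authenticate header, then answer with
        # the AUTHENTICATE message built from that challenge.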
conn = HTTPSConnection(host=self.host, port=self.port)
# Send negotiation message
headers[req_header] = (
'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
log.debug('Request headers: %s' % headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
reshdr = dict(res.getheaders())
log.debug('Response status: %s %s' % (res.status, res.reason))
log.debug('Response headers: %s' % reshdr)
log.debug('Response data: %s [...]' % res.read(100))
        # Remove the reference to the socket, so that it cannot be closed by
        # the response object (we want to keep the socket open)
res.fp = None
# Server should respond with a challenge message
auth_header_values = reshdr[resp_header].split(', ')
auth_header_value = None
for s in auth_header_values:
if s[:5] == 'NTLM ':
auth_header_value = s[5:]
if auth_header_value is None:
raise Exception('Unexpected %s response header: %s' %
(resp_header, reshdr[resp_header]))
# Send authentication message
ServerChallenge, NegotiateFlags = \
ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
self.user,
self.domain,
self.pw,
NegotiateFlags)
headers[req_header] = 'NTLM %s' % auth_msg
log.debug('Request headers: %s' % headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
log.debug('Response status: %s %s' % (res.status, res.reason))
log.debug('Response headers: %s' % dict(res.getheaders()))
log.debug('Response data: %s [...]' % res.read()[:100])
if res.status != 200:
if res.status == 401:
raise Exception('Server rejected request: wrong '
'username or password')
raise Exception('Wrong server response: %s %s' %
(res.status, res.reason))
res.fp = None
log.debug('Connection established')
return conn
def urlopen(self, method, url, body=None, headers=None, retries=3,
redirect=True, assert_same_host=True):
if headers is None:
headers = {}
headers['Connection'] = 'Keep-Alive'
return super(NTLMConnectionPool, self).urlopen(method, url, body,
headers, retries,
redirect,
assert_same_host)
|
HiroIshikawa/21playground
|
refs/heads/master
|
microblog/flask/lib/python3.5/site-packages/flask/testsuite/deprecations.py
|
563
|
# -*- coding: utf-8 -*-
"""
flask.testsuite.deprecations
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests deprecation support.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import flask
import unittest
from flask.testsuite import FlaskTestCase, catch_warnings
class DeprecationsTestCase(FlaskTestCase):
"""not used currently"""
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(DeprecationsTestCase))
return suite
|
aeby/brokenthings
|
refs/heads/master
|
bts/pipelines.py
|
1
|
# -*- coding: utf-8 -*-
# Copyright (c) Reto Aebersold.
# See LICENSE for details.
from scrapy.exceptions import DropItem
class FilterWordsPipeline(object):
    # filter words must be lowercase; descriptions are lowercased before matching
words_to_filter = ['ohne defekt', 'nicht kaputt']
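    # e.g. an item whose desc contains 'ohne defekt' ("not defective") is
    # dropped, while anything else is passed on to the next pipeline stage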
    def process_item(self, item, spider):
        # check every filter word; only drop the item once a word matches
        for word in self.words_to_filter:
            if word in unicode(item['desc']).lower():
                raise DropItem("Is not broken")
        return item
|
acsone/Arelle
|
refs/heads/master
|
arelle/CntlrWinMain.py
|
2
|
'''
Created on Oct 3, 2010
This module is Arelle's controller in windowing interactive UI mode
@author: Mark V Systems Limited
(c) Copyright 2010 Mark V Systems Limited, All rights reserved.
'''
from arelle import PythonUtil # define 2.x or 3.x string types
import os, sys, subprocess, pickle, time, locale, re, fnmatch
if sys.platform == 'win32' and getattr(sys, 'frozen', False):
    # need the .dll directory in path to be able to access Tk and Tcl DLLs before importing Tk, etc.
os.environ['PATH'] = os.path.dirname(sys.executable) + ";" + os.environ['PATH']
from tkinter import (Tk, Tcl, TclError, Toplevel, Menu, PhotoImage, StringVar, BooleanVar, N, S, E, W, EW,
HORIZONTAL, VERTICAL, END, font as tkFont)
try:
from tkinter.ttk import Frame, Button, Label, Combobox, Separator, PanedWindow, Notebook
except ImportError: # 3.0 versions of tkinter
from ttk import Frame, Button, Label, Combobox, Separator, PanedWindow, Notebook
try:
import syslog
except ImportError:
syslog = None
import tkinter.tix
import tkinter.filedialog
import tkinter.messagebox, traceback
from arelle.FileSource import saveFile as writeToFile
from arelle.Locale import format_string
from arelle.CntlrWinTooltip import ToolTip
from arelle import XbrlConst
from arelle.PluginManager import pluginClassMethods
from arelle.UrlUtil import isHttpUrl
import logging
import threading, queue
from arelle import Cntlr
from arelle import (DialogURL, DialogLanguage,
DialogPluginManager, DialogPackageManager,
ModelDocument,
ModelManager,
PackageManager,
RenderingEvaluator,
TableStructure,
ViewWinDTS,
ViewWinProperties, ViewWinConcepts, ViewWinRelationshipSet, ViewWinFormulae,
ViewWinFactList, ViewFileFactList, ViewWinFactTable, ViewWinRenderedGrid, ViewWinXml,
ViewWinRoleTypes, ViewFileRoleTypes, ViewFileConcepts,
ViewWinTests, ViewWinTree, ViewWinVersReport, ViewWinRssFeed,
ViewFileTests,
ViewFileRenderedGrid,
ViewFileRelationshipSet,
Updater
)
from arelle.ModelFormulaObject import FormulaOptions
from arelle.FileSource import openFileSource
restartMain = True
class CntlrWinMain (Cntlr.Cntlr):
def __init__(self, parent):
super(CntlrWinMain, self).__init__(hasGui=True)
self.parent = parent
self.filename = None
self.dirty = False
overrideLang = self.config.get("labelLangOverride")
self.labelLang = overrideLang if overrideLang else self.modelManager.defaultLang
self.data = {}
        if self.isMac: # Mac Python fonts render bigger than in other apps (Terminal, TextEdit, Word) and than Arelle on Windows
_defaultFont = tkFont.nametofont("TkDefaultFont") # label, status bar, treegrid
_defaultFont.configure(size=11)
_textFont = tkFont.nametofont("TkTextFont") # entry widget and combobox entry field
_textFont.configure(size=11)
#parent.option_add("*Font", _defaultFont) # would be needed if not using defaulted font
toolbarButtonPadding = 1
else:
toolbarButtonPadding = 4
tkinter.CallWrapper = TkinterCallWrapper
imgpath = self.imagesDir + os.sep
if self.isMSW:
icon = imgpath + "arelle.ico"
parent.iconbitmap(icon, default=icon)
#image = PhotoImage(file=path + "arelle32.gif")
#label = Label(None, image=image)
#parent.iconwindow(label)
else:
self.iconImage = PhotoImage(file=imgpath + "arelle-mac-icon-4.gif") # must keep reference during life of window
parent.tk.call('wm', 'iconphoto', parent._w, self.iconImage)
#parent.iconbitmap("@" + imgpath + "arelle.xbm")
# try with gif file
#parent.iconbitmap(path + "arelle.gif")
self.menubar = Menu(self.parent)
self.parent["menu"] = self.menubar
self.fileMenu = Menu(self.menubar, tearoff=0)
self.fileMenuLength = 1
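        # each menu spec row is (label, command, accelerator text, key binding);
        # a None label inserts a separator and "PLUG-IN" rows invoke any
        # registered plug-in menu extenders for the named hook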
for label, command, shortcut_text, shortcut in (
#(_("New..."), self.fileNew, "Ctrl+N", "<Control-n>"),
(_("Open File..."), self.fileOpen, "Ctrl+O", "<Control-o>"),
(_("Open Web..."), self.webOpen, "Shift+Alt+O", "<Shift-Alt-o>"),
(_("Import File..."), self.importFileOpen, None, None),
(_("Import Web..."), self.importWebOpen, None, None),
(_("Reopen"), self.fileReopen, None, None),
("PLUG-IN", "CntlrWinMain.Menu.File.Open", None, None),
(_("Save"), self.fileSaveExistingFile, "Ctrl+S", "<Control-s>"),
(_("Save As..."), self.fileSave, None, None),
(_("Save DTS Package"), self.saveDTSpackage, None, None),
("PLUG-IN", "CntlrWinMain.Menu.File.Save", None, None),
(_("Close"), self.fileClose, "Ctrl+W", "<Control-w>"),
(None, None, None, None),
(_("Quit"), self.quit, "Ctrl+Q", "<Control-q>"),
#(_("Restart"), self.restart, None, None),
(None, None, None, None),
("",None,None,None) # position for file history
):
if label is None:
self.fileMenu.add_separator()
elif label == "PLUG-IN":
for pluginMenuExtender in pluginClassMethods(command):
pluginMenuExtender(self, self.fileMenu)
self.fileMenuLength += 1
else:
self.fileMenu.add_command(label=label, underline=0, command=command, accelerator=shortcut_text)
self.parent.bind(shortcut, command)
self.fileMenuLength += 1
self.loadFileMenuHistory()
self.menubar.add_cascade(label=_("File"), menu=self.fileMenu, underline=0)
toolsMenu = Menu(self.menubar, tearoff=0)
validateMenu = Menu(self.menubar, tearoff=0)
toolsMenu.add_cascade(label=_("Validation"), menu=validateMenu, underline=0)
validateMenu.add_command(label=_("Validate"), underline=0, command=self.validate)
self.modelManager.validateDisclosureSystem = self.config.setdefault("validateDisclosureSystem",False)
self.validateDisclosureSystem = BooleanVar(value=self.modelManager.validateDisclosureSystem)
self.validateDisclosureSystem.trace("w", self.setValidateDisclosureSystem)
validateMenu.add_checkbutton(label=_("Disclosure system checks"), underline=0, variable=self.validateDisclosureSystem, onvalue=True, offvalue=False)
validateMenu.add_command(label=_("Select disclosure system..."), underline=0, command=self.selectDisclosureSystem)
self.modelManager.validateCalcLB = self.config.setdefault("validateCalcLB",False)
self.validateCalcLB = BooleanVar(value=self.modelManager.validateCalcLB)
self.validateCalcLB.trace("w", self.setValidateCalcLB)
validateMenu.add_checkbutton(label=_("Calc Linkbase checks"), underline=0, variable=self.validateCalcLB, onvalue=True, offvalue=False)
self.modelManager.validateInferDecimals = self.config.setdefault("validateInferDecimals",True)
self.validateInferDecimals = BooleanVar(value=self.modelManager.validateInferDecimals)
self.validateInferDecimals.trace("w", self.setValidateInferDecimals)
validateMenu.add_checkbutton(label=_("Infer Decimals in calculations"), underline=0, variable=self.validateInferDecimals, onvalue=True, offvalue=False)
self.modelManager.validateDedupCalcs = self.config.setdefault("validateDedupCalcs",False)
self.validateDedupCalcs = BooleanVar(value=self.modelManager.validateDedupCalcs)
self.validateDedupCalcs.trace("w", self.setValidateDedupCalcs)
validateMenu.add_checkbutton(label=_("De-duplicate calculations"), underline=0, variable=self.validateDedupCalcs, onvalue=True, offvalue=False)
self.modelManager.validateUtr = self.config.setdefault("validateUtr",True)
self.validateUtr = BooleanVar(value=self.modelManager.validateUtr)
self.validateUtr.trace("w", self.setValidateUtr)
validateMenu.add_checkbutton(label=_("Unit Type Registry validation"), underline=0, variable=self.validateUtr, onvalue=True, offvalue=False)
for pluginMenuExtender in pluginClassMethods("CntlrWinMain.Menu.Validation"):
pluginMenuExtender(self, validateMenu)
formulaMenu = Menu(self.menubar, tearoff=0)
formulaMenu.add_command(label=_("Parameters..."), underline=0, command=self.formulaParametersDialog)
toolsMenu.add_cascade(label=_("Formula"), menu=formulaMenu, underline=0)
self.modelManager.formulaOptions = FormulaOptions(self.config.get("formulaParameters"))
toolsMenu.add_command(label=_("Compare DTSes..."), underline=0, command=self.compareDTSes)
cacheMenu = Menu(self.menubar, tearoff=0)
rssWatchMenu = Menu(self.menubar, tearoff=0)
rssWatchMenu.add_command(label=_("Options..."), underline=0, command=self.rssWatchOptionsDialog)
rssWatchMenu.add_command(label=_("Start"), underline=0, command=lambda: self.rssWatchControl(start=True))
rssWatchMenu.add_command(label=_("Stop"), underline=0, command=lambda: self.rssWatchControl(stop=True))
toolsMenu.add_cascade(label=_("RSS Watch"), menu=rssWatchMenu, underline=0)
self.modelManager.rssWatchOptions = self.config.setdefault("rssWatchOptions", {})
toolsMenu.add_cascade(label=_("Internet"), menu=cacheMenu, underline=0)
self.webCache.workOffline = self.config.setdefault("workOffline",False)
self.workOffline = BooleanVar(value=self.webCache.workOffline)
self.workOffline.trace("w", self.setWorkOffline)
cacheMenu.add_checkbutton(label=_("Work offline"), underline=0, variable=self.workOffline, onvalue=True, offvalue=False)
self.webCache.noCertificateCheck = self.config.setdefault("noCertificateCheck",False) # resets proxy handler stack if true
self.noCertificateCheck = BooleanVar(value=self.webCache.noCertificateCheck)
self.noCertificateCheck.trace("w", self.setNoCertificateCheck)
cacheMenu.add_checkbutton(label=_("No certificate check"), underline=0, variable=self.noCertificateCheck, onvalue=True, offvalue=False)
'''
self.webCache.recheck = self.config.setdefault("webRecheck",False)
self.webRecheck = BooleanVar(value=self.webCache.webRecheck)
self.webRecheck.trace("w", self.setWebRecheck)
cacheMenu.add_checkbutton(label=_("Recheck file dates weekly"), underline=0, variable=self.workOffline, onvalue=True, offvalue=False)
self.webCache.notify = self.config.setdefault("",False)
self.downloadNotify = BooleanVar(value=self.webCache.retrievalNotify)
self.downloadNotify.trace("w", self.setRetrievalNotify)
cacheMenu.add_checkbutton(label=_("Notify file downloads"), underline=0, variable=self.workOffline, onvalue=True, offvalue=False)
'''
cacheMenu.add_command(label=_("Clear cache"), underline=0, command=self.confirmClearWebCache)
cacheMenu.add_command(label=_("Manage cache"), underline=0, command=self.manageWebCache)
cacheMenu.add_command(label=_("Proxy Server"), underline=0, command=self.setupProxy)
logmsgMenu = Menu(self.menubar, tearoff=0)
toolsMenu.add_cascade(label=_("Messages log"), menu=logmsgMenu, underline=0)
logmsgMenu.add_command(label=_("Clear"), underline=0, command=self.logClear)
logmsgMenu.add_command(label=_("Save to file"), underline=0, command=self.logSaveToFile)
self.modelManager.collectProfileStats = self.config.setdefault("collectProfileStats",False)
self.collectProfileStats = BooleanVar(value=self.modelManager.collectProfileStats)
self.collectProfileStats.trace("w", self.setCollectProfileStats)
logmsgMenu.add_checkbutton(label=_("Collect profile stats"), underline=0, variable=self.collectProfileStats, onvalue=True, offvalue=False)
logmsgMenu.add_command(label=_("Log profile stats"), underline=0, command=self.showProfileStats)
logmsgMenu.add_command(label=_("Clear profile stats"), underline=0, command=self.clearProfileStats)
self.showDebugMessages = BooleanVar(value=self.config.setdefault("showDebugMessages",False))
self.showDebugMessages.trace("w", self.setShowDebugMessages)
logmsgMenu.add_checkbutton(label=_("Show debug messages"), underline=0, variable=self.showDebugMessages, onvalue=True, offvalue=False)
toolsMenu.add_command(label=_("Language..."), underline=0, command=lambda: DialogLanguage.askLanguage(self))
for pluginMenuExtender in pluginClassMethods("CntlrWinMain.Menu.Tools"):
pluginMenuExtender(self, toolsMenu)
self.menubar.add_cascade(label=_("Tools"), menu=toolsMenu, underline=0)
# view menu only if any plug-in additions provided
if any (pluginClassMethods("CntlrWinMain.Menu.View")):
viewMenu = Menu(self.menubar, tearoff=0)
for pluginMenuExtender in pluginClassMethods("CntlrWinMain.Menu.View"):
pluginMenuExtender(self, viewMenu)
self.menubar.add_cascade(label=_("View"), menu=viewMenu, underline=0)
helpMenu = Menu(self.menubar, tearoff=0)
for label, command, shortcut_text, shortcut in (
(_("Check for updates"), lambda: Updater.checkForUpdates(self), None, None),
(_("Manage plug-ins"), lambda: DialogPluginManager.dialogPluginManager(self), None, None),
(_("Manage packages"), lambda: DialogPackageManager.dialogPackageManager(self), None, None),
("PLUG-IN", "CntlrWinMain.Menu.Help.Upper", None, None),
(None, None, None, None),
(_("About..."), self.helpAbout, None, None),
("PLUG-IN", "CntlrWinMain.Menu.Help.Lower", None, None),
):
if label is None:
helpMenu.add_separator()
elif label == "PLUG-IN":
for pluginMenuExtender in pluginClassMethods(command):
pluginMenuExtender(self, helpMenu)
else:
helpMenu.add_command(label=label, underline=0, command=command, accelerator=shortcut_text)
self.parent.bind(shortcut, command)
for pluginMenuExtender in pluginClassMethods("CntlrWinMain.Menu.Help"):
pluginMenuExtender(self, toolsMenu)
self.menubar.add_cascade(label=_("Help"), menu=helpMenu, underline=0)
windowFrame = Frame(self.parent)
self.statusbar = Label(windowFrame, text=_("Ready..."), anchor=W)
self.statusbarTimerId = self.statusbar.after(5000, self.uiClearStatusTimerEvent)
self.statusbar.grid(row=2, column=0, columnspan=2, sticky=EW)
#self.balloon = tkinter.tix.Balloon(windowFrame, statusbar=self.statusbar)
self.toolbar_images = []
toolbar = Frame(windowFrame)
menubarColumn = 0
self.validateTooltipText = StringVar()
for image, command, toolTip, statusMsg in (
#("images/toolbarNewFile.gif", self.fileNew),
("toolbarOpenFile.gif", self.fileOpen, _("Open local file"), _("Open by choosing a local XBRL file, testcase, or archive file")),
("toolbarOpenWeb.gif", self.webOpen, _("Open web file"), _("Enter an http:// URL of an XBRL file or testcase")),
("toolbarReopen.gif", self.fileReopen, _("Reopen"), _("Reopen last opened XBRL file or testcase(s)")),
("toolbarSaveFile.gif", self.fileSaveExistingFile, _("Save file"), _("Saves currently selected local XBRL file")),
("toolbarClose.gif", self.fileClose, _("Close"), _("Closes currently selected instance/DTS or testcase(s)")),
(None,None,None,None),
("toolbarFindMenu.gif", self.find, _("Find"), _("Find dialog for scope and method of searching")),
(None,None,None,None),
("toolbarValidate.gif", self.validate, self.validateTooltipText, _("Validate currently selected DTS or testcase(s)")),
("toolbarCompare.gif", self.compareDTSes, _("Compare DTSes"), _("compare two DTSes")),
(None,None,None,None),
("toolbarLogClear.gif", self.logClear, _("Messages Log | Clear"), _("Clears the messages log")),
#(Combobox(toolbar, textvariable=self.findVar, values=self.findValues,
# ), self.logClear, _("Find options"), _("Select of find options")),
):
if command is None:
tbControl = Separator(toolbar, orient=VERTICAL)
tbControl.grid(row=0, column=menubarColumn, padx=6)
elif isinstance(image, Combobox):
tbControl = image
tbControl.grid(row=0, column=menubarColumn)
else:
image = os.path.join(self.imagesDir, image)
try:
image = PhotoImage(file=image)
self.toolbar_images.append(image)
tbControl = Button(toolbar, image=image, command=command, style="Toolbutton", padding=toolbarButtonPadding)
tbControl.grid(row=0, column=menubarColumn)
except TclError as err:
print(err)
if isinstance(toolTip,StringVar):
ToolTip(tbControl, textvariable=toolTip, wraplength=240)
else:
ToolTip(tbControl, text=toolTip)
menubarColumn += 1
for toolbarExtender in pluginClassMethods("CntlrWinMain.Toolbar"):
toolbarExtender(self, toolbar)
toolbar.grid(row=0, column=0, sticky=(N, W))
paneWinTopBtm = PanedWindow(windowFrame, orient=VERTICAL)
paneWinTopBtm.grid(row=1, column=0, sticky=(N, S, E, W))
paneWinLeftRt = tkinter.PanedWindow(paneWinTopBtm, orient=HORIZONTAL)
paneWinLeftRt.grid(row=0, column=0, sticky=(N, S, E, W))
paneWinLeftRt.bind("<<NotebookTabChanged>>", self.onTabChanged)
paneWinTopBtm.add(paneWinLeftRt)
self.tabWinTopLeft = Notebook(paneWinLeftRt, width=250, height=300)
self.tabWinTopLeft.grid(row=0, column=0, sticky=(N, S, E, W))
paneWinLeftRt.add(self.tabWinTopLeft)
self.tabWinTopRt = Notebook(paneWinLeftRt)
self.tabWinTopRt.grid(row=0, column=0, sticky=(N, S, E, W))
self.tabWinTopRt.bind("<<NotebookTabChanged>>", self.onTabChanged)
paneWinLeftRt.add(self.tabWinTopRt)
self.tabWinBtm = Notebook(paneWinTopBtm)
self.tabWinBtm.grid(row=0, column=0, sticky=(N, S, E, W))
self.tabWinBtm.bind("<<NotebookTabChanged>>", self.onTabChanged)
paneWinTopBtm.add(self.tabWinBtm)
from arelle import ViewWinList
self.logView = ViewWinList.ViewList(None, self.tabWinBtm, _("messages"), True)
self.startLogging(logHandler=WinMainLogHandler(self)) # start logger
logViewMenu = self.logView.contextMenu(contextMenuClick=self.contextMenuClick)
logViewMenu.add_command(label=_("Clear"), underline=0, command=self.logClear)
logViewMenu.add_command(label=_("Save to file"), underline=0, command=self.logSaveToFile)
if self.hasClipboard:
logViewMenu.add_command(label=_("Copy to clipboard"), underline=0, command=lambda: self.logView.copyToClipboard(cntlr=self))
windowFrame.grid(row=0, column=0, sticky=(N,S,E,W))
windowFrame.columnconfigure(0, weight=999)
windowFrame.columnconfigure(1, weight=1)
windowFrame.rowconfigure(0, weight=1)
windowFrame.rowconfigure(1, weight=999)
windowFrame.rowconfigure(2, weight=1)
paneWinTopBtm.columnconfigure(0, weight=1)
paneWinTopBtm.rowconfigure(0, weight=1)
paneWinLeftRt.columnconfigure(0, weight=1)
paneWinLeftRt.rowconfigure(0, weight=1)
self.tabWinTopLeft.columnconfigure(0, weight=1)
self.tabWinTopLeft.rowconfigure(0, weight=1)
self.tabWinTopRt.columnconfigure(0, weight=1)
self.tabWinTopRt.rowconfigure(0, weight=1)
self.tabWinBtm.columnconfigure(0, weight=1)
self.tabWinBtm.rowconfigure(0, weight=1)
window = self.parent.winfo_toplevel()
window.columnconfigure(0, weight=1)
window.rowconfigure(0, weight=1)
priorState = self.config.get('windowState')
screenW = self.parent.winfo_screenwidth() - 16 # allow for window edge
screenH = self.parent.winfo_screenheight() - 64 # allow for caption and menus
if priorState == "zoomed":
self.parent.state("zoomed")
w = screenW
h = screenH
else:
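            # parse the saved "WxH+X+Y" geometry and clamp it to the visible screen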
            priorGeometry = re.match(r"(\d+)x(\d+)[+]?([-]?\d+)[+]?([-]?\d+)", self.config.get('windowGeometry'))
if priorGeometry and priorGeometry.lastindex >= 4:
try:
w = int(priorGeometry.group(1))
h = int(priorGeometry.group(2))
x = int(priorGeometry.group(3))
y = int(priorGeometry.group(4))
if x + w > screenW:
if w < screenW:
x = screenW - w
else:
x = 0
w = screenW
elif x < 0:
x = 0
if w > screenW:
w = screenW
if y + h > screenH:
                        if h < screenH:
y = screenH - h
else:
y = 0
h = screenH
elif y < 0:
y = 0
if h > screenH:
h = screenH
self.parent.geometry("{0}x{1}+{2}+{3}".format(w,h,x,y))
except:
pass
# set top/btm divider
topLeftW, topLeftH = self.config.get('tabWinTopLeftSize',(250,300))
if 10 < topLeftW < w - 60:
self.tabWinTopLeft.config(width=topLeftW)
if 10 < topLeftH < h - 60:
self.tabWinTopLeft.config(height=topLeftH)
self.parent.title(_("arelle - Unnamed"))
self.logFile = None
self.uiThreadQueue = queue.Queue() # background processes communicate with ui thread
self.uiThreadChecker(self.statusbar) # start background queue
self.modelManager.loadCustomTransforms() # load if custom transforms not loaded
if not self.modelManager.disclosureSystem.select(self.config.setdefault("disclosureSystem", None)):
self.validateDisclosureSystem.set(False)
self.modelManager.validateDisclosureSystem = False
# load argv overrides for modelManager options
lastArg = None
for arg in sys.argv:
if not arg: continue
if lastArg == "--skipLoading": # skip loading matching files (list of unix patterns)
self.modelManager.skipLoading = re.compile('|'.join(fnmatch.translate(f) for f in arg.split('|')))
elif arg == "--skipDTS": # skip DTS loading, discovery, etc
self.modelManager.skipDTS = True
lastArg = arg
self.setValidateTooltipText()
def onTabChanged(self, event, *args):
try:
widgetIndex = event.widget.index("current")
tabId = event.widget.tabs()[widgetIndex]
for widget in event.widget.winfo_children():
if str(widget) == tabId:
self.currentView = widget.view
break
except (AttributeError, TypeError, TclError):
pass
def loadFileMenuHistory(self):
self.fileMenu.delete(self.fileMenuLength, self.fileMenuLength + 2)
fileHistory = self.config.setdefault("fileHistory", [])
self.recentFilesMenu = Menu(self.menubar, tearoff=0)
for i in range( min( len(fileHistory), 10 ) ):
self.recentFilesMenu.add_command(
label=fileHistory[i],
command=lambda j=i: self.fileOpenFile(self.config["fileHistory"][j]))
self.fileMenu.add_cascade(label=_("Recent files"), menu=self.recentFilesMenu, underline=0)
importHistory = self.config.setdefault("importHistory", [])
self.recentAttachMenu = Menu(self.menubar, tearoff=0)
for i in range( min( len(importHistory), 10 ) ):
self.recentAttachMenu.add_command(
label=importHistory[i],
command=lambda j=i: self.fileOpenFile(self.config["importHistory"][j],importToDTS=True))
self.fileMenu.add_cascade(label=_("Recent imports"), menu=self.recentAttachMenu, underline=0)
self.packagesMenu = Menu(self.menubar, tearoff=0)
hasPackages = False
for i, packageInfo in enumerate(sorted(PackageManager.packagesConfig.get("packages", []),
key=lambda packageInfo: (packageInfo.get("name",""),packageInfo.get("version",""))),
start=1):
name = packageInfo.get("name", "package{}".format(i))
version = packageInfo.get("version")
if version:
name = "{} ({})".format(name, version)
URL = packageInfo.get("URL")
if name and URL and packageInfo.get("status") == "enabled":
self.packagesMenu.add_command(
label=name,
command=lambda url=URL: self.fileOpenFile(url))
hasPackages = True
if hasPackages:
self.fileMenu.add_cascade(label=_("Packages"), menu=self.packagesMenu, underline=0)
def onPackageEnablementChanged(self):
self.loadFileMenuHistory()
def fileNew(self, *ignore):
if not self.okayToContinue():
return
self.logClear()
self.dirty = False
self.filename = None
self.data = {}
        self.parent.title(_("arelle - Unnamed"))
        self.modelManager.load(None)
def getViewAndModelXbrl(self):
view = getattr(self, "currentView", None)
if view:
modelXbrl = None
try:
modelXbrl = view.modelXbrl
return (view, modelXbrl)
except AttributeError:
return (view, None)
return (None, None)
def okayToContinue(self):
view, modelXbrl = self.getViewAndModelXbrl()
documentIsModified = False
if view is not None:
try:
# What follows only exists in ViewWinRenderedGrid
view.updateInstanceFromFactPrototypes()
except AttributeError:
pass
if modelXbrl is not None:
documentIsModified = modelXbrl.isModified()
if not self.dirty and (not documentIsModified):
return True
reply = tkinter.messagebox.askokcancel(
_("arelle - Unsaved Changes"),
_("Are you sure to close the current instance without saving?\n (OK will discard changes.)"),
parent=self.parent)
if reply is None:
return False
else:
return reply
def fileSave(self, event=None, view=None, fileType=None, filenameFromInstance=False, *ignore):
if view is None:
view = getattr(self, "currentView", None)
if view is not None:
filename = None
modelXbrl = None
try:
modelXbrl = view.modelXbrl
except AttributeError:
pass
if filenameFromInstance:
try:
modelXbrl = view.modelXbrl
filename = modelXbrl.modelDocument.filepath
if filename.endswith('.xsd'): # DTS entry point, no instance saved yet!
filename = None
except AttributeError:
pass
if isinstance(view, ViewWinRenderedGrid.ViewRenderedGrid):
initialdir = os.path.dirname(modelXbrl.modelDocument.uri)
if fileType in ("html", "xml", None):
if fileType == "html" and filename is None:
filename = self.uiFileDialog("save",
title=_("arelle - Save HTML-rendered Table"),
initialdir=initialdir,
filetypes=[(_("HTML file .html"), "*.html"), (_("HTML file .htm"), "*.htm")],
defaultextension=".html")
elif fileType == "xml" and filename is None:
filename = self.uiFileDialog("save",
title=_("arelle - Save Table Layout Model"),
initialdir=initialdir,
filetypes=[(_("Layout model file .xml"), "*.xml")],
defaultextension=".xml")
else: # ask file type
if filename is None:
filename = self.uiFileDialog("save",
title=_("arelle - Save XBRL Instance or HTML-rendered Table"),
initialdir=initialdir,
filetypes=[(_("XBRL instance .xbrl"), "*.xbrl"), (_("XBRL instance .xml"), "*.xml"), (_("HTML table .html"), "*.html"), (_("HTML table .htm"), "*.htm")],
defaultextension=".html")
if filename and (filename.endswith(".xbrl") or filename.endswith(".xml")):
view.saveInstance(filename)
return True
if not filename:
return False
try:
ViewFileRenderedGrid.viewRenderedGrid(modelXbrl, filename, lang=self.labelLang, sourceView=view)
except (IOError, EnvironmentError) as err:
tkinter.messagebox.showwarning(_("arelle - Error"),
_("Failed to save {0}:\n{1}").format(
filename, err),
parent=self.parent)
return True
elif fileType == "xbrl":
return self.uiFileDialog("save",
title=_("arelle - Save Instance"),
initialdir=initialdir,
filetypes=[(_("XBRL instance .xbrl"), "*.xbrl"), (_("XBRL instance .xml"), "*.xml")],
defaultextension=".xbrl")
elif isinstance(view, ViewWinTests.ViewTests) and modelXbrl.modelDocument.type in (ModelDocument.Type.TESTCASESINDEX, ModelDocument.Type.TESTCASE):
filename = self.uiFileDialog("save",
title=_("arelle - Save Test Results"),
initialdir=os.path.dirname(self.modelManager.modelXbrl.modelDocument.uri),
filetypes=[(_("XLSX file"), "*.xlsx"),(_("CSV file"), "*.csv"),(_("HTML file"), "*.html"),(_("XML file"), "*.xml"),(_("JSON file"), "*.json")],
defaultextension=".xlsx")
if not filename:
return False
try:
ViewFileTests.viewTests(self.modelManager.modelXbrl, filename)
except (IOError, EnvironmentError) as err:
tkinter.messagebox.showwarning(_("arelle - Error"),
_("Failed to save {0}:\n{1}").format(
filename, err),
parent=self.parent)
return True
elif isinstance(view, ViewWinTree.ViewTree):
filename = self.uiFileDialog("save",
title=_("arelle - Save {0}").format(view.tabTitle),
initialdir=os.path.dirname(self.modelManager.modelXbrl.modelDocument.uri),
filetypes=[(_("XLSX file"), "*.xlsx"),(_("CSV file"), "*.csv"),(_("HTML file"), "*.html"),(_("XML file"), "*.xml"),(_("JSON file"), "*.json")],
defaultextension=".xlsx")
if not filename:
return False
try:
if isinstance(view, ViewWinRoleTypes.ViewRoleTypes):
ViewFileRoleTypes.viewRoleTypes(modelXbrl, filename, view.tabTitle, view.isArcrole, lang=view.lang)
elif isinstance(view, ViewWinConcepts.ViewConcepts):
ViewFileConcepts.viewConcepts(modelXbrl, filename, labelrole=view.labelrole, lang=view.lang)
elif isinstance(view, ViewWinFactList.ViewFactList):
ViewFileFactList.viewFacts(modelXbrl, filename, labelrole=view.labelrole, lang=view.lang)
else:
ViewFileRelationshipSet.viewRelationshipSet(modelXbrl, filename, view.tabTitle, view.arcrole, labelrole=view.labelrole, lang=view.lang)
except (IOError, EnvironmentError) as err:
tkinter.messagebox.showwarning(_("arelle - Error"),
_("Failed to save {0}:\n{1}").format(
filename, err),
parent=self.parent)
return True
elif isinstance(view, ViewWinXml.ViewXml) and self.modelManager.modelXbrl.formulaOutputInstance:
filename = self.uiFileDialog("save",
title=_("arelle - Save Formula Result Instance Document"),
initialdir=os.path.dirname(self.modelManager.modelXbrl.modelDocument.uri),
filetypes=[(_("XBRL output instance .xml"), "*.xml"), (_("XBRL output instance .xbrl"), "*.xbrl")],
defaultextension=".xml")
if not filename:
return False
try:
from arelle import XmlUtil
with open(filename, "w") as fh:
XmlUtil.writexml(fh, self.modelManager.modelXbrl.formulaOutputInstance.modelDocument.xmlDocument, encoding="utf-8")
self.addToLog(_("[info] Saved formula output instance to {0}").format(filename) )
except (IOError, EnvironmentError) as err:
tkinter.messagebox.showwarning(_("arelle - Error"),
_("Failed to save {0}:\n{1}").format(
filename, err),
parent=self.parent)
return True
tkinter.messagebox.showwarning(_("arelle - Save what?"),
_("Nothing has been selected that can be saved. \nPlease select a view pane that can be saved."),
parent=self.parent)
'''
if self.filename is None:
filename = self.uiFileDialog("save",
title=_("arelle - Save File"),
initialdir=".",
filetypes=[(_("Xbrl file"), "*.x*")],
defaultextension=".xbrl")
if not filename:
return False
self.filename = filename
if not self.filename.endswith(".xbrl"):
self.filename += ".xbrl"
try:
with open(self.filename, "wb") as fh:
pickle.dump(self.data, fh, pickle.HIGHEST_PROTOCOL)
self.dirty = False
self.uiShowStatus(_("Saved {0} items to {1}").format(
len(self.data),
self.filename), clearAfter=5000)
self.parent.title(_("arelle - {0}").format(
os.path.basename(self.filename)))
except (EnvironmentError, pickle.PickleError) as err:
tkinter.messagebox.showwarning(_("arelle - Error"),
_("Failed to save {0}:\n{1}").format(
self.filename, err),
parent=self.parent)
return True;
'''
def fileSaveExistingFile(self, event=None, view=None, fileType=None, *ignore):
return self.fileSave(view=view, fileType=fileType, filenameFromInstance=True)
def saveDTSpackage(self):
self.modelManager.saveDTSpackage(allDTSes=True)
def fileOpen(self, *ignore):
if not self.okayToContinue():
return
filename = self.uiFileDialog("open",
title=_("arelle - Open file"),
initialdir=self.config.setdefault("fileOpenDir","."),
filetypes=[(_("XBRL files"), "*.*")],
defaultextension=".xbrl")
if self.isMSW and "/Microsoft/Windows/Temporary Internet Files/Content.IE5/" in filename:
tkinter.messagebox.showerror(_("Loading web-accessed files"),
_('Please open web-accessed files with the second toolbar button, "Open web file", or the File menu, second entry, "Open web..."'), parent=self.parent)
return
if os.sep == "\\":
filename = filename.replace("/", "\\")
self.fileOpenFile(filename)
def importFileOpen(self, *ignore):
if not self.modelManager.modelXbrl or self.modelManager.modelXbrl.modelDocument.type not in (
ModelDocument.Type.SCHEMA, ModelDocument.Type.LINKBASE, ModelDocument.Type.INSTANCE, ModelDocument.Type.INLINEXBRL):
tkinter.messagebox.showwarning(_("arelle - Warning"),
_("Import requires an opened DTS"), parent=self.parent)
return False
filename = self.uiFileDialog("open",
title=_("arelle - Import file into opened DTS"),
initialdir=self.config.setdefault("importOpenDir","."),
filetypes=[(_("XBRL files"), "*.*")],
defaultextension=".xml")
if self.isMSW and "/Microsoft/Windows/Temporary Internet Files/Content.IE5/" in filename:
tkinter.messagebox.showerror(_("Loading web-accessed files"),
_('Please import web-accessed files with the File menu, fourth entry, "Import web..."'), parent=self.parent)
return
if os.sep == "\\":
filename = filename.replace("/", "\\")
self.fileOpenFile(filename, importToDTS=True)
def updateFileHistory(self, url, importToDTS):
key = "importHistory" if importToDTS else "fileHistory"
fileHistory = self.config.setdefault(key, [])
while fileHistory.count(url) > 0:
fileHistory.remove(url)
if len(fileHistory) > 10:
fileHistory[10:] = []
fileHistory.insert(0, url)
self.config[key] = fileHistory
self.loadFileMenuHistory()
self.saveConfig()
def fileOpenFile(self, filename, importToDTS=False, selectTopView=False):
if filename:
for xbrlLoadedMethod in pluginClassMethods("CntlrWinMain.Xbrl.Open"):
filename = xbrlLoadedMethod(self, filename) # runs in GUI thread, allows mapping filename, mult return filename
filesource = None
# check for archive files
filesource = openFileSource(filename, self,
checkIfXmlIsEis=self.modelManager.disclosureSystem and
self.modelManager.disclosureSystem.validationType == "EFM")
if filesource.isArchive:
if not filesource.selection: # or filesource.isRss:
from arelle import DialogOpenArchive
filename = DialogOpenArchive.askArchiveFile(self, filesource)
if filename and filesource.basefile and not isHttpUrl(filesource.basefile):
self.config["fileOpenDir"] = os.path.dirname(filesource.baseurl)
filesource.loadTaxonomyPackageMappings() # if a package, load mappings if not loaded yet
if filename:
if not isinstance(filename, (dict, list)): # json objects
if importToDTS:
if not isHttpUrl(filename):
self.config["importOpenDir"] = os.path.dirname(filename)
else:
if not isHttpUrl(filename):
self.config["fileOpenDir"] = os.path.dirname(filesource.baseurl if filesource.isArchive else filename)
self.updateFileHistory(filename, importToDTS)
elif len(filename) == 1:
self.updateFileHistory(filename[0], importToDTS)
threading.Thread(target=self.backgroundLoadXbrl, args=(filesource, importToDTS, selectTopView), daemon=True).start()
def webOpen(self, *ignore):
if not self.okayToContinue():
return
url = DialogURL.askURL(self.parent, buttonSEC=True, buttonRSS=True)
if url:
self.updateFileHistory(url, False)
for xbrlLoadedMethod in pluginClassMethods("CntlrWinMain.Xbrl.Open"):
url = xbrlLoadedMethod(self, url) # runs in GUI thread, allows mapping url, mult return url
filesource = openFileSource(url,self)
if filesource.isArchive and not filesource.selection: # or filesource.isRss:
from arelle import DialogOpenArchive
url = DialogOpenArchive.askArchiveFile(self, filesource)
self.updateFileHistory(url, False)
threading.Thread(target=self.backgroundLoadXbrl, args=(filesource, False, False), daemon=True).start()
def importWebOpen(self, *ignore):
if not self.modelManager.modelXbrl or self.modelManager.modelXbrl.modelDocument.type not in (
ModelDocument.Type.SCHEMA, ModelDocument.Type.LINKBASE, ModelDocument.Type.INSTANCE, ModelDocument.Type.INLINEXBRL):
tkinter.messagebox.showwarning(_("arelle - Warning"),
_("Import requires an opened DTS"), parent=self.parent)
return False
url = DialogURL.askURL(self.parent, buttonSEC=False, buttonRSS=False)
if url:
self.fileOpenFile(url, importToDTS=True)
def backgroundLoadXbrl(self, filesource, importToDTS, selectTopView):
startedAt = time.time()
try:
if importToDTS:
action = _("imported")
profileStat = "import"
modelXbrl = self.modelManager.modelXbrl
if modelXbrl:
ModelDocument.load(modelXbrl, filesource.url, isSupplemental=importToDTS)
modelXbrl.relationshipSets.clear() # relationships have to be re-cached
else:
action = _("loaded")
profileStat = "load"
modelXbrl = self.modelManager.load(filesource, _("views loading"),
checkModifiedTime=isHttpUrl(filesource.url)) # check modified time if GUI-loading from web
except ModelDocument.LoadingException:
self.showStatus(_("Loading terminated, unrecoverable error"), 15000)
return
except Exception as err:
msg = _("Exception loading {0}: {1}, at {2}").format(
filesource.url,
err,
traceback.format_tb(sys.exc_info()[2]))
# not sure if message box can be shown from background thread
# tkinter.messagebox.showwarning(_("Exception loading"),msg, parent=self.parent)
self.addToLog(msg)
self.showStatus(_("Loading terminated, unrecoverable error"), 15000)
return
if modelXbrl and modelXbrl.modelDocument:
statTime = time.time() - startedAt
modelXbrl.profileStat(profileStat, statTime)
self.addToLog(format_string(self.modelManager.locale,
_("%s in %.2f secs"),
(action, statTime)))
if modelXbrl.hasTableRendering:
self.showStatus(_("Initializing table rendering"))
RenderingEvaluator.init(modelXbrl)
self.showStatus(_("{0}, preparing views").format(action))
self.waitForUiThreadQueue() # force status update
self.uiThreadQueue.put((self.showLoadedXbrl, [modelXbrl, importToDTS, selectTopView]))
else:
self.addToLog(format_string(self.modelManager.locale,
_("not successfully %s in %.2f secs"),
(action, time.time() - startedAt)))
self.showStatus(_("Loading terminated"), 15000)
def showLoadedXbrl(self, modelXbrl, attach, selectTopView=False):
startedAt = time.time()
currentAction = "setting title"
topView = None
self.currentView = None
try:
if attach:
modelXbrl.closeViews()
self.parent.title(_("arelle - {0}").format(
os.path.basename(modelXbrl.modelDocument.uri)))
self.setValidateTooltipText()
if modelXbrl.modelDocument.type in ModelDocument.Type.TESTCASETYPES:
currentAction = "tree view of tests"
ViewWinTests.viewTests(modelXbrl, self.tabWinTopRt)
topView = modelXbrl.views[-1]
elif modelXbrl.modelDocument.type == ModelDocument.Type.VERSIONINGREPORT:
currentAction = "view of versioning report"
ViewWinVersReport.viewVersReport(modelXbrl, self.tabWinTopRt)
from arelle.ViewWinDiffs import ViewWinDiffs
ViewWinDiffs(modelXbrl, self.tabWinBtm, lang=self.labelLang)
elif modelXbrl.modelDocument.type == ModelDocument.Type.RSSFEED:
currentAction = "view of RSS feed"
ViewWinRssFeed.viewRssFeed(modelXbrl, self.tabWinTopRt)
topView = modelXbrl.views[-1]
else:
if modelXbrl.hasTableIndexing:
currentAction = "table index view"
ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopLeft, ("Tables", (XbrlConst.euGroupTable,)), lang=self.labelLang,
treeColHdr="Table Index", showLinkroles=False, showColumns=False, expandAll=True)
elif modelXbrl.modelDocument.type in (ModelDocument.Type.INSTANCE, ModelDocument.Type.INLINEXBRL, ModelDocument.Type.INLINEXBRLDOCUMENTSET):
currentAction = "table index view"
firstTableLinkroleURI, indexLinkroleURI = TableStructure.evaluateTableIndex(modelXbrl, lang=self.labelLang)
if firstTableLinkroleURI is not None:
ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopLeft, ("Tables", (XbrlConst.parentChild,)), lang=self.labelLang, linkrole=indexLinkroleURI,
treeColHdr="Table Index", showRelationships=False, showColumns=False, expandAll=False, hasTableIndex=True)
'''
elif (modelXbrl.modelDocument.type in (ModelDocument.Type.INSTANCE, ModelDocument.Type.INLINEXBRL, ModelDocument.Type.INLINEXBRLDOCUMENTSET) and
not modelXbrl.hasTableRendering):
currentAction = "facttable ELRs view"
ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopLeft, ("Tables", (XbrlConst.parentChild,)), lang=self.labelLang,
treeColHdr="Fact Table Index", showLinkroles=True, showColumns=False, showRelationships=False, expandAll=False)
'''
currentAction = "tree view of DTS"
ViewWinDTS.viewDTS(modelXbrl, self.tabWinTopLeft, altTabWin=self.tabWinTopRt)
currentAction = "view of concepts"
ViewWinConcepts.viewConcepts(modelXbrl, self.tabWinBtm, "Concepts", lang=self.labelLang, altTabWin=self.tabWinTopRt)
if modelXbrl.hasTableRendering: # show rendering grid even without any facts
ViewWinRenderedGrid.viewRenderedGrid(modelXbrl, self.tabWinTopRt, lang=self.labelLang)
if topView is None: topView = modelXbrl.views[-1]
if modelXbrl.modelDocument.type in (ModelDocument.Type.INSTANCE, ModelDocument.Type.INLINEXBRL, ModelDocument.Type.INLINEXBRLDOCUMENTSET):
currentAction = "table view of facts"
if (not modelXbrl.hasTableRendering and # table view only if not grid rendered view
modelXbrl.relationshipSet(XbrlConst.parentChild)): # requires presentation relationships to render this tab
ViewWinFactTable.viewFacts(modelXbrl, self.tabWinTopRt, linkrole=firstTableLinkroleURI, lang=self.labelLang, expandAll=firstTableLinkroleURI is not None)
if topView is None: topView = modelXbrl.views[-1]
currentAction = "tree/list of facts"
ViewWinFactList.viewFacts(modelXbrl, self.tabWinTopRt, lang=self.labelLang)
if topView is None: topView = modelXbrl.views[-1]
currentAction = "presentation linkbase view"
hasView = ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopRt, XbrlConst.parentChild, lang=self.labelLang)
if hasView and topView is None: topView = modelXbrl.views[-1]
currentAction = "calculation linkbase view"
hasView = ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopRt, XbrlConst.summationItem, lang=self.labelLang)
if hasView and topView is None: topView = modelXbrl.views[-1]
currentAction = "dimensions relationships view"
hasView = ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopRt, "XBRL-dimensions", lang=self.labelLang)
if hasView and topView is None: topView = modelXbrl.views[-1]
currentAction = "anchoring relationships view"
hasView = ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopRt, XbrlConst.widerNarrower, lang=self.labelLang, noRelationshipsMsg=False, treeColHdr="Wider-Narrower Relationships")
if hasView and topView is None: topView = modelXbrl.views[-1]
if modelXbrl.hasTableRendering:
currentAction = "rendering view"
hasView = ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopRt, "Table-rendering", lang=self.labelLang)
if hasView and topView is None: topView = modelXbrl.views[-1]
if modelXbrl.hasFormulae:
currentAction = "formulae view"
ViewWinFormulae.viewFormulae(modelXbrl, self.tabWinTopRt)
if topView is None: topView = modelXbrl.views[-1]
for name, arcroles in sorted(self.config.get("arcroleGroups", {}).items()):
if XbrlConst.arcroleGroupDetect in arcroles:
currentAction = name + " view"
hasView = ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopRt, (name, arcroles), lang=self.labelLang)
if hasView and topView is None: topView = modelXbrl.views[-1]
currentAction = "property grid"
ViewWinProperties.viewProperties(modelXbrl, self.tabWinTopLeft)
currentAction = "log view creation time"
viewTime = time.time() - startedAt
modelXbrl.profileStat("view", viewTime)
self.addToLog(format_string(self.modelManager.locale,
_("views %.2f secs"), viewTime))
if selectTopView and topView:
topView.select()
self.currentView = topView
currentAction = "plugin method CntlrWinMain.Xbrl.Loaded"
for xbrlLoadedMethod in pluginClassMethods("CntlrWinMain.Xbrl.Loaded"):
xbrlLoadedMethod(self, modelXbrl, attach) # runs in GUI thread
except Exception as err:
msg = _("Exception preparing {0}: {1}, at {2}").format(
currentAction,
err,
traceback.format_tb(sys.exc_info()[2]))
tkinter.messagebox.showwarning(_("Exception preparing view"),msg, parent=self.parent)
self.addToLog(msg)
self.showStatus(_("Ready..."), 2000)
def showFormulaOutputInstance(self, priorOutputInstance, currentOutputInstance):
currentAction = "closing prior formula output instance"
try:
if priorOutputInstance: # if has UI must close on UI thread, not background thread
priorOutputInstance.close()
currentAction = "showing resulting formula output instance"
if currentOutputInstance:
ViewWinXml.viewXml(currentOutputInstance, self.tabWinBtm, "Formula Output Instance", currentOutputInstance.modelDocument.xmlDocument)
except Exception as err:
msg = _("Exception {0}: {1}, at {2}").format(
currentAction,
err,
traceback.format_tb(sys.exc_info()[2]))
tkinter.messagebox.showwarning(_("Exception preparing view"),msg, parent=self.parent)
self.addToLog(msg)
self.showStatus(_("Ready..."), 2000)
def showProfileStats(self):
modelXbrl = self.modelManager.modelXbrl
if modelXbrl and self.modelManager.collectProfileStats:
modelXbrl.logProfileStats()
def clearProfileStats(self):
modelXbrl = self.modelManager.modelXbrl
if modelXbrl and self.modelManager.collectProfileStats:
modelXbrl.profileStats.clear()
def fileClose(self, *ignore):
if not self.okayToContinue():
return
self.modelManager.close()
self.parent.title(_("arelle - Unnamed"))
self.setValidateTooltipText()
self.currentView = None
def fileReopen(self, *ignore):
self.fileClose()
fileHistory = self.config.setdefault("fileHistory", [])
if len(fileHistory) > 0:
self.fileOpenFile(fileHistory[0])
def validate(self):
modelXbrl = self.modelManager.modelXbrl
if modelXbrl and modelXbrl.modelDocument:
if (modelXbrl.modelManager.validateDisclosureSystem and
not modelXbrl.modelManager.disclosureSystem.selection):
tkinter.messagebox.showwarning(_("arelle - Warning"),
_("Validation - disclosure system checks is requested but no disclosure system is selected, please select one by validation - select disclosure system."),
parent=self.parent)
else:
if modelXbrl.modelDocument.type in ModelDocument.Type.TESTCASETYPES:
for pluginXbrlMethod in pluginClassMethods("Testcases.Start"):
pluginXbrlMethod(self, None, modelXbrl)
threading.Thread(target=self.backgroundValidate, daemon=True).start()
def backgroundValidate(self):
startedAt = time.time()
modelXbrl = self.modelManager.modelXbrl
priorOutputInstance = modelXbrl.formulaOutputInstance
modelXbrl.formulaOutputInstance = None # prevent closing on background thread by validateFormula
self.modelManager.validate()
self.addToLog(format_string(self.modelManager.locale,
_("validated in %.2f secs"),
time.time() - startedAt))
if not modelXbrl.isClosed and (priorOutputInstance or modelXbrl.formulaOutputInstance):
self.uiThreadQueue.put((self.showFormulaOutputInstance, [priorOutputInstance, modelXbrl.formulaOutputInstance]))
self.uiThreadQueue.put((self.logSelect, []))
def compareDTSes(self):
countLoadedDTSes = len(self.modelManager.loadedModelXbrls)
if countLoadedDTSes != 2:
tkinter.messagebox.showwarning(_("arelle - Warning"),
_("Two DTSes are required for the Compare DTSes operation, {0} found").format(countLoadedDTSes),
parent=self.parent)
return False
versReportFile = self.uiFileDialog("save",
title=_("arelle - Save Versioning Report File"),
initialdir=self.config.setdefault("versioningReportDir","."),
filetypes=[(_("Versioning report file"), "*.xml")],
defaultextension=".xml")
if not versReportFile:
return False
self.config["versioningReportDir"] = os.path.dirname(versReportFile)
self.saveConfig()
threading.Thread(target=self.backgroundCompareDTSes, args=(versReportFile,), daemon=True).start()
def backgroundCompareDTSes(self, versReportFile):
startedAt = time.time()
modelVersReport = self.modelManager.compareDTSes(versReportFile)
if modelVersReport and modelVersReport.modelDocument:
self.addToLog(format_string(self.modelManager.locale,
_("compared in %.2f secs"),
time.time() - startedAt))
self.uiThreadQueue.put((self.showComparedDTSes, [modelVersReport]))
def showComparedDTSes(self, modelVersReport):
# close prior DTS displays
modelVersReport.modelDocument.fromDTS.closeViews()
modelVersReport.modelDocument.toDTS.closeViews()
self.showLoadedXbrl(modelVersReport, True)
def loadFile(self, filename):
self.filename = filename
self.listBox.delete(0, END)
self.dirty = False
try:
with open(self.filename, "rb") as fh:
self.data = pickle.load(fh)
for name in sorted(self.data, key=str.lower):
self.listBox.insert(END, name)
self.showStatus(_("Loaded {0} items from {1}").format(
self.listBox.size(),
self.filename), clearAfter=5000)
self.parent.title(_("arelle - {0}").format(
os.path.basename(self.filename)))
except (EnvironmentError, pickle.PickleError) as err:
tkinter.messagebox.showwarning(_("arelle - Error"),
_("Failed to load {0}\n{1}").format(
self.filename,
err),
parent=self.parent)
def quit(self, event=None, restartAfterQuit=False):
if self.okayToContinue():
self.modelManager.close()
logging.shutdown()
global restartMain
restartMain = restartAfterQuit
state = self.parent.state()
if state == "normal":
self.config["windowGeometry"] = self.parent.geometry()
if state in ("normal", "zoomed"):
self.config["windowState"] = state
if self.isMSW: adjustW = 4; adjustH = 6 # tweak to prevent splitter regions from growing on reloading
elif self.isMac: adjustW = 54; adjustH = 39
else: adjustW = 2; adjustH = 2 # linux (tested on ubuntu)
self.config["tabWinTopLeftSize"] = (self.tabWinTopLeft.winfo_width() - adjustW,
self.tabWinTopLeft.winfo_height() - adjustH)
super(CntlrWinMain, self).close(saveConfig=True)
self.parent.unbind_all(())
self.parent.destroy()
if self.logFile:
self.logFile.close()
self.logFile = None
def restart(self, event=None):
self.quit(event, restartAfterQuit=True)
def setWorkOffline(self, *args):
self.webCache.workOffline = self.workOffline.get()
self.config["workOffline"] = self.webCache.workOffline
self.saveConfig()
def setNoCertificateCheck(self, *args):
self.webCache.noCertificateCheck = self.noCertificateCheck.get() # resets proxy handlers
self.config["noCertificateCheck"] = self.webCache.noCertificateCheck
self.saveConfig()
def confirmClearWebCache(self):
if tkinter.messagebox.askyesno(
_("arelle - Clear Internet Cache"),
_("Are you sure you want to clear the internet cache?"),
parent=self.parent):
def backgroundClearCache():
self.showStatus(_("Clearing internet cache"))
self.webCache.clear()
self.showStatus(_("Internet cache cleared"), 5000)
threading.Thread(target=backgroundClearCache, daemon=True).start()
def manageWebCache(self):
if sys.platform.startswith("win"):
command = 'explorer'
elif sys.platform in ("darwin", "macos"):
command = 'open'
else: # linux/unix
command = 'xdg-open'
try:
    subprocess.Popen([command, self.webCache.cacheDir])
except OSError:
    pass
def setupProxy(self):
from arelle.DialogUserPassword import askProxy
proxySettings = askProxy(self.parent, self.config.get("proxySettings"))
if proxySettings:
self.webCache.resetProxies(proxySettings)
self.config["proxySettings"] = proxySettings
self.saveConfig()
def setValidateDisclosureSystem(self, *args):
self.modelManager.validateDisclosureSystem = self.validateDisclosureSystem.get()
self.config["validateDisclosureSystem"] = self.modelManager.validateDisclosureSystem
self.saveConfig()
if self.modelManager.validateDisclosureSystem:
if not self.modelManager.disclosureSystem or not self.modelManager.disclosureSystem.selection:
self.selectDisclosureSystem()
self.setValidateTooltipText()
def selectDisclosureSystem(self, *args):
from arelle import DialogOpenArchive
self.config["disclosureSystem"] = DialogOpenArchive.selectDisclosureSystem(self, self.modelManager.disclosureSystem)
self.saveConfig()
self.setValidateTooltipText()
def formulaParametersDialog(self, *args):
DialogFormulaParameters.getParameters(self)
self.setValidateTooltipText()
def rssWatchOptionsDialog(self, *args):
from arelle import DialogRssWatch
DialogRssWatch.getOptions(self)
# find or open rssWatch view
def rssWatchControl(self, start=False, stop=False, close=False):
from arelle.ModelDocument import Type
from arelle import WatchRss
if not self.modelManager.rssWatchOptions.get("feedSourceUri"):
tkinter.messagebox.showwarning(_("RSS Watch Control Error"),
_("RSS Feed is not set up, please select options and select feed"),
parent=self.parent)
return False
rssModelXbrl = None
for loadedModelXbrl in self.modelManager.loadedModelXbrls:
if (loadedModelXbrl.modelDocument.type == Type.RSSFEED and
loadedModelXbrl.modelDocument.uri == self.modelManager.rssWatchOptions.get("feedSourceUri")):
rssModelXbrl = loadedModelXbrl
break
# rssModelXbrl is None if the feed is not loaded
if start:
if not rssModelXbrl:
rssModelXbrl = self.modelManager.create(Type.RSSFEED, self.modelManager.rssWatchOptions.get("feedSourceUri"))
self.showLoadedXbrl(rssModelXbrl, False)
if not hasattr(rssModelXbrl,"watchRss"):
WatchRss.initializeWatcher(rssModelXbrl)
rssModelXbrl.watchRss.start()
elif stop:
if rssModelXbrl and rssModelXbrl.watchRss:
rssModelXbrl.watchRss.stop()
# for ui thread option updating
def rssWatchUpdateOption(self, latestPubDate=None):
self.uiThreadQueue.put((self.uiRssWatchUpdateOption, [latestPubDate]))
# ui thread rssWatchUpdateOption
def uiRssWatchUpdateOption(self, latestPubDate):
if latestPubDate:
self.modelManager.rssWatchOptions["latestPubDate"] = latestPubDate
self.config["rssWatchOptions"] = self.modelManager.rssWatchOptions
self.saveConfig()
def languagesDialog(self, *args):
override = self.lang if self.lang != self.modelManager.defaultLang else ""
import tkinter.simpledialog
newValue = tkinter.simpledialog.askstring(_("arelle - Labels language code setting"),
_("The system default language is: {0} \n\n"
"You may override with a different language for labels display. \n\n"
"Current language override code: {1} \n"
"(Leave empty to use the system default language.)").format(
self.modelManager.defaultLang, override),
parent=self.parent)
if newValue is not None:
self.config["labelLangOverride"] = newValue
if newValue:
self.lang = newValue
else:
self.lang = self.modelManager.defaultLang
if self.modelManager.modelXbrl and self.modelManager.modelXbrl.modelDocument:
self.showLoadedXbrl(self.modelManager.modelXbrl, True) # reload views
self.saveConfig()
def setValidateTooltipText(self):
if self.modelManager.modelXbrl and not self.modelManager.modelXbrl.isClosed and self.modelManager.modelXbrl.modelDocument is not None:
valType = self.modelManager.modelXbrl.modelDocument.type
if valType in (ModelDocument.Type.SCHEMA, ModelDocument.Type.LINKBASE):
valName = "DTS"
else:
valName = ModelDocument.Type.typeName[valType]
if valType == ModelDocument.Type.VERSIONINGREPORT:
v = _("Validate versioning report")
else:
if self.modelManager.validateCalcLB:
if self.modelManager.validateInferDecimals:
c = _("\nCheck calculations (infer decimals)")
else:
c = _("\nCheck calculations (infer precision)")
if self.modelManager.validateDedupCalcs:
c += _("\nDeduplicate calculations")
else:
c = ""
if self.modelManager.validateUtr:
u = _("\nCheck unit type registry")
else:
u = ""
if self.modelManager.validateDisclosureSystem:
v = _("Validate {0}\nCheck disclosure system rules\n{1}{2}{3}").format(
valName, self.modelManager.disclosureSystem.selection,c,u)
else:
v = _("Validate {0}{1}{2}").format(valName, c, u)
else:
v = _("Validate")
self.validateTooltipText.set(v)
def setValidateCalcLB(self, *args):
self.modelManager.validateCalcLB = self.validateCalcLB.get()
self.config["validateCalcLB"] = self.modelManager.validateCalcLB
self.saveConfig()
self.setValidateTooltipText()
def setValidateInferDecimals(self, *args):
self.modelManager.validateInferDecimals = self.validateInferDecimals.get()
self.config["validateInferDecimals"] = self.modelManager.validateInferDecimals
self.saveConfig()
self.setValidateTooltipText()
def setValidateDedupCalcs(self, *args):
self.modelManager.validateDedupCalcs = self.validateDedupCalcs.get()
self.config["validateDedupCalcs"] = self.modelManager.validateDedupCalcs
self.saveConfig()
self.setValidateTooltipText()
def setValidateUtr(self, *args):
self.modelManager.validateUtr = self.validateUtr.get()
self.config["validateUtr"] = self.modelManager.validateUtr
self.saveConfig()
self.setValidateTooltipText()
def setCollectProfileStats(self, *args):
self.modelManager.collectProfileStats = self.collectProfileStats.get()
self.config["collectProfileStats"] = self.modelManager.collectProfileStats
self.saveConfig()
def setShowDebugMessages(self, *args):
self.config["showDebugMessages"] = self.showDebugMessages.get()
self.saveConfig()
def find(self, *args):
from arelle.DialogFind import find
find(self)
def helpAbout(self, event=None):
from arelle import DialogAbout, Version
from lxml import etree
DialogAbout.about(self.parent,
_("About arelle"),
os.path.join(self.imagesDir, "arelle32.gif"),
_("arelle\u00ae {0} ({1}bit)\n"
"An open source XBRL platform\n"
"\u00a9 2010-{2} Mark V Systems Limited\n"
"All rights reserved\nhttp://www.arelle.org\nsupport@arelle.org\n\n"
"Licensed under the Apache License, Version 2.0 (the \"License\"); "
"you may not use this file except in compliance with the License. "
"You may obtain a copy of the License at\n\n"
"http://www.apache.org/licenses/LICENSE-2.0\n\n"
"Unless required by applicable law or agreed to in writing, software "
"distributed under the License is distributed on an \"AS IS\" BASIS, "
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. "
"See the License for the specific language governing permissions and "
"limitations under the License."
"\n\nIncludes:"
"\n Python\u00ae {4[0]}.{4[1]}.{4[2]} \u00a9 2001-2016 Python Software Foundation"
"\n Tcl/Tk {6} \u00a9 Univ. of Calif., Sun, Scriptics, ActiveState, and others"
"\n PyParsing \u00a9 2003-2013 Paul T. McGuire"
"\n lxml {5[0]}.{5[1]}.{5[2]} \u00a9 2004 Infrae, ElementTree \u00a9 1999-2004 by Fredrik Lundh"
"{3}"
"\n May include installable plug-in modules with author-specific license terms"
)
.format(Version.__version__, self.systemWordSize, Version.copyrightLatestYear,
_("\n Bottle \u00a9 2011-2013 Marcel Hellkamp"
"\n CherryPy \u00a9 2002-2013 CherryPy Team") if self.hasWebServer else "",
sys.version_info, etree.LXML_VERSION, Tcl().eval('info patchlevel')
))
# worker threads addToLog
def addToLog(self, message, messageCode="", messageArgs=None, file="", refs=[], level=logging.INFO):
if level < logging.INFO and not self.showDebugMessages.get():
return # skip DEBUG and INFO-RESULT messages
if messageCode and messageCode not in message: # prepend message code
message = "[{}] {}".format(messageCode, message)
if refs:
message += " - " + Cntlr.logRefsFileLines(refs)
elif file:
if isinstance(file, (tuple,list,set)):
message += " - " + ", ".join(file)
elif isinstance(file, _STR_BASE):
message += " - " + file
if isinstance(messageArgs, dict):
try:
message = message % messageArgs
except (KeyError, TypeError, ValueError) as ex:
message += " \nMessage log error: " + str(ex) + " \nMessage arguments: " + str(messageArgs)
self.uiThreadQueue.put((self.uiAddToLog, [message]))
# ui thread addToLog
def uiAddToLog(self, message):
try:
self.logView.append(message)
except:
pass
def logClear(self, *ignore):
self.logView.clear()
def logSelect(self, *ignore):
self.logView.select()
def logSaveToFile(self, *ignore):
filename = self.uiFileDialog("save",
title=_("arelle - Save Messages Log"),
initialdir=".",
filetypes=[(_("Txt file"), "*.txt")],
defaultextension=".txt")
if not filename:
return False
try:
self.logView.saveToFile(filename)
except (IOError, EnvironmentError) as err:
tkinter.messagebox.showwarning(_("arelle - Error"),
_("Failed to save {0}:\n{1}").format(
filename, err),
parent=self.parent)
return True
# worker threads viewModelObject
def viewModelObject(self, modelXbrl, objectId):
self.waitForUiThreadQueue() # force prior ui view updates if any
self.uiThreadQueue.put((self.uiViewModelObject, [modelXbrl, objectId]))
# ui thread viewModelObject
def uiViewModelObject(self, modelXbrl, objectId):
modelXbrl.viewModelObject(objectId)
# worker threads viewModelObject
def reloadViews(self, modelXbrl):
self.uiThreadQueue.put((self.uiReloadViews, [modelXbrl]))
# ui thread viewModelObject
def uiReloadViews(self, modelXbrl):
for view in modelXbrl.views:
view.view()
# worker threads showStatus
def showStatus(self, message, clearAfter=None):
self.uiThreadQueue.put((self.uiShowStatus, [message, clearAfter]))
# ui thread showStatus
def uiClearStatusTimerEvent(self):
if self.statusbarTimerId: # if timer still wanted, clear status
self.statusbar["text"] = ""
self.statusbarTimerId = None
def uiShowStatus(self, message, clearAfter=None):
if self.statusbarTimerId: # ignore timer
self.statusbarTimerId = None
self.statusbar["text"] = message
if clearAfter is not None and clearAfter > 0:
self.statusbarTimerId = self.statusbar.after(clearAfter, self.uiClearStatusTimerEvent)
# web authentication password request
def internet_user_password(self, host, realm):
from arelle.DialogUserPassword import askUserPassword
untilDone = threading.Event()
result = []
self.uiThreadQueue.put((askUserPassword, [self.parent, host, realm, untilDone, result]))
untilDone.wait()
return result[0]
# web file login requested
def internet_logon(self, url, quotedUrl, dialogCaption, dialogText):
from arelle.DialogUserPassword import askInternetLogon
untilDone = threading.Event()
result = []
self.uiThreadQueue.put((askInternetLogon, [self.parent, url, quotedUrl, dialogCaption, dialogText, untilDone, result]))
untilDone.wait()
return result[0]
def waitForUiThreadQueue(self):
for i in range(40): # max 2 secs
if self.uiThreadQueue.empty():
break
time.sleep(0.05)
def uiThreadChecker(self, widget, delayMsecs=100): # 10x per second
# process callback on main (UI) thread
while not self.uiThreadQueue.empty():
try:
(callback, args) = self.uiThreadQueue.get(block=False)
except queue.Empty:
pass
else:
callback(*args)
widget.after(delayMsecs, lambda: self.uiThreadChecker(widget))
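# A minimal, self-contained sketch of the dispatch pattern above (assumed
# names, not arelle API): worker threads enqueue (callback, args) tuples and
# the Tk main loop drains the queue via widget.after(), so every widget
# touch happens on the UI thread.
#
#   import queue, tkinter
#   uiQueue = queue.Queue()
#
#   def uiPump(root, delayMsecs=100):
#       while not uiQueue.empty():
#           try:
#               callback, args = uiQueue.get(block=False)
#           except queue.Empty:
#               break
#           callback(*args)
#       root.after(delayMsecs, lambda: uiPump(root, delayMsecs))
#
#   # from any worker thread:
#   #   uiQueue.put((statusVar.set, ["done"]))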
def uiFileDialog(self, action, title=None, initialdir=None, filetypes=[], defaultextension=None, owner=None, multiple=False, parent=None):
if parent is None: parent = self.parent
if multiple and action == "open": # return as simple list of file names
multFileNames = tkinter.filedialog.askopenfilename(
multiple=True,
title=title,
initialdir=initialdir,
filetypes=[] if self.isMac else filetypes,
defaultextension=defaultextension,
parent=parent)
if isinstance(multFileNames, (tuple,list)):
return multFileNames
return re.findall("[{]([^}]+)[}]", # older multiple returns "{file1} {file2}..."
multFileNames)
elif self.hasWin32gui:
import win32gui
try:
filename, filter, flags = {"open":win32gui.GetOpenFileNameW,
"save":win32gui.GetSaveFileNameW}[action](
hwndOwner=(owner if owner else parent).winfo_id(),
hInstance=win32gui.GetModuleHandle(None),
Filter='\0'.join(e for t in filetypes+['\0'] for e in t),
MaxFile=4096,
InitialDir=initialdir,
Title=title,
DefExt=defaultextension)
return filename
except win32gui.error:
return ''
else:
return {"open":tkinter.filedialog.askopenfilename,
"save":tkinter.filedialog.asksaveasfilename}[action](
title=title,
initialdir=initialdir,
filetypes=[] if self.isMac else filetypes,
defaultextension=defaultextension,
parent=parent)
from arelle import DialogFormulaParameters
class WinMainLogHandler(logging.Handler):
def __init__(self, cntlr):
super(WinMainLogHandler, self).__init__()
self.cntlr = cntlr
#formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s - %(file)s %(sourceLine)s")
formatter = Cntlr.LogFormatter("[%(messageCode)s] %(message)s - %(file)s")
self.setFormatter(formatter)
self.logRecordBuffer = None
def startLogBuffering(self):
if self.logRecordBuffer is None:
self.logRecordBuffer = []
def endLogBuffering(self):
self.logRecordBuffer = None
def flush(self):
''' Nothing to flush '''
def emit(self, logRecord):
if self.logRecordBuffer is not None:
self.logRecordBuffer.append(logRecord)
# add to logView
msg = self.format(logRecord)
try:
self.cntlr.addToLog(msg, level=logRecord.levelno)
except:
pass
class TkinterCallWrapper:
"""Replacement for internal tkinter class. Stores function to call when some user
defined Tcl function is called e.g. after an event occurred."""
def __init__(self, func, subst, widget):
"""Store FUNC, SUBST and WIDGET as members."""
self.func = func
self.subst = subst
self.widget = widget
def __call__(self, *args):
"""Apply first function SUBST to arguments, than FUNC."""
try:
if self.subst:
args = self.subst(*args)
return self.func(*args)
except SystemExit as msg:
raise SystemExit(msg)
except Exception:
# this was tkinter's standard coding: self.widget._report_exception()
exc_type, exc_value, exc_traceback = sys.exc_info()
msg = ''.join(traceback.format_exception_only(exc_type, exc_value))
tracebk = ''.join(traceback.format_tb(exc_traceback, limit=30))
tkinter.messagebox.showerror(_("Exception"),
_("{0}\nCall trace\n{1}").format(msg, tracebk))
def main():
# this is the entry called by arelleGUI.pyw for windows
if getattr(sys, 'frozen', False):
if sys.platform in ("darwin", "linux"): # Use frozen tcl, tk and TkTable libraries
_resourcesDir = os.path.join(Cntlr.resourcesDir(), "lib")
for _tcltk in ("tcl", "tk", "Tktable"):
for _tcltkVer in ("8.5", "8.6", "2.11"): # Tktable ver is 2.11
_d = _resourcesDir
while len(_d) > 3: # stop at root directory
_tcltkDir = os.path.join(_d, _tcltk + _tcltkVer)
if os.path.exists(_tcltkDir):
os.environ[_tcltk.upper() + "_LIBRARY"] = _tcltkDir
break
_d = os.path.dirname(_d)
elif sys.platform == 'win32': # windows requires fake stdout/stderr because no write/flush (e.g., EdgarRenderer LocalViewer pybottle)
class dummyFrozenStream:
def __init__(self): pass
def write(self,data): pass
def read(self,data): pass
def flush(self): pass
def close(self): pass
sys.stdout = dummyFrozenStream()
sys.stderr = dummyFrozenStream()
sys.stdin = dummyFrozenStream()
global restartMain
while restartMain:
restartMain = False
try:
application = Tk()
cntlrWinMain = CntlrWinMain(application)
application.protocol("WM_DELETE_WINDOW", cntlrWinMain.quit)
if sys.platform == "darwin" and not __file__.endswith(".app/Contents/MacOS/arelleGUI"):
# not built app - launches behind python or eclipse
application.lift()
application.call('wm', 'attributes', '.', '-topmost', True)
cntlrWinMain.uiThreadQueue.put((application.call, ['wm', 'attributes', '.', '-topmost', False]))
os.system('''/usr/bin/osascript -e 'tell app "Finder" to set frontmost of process "Python" to true' ''')
application.mainloop()
except Exception: # unable to start Tk or other fatal error
exc_type, exc_value, exc_traceback = sys.exc_info()
msg = ''.join(traceback.format_exception_only(exc_type, exc_value))
tracebk = ''.join(traceback.format_tb(exc_traceback, limit=7))
logMsg = "{}\nCall Trace\n{}\nEnvironment {}".format(msg, tracebk, os.environ)
#print(logMsg, file=sys.stderr)
if syslog is not None:
syslog.openlog("Arelle")
syslog.syslog(syslog.LOG_ALERT, logMsg)
try: # this may crash. Note syslog has 1k message length
logMsg = "tcl_pkgPath {} tcl_library {} tcl version {}".format(
Tcl().getvar("tcl_pkgPath"), Tcl().getvar("tcl_library"), Tcl().eval('info patchlevel'))
if syslog is not None:
syslog.syslog(syslog.LOG_ALERT, logMsg)
#print(logMsg, file=sys.stderr)
except:
pass
if syslog is not None:
syslog.closelog()
if __name__ == "__main__":
# this is the entry called by MacOS open and MacOS shell scripts
# check if ARELLE_ARGS are used to emulate command line operation
if os.getenv("ARELLE_ARGS"):
# command line mode
from arelle import CntlrCmdLine
CntlrCmdLine.main()
else:
# GUI mode
main()
|
jt6562/XX-Net
|
refs/heads/master
|
python27/1.0/lib/code.py
|
62
|
"""Utilities needed to emulate Python's interactive interpreter.
"""
# Inspired by similar code by Jeff Epler and Fredrik Lundh.
import sys
import traceback
from codeop import CommandCompiler, compile_command
__all__ = ["InteractiveInterpreter", "InteractiveConsole", "interact",
"compile_command"]
def softspace(file, newvalue):
oldvalue = 0
try:
oldvalue = file.softspace
except AttributeError:
pass
try:
file.softspace = newvalue
except (AttributeError, TypeError):
# "attribute-less object" or "read-only attributes"
pass
return oldvalue
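# Illustration (a sketch) of the Python 2 softspace protocol this helper
# manages: "print x," sets file.softspace to 1 to request a separating
# space before the next printed item, and softspace() reads and resets it.
#
#   >>> import sys
#   >>> print "a",                # trailing comma sets sys.stdout.softspace
#   a
#   >>> softspace(sys.stdout, 0)  # read old value, reset to 0
#   1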
class InteractiveInterpreter:
"""Base class for InteractiveConsole.
This class deals with parsing and interpreter state (the user's
namespace); it doesn't deal with input buffering or prompting or
input file naming (the filename is always passed in explicitly).
"""
def __init__(self, locals=None):
"""Constructor.
The optional 'locals' argument specifies the dictionary in
which code will be executed; it defaults to a newly created
dictionary with key "__name__" set to "__console__" and key
"__doc__" set to None.
"""
if locals is None:
locals = {"__name__": "__console__", "__doc__": None}
self.locals = locals
self.compile = CommandCompiler()
def runsource(self, source, filename="<input>", symbol="single"):
"""Compile and run some source in the interpreter.
Arguments are as for compile_command().
One of several things can happen:
1) The input is incorrect; compile_command() raised an
exception (SyntaxError or OverflowError). A syntax traceback
will be printed by calling the showsyntaxerror() method.
2) The input is incomplete, and more input is required;
compile_command() returned None. Nothing happens.
3) The input is complete; compile_command() returned a code
object. The code is executed by calling self.runcode() (which
also handles run-time exceptions, except for SystemExit).
The return value is True in case 2, False in the other cases (unless
an exception is raised). The return value can be used to
decide whether to use sys.ps1 or sys.ps2 to prompt the next
line.
"""
try:
code = self.compile(source, filename, symbol)
except (OverflowError, SyntaxError, ValueError):
# Case 1
self.showsyntaxerror(filename)
return False
if code is None:
# Case 2
return True
# Case 3
self.runcode(code)
return False
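# Example of the three cases (a sketch):
#
#   >>> interp = InteractiveInterpreter()
#   >>> interp.runsource("if True:")      # case 2: incomplete, buffer more
#   True
#   >>> interp.runsource("1 +")           # case 1: syntax error is printed
#   False
#   >>> interp.runsource("x = 40 + 2")    # case 3: compiled and executed
#   False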
def runcode(self, code):
"""Execute a code object.
When an exception occurs, self.showtraceback() is called to
display a traceback. All exceptions are caught except
SystemExit, which is reraised.
A note about KeyboardInterrupt: this exception may occur
elsewhere in this code, and may not always be caught. The
caller should be prepared to deal with it.
"""
try:
exec code in self.locals
except SystemExit:
raise
except:
self.showtraceback()
else:
if softspace(sys.stdout, 0):
print
def showsyntaxerror(self, filename=None):
"""Display the syntax error that just occurred.
This doesn't display a stack trace because there isn't one.
If a filename is given, it is stuffed in the exception instead
of what was there before (because Python's parser always uses
"<string>" when reading from a string).
The output is written by self.write(), below.
"""
type, value, sys.last_traceback = sys.exc_info()
sys.last_type = type
sys.last_value = value
if filename and type is SyntaxError:
# Work hard to stuff the correct filename in the exception
try:
msg, (dummy_filename, lineno, offset, line) = value
except:
# Not the format we expect; leave it alone
pass
else:
# Stuff in the right filename
value = SyntaxError(msg, (filename, lineno, offset, line))
sys.last_value = value
list = traceback.format_exception_only(type, value)
map(self.write, list)
def showtraceback(self):
"""Display the exception that just occurred.
We remove the first stack item because it is our own code.
The output is written by self.write(), below.
"""
try:
type, value, tb = sys.exc_info()
sys.last_type = type
sys.last_value = value
sys.last_traceback = tb
tblist = traceback.extract_tb(tb)
del tblist[:1]
list = traceback.format_list(tblist)
if list:
list.insert(0, "Traceback (most recent call last):\n")
list[len(list):] = traceback.format_exception_only(type, value)
finally:
tblist = tb = None
map(self.write, list)
def write(self, data):
"""Write a string.
The base implementation writes to sys.stderr; a subclass may
replace this with a different implementation.
"""
sys.stderr.write(data)
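# Output can be redirected by overriding write(); a sketch with a
# hypothetical subclass that routes interpreter errors to logging:
#
#   import logging
#
#   class LoggingInterpreter(InteractiveInterpreter):
#       def write(self, data):
#           logging.getLogger("console").error(data.rstrip())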
class InteractiveConsole(InteractiveInterpreter):
"""Closely emulate the behavior of the interactive Python interpreter.
This class builds on InteractiveInterpreter and adds prompting
using the familiar sys.ps1 and sys.ps2, and input buffering.
"""
def __init__(self, locals=None, filename="<console>"):
"""Constructor.
The optional locals argument will be passed to the
InteractiveInterpreter base class.
The optional filename argument should specify the (file)name
of the input stream; it will show up in tracebacks.
"""
InteractiveInterpreter.__init__(self, locals)
self.filename = filename
self.resetbuffer()
def resetbuffer(self):
"""Reset the input buffer."""
self.buffer = []
def interact(self, banner=None):
"""Closely emulate the interactive Python console.
The optional banner argument specifies the banner to print
before the first interaction; by default it prints a banner
similar to the one printed by the real Python interpreter,
followed by the current class name in parentheses (so as not
to confuse this with the real interpreter -- since it's so
close!).
"""
try:
sys.ps1
except AttributeError:
sys.ps1 = ">>> "
try:
sys.ps2
except AttributeError:
sys.ps2 = "... "
cprt = 'Type "help", "copyright", "credits" or "license" for more information.'
if banner is None:
self.write("Python %s on %s\n%s\n(%s)\n" %
(sys.version, sys.platform, cprt,
self.__class__.__name__))
else:
self.write("%s\n" % str(banner))
more = 0
while 1:
try:
if more:
prompt = sys.ps2
else:
prompt = sys.ps1
try:
line = self.raw_input(prompt)
# Can be None if sys.stdin was redefined
encoding = getattr(sys.stdin, "encoding", None)
if encoding and not isinstance(line, unicode):
line = line.decode(encoding)
except EOFError:
self.write("\n")
break
else:
more = self.push(line)
except KeyboardInterrupt:
self.write("\nKeyboardInterrupt\n")
self.resetbuffer()
more = 0
def push(self, line):
"""Push a line to the interpreter.
The line should not have a trailing newline; it may have
internal newlines. The line is appended to a buffer and the
interpreter's runsource() method is called with the
concatenated contents of the buffer as source. If this
indicates that the command was executed or invalid, the buffer
is reset; otherwise, the command is incomplete, and the buffer
is left as it was after the line was appended. The return
value is 1 if more input is required, 0 if the line was dealt
with in some way (this is the same as runsource()).
"""
self.buffer.append(line)
source = "\n".join(self.buffer)
more = self.runsource(source, self.filename)
if not more:
self.resetbuffer()
return more
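# Example of the buffering contract (a sketch):
#
#   >>> console = InteractiveConsole()
#   >>> console.push("def f():")       # incomplete: buffered
#   True
#   >>> console.push("    return 1")   # block may continue: still buffered
#   True
#   >>> console.push("")               # blank line ends the block: executed
#   False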
def raw_input(self, prompt=""):
"""Write a prompt and read a line.
The returned line does not include the trailing newline.
When the user enters the EOF key sequence, EOFError is raised.
The base implementation uses the built-in function
raw_input(); a subclass may replace this with a different
implementation.
"""
return raw_input(prompt)
def interact(banner=None, readfunc=None, local=None):
"""Closely emulate the interactive Python interpreter.
This is a backwards compatible interface to the InteractiveConsole
class. When readfunc is not specified, it attempts to import the
readline module to enable GNU readline if it is available.
Arguments (all optional, all default to None):
banner -- passed to InteractiveConsole.interact()
readfunc -- if not None, replaces InteractiveConsole.raw_input()
local -- passed to InteractiveInterpreter.__init__()
"""
console = InteractiveConsole(local)
if readfunc is not None:
console.raw_input = readfunc
else:
try:
import readline
except ImportError:
pass
console.interact(banner)
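# Example usage (a sketch): embed a REPL over an application's namespace.
#
#   app_state = {"answer": 42}
#   interact(banner="debug console (Ctrl-D to exit)", local=app_state)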
if __name__ == "__main__":
interact()
|
ZeitOnline/zeit.content.cp
|
refs/heads/master
|
src/zeit/content/cp/browser/blocks/tests/test_podcast.py
|
1
|
import zeit.cms.testing
import zeit.content.cp
import zeit.content.cp.centerpage
import zeit.content.cp.testing
class TestPodcast(zeit.cms.testing.BrowserTestCase):
layer = zeit.content.cp.testing.ZCML_LAYER
def setUp(self):
super(TestPodcast, self).setUp()
self.centerpage = zeit.content.cp.centerpage.CenterPage()
self.centerpage['lead'].create_item('podcast')
self.repository['centerpage'] = self.centerpage
b = self.browser
b.open(
'http://localhost/++skin++vivi/repository/centerpage/@@checkout')
b.open('contents')
self.content_url = b.url
def test_can_create_podcast_module_via_drag_n_drop_from_sidebar(self):
b = self.browser
self.assertEqual(1, b.contents.count('type-podcast'))
b.open('informatives/@@landing-zone-drop-module?block_type=podcast')
b.open(self.content_url)
self.assertEqual(2, b.contents.count('type-podcast'))
def test_podcast_id_is_editable(self):
b = self.browser
b.getLink('Edit block properties', index=0).click()
b.getControl('Podcast id').value = '12345'
zeit.content.cp.centerpage._test_helper_cp_changed = False
b.getControl('Apply').click()
self.assertTrue(zeit.content.cp.centerpage._test_helper_cp_changed)
self.assertEllipsis('...Updated on...', b.contents)
b.open(self.content_url)
self.assertEllipsis('...ID:...12345...', b.contents)
b.getLink('Edit block properties', index=0).click()
self.assertEqual('12345', b.getControl('Podcast id').value.strip())
|
aavanian/bokeh
|
refs/heads/master
|
examples/custom/font-awesome/font-awesome.py
|
13
|
from bokeh.plotting import show
from bokeh.layouts import column
from bokeh.models import CustomJS
from bokeh.models.widgets import Button
from fontawesome_icon import FontAwesomeIcon
btn = Button(icon=FontAwesomeIcon(icon_name="thumbs-o-up", size=2),
label="It works!",
callback=CustomJS(code="alert('It works!');"))
show(column(btn))
|
javier-ruiz-b/docker-rasppi-images
|
refs/heads/master
|
raspberry-google-home/env/lib/python3.7/site-packages/pip/_internal/wheel_builder.py
|
11
|
"""Orchestrator for building wheels from InstallRequirements.
"""
import logging
import os.path
import re
import shutil
from pip._internal.models.link import Link
from pip._internal.operations.build.wheel import build_wheel_pep517
from pip._internal.operations.build.wheel_legacy import build_wheel_legacy
from pip._internal.utils.logging import indent_log
from pip._internal.utils.misc import ensure_dir, hash_file, is_wheel_installed
from pip._internal.utils.setuptools_build import make_setuptools_clean_args
from pip._internal.utils.subprocess import call_subprocess
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.urls import path_to_url
from pip._internal.vcs import vcs
if MYPY_CHECK_RUNNING:
from typing import (
Any, Callable, Iterable, List, Optional, Tuple,
)
from pip._internal.cache import WheelCache
from pip._internal.req.req_install import InstallRequirement
BinaryAllowedPredicate = Callable[[InstallRequirement], bool]
BuildResult = Tuple[List[InstallRequirement], List[InstallRequirement]]
logger = logging.getLogger(__name__)
_egg_info_re = re.compile(r'([a-z0-9_.]+)-([a-z0-9_.!+-]+)', re.IGNORECASE)
def _contains_egg_info(s):
# type: (str) -> bool
"""Determine whether the string looks like an egg_info.
:param s: The string to parse. E.g. foo-2.1
"""
return bool(_egg_info_re.search(s))
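# For illustration (a sketch of what the regex accepts):
#
#   _contains_egg_info("pip-20.0.2")               # True: name-version stem
#   _contains_egg_info("simple-0.1-py3-none-any")  # True: matches a prefix
#   _contains_egg_info("master")                   # False: no version component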
def _should_build(
req, # type: InstallRequirement
need_wheel, # type: bool
check_binary_allowed, # type: BinaryAllowedPredicate
):
# type: (...) -> bool
"""Return whether an InstallRequirement should be built into a wheel."""
if req.constraint:
# never build requirements that are merely constraints
return False
if req.is_wheel:
if need_wheel:
logger.info(
'Skipping %s, due to already being a wheel.', req.name,
)
return False
if need_wheel:
# i.e. pip wheel, not pip install
return True
# From this point, this concerns the pip install command only
# (need_wheel=False).
if req.editable or not req.source_dir:
return False
if not check_binary_allowed(req):
logger.info(
"Skipping wheel build for %s, due to binaries "
"being disabled for it.", req.name,
)
return False
if not req.use_pep517 and not is_wheel_installed():
# we don't build legacy requirements if wheel is not installed
logger.info(
"Using legacy 'setup.py install' for %s, "
"since package 'wheel' is not installed.", req.name,
)
return False
return True
def should_build_for_wheel_command(
req, # type: InstallRequirement
):
# type: (...) -> bool
return _should_build(
req, need_wheel=True, check_binary_allowed=_always_true
)
def should_build_for_install_command(
req, # type: InstallRequirement
check_binary_allowed, # type: BinaryAllowedPredicate
):
# type: (...) -> bool
return _should_build(
req, need_wheel=False, check_binary_allowed=check_binary_allowed
)
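# Call shapes for the two wrappers above (a sketch; req is a pip
# InstallRequirement supplied by the resolver):
#
#   should_build_for_wheel_command(req)                   # "pip wheel" path
#   should_build_for_install_command(req, _always_true)   # "pip install" path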
def _should_cache(
req, # type: InstallRequirement
):
# type: (...) -> Optional[bool]
"""
Return whether a built InstallRequirement can be stored in the persistent
wheel cache, assuming the wheel cache is available, and _should_build()
has determined a wheel needs to be built.
"""
if req.editable or not req.source_dir:
# never cache editable requirements or those without a source dir
return False
if req.link and req.link.is_vcs:
# VCS checkout. Do not cache
# unless it points to an immutable commit hash.
assert not req.editable
assert req.source_dir
vcs_backend = vcs.get_backend_for_scheme(req.link.scheme)
assert vcs_backend
if vcs_backend.is_immutable_rev_checkout(req.link.url, req.source_dir):
return True
return False
assert req.link
base, ext = req.link.splitext()
if _contains_egg_info(base):
return True
# Otherwise, do not cache.
return False
def _get_cache_dir(
req, # type: InstallRequirement
wheel_cache, # type: WheelCache
):
# type: (...) -> str
"""Return the persistent or temporary cache directory where the built
wheel needs to be stored.
"""
cache_available = bool(wheel_cache.cache_dir)
assert req.link
if cache_available and _should_cache(req):
cache_dir = wheel_cache.get_path_for_link(req.link)
else:
cache_dir = wheel_cache.get_ephem_path_for_link(req.link)
return cache_dir
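# For illustration (a sketch; pinned_req and branch_req are hypothetical
# InstallRequirements): a VCS link pinned to an immutable commit hash lands
# in the persistent cache, while a mutable branch checkout is built into an
# ephemeral directory.
#
#   _get_cache_dir(pinned_req, wheel_cache)   # persistent wheel cache path
#   _get_cache_dir(branch_req, wheel_cache)   # ephemeral temp path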
def _always_true(_):
# type: (Any) -> bool
return True
def _build_one(
req, # type: InstallRequirement
output_dir, # type: str
build_options, # type: List[str]
global_options, # type: List[str]
):
# type: (...) -> Optional[str]
"""Build one wheel.
:return: The filename of the built wheel, or None if the build failed.
"""
try:
ensure_dir(output_dir)
except OSError as e:
logger.warning(
"Building wheel for %s failed: %s",
req.name, e,
)
return None
# Install build deps into temporary directory (PEP 518)
with req.build_env:
return _build_one_inside_env(
req, output_dir, build_options, global_options
)
def _build_one_inside_env(
req, # type: InstallRequirement
output_dir, # type: str
build_options, # type: List[str]
global_options, # type: List[str]
):
# type: (...) -> Optional[str]
with TempDirectory(kind="wheel") as temp_dir:
assert req.name
if req.use_pep517:
assert req.metadata_directory
wheel_path = build_wheel_pep517(
name=req.name,
backend=req.pep517_backend,
metadata_directory=req.metadata_directory,
build_options=build_options,
tempd=temp_dir.path,
)
else:
wheel_path = build_wheel_legacy(
name=req.name,
setup_py_path=req.setup_py_path,
source_dir=req.unpacked_source_directory,
global_options=global_options,
build_options=build_options,
tempd=temp_dir.path,
)
if wheel_path is not None:
wheel_name = os.path.basename(wheel_path)
dest_path = os.path.join(output_dir, wheel_name)
try:
wheel_hash, length = hash_file(wheel_path)
shutil.move(wheel_path, dest_path)
logger.info('Created wheel for %s: '
'filename=%s size=%d sha256=%s',
req.name, wheel_name, length,
wheel_hash.hexdigest())
logger.info('Stored in directory: %s', output_dir)
return dest_path
except Exception as e:
logger.warning(
"Building wheel for %s failed: %s",
req.name, e,
)
# Ignore return, we can't do anything else useful.
if not req.use_pep517:
_clean_one_legacy(req, global_options)
return None
def _clean_one_legacy(req, global_options):
# type: (InstallRequirement, List[str]) -> bool
clean_args = make_setuptools_clean_args(
req.setup_py_path,
global_options=global_options,
)
logger.info('Running setup.py clean for %s', req.name)
try:
call_subprocess(clean_args, cwd=req.source_dir)
return True
except Exception:
logger.error('Failed cleaning build dir for %s', req.name)
return False
def build(
requirements, # type: Iterable[InstallRequirement]
wheel_cache, # type: WheelCache
build_options, # type: List[str]
global_options, # type: List[str]
):
# type: (...) -> BuildResult
"""Build wheels.
:return: The list of InstallRequirements that were built successfully and
the list of InstallRequirements that failed to build.
"""
if not requirements:
return [], []
# Build the wheels.
logger.info(
'Building wheels for collected packages: %s',
', '.join(req.name for req in requirements), # type: ignore
)
with indent_log():
build_successes, build_failures = [], []
for req in requirements:
cache_dir = _get_cache_dir(req, wheel_cache)
wheel_file = _build_one(
req, cache_dir, build_options, global_options
)
if wheel_file:
# Update the link for this.
req.link = Link(path_to_url(wheel_file))
req.local_file_path = req.link.file_path
assert req.link.is_wheel
build_successes.append(req)
else:
build_failures.append(req)
# notify success/failure
if build_successes:
logger.info(
'Successfully built %s',
' '.join([req.name for req in build_successes]), # type: ignore
)
if build_failures:
logger.info(
'Failed to build %s',
' '.join([req.name for req in build_failures]), # type: ignore
)
# Return the lists of requirements that succeeded and failed to build
return build_successes, build_failures
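# Example call shape (a sketch; requirements and wheel_cache are assumed to
# come from pip's resolver and cache setup):
#
#   successes, failures = build(
#       requirements, wheel_cache, build_options=[], global_options=[],
#   )
#   for req in failures:
#       logger.warning("could not build %s", req.name)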
|
artefactual/archivematica-history
|
refs/heads/master
|
src/archivematicaCommon/lib/externals/pyes/tests/test_aliases.py
|
2
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from .estestcase import ESTestCase
import unittest
from .. import exceptions
class ErrorReportingTestCase(ESTestCase):
def setUp(self):
super(ErrorReportingTestCase, self).setUp()
#self.conn.set_alias('test-alias', ['_river'])
#self.conn.delete_alias('test-alias', ['_river'])
self.conn.delete_index_if_exists('test-index2')
def tearDown(self):
#self.conn.set_alias('test-alias', ['_river'])
#self.conn.delete_alias('test-alias', ['_river'])
self.conn.delete_index_if_exists('test-index2')
def testCreateDeleteAliases(self):
"""Test errors thrown when creating or deleting aliases.
"""
self.assertTrue('ok' in self.conn.create_index(self.index_name))
# Check initial output of get_indices.
result = self.conn.get_indices(include_aliases=True)
self.assertTrue('test-index' in result)
self.assertEqual(result['test-index'], {'num_docs': 0})
self.assertTrue('test-alias' not in result)
# Check getting a missing alias.
err = self.checkRaises(exceptions.IndexMissingException,
self.conn.get_alias, 'test-alias')
self.assertEqual(str(err), '[test-alias] missing')
        # Check deleting a missing alias (doesn't return an error).
self.conn.delete_alias("test-alias", self.index_name)
# Add an alias from test-alias to test-index
self.conn.change_aliases([['add', 'test-index', 'test-alias']])
self.assertEqual(self.conn.get_alias("test-alias"), ['test-index'])
# Adding an alias to a missing index fails
err = self.checkRaises(exceptions.IndexMissingException,
self.conn.change_aliases,
[['add', 'test-missing-index', 'test-alias']])
self.assertEqual(str(err), '[test-missing-index] missing')
self.assertEqual(self.conn.get_alias("test-alias"), ['test-index'])
# # An alias can't be deleted using delete_index.
# err = self.checkRaises(exceptions.NotFoundException,
# self.conn.delete_index, 'test-alias')
# self.assertEqual(str(err), '[test-alias] missing')
# Check return value from get_indices now.
result = self.conn.get_indices(include_aliases=True)
self.assertTrue('test-index' in result)
self.assertEqual(result['test-index'], {'num_docs': 0})
self.assertTrue('test-alias' in result)
self.assertEqual(result['test-alias'], {'alias_for': ['test-index'], 'num_docs': 0})
result = self.conn.get_indices(include_aliases=False)
self.assertTrue('test-index' in result)
self.assertEqual(result['test-index'], {'num_docs': 0})
self.assertTrue('test-alias' not in result)
# Add an alias to test-index2.
self.assertTrue('ok' in self.conn.create_index("test-index2"))
self.conn.change_aliases([['add', 'test-index2', 'test-alias']])
self.assertEqual(sorted(self.conn.get_alias("test-alias")),
['test-index', 'test-index2'])
# Check deleting multiple indices from an alias.
self.conn.delete_alias("test-alias", [self.index_name, "test-index2"])
self.checkRaises(exceptions.IndexMissingException, self.conn.get_alias, 'test-alias')
# Check deleting multiple indices from a missing alias (still no error)
self.conn.delete_alias("test-alias", [self.index_name, "test-index2"])
# Check that we still get an error for a missing alias.
err = self.checkRaises(exceptions.IndexMissingException,
self.conn.get_alias, 'test-alias')
self.assertEqual(str(err), '[test-alias] missing')
def testWriteToAlias(self):
self.assertTrue('ok' in self.conn.create_index(self.index_name))
self.assertTrue('ok' in self.conn.create_index("test-index2"))
self.assertTrue('ok' in self.conn.set_alias("test-alias", ['test-index']))
self.assertTrue('ok' in self.conn.set_alias("test-alias2", ['test-index', 'test-index2']))
# Can write to aliases only if they point to exactly one index.
self.conn.index(dict(title='doc1'), 'test-index', 'testtype')
self.conn.index(dict(title='doc1'), 'test-index2', 'testtype')
self.conn.index(dict(title='doc1'), 'test-alias', 'testtype')
self.checkRaises(exceptions.ElasticSearchIllegalArgumentException,
self.conn.index, dict(title='doc1'),
'test-alias2', 'testtype')
self.conn.refresh() # ensure that the documents have been indexed.
# Check the document counts for each index or alias.
result = self.conn.get_indices(include_aliases=True)
self.assertEqual(result['test-index'], {'num_docs': 2})
self.assertEqual(result['test-index2'], {'num_docs': 1})
self.assertEqual(result['test-alias'], {'alias_for': ['test-index'], 'num_docs': 2})
self.assertEqual(result['test-alias2'], {'alias_for': ['test-index', 'test-index2'], 'num_docs': 3})
if __name__ == "__main__":
unittest.main()
|
v-iam/azure-sdk-for-python
|
refs/heads/master
|
azure-mgmt-network/azure/mgmt/network/v2017_06_01/models/virtual_network.py
|
2
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class VirtualNetwork(Resource):
"""Virtual Network resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict
:param address_space: The AddressSpace that contains an array of IP
address ranges that can be used by subnets.
:type address_space: :class:`AddressSpace
<azure.mgmt.network.v2017_06_01.models.AddressSpace>`
:param dhcp_options: The dhcpOptions that contains an array of DNS servers
available to VMs deployed in the virtual network.
:type dhcp_options: :class:`DhcpOptions
<azure.mgmt.network.v2017_06_01.models.DhcpOptions>`
:param subnets: A list of subnets in a Virtual Network.
:type subnets: list of :class:`Subnet
<azure.mgmt.network.v2017_06_01.models.Subnet>`
:param virtual_network_peerings: A list of peerings in a Virtual Network.
:type virtual_network_peerings: list of :class:`VirtualNetworkPeering
<azure.mgmt.network.v2017_06_01.models.VirtualNetworkPeering>`
:param resource_guid: The resourceGuid property of the Virtual Network
resource.
:type resource_guid: str
    :param provisioning_state: The provisioning state of the virtual network
     resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param etag: Gets a unique read-only string that changes whenever the
resource is updated.
:type etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'address_space': {'key': 'properties.addressSpace', 'type': 'AddressSpace'},
'dhcp_options': {'key': 'properties.dhcpOptions', 'type': 'DhcpOptions'},
'subnets': {'key': 'properties.subnets', 'type': '[Subnet]'},
'virtual_network_peerings': {'key': 'properties.virtualNetworkPeerings', 'type': '[VirtualNetworkPeering]'},
'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, id=None, location=None, tags=None, address_space=None, dhcp_options=None, subnets=None, virtual_network_peerings=None, resource_guid=None, provisioning_state=None, etag=None):
super(VirtualNetwork, self).__init__(id=id, location=location, tags=tags)
self.address_space = address_space
self.dhcp_options = dhcp_options
self.subnets = subnets
self.virtual_network_peerings = virtual_network_peerings
self.resource_guid = resource_guid
self.provisioning_state = provisioning_state
self.etag = etag
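# --- Illustrative usage (added sketch, not part of the generated module) ---
# A hedged example of constructing the model; the AddressSpace import path,
# region and CIDR range below are assumptions for illustration only.
#
#   from azure.mgmt.network.v2017_06_01.models import AddressSpace, VirtualNetwork
#   vnet = VirtualNetwork(
#       location='westus',
#       address_space=AddressSpace(address_prefixes=['10.0.0.0/16']),
#   )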
|
40223137/2015cd_midterm
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/jqueryui/__init__.py
|
603
|
"""Wrapper around the jQuery UI library
Exposes a single object, jq, to manipulate the widgets designed in the library
This object supports :
- subscription : jq[elt_id] returns an object matching the element with the
specified id
- a method get(**kw). The only keyword currently supported is "selector". The
method returns a list of instances of the class Element, each instance wraps
the elements matching the CSS selector passed
jq(selector="button") : returns instances of Element for all button tags
The value can be a list or tuple of CSS selector strings :
jq(selector=("input[type=submit]","a")) : instances of Element for all
"input" tags with attribute "type" set to "submit" + "a" tags (anchors)
Instances of Element have the same interface as the selections made by the
jQuery function $, with the additional methods provided by jQuery UI. For
instance, to turn an element into a dialog :
jq[elt_id].dialog()
When jQuery UI methods expect a Javascript object, they can be passed as
key/value pairs :
jq['tags'].autocomplete(source=availableTags)
"""
from browser import html, document, window
import javascript
_path = __file__[:__file__.rfind('/')]+'/'
document <= html.LINK(rel="stylesheet",
href=_path+'css/smoothness/jquery-ui.css')
# The scripts must be loaded in blocking mode, by using the function
# load(script_url[, names]) in module javascript
# If we just add them to the document with script tags, eg :
#
# document <= html.SCRIPT(script_url)
# _jqui = window.jQuery.noConflict(True)
#
# the name "jQuery" is not in the Javascript namespace until the script is
# fully loaded in the page, so "window.jQuery" raises an exception
# Load jQuery and put name 'jQuery' in the global Javascript namespace
javascript.load(_path+'jquery-1.11.2.js', ['jQuery'])
javascript.load(_path+'jquery-ui.js')
_jqui = window.jQuery.noConflict(True)
_events = ['abort',
'beforeinput',
'blur',
'click',
'compositionstart',
'compositionupdate',
'compositionend',
'dblclick',
'error',
'focus',
'focusin',
'focusout',
'input',
'keydown',
'keyup',
'load',
'mousedown',
'mouseenter',
'mouseleave',
'mousemove',
'mouseout',
'mouseover',
'mouseup',
'resize',
'scroll',
'select',
'unload']
class JQFunction:
def __init__(self, func):
self.func = func
def __call__(self, *args, **kw):
if kw:
# keyword arguments are passed as a single Javascript object
return self.func(*args, kw)
else:
return self.func(*args)
class Element:
"""Wrapper around the objects returned by jQuery selections"""
def __init__(self, item):
self.item = item
def bind(self, event, callback):
"""Binds an event on the element to function callback"""
getattr(self.item, event)(callback)
def __getattr__(self, attr):
res = getattr(self.item, attr)
if attr in _events:
# elt.click(f) is handled like elt.bind('click', f)
return lambda f:self.bind(attr, f)
if callable(res):
res = JQFunction(res)
return res
class jq:
@staticmethod
def get(**selectors):
items = []
for k,v in selectors.items():
if k=='selector':
                if isinstance(v, (list, tuple)):
values = v
else:
values = [v]
for value in values:
items.append(Element(_jqui(value)))
elif k=='element':
items = Element(_jqui(v))
return items
@staticmethod
def __getitem__(element_id):
return jq.get(selector='#'+element_id)[0]
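# --- Illustrative usage (added sketch, not part of the original wrapper) ---
# A hedged example of the API described in the module docstring; the element
# id and selector are assumptions for illustration.
#
#   from jqueryui import jq
#   jq['dialog-box'].dialog(autoOpen=False)   # keyword args become a JS object
#   for elt in jq.get(selector='button'):
#       elt.click(lambda ev: jq['dialog-box'].dialog('open'))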
|
Benniphx/server-tools
|
refs/heads/8.0
|
database_cleanup/model/purge_menus.py
|
32
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2015 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
from openerp.tools.translate import _
class CleanupPurgeLineMenu(orm.TransientModel):
_inherit = 'cleanup.purge.line'
_name = 'cleanup.purge.line.menu'
_columns = {
'wizard_id': fields.many2one(
'cleanup.purge.wizard.menu', 'Purge Wizard', readonly=True),
'menu_id': fields.many2one('ir.ui.menu', 'Menu entry'),
}
def purge(self, cr, uid, ids, context=None):
self.pool['ir.ui.menu'].unlink(
cr, uid,
[this.menu_id.id for this in self.browse(cr, uid, ids,
context=context)],
context=context)
return self.write(cr, uid, ids, {'purged': True}, context=context)
class CleanupPurgeWizardMenu(orm.TransientModel):
_inherit = 'cleanup.purge.wizard'
_name = 'cleanup.purge.wizard.menu'
def default_get(self, cr, uid, fields, context=None):
res = super(CleanupPurgeWizardMenu, self).default_get(
cr, uid, fields, context=context)
if 'name' in fields:
res['name'] = _('Purge menus')
return res
def find(self, cr, uid, context=None):
"""
        Search for menu entries whose window action refers to a model
        that can no longer be instantiated.
"""
res = []
for menu in self.pool['ir.ui.menu'].browse(
cr, uid, self.pool['ir.ui.menu'].search(
cr, uid, [], context=dict(
context or {}, active_test=False))):
if not menu.action or menu.action.type != 'ir.actions.act_window':
continue
if not self.pool.get(menu.action.res_model):
res.append((0, 0, {
'name': menu.complete_name,
'menu_id': menu.id,
}))
if not res:
raise orm.except_orm(
_('Nothing to do'),
_('No dangling menu entries found'))
return res
_columns = {
'purge_line_ids': fields.one2many(
'cleanup.purge.line.menu',
'wizard_id', 'Menus to purge'),
}
|
Belgabor/django
|
refs/heads/master
|
tests/regressiontests/datatypes/models.py
|
11
|
"""
This is a basic model to test saving and loading boolean and date-related
types, which in the past were problematic for some database backends.
"""
from django.db import models, DEFAULT_DB_ALIAS
from django.conf import settings
class Donut(models.Model):
name = models.CharField(max_length=100)
is_frosted = models.BooleanField(default=False)
has_sprinkles = models.NullBooleanField()
baked_date = models.DateField(null=True)
baked_time = models.TimeField(null=True)
consumed_at = models.DateTimeField(null=True)
review = models.TextField()
class Meta:
ordering = ('consumed_at',)
def __str__(self):
return self.name
__test__ = {'API_TESTS': """
# No donuts are in the system yet.
>>> Donut.objects.all()
[]
>>> d = Donut(name='Apple Fritter')
# Ensure we're getting True and False, not 0 and 1
>>> d.is_frosted
False
>>> d.has_sprinkles
>>> d.has_sprinkles = True
>>> d.has_sprinkles == True
True
>>> d.save()
>>> d2 = Donut.objects.all()[0]
>>> d2
<Donut: Apple Fritter>
>>> d2.is_frosted == False
True
>>> d2.has_sprinkles == True
True
>>> import datetime
>>> d2.baked_date = datetime.date(year=1938, month=6, day=4)
>>> d2.baked_time = datetime.time(hour=5, minute=30)
>>> d2.consumed_at = datetime.datetime(year=2007, month=4, day=20, hour=16, minute=19, second=59)
>>> d2.save()
>>> d3 = Donut.objects.all()[0]
>>> d3.baked_date
datetime.date(1938, 6, 4)
>>> d3.baked_time
datetime.time(5, 30)
>>> d3.consumed_at
datetime.datetime(2007, 4, 20, 16, 19, 59)
# Test for ticket #12059: TimeField wrongly handling datetime.datetime object.
>>> d2.baked_time = datetime.datetime(year=2007, month=4, day=20, hour=16, minute=19, second=59)
>>> d2.save()
>>> d3 = Donut.objects.all()[0]
>>> d3.baked_time
datetime.time(16, 19, 59)
# Year boundary tests (ticket #3689)
>>> d = Donut(name='Date Test 2007', baked_date=datetime.datetime(year=2007, month=12, day=31), consumed_at=datetime.datetime(year=2007, month=12, day=31, hour=23, minute=59, second=59))
>>> d.save()
>>> d1 = Donut(name='Date Test 2006', baked_date=datetime.datetime(year=2006, month=1, day=1), consumed_at=datetime.datetime(year=2006, month=1, day=1))
>>> d1.save()
>>> Donut.objects.filter(baked_date__year=2007)
[<Donut: Date Test 2007>]
>>> Donut.objects.filter(baked_date__year=2006)
[<Donut: Date Test 2006>]
>>> Donut.objects.filter(consumed_at__year=2007).order_by('name')
[<Donut: Apple Fritter>, <Donut: Date Test 2007>]
>>> Donut.objects.filter(consumed_at__year=2006)
[<Donut: Date Test 2006>]
>>> Donut.objects.filter(consumed_at__year=2005)
[]
>>> Donut.objects.filter(consumed_at__year=2008)
[]
# Regression test for #10238: TextField values returned from the database
# should be unicode.
>>> d2 = Donut.objects.create(name=u'Jelly Donut', review=u'Outstanding')
>>> Donut.objects.get(id=d2.id).review
u'Outstanding'
"""}
# Regression test for #8354: the MySQL backend should raise an error if given
# a timezone-aware datetime object.
if settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'] == 'django.db.backends.mysql':
__test__['API_TESTS'] += """
>>> from django.utils import tzinfo
>>> dt = datetime.datetime(2008, 8, 31, 16, 20, tzinfo=tzinfo.FixedOffset(0))
>>> d = Donut(name='Bear claw', consumed_at=dt)
>>> d.save()
Traceback (most recent call last):
...
ValueError: MySQL backend does not support timezone-aware datetimes.
"""
|
erichuang1994/tornado
|
refs/heads/master
|
docs/conf.py
|
40
|
# Ensure we get the local copy of tornado instead of what's on the standard path
import os
import sys
import time
sys.path.insert(0, os.path.abspath(".."))
import tornado
master_doc = "index"
project = "Tornado"
copyright = "2009-%s, The Tornado Authors" % time.strftime("%Y")
version = release = tornado.version
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.extlinks",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
]
primary_domain = 'py'
default_role = 'py:obj'
autodoc_member_order = "bysource"
autoclass_content = "both"
# Without this line sphinx includes a copy of object.__init__'s docstring
# on any class that doesn't define __init__.
# https://bitbucket.org/birkenfeld/sphinx/issue/1337/autoclass_content-both-uses-object__init__
autodoc_docstring_signature = False
coverage_skip_undoc_in_source = True
coverage_ignore_modules = [
"tornado.platform.asyncio",
"tornado.platform.caresresolver",
"tornado.platform.twisted",
]
# I wish this could go in a per-module file...
coverage_ignore_classes = [
# tornado.concurrent
"TracebackFuture",
# tornado.gen
"Runner",
# tornado.ioloop
"PollIOLoop",
# tornado.web
"ChunkedTransferEncoding",
"GZipContentEncoding",
"OutputTransform",
"TemplateModule",
"url",
# tornado.websocket
"WebSocketProtocol",
"WebSocketProtocol13",
"WebSocketProtocol76",
]
coverage_ignore_functions = [
# various modules
"doctests",
"main",
# tornado.escape
# parse_qs_bytes should probably be documented but it's complicated by
# having different implementations between py2 and py3.
"parse_qs_bytes",
]
html_favicon = 'favicon.ico'
latex_documents = [
('index', 'tornado.tex', 'Tornado Documentation', 'The Tornado Authors', 'manual', False),
]
# HACK: sphinx has limited support for substitutions with the |version|
# variable, but there doesn't appear to be any way to use this in a link
# target.
# http://stackoverflow.com/questions/1227037/substitutions-inside-links-in-rest-sphinx
# The extlink extension can be used to do link substitutions, but it requires a
# portion of the url to be literally contained in the document. Therefore,
# this link must be referenced as :current_tarball:`z`
extlinks = {
'current_tarball': (
'https://pypi.python.org/packages/source/t/tornado/tornado-%s.tar.g%%s' % version,
'tornado-%s.tar.g' % version),
}
intersphinx_mapping = {
'python': ('https://docs.python.org/3.4/', None),
}
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# On RTD we can't import sphinx_rtd_theme, but it will be applied by
# default anyway. This block will use the same theme when building locally
# as on RTD.
if not on_rtd:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
|
oscar9/statistics_viewer
|
refs/heads/master
|
processmanager/processdirectory/stat5.py
|
1
|
# encoding: utf-8
import gvsig
import addons.statistics_viewer.statisticprocess
reload(addons.statistics_viewer.statisticprocess)
from addons.statistics_viewer.statisticprocess.abstractprocess import AbstractStatisticProcess
import os
from org.apache.commons.math3.stat.correlation import PearsonsCorrelation
class StatProcess(AbstractStatisticProcess):
name = u"Pearson's correlation"
description = u"Person's correlation between all the field in a vector layer"
idprocess = "correlation-fields-all-1"
allowZoomProcess = False
def processParameters(self): #o: dynclass
        params = self.createInputParameters("PearsonsCorrelation", "ProcessPropertiesCorrelation", "description goes here")
params.addDynFieldString("Capa").setMandatory(True)
def process(self, params):
params_layer = params.get("Capa")
layer = gvsig.currentView().getLayer(params_layer)
flayer = layer.features()
cox = []
coy = []
sch = layer.getSchema()
listfields = []
columnNames = ["***"]
# get potential numeric fields
for field in sch:
dt = field.getDataTypeName()
if dt=="Integer" or dt=="Long" or dt=="Double":
listfields.append(field.getName())
columnNames.append(field.getName())
# Show first line table
#print "\t\t",
#listfields = ["COORX", "COORY"]
#columnNames = ["***", "COORX", "COORY"]
#for f1 in listfields:
# print f1+"\t",
#print ""
# Iterate table
data = []
for f1 in listfields:
f1v = [f.get(f1) for f in flayer]
d = [f1]
for f2 in listfields:
f2v = [f.get(f2) for f in flayer]
c = PearsonsCorrelation().correlation(f1v,f2v)
d.append(c)
data.append(d)
#print "Data:", data
#print "columenNames: ", columnNames
#print "listfields: ", listfields
from javax.swing import JTable
table = JTable(data, columnNames)
from javax.swing import JScrollPane
table = JScrollPane(table)
self.setOutputPanel(table)
return None #self.createdchart
def main(*args):
print "* stat5.py: Pearsons Correlation"
proc = StatProcess()
dynobject = proc.createParameters()
dynobject.setDynValue("Capa", "parcelas_Valencia")
values = dynobject.getValues()
proc.process(values)
print proc.getOutputConsole()
print proc.getOutputPanel()
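# --- Illustrative cross-check (added sketch, not part of the original process) ---
# The same all-pairs Pearson matrix can be computed with plain numpy when the
# column data is available outside gvSIG; the two sample columns below are
# made-up values for illustration.
#
#   import numpy
#   cols = {"AREA": [1.0, 2.0, 3.0, 4.0], "PERIM": [2.1, 3.9, 6.2, 8.0]}
#   names = list(cols)
#   matrix = numpy.corrcoef([cols[n] for n in names])
#   for name, row in zip(names, matrix):
#       print name, row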
|
crdoconnor/olympia
|
refs/heads/master
|
apps/amo/monitors.py
|
15
|
import os
import socket
import StringIO
import traceback
from django.conf import settings
import commonware.log
from PIL import Image
import amo.search
from amo.helpers import user_media_path
from applications.management.commands import dump_apps
monitor_log = commonware.log.getLogger('z.monitor')
def memcache():
memcache = getattr(settings, 'CACHES', {}).get('default')
memcache_results = []
status = ''
if memcache and 'memcache' in memcache['BACKEND']:
hosts = memcache['LOCATION']
using_twemproxy = False
if not isinstance(hosts, (tuple, list)):
hosts = [hosts]
for host in hosts:
ip, port = host.split(':')
if ip == '127.0.0.1':
using_twemproxy = True
try:
s = socket.socket()
s.connect((ip, int(port)))
except Exception, e:
result = False
status = 'Failed to connect to memcached (%s): %s' % (host, e)
monitor_log.critical(status)
else:
result = True
finally:
s.close()
memcache_results.append((ip, port, result))
if not using_twemproxy and len(memcache_results) < 2:
            status = ('2+ memcache servers are required. '
                      '%s available') % len(memcache_results)
monitor_log.warning(status)
if not memcache_results:
status = 'Memcache is not configured'
monitor_log.info(status)
return status, memcache_results
def libraries():
# Check Libraries and versions
libraries_results = []
status = ''
try:
Image.new('RGB', (16, 16)).save(StringIO.StringIO(), 'JPEG')
libraries_results.append(('PIL+JPEG', True, 'Got it!'))
except Exception, e:
msg = "Failed to create a jpeg image: %s" % e
libraries_results.append(('PIL+JPEG', False, msg))
try:
import M2Crypto # NOQA
libraries_results.append(('M2Crypto', True, 'Got it!'))
except ImportError:
libraries_results.append(('M2Crypto', False, 'Failed to import'))
if settings.SPIDERMONKEY:
if os.access(settings.SPIDERMONKEY, os.R_OK):
libraries_results.append(('Spidermonkey is ready!', True, None))
# TODO: see if it works?
else:
msg = "You said spidermonkey was at (%s)" % settings.SPIDERMONKEY
libraries_results.append(('Spidermonkey', False, msg))
else:
msg = "Please set SPIDERMONKEY in your settings file."
libraries_results.append(('Spidermonkey', False, msg))
missing_libs = [l for l, s, m in libraries_results if not s]
if missing_libs:
status = 'missing libs: %s' % ",".join(missing_libs)
return status, libraries_results
def elastic():
elastic_results = None
status = ''
try:
es = amo.search.get_es()
health = es.cluster.health()
if health['status'] == 'red':
status = 'ES is red'
elastic_results = health
except Exception:
elastic_results = {'exception': traceback.format_exc()}
return status, elastic_results
def path():
# Check file paths / permissions
rw = (settings.TMP_PATH,
settings.MEDIA_ROOT,
user_media_path('addons'),
user_media_path('guarded_addons'),
user_media_path('addon_icons'),
user_media_path('collection_icons'),
user_media_path('previews'),
user_media_path('userpics'),
user_media_path('reviewer_attachments'),
dump_apps.Command.JSON_PATH,)
r = [os.path.join(settings.ROOT, 'locale'),
# The deploy process will want write access to this.
# We do not want Django to have write access though.
settings.PROD_DETAILS_DIR]
filepaths = [(path, os.R_OK | os.W_OK, "We want read + write")
for path in rw]
filepaths += [(path, os.R_OK, "We want read") for path in r]
filepath_results = []
filepath_status = True
for path, perms, notes in filepaths:
path_exists = os.path.exists(path)
path_perms = os.access(path, perms)
filepath_status = filepath_status and path_exists and path_perms
filepath_results.append((path, path_exists, path_perms, notes))
    status = ''
if not filepath_status:
status = 'check main status page for broken perms'
return status, filepath_results
def redis():
# Check Redis
redis_results = [None, 'REDIS_BACKENDS is not set']
status = 'REDIS_BACKENDS is not set'
if getattr(settings, 'REDIS_BACKENDS', False):
import redisutils
status = []
redis_results = {}
for alias, redis in redisutils.connections.iteritems():
try:
redis_results[alias] = redis.info()
except Exception, e:
redis_results[alias] = None
status.append('Failed to chat with redis:%s' % alias)
monitor_log.critical('Failed to chat with redis: (%s)' % e)
status = ','.join(status)
return status, redis_results
|
nzavagli/UnrealPy
|
refs/heads/master
|
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/django-1.8.2/tests/model_validation/tests.py
|
38
|
from django.core import management
from django.core.checks import Error, run_checks
from django.db.models.signals import post_init
from django.test import TestCase
from django.test.utils import override_settings
from django.utils import six
class OnPostInit(object):
def __call__(self, **kwargs):
pass
def on_post_init(**kwargs):
pass
@override_settings(
INSTALLED_APPS=['django.contrib.auth', 'django.contrib.contenttypes'],
SILENCED_SYSTEM_CHECKS=['fields.W342'], # ForeignKey(unique=True)
)
class ModelValidationTest(TestCase):
def test_models_validate(self):
# All our models should validate properly
# Validation Tests:
# * choices= Iterable of Iterables
# See: https://code.djangoproject.com/ticket/20430
# * related_name='+' doesn't clash with another '+'
# See: https://code.djangoproject.com/ticket/21375
management.call_command("check", stdout=six.StringIO())
def test_model_signal(self):
unresolved_references = post_init.unresolved_references.copy()
post_init.connect(on_post_init, sender='missing-app.Model')
post_init.connect(OnPostInit(), sender='missing-app.Model')
errors = run_checks()
expected = [
Error(
"The 'on_post_init' function was connected to the 'post_init' "
"signal with a lazy reference to the 'missing-app.Model' "
"sender, which has not been installed.",
hint=None,
obj='model_validation.tests',
id='signals.E001',
),
Error(
"An instance of the 'OnPostInit' class was connected to "
"the 'post_init' signal with a lazy reference to the "
"'missing-app.Model' sender, which has not been installed.",
hint=None,
obj='model_validation.tests',
id='signals.E001',
)
]
self.assertEqual(errors, expected)
post_init.unresolved_references = unresolved_references
|
Edu-Glez/Bank_sentiment_analysis
|
refs/heads/master
|
env/lib/python3.6/site-packages/pygments/styles/xcode.py
|
31
|
# -*- coding: utf-8 -*-
"""
pygments.styles.xcode
~~~~~~~~~~~~~~~~~~~~~
Style similar to the `Xcode` default theme.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Literal
class XcodeStyle(Style):
"""
Style similar to the Xcode default colouring theme.
"""
default_style = ''
styles = {
Comment: '#177500',
Comment.Preproc: '#633820',
String: '#C41A16',
String.Char: '#2300CE',
Operator: '#000000',
Keyword: '#A90D91',
Name: '#000000',
Name.Attribute: '#836C28',
Name.Class: '#3F6E75',
Name.Function: '#000000',
Name.Builtin: '#A90D91',
# In Obj-C code this token is used to colour Cocoa types
Name.Builtin.Pseudo: '#5B269A',
Name.Variable: '#000000',
Name.Tag: '#000000',
Name.Decorator: '#000000',
        # Workaround for a BUG here: lexer treats multiline method signatures as labels
Name.Label: '#000000',
Literal: '#1C01CE',
Number: '#1C01CE',
Error: '#000000',
}
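# --- Illustrative usage (added sketch, not part of the original style) ---
# A short, hedged example of rendering a snippet with XcodeStyle through the
# standard Pygments highlight API; the sample source string is an assumption.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters import HtmlFormatter
    print(highlight('print("hello")', PythonLexer(), HtmlFormatter(style=XcodeStyle)))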
|
bcornwellmott/frappe
|
refs/heads/develop
|
frappe/utils/help.py
|
2
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals, print_function
import frappe
import hashlib
from frappe.model.db_schema import DbManager
from frappe.installer import get_root_connection
from frappe.database import Database
import os
from markdown2 import markdown
from bs4 import BeautifulSoup
import jinja2.exceptions
def sync():
# make table
print('Syncing help database...')
help_db = HelpDatabase()
help_db.make_database()
help_db.connect()
help_db.make_table()
help_db.sync_pages()
help_db.build_index()
@frappe.whitelist()
def get_help(text):
return HelpDatabase().search(text)
@frappe.whitelist()
def get_help_content(path):
return HelpDatabase().get_content(path)
class HelpDatabase(object):
def __init__(self):
self.global_help_setup = frappe.conf.get('global_help_setup')
if self.global_help_setup:
bench_name = os.path.basename(os.path.abspath(frappe.get_app_path('frappe')).split('/apps/')[0])
self.help_db_name = hashlib.sha224(bench_name).hexdigest()[:15]
def make_database(self):
'''make database for global help setup'''
if not self.global_help_setup:
return
dbman = DbManager(get_root_connection())
dbman.drop_database(self.help_db_name)
# make database
if not self.help_db_name in dbman.get_database_list():
try:
dbman.create_user(self.help_db_name, self.help_db_name)
except Exception as e:
# user already exists
if e.args[0] != 1396: raise
dbman.create_database(self.help_db_name)
dbman.grant_all_privileges(self.help_db_name, self.help_db_name)
dbman.flush_privileges()
def connect(self):
if self.global_help_setup:
self.db = Database(user=self.help_db_name, password=self.help_db_name)
else:
self.db = frappe.db
def make_table(self):
if not 'help' in self.db.get_tables():
self.db.sql('''create table help(
path varchar(255),
content text,
title text,
intro text,
full_path text,
fulltext(title),
fulltext(content),
index (path))
COLLATE=utf8mb4_unicode_ci
ENGINE=MyISAM
CHARACTER SET=utf8mb4''')
def search(self, words):
self.connect()
return self.db.sql('''
select title, intro, path from help where title like %s union
select title, intro, path from help where match(content) against (%s) limit 10''', ('%'+words+'%', words))
def get_content(self, path):
self.connect()
query = '''select title, content from help
where path like "{path}%" order by path desc limit 1'''
result = None
if not path.endswith('index'):
result = self.db.sql(query.format(path=os.path.join(path, 'index')))
if not result:
result = self.db.sql(query.format(path=path))
return {'title':result[0][0], 'content':result[0][1]} if result else {}
def sync_pages(self):
self.db.sql('truncate help')
doc_contents = '<ol>'
apps = os.listdir('../apps') if self.global_help_setup else frappe.get_installed_apps()
for app in apps:
docs_folder = '../apps/{app}/{app}/docs/user'.format(app=app)
self.out_base_path = '../apps/{app}/{app}/docs'.format(app=app)
if os.path.exists(docs_folder):
app_name = getattr(frappe.get_module(app), '__title__', None) or app.title()
doc_contents += '<li><a data-path="/{app}/index">{app_name}</a></li>'.format(
app=app, app_name=app_name)
for basepath, folders, files in os.walk(docs_folder):
files = self.reorder_files(files)
for fname in files:
if fname.rsplit('.', 1)[-1] in ('md', 'html'):
fpath = os.path.join(basepath, fname)
with open(fpath, 'r') as f:
try:
content = frappe.render_template(unicode(f.read(), 'utf-8'),
{'docs_base_url': '/assets/{app}_docs'.format(app=app)})
relpath = self.get_out_path(fpath)
relpath = relpath.replace("user", app)
content = markdown(content)
title = self.make_title(basepath, fname, content)
intro = self.make_intro(content)
content = self.make_content(content, fpath, relpath)
self.db.sql('''insert into help(path, content, title, intro, full_path)
values (%s, %s, %s, %s, %s)''', (relpath, content, title, intro, fpath))
except jinja2.exceptions.TemplateSyntaxError:
print("Invalid Jinja Template for {0}. Skipping".format(fpath))
doc_contents += "</ol>"
self.db.sql('''insert into help(path, content, title, intro, full_path) values (%s, %s, %s, %s, %s)''',
('/documentation/index', doc_contents, 'Documentation', '', ''))
def make_title(self, basepath, filename, html):
if '<h1>' in html:
title = html.split("<h1>", 1)[1].split("</h1>", 1)[0]
elif 'index' in filename:
title = basepath.rsplit('/', 1)[-1].title().replace("-", " ")
else:
title = filename.rsplit('.', 1)[0].title().replace("-", " ")
return title
def make_intro(self, html):
intro = ""
if '<p>' in html:
intro = html.split('<p>', 1)[1].split('</p>', 1)[0]
if 'Duration' in html:
intro = "Help Video: " + intro
return intro
def make_content(self, html, path, relpath):
if '<h1>' in html:
html = html.split('</h1>', 1)[1]
if '{next}' in html:
html = html.replace('{next}', '')
target = path.split('/', 3)[-1]
app_name = path.split('/', 3)[2]
html += '''
<div class="page-container">
<div class="page-content">
<div class="edit-container text-center">
<i class="fa fa-smile text-muted"></i>
<a class="edit text-muted" href="https://github.com/frappe/{app_name}/blob/develop/{target}">
Improve this page
</a>
</div>
</div>
</div>'''.format(app_name=app_name, target=target)
soup = BeautifulSoup(html, 'html.parser')
for link in soup.find_all('a'):
if link.has_attr('href'):
url = link['href']
if '/user' in url:
data_path = url[url.index('/user'):]
if '.' in data_path:
data_path = data_path[: data_path.rindex('.')]
if data_path:
link['data-path'] = data_path.replace("user", app_name)
parent = self.get_parent(relpath)
if parent:
parent_tag = soup.new_tag('a')
parent_tag.string = parent['title']
parent_tag['class'] = 'parent-link'
parent_tag['data-path'] = parent['path']
soup.find().insert_before(parent_tag)
return soup.prettify()
def build_index(self):
for data in self.db.sql('select path, full_path, content from help'):
self.make_index(data[0], data[1], data[2])
def make_index(self, original_path, full_path, content):
'''Make index from index.txt'''
if '{index}' in content:
path = os.path.dirname(full_path)
files = []
# get files from index.txt
index_path = os.path.join(path, "index.txt")
if os.path.exists(index_path):
with open(index_path, 'r') as f:
files = f.read().splitlines()
# files not in index.txt
for f in os.listdir(path):
if not os.path.isdir(os.path.join(path, f)):
name, extn = f.rsplit('.', 1)
if name not in files \
and name != 'index' and extn in ('md', 'html'):
files.append(name)
links_html = "<ol class='index-links'>"
for line in files:
fpath = os.path.join(os.path.dirname(original_path), line)
title = self.db.sql('select title from help where path like %s',
os.path.join(fpath, 'index') + '%')
if not title:
title = self.db.sql('select title from help where path like %s',
fpath + '%')
if title:
title = title[0][0]
links_html += "<li><a data-path='{fpath}'> {title} </a></li>".format(
fpath=fpath, title=title)
# else:
# bad entries in .txt files
# print fpath
links_html += "</ol>"
html = content.replace('{index}', links_html)
self.db.sql('update help set content=%s where path=%s', (html, original_path))
def get_out_path(self, path):
return '/' + os.path.relpath(path, self.out_base_path)
def get_parent(self, child_path):
if 'index' in child_path:
child_path = child_path[: child_path.rindex('index')]
if child_path[-1] == '/':
child_path = child_path[:-1]
child_path = child_path[: child_path.rindex('/')]
out = None
if child_path:
parent_path = child_path + "/index"
out = self.get_content(parent_path)
#if parent is documentation root
else:
parent_path = "/documentation/index"
out = {}
out['title'] = "Documentation"
if not out:
return None
out['path'] = parent_path
return out
def reorder_files(self, files):
pos = 0
if 'index.md' in files:
pos = files.index('index.md')
elif 'index.html' in files:
pos = files.index('index.html')
if pos:
files[0], files[pos] = files[pos], files[0]
return files
|
cbeighley/peregrine
|
refs/heads/master
|
tests/test_iqgen_tcxo.py
|
2
|
# Copyright (C) 2016 Swift Navigation Inc.
#
# Contact: Valeri Atamaniouk <valeri@swiftnav.com>
# This source is subject to the license found in the file 'LICENSE' which must
# be distributed together with this source. All other rights reserved.
#
# THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
'''
Unit tests for IQgen TCXO controls
'''
from peregrine.iqgen.bits.tcxo_base import TCXOBase
from peregrine.iqgen.bits.tcxo_poly import TCXOPoly
from peregrine.iqgen.bits.tcxo_sine import TCXOSine
from peregrine.iqgen.if_iface import NormalRateConfig
import numpy
from scipy.constants import pi
EPSILON = 1e-10
def test_TCXOBase_abstract():
'''
Unit test for abstract methods in TCXOBase
'''
tcxo = TCXOBase()
try:
tcxo.computeTcxoTime(10, 20, NormalRateConfig)
assert False
except NotImplementedError:
pass
def test_TCXOPoly_init0():
'''
Unit test for empty TCXOPoly object
'''
tcxo = TCXOPoly(())
assert isinstance(tcxo, TCXOBase)
assert len(tcxo.coeffs) == 0
def test_TCXOPoly_init1():
'''
Unit test for TCXOPoly object
'''
tcxo = TCXOPoly((0., 0.))
assert isinstance(tcxo, TCXOBase)
assert len(tcxo.coeffs) == 2
assert tcxo.coeffs[0] == 0.
assert tcxo.coeffs[1] == 0.
def test_TCXOPoly_init2():
'''
Unit test for TCXOPoly object
'''
tcxo = TCXOPoly((1., 0.))
assert isinstance(tcxo, TCXOBase)
assert len(tcxo.coeffs) == 2
assert tcxo.coeffs[0] == 1.
assert tcxo.coeffs[1] == 0.
def test_TCXOPoly_compute0():
'''
Unit test for empty TCXOPoly object
'''
tcxo = TCXOPoly(())
time = tcxo.computeTcxoTime(0, 10, NormalRateConfig)
assert time is None
def test_TCXOPoly_compute1():
'''
Unit test for TCXOPoly with linear time shift (10e-6)
'''
tcxo = TCXOPoly((1.,))
time = tcxo.computeTcxoTime(0, 10, NormalRateConfig)
test_vector = numpy.linspace(0.,
10. * 1e-6 / NormalRateConfig.SAMPLE_RATE_HZ,
                               10,
endpoint=False)
assert (time == test_vector).all()
def test_TCXOPoly_compute2():
'''
Unit test for TCXOPoly with linear time shift (10e-6)
'''
tcxo = TCXOPoly((1., 1.))
time = tcxo.computeTcxoTime(0, 10, NormalRateConfig)
test_vector = numpy.linspace(0.,
10. * 1e-6 / NormalRateConfig.SAMPLE_RATE_HZ,
                               10,
endpoint=False)
test_vector = test_vector * test_vector / 2. + test_vector
assert (numpy.abs(time - test_vector) < EPSILON).all()
def test_TCXOSine_init():
'''
Unit test for TCXOSine object
'''
tcxo = TCXOSine(2e6, 1e6, 0.004)
assert isinstance(tcxo, TCXOBase)
assert tcxo.initial_ppm == 2e6
assert tcxo.amplitude_ppm == 1e6
assert tcxo.period_s == 0.004
def test_TCXOSine_compute0():
'''
Unit test for TCXOSine object: 0.+sin(2*pi*t/0.004)
The integral output is: (1. - cos(2*pi*t/0.004))*0.004/(2*pi);
Minimum value: 0
Maximum value: 0.002/pi
'''
tcxo = TCXOSine(0., 1e6, 0.004)
time = tcxo.computeTcxoTime(
0, NormalRateConfig.SAMPLE_RATE_HZ * 0.004 + 1, NormalRateConfig)
assert time[0] == 0.
assert time[-1] == 0.
_max = numpy.max(time)
_min = numpy.min(time)
assert numpy.abs(_min) < EPSILON
assert numpy.abs(_max - 0.004 / pi) < EPSILON
assert time[NormalRateConfig.SAMPLE_RATE_HZ * 0.002] == _max
def test_TCXOSine_compute1():
'''
Unit test for TCXOSine object: 1.+sin(2*pi*t/0.004)
The integral output is: 1.*t + (1. - cos(2*pi*t/0.004))*0.004/(2*pi);
After removing the time component:
Minimum value: 0
Maximum value: 0.002/pi
'''
tcxo = TCXOSine(1e6, 1e6, 0.004)
time = tcxo.computeTcxoTime(
0, NormalRateConfig.SAMPLE_RATE_HZ * 0.004 + 1, NormalRateConfig)
# Remove linear time component
timeX_s = (NormalRateConfig.SAMPLE_RATE_HZ * 0.004 + 1) / \
NormalRateConfig.SAMPLE_RATE_HZ
time -= numpy.linspace(0, timeX_s,
                         int(NormalRateConfig.SAMPLE_RATE_HZ * 0.004 + 1),
endpoint=False)
assert time[0] == 0.
assert time[-1] == 0.
_max = numpy.max(time)
_min = numpy.min(time)
assert numpy.abs(_min) < EPSILON
assert numpy.abs(_max - 0.004 / pi) < EPSILON
assert time[NormalRateConfig.SAMPLE_RATE_HZ * 0.002] == _max
def test_TCXOPoly_str0():
'''
String representation test for polynomial amplitude object
'''
value = str(TCXOPoly(()))
assert value.find('()') >= 0
assert value.find('Poly') >= 0
value = str(TCXOPoly((1.,)))
assert value.find('(1.0,)') >= 0
assert value.find('Poly') >= 0
def test_TXOSine_str0():
'''
String representation test for sine amplitude object
'''
value = str(TCXOSine(4., 3., 5.))
assert value.find('4.') >= 0
assert value.find('3.') >= 0
assert value.find('5.') >= 0
assert value.find('Sine') >= 0
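def test_TCXOSine_integral_closed_form():
  '''
  Added cross-check sketch (not part of the original suite): numerically
  verifies the closed-form integral quoted in the TCXOSine docstrings above,
  i.e. that the running integral of sin(2*pi*t/T) equals
  (1. - cos(2*pi*t/T)) * T / (2*pi). Uses only numpy and scipy.constants.pi,
  both already imported in this module.
  '''
  T = 0.004
  t, dt = numpy.linspace(0., T, 100001, retstep=True)
  riemann = numpy.cumsum(numpy.sin(2. * pi * t / T)) * dt
  closed = (1. - numpy.cos(2. * pi * t / T)) * T / (2. * pi)
  # the left Riemann sum error is O(dt) here, far below the tolerance
  assert numpy.max(numpy.abs(riemann - closed)) < 1e-6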
|
D-L/SimpleBookMarks
|
refs/heads/master
|
src/tornado/autoreload.py
|
45
|
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""xAutomatically restart the server when a source file is modified.
Most applications should not access this module directly. Instead, pass the
keyword argument ``debug=True`` to the `tornado.web.Application` constructor.
This will enable autoreload mode as well as checking for changes to templates
and static resources. Note that restarting is a destructive operation
and any requests in progress will be aborted when the process restarts.
This module can also be used as a command-line wrapper around scripts
such as unit test runners. See the `main` method for details.
The command-line wrapper and Application debug modes can be used together.
This combination is encouraged as the wrapper catches syntax errors and
other import-time failures, while debug mode catches changes once
the server has started.
This module depends on `.IOLoop`, so it will not work in WSGI applications
and Google App Engine. It also will not work correctly when `.HTTPServer`'s
multi-process mode is used.
Reloading loses any Python interpreter command-line arguments (e.g. ``-u``)
because it re-executes Python using ``sys.executable`` and ``sys.argv``.
Additionally, modifying these variables will cause reloading to behave
incorrectly.
"""
from __future__ import absolute_import, division, print_function, with_statement
import os
import sys
# sys.path handling
# -----------------
#
# If a module is run with "python -m", the current directory (i.e. "")
# is automatically prepended to sys.path, but not if it is run as
# "path/to/file.py". The processing for "-m" rewrites the former to
# the latter, so subsequent executions won't have the same path as the
# original.
#
# Conversely, when run as path/to/file.py, the directory containing
# file.py gets added to the path, which can cause confusion as imports
# may become relative in spite of the future import.
#
# We address the former problem by setting the $PYTHONPATH environment
# variable before re-execution so the new process will see the correct
# path. We attempt to address the latter problem when tornado.autoreload
# is run as __main__, although we can't fix the general case because
# we cannot reliably reconstruct the original command line
# (http://bugs.python.org/issue14208).
if __name__ == "__main__":
# This sys.path manipulation must come before our imports (as much
# as possible - if we introduced a tornado.sys or tornado.os
# module we'd be in trouble), or else our imports would become
# relative again despite the future import.
#
# There is a separate __main__ block at the end of the file to call main().
if sys.path[0] == os.path.dirname(__file__):
del sys.path[0]
import functools
import logging
import os
import pkgutil
import sys
import traceback
import types
import subprocess
import weakref
from tornado import ioloop
from tornado.log import gen_log
from tornado import process
from tornado.util import exec_in
try:
import signal
except ImportError:
signal = None
_watched_files = set()
_reload_hooks = []
_reload_attempted = False
_io_loops = weakref.WeakKeyDictionary()
def start(io_loop=None, check_time=500):
"""Begins watching source files for changes using the given `.IOLoop`. """
io_loop = io_loop or ioloop.IOLoop.current()
if io_loop in _io_loops:
return
_io_loops[io_loop] = True
if len(_io_loops) > 1:
gen_log.warning("tornado.autoreload started more than once in the same process")
add_reload_hook(functools.partial(io_loop.close, all_fds=True))
modify_times = {}
callback = functools.partial(_reload_on_update, modify_times)
scheduler = ioloop.PeriodicCallback(callback, check_time, io_loop=io_loop)
scheduler.start()
def wait():
"""Wait for a watched file to change, then restart the process.
Intended to be used at the end of scripts like unit test runners,
to run the tests again after any source file changes (but see also
the command-line interface in `main`)
"""
io_loop = ioloop.IOLoop()
start(io_loop)
io_loop.start()
def watch(filename):
"""Add a file to the watch list.
All imported modules are watched by default.
"""
_watched_files.add(filename)
def add_reload_hook(fn):
"""Add a function to be called before reloading the process.
Note that for open file and socket handles it is generally
preferable to set the ``FD_CLOEXEC`` flag (using `fcntl` or
``tornado.platform.auto.set_close_exec``) instead
of using a reload hook to close them.
"""
_reload_hooks.append(fn)
def _reload_on_update(modify_times):
if _reload_attempted:
# We already tried to reload and it didn't work, so don't try again.
return
if process.task_id() is not None:
# We're in a child process created by fork_processes. If child
# processes restarted themselves, they'd all restart and then
# all call fork_processes again.
return
for module in sys.modules.values():
# Some modules play games with sys.modules (e.g. email/__init__.py
# in the standard library), and occasionally this can cause strange
# failures in getattr. Just ignore anything that's not an ordinary
# module.
if not isinstance(module, types.ModuleType):
continue
path = getattr(module, "__file__", None)
if not path:
continue
if path.endswith(".pyc") or path.endswith(".pyo"):
path = path[:-1]
_check_file(modify_times, path)
for path in _watched_files:
_check_file(modify_times, path)
def _check_file(modify_times, path):
try:
modified = os.stat(path).st_mtime
except Exception:
return
if path not in modify_times:
modify_times[path] = modified
return
if modify_times[path] != modified:
gen_log.info("%s modified; restarting server", path)
_reload()
def _reload():
global _reload_attempted
_reload_attempted = True
for fn in _reload_hooks:
fn()
if hasattr(signal, "setitimer"):
# Clear the alarm signal set by
# ioloop.set_blocking_log_threshold so it doesn't fire
# after the exec.
signal.setitimer(signal.ITIMER_REAL, 0, 0)
# sys.path fixes: see comments at top of file. If sys.path[0] is an empty
# string, we were (probably) invoked with -m and the effective path
# is about to change on re-exec. Add the current directory to $PYTHONPATH
# to ensure that the new process sees the same path we did.
path_prefix = '.' + os.pathsep
if (sys.path[0] == '' and
not os.environ.get("PYTHONPATH", "").startswith(path_prefix)):
os.environ["PYTHONPATH"] = (path_prefix +
os.environ.get("PYTHONPATH", ""))
if sys.platform == 'win32':
# os.execv is broken on Windows and can't properly parse command line
# arguments and executable name if they contain whitespaces. subprocess
# fixes that behavior.
subprocess.Popen([sys.executable] + sys.argv)
sys.exit(0)
else:
try:
os.execv(sys.executable, [sys.executable] + sys.argv)
except OSError:
# Mac OS X versions prior to 10.6 do not support execv in
# a process that contains multiple threads. Instead of
# re-executing in the current process, start a new one
# and cause the current process to exit. This isn't
# ideal since the new process is detached from the parent
# terminal and thus cannot easily be killed with ctrl-C,
# but it's better than not being able to autoreload at
# all.
# Unfortunately the errno returned in this case does not
# appear to be consistent, so we can't easily check for
# this error specifically.
os.spawnv(os.P_NOWAIT, sys.executable,
[sys.executable] + sys.argv)
sys.exit(0)
_USAGE = """\
Usage:
python -m tornado.autoreload -m module.to.run [args...]
python -m tornado.autoreload path/to/script.py [args...]
"""
def main():
"""Command-line wrapper to re-run a script whenever its source changes.
Scripts may be specified by filename or module name::
python -m tornado.autoreload -m tornado.test.runtests
python -m tornado.autoreload tornado/test/runtests.py
Running a script with this wrapper is similar to calling
`tornado.autoreload.wait` at the end of the script, but this wrapper
can catch import-time problems like syntax errors that would otherwise
prevent the script from reaching its call to `wait`.
"""
original_argv = sys.argv
sys.argv = sys.argv[:]
if len(sys.argv) >= 3 and sys.argv[1] == "-m":
mode = "module"
module = sys.argv[2]
del sys.argv[1:3]
elif len(sys.argv) >= 2:
mode = "script"
script = sys.argv[1]
sys.argv = sys.argv[1:]
else:
print(_USAGE, file=sys.stderr)
sys.exit(1)
try:
if mode == "module":
import runpy
runpy.run_module(module, run_name="__main__", alter_sys=True)
elif mode == "script":
with open(script) as f:
global __file__
__file__ = script
# Use globals as our "locals" dictionary so that
# something that tries to import __main__ (e.g. the unittest
# module) will see the right things.
exec_in(f.read(), globals(), globals())
except SystemExit as e:
logging.basicConfig()
gen_log.info("Script exited with status %s", e.code)
except Exception as e:
logging.basicConfig()
gen_log.warning("Script exited with uncaught exception", exc_info=True)
# If an exception occurred at import time, the file with the error
# never made it into sys.modules and so we won't know to watch it.
# Just to make sure we've covered everything, walk the stack trace
# from the exception and watch every file.
for (filename, lineno, name, line) in traceback.extract_tb(sys.exc_info()[2]):
watch(filename)
if isinstance(e, SyntaxError):
# SyntaxErrors are special: their innermost stack frame is fake
# so extract_tb won't see it and we have to get the filename
# from the exception object.
watch(e.filename)
else:
logging.basicConfig()
gen_log.info("Script exited normally")
# restore sys.argv so subsequent executions will include autoreload
sys.argv = original_argv
if mode == 'module':
# runpy did a fake import of the module as __main__, but now it's
# no longer in sys.modules. Figure out where it is and watch it.
loader = pkgutil.get_loader(module)
if loader is not None:
watch(loader.get_filename())
wait()
if __name__ == "__main__":
# See also the other __main__ block at the top of the file, which modifies
# sys.path before our imports
main()
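# --- Illustrative usage (added sketch, not part of the original module) ---
# Per the module docstring, most applications enable autoreload implicitly
# through the Application debug flag rather than calling this module
# directly; a minimal, assumed app looks like:
#
#   import tornado.ioloop
#   import tornado.web
#
#   class MainHandler(tornado.web.RequestHandler):
#       def get(self):
#           self.write("hello")
#
#   app = tornado.web.Application([(r"/", MainHandler)], debug=True)
#   app.listen(8888)
#   tornado.ioloop.IOLoop.current().start()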
|
wilhelmryan/Arduino
|
refs/heads/master
|
arduino-core/src/processing/app/i18n/python/requests/packages/charade/hebrewprober.py
|
2928
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Shy Shalom
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe, eDetecting
from .compat import wrap_ord
# This prober doesn't actually recognize a language or a charset.
# It is a helper prober for the use of the Hebrew model probers
### General ideas of the Hebrew charset recognition ###
#
# Four main charsets exist in Hebrew:
# "ISO-8859-8" - Visual Hebrew
# "windows-1255" - Logical Hebrew
# "ISO-8859-8-I" - Logical Hebrew
# "x-mac-hebrew" - ?? Logical Hebrew ??
#
# Both "ISO" charsets use a completely identical set of code points, whereas
# "windows-1255" and "x-mac-hebrew" are two different proper supersets of
# these code points. windows-1255 defines additional characters in the range
# 0x80-0x9F as some misc punctuation marks as well as some Hebrew-specific
# diacritics and additional 'Yiddish' ligature letters in the range 0xc0-0xd6.
# x-mac-hebrew defines similar additional code points but with a different
# mapping.
#
# As far as an average Hebrew text with no diacritics is concerned, all four
# charsets are identical with respect to code points. Meaning that for the
# main Hebrew alphabet, all four map the same values to all 27 Hebrew letters
# (including final letters).
#
# The dominant difference between these charsets is their directionality.
# "Visual" directionality means that the text is ordered as if the renderer is
# not aware of a BIDI rendering algorithm. The renderer sees the text and
# draws it from left to right. The text itself when ordered naturally is read
# backwards. A buffer of Visual Hebrew generally looks like so:
# "[last word of first line spelled backwards] [whole line ordered backwards
# and spelled backwards] [first word of first line spelled backwards]
# [end of line] [last word of second line] ... etc' "
# adding punctuation marks, numbers and English text to visual text is
# naturally also "visual" and from left to right.
#
# "Logical" directionality means the text is ordered "naturally" according to
# the order it is read. It is the responsibility of the renderer to display
# the text from right to left. A BIDI algorithm is used to place general
# punctuation marks, numbers and English text in the text.
#
# Texts in x-mac-hebrew are almost impossible to find on the Internet. From
# what little evidence I could find, it seems that its general directionality
# is Logical.
#
# To sum up all of the above, the Hebrew probing mechanism knows about two
# charsets:
# Visual Hebrew - "ISO-8859-8" - backwards text - Words and sentences are
# backwards while line order is natural. For charset recognition purposes
# the line order is unimportant (In fact, for this implementation, even
# word order is unimportant).
# Logical Hebrew - "windows-1255" - normal, naturally ordered text.
#
# "ISO-8859-8-I" is a subset of windows-1255 and doesn't need to be
# specifically identified.
# "x-mac-hebrew" is also identified as windows-1255. A text in x-mac-hebrew
# that contains special punctuation marks or diacritics is displayed with
# some unconverted characters showing as question marks. This problem might
# be corrected using another model prober for x-mac-hebrew. Due to the fact
# that x-mac-hebrew texts are so rare, writing another model prober isn't
# worth the effort and performance hit.
#
#### The Prober ####
#
# The prober is divided between two SBCharSetProbers and a HebrewProber,
# all of which are managed, created, fed data, inquired and deleted by the
# SBCSGroupProber. The two SBCharSetProbers identify that the text is in
# fact some kind of Hebrew, Logical or Visual. The final decision between the
# two is made by the HebrewProber, which combines the final-letter scores
# with the scores of the two SBCharSetProbers to produce the final answer.
#
# The SBCSGroupProber is responsible for stripping the original text of HTML
# tags, English characters, numbers, low-ASCII punctuation characters, spaces
# and new lines. It reduces any sequence of such characters to a single space.
# The buffer fed to each prober in the SBCS group prober is pure text in
# high-ASCII.
# The two SBCharSetProbers (model probers) share the same language model:
# Win1255Model.
# The first SBCharSetProber uses the model normally as any other
# SBCharSetProber does, to recognize windows-1255, upon which this model was
# built. The second SBCharSetProber is told to make the pair-of-letter
# lookup in the language model backwards. This in practice exactly simulates
# a visual Hebrew model using the windows-1255 logical Hebrew model.
#
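# Minimal sketch of that trick (toy scorer; the real lookup lives in
# SBCharSetProber): swapping each letter pair before the model lookup scores
# a buffer exactly as if the reversed (visual) buffer were scored normally,
# i.e. _score_pairs(text, f, True) == _score_pairs(text[::-1], f).
def _score_pairs(text, pair_score, reverse_lookup=False):
    total = 0
    for a, b in zip(text, text[1:]):
        total += pair_score((b, a) if reverse_lookup else (a, b))
    return total
#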
# The HebrewProber does not use any language model. All it does is look for
# final-letter evidence suggesting the text is either logical Hebrew or
# visual Hebrew. Disjointed from the model probers, the results of the
# HebrewProber alone are meaningless. HebrewProber always returns 0.00 as
# confidence since it never identifies a charset by itself. Instead, the
# pointer to the HebrewProber is passed to the model probers as a helper
# "Name Prober".
# When the Group prober receives a positive identification from any prober,
# it asks for the name of the charset identified. If the prober queried is a
# Hebrew model prober, the model prober forwards the call to the
# HebrewProber to make the final decision. In the HebrewProber, the
# decision is made according to the final-letter scores it maintains and
# both model probers' scores. The answer is returned as the name of the
# charset identified, either "windows-1255" or "ISO-8859-8". (A wiring
# sketch of this setup follows the HebrewProber class below.)
# windows-1255 / ISO-8859-8 code points of interest
FINAL_KAF = 0xea
NORMAL_KAF = 0xeb
FINAL_MEM = 0xed
NORMAL_MEM = 0xee
FINAL_NUN = 0xef
NORMAL_NUN = 0xf0
FINAL_PE = 0xf3
NORMAL_PE = 0xf4
FINAL_TSADI = 0xf5
NORMAL_TSADI = 0xf6
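# For reference (assuming the standard cp1255 layout): each pair above is one
# letter in its final and normal form, e.g. bytes([FINAL_KAF]) decodes to
# u'\u05da' (HEBREW LETTER FINAL KAF) and bytes([NORMAL_KAF]) to u'\u05db'
# (HEBREW LETTER KAF) under 'windows-1255'.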
# Minimum Visual vs Logical final letter score difference.
# If the difference is below this, don't rely solely on the final letter score
# distance.
MIN_FINAL_CHAR_DISTANCE = 5
# Minimum Visual vs Logical model score difference.
# If the difference is below this, don't rely at all on the model score
# distance.
MIN_MODEL_DISTANCE = 0.01
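# Worked example (illustrative): with a logical score of 7 and a visual score
# of 4, finalsub == 3 < MIN_FINAL_CHAR_DISTANCE, so get_charset_name() falls
# back to the model-score distance; only when that too is within
# MIN_MODEL_DISTANCE does the sign of finalsub break the tie.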
VISUAL_HEBREW_NAME = "ISO-8859-8"
LOGICAL_HEBREW_NAME = "windows-1255"
class HebrewProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mLogicalProber = None
self._mVisualProber = None
self.reset()
def reset(self):
self._mFinalCharLogicalScore = 0
self._mFinalCharVisualScore = 0
        # The last two characters seen in the previous buffer, mPrev and
        # mBeforePrev, are initialized to space in order to simulate a word
        # delimiter at the beginning of the data.
self._mPrev = ' '
self._mBeforePrev = ' '
# These probers are owned by the group prober.
def set_model_probers(self, logicalProber, visualProber):
self._mLogicalProber = logicalProber
self._mVisualProber = visualProber
def is_final(self, c):
return wrap_ord(c) in [FINAL_KAF, FINAL_MEM, FINAL_NUN, FINAL_PE,
FINAL_TSADI]
def is_non_final(self, c):
        # The normal Tsadi is not a good Non-Final letter because of words
        # like 'lechotet' (to chat) that contain an apostrophe after the
        # tsadi. This apostrophe is converted to a space in
        # FilterWithoutEnglishLetters, causing the Non-Final tsadi to appear
        # at the end of a word even though this is not the case in the
        # original text.
        # The letters Pe and Kaf occasionally show a related problem of not
        # being good Non-Final letters: words like 'Pop', 'Winamp' and
        # 'Mubarak', for example, legitimately end with a Non-Final Pe or
        # Kaf. However, the benefit of counting these letters as Non-Final
        # outweighs the damage, since such words are quite rare.
return wrap_ord(c) in [NORMAL_KAF, NORMAL_MEM, NORMAL_NUN, NORMAL_PE]
def feed(self, aBuf):
# Final letter analysis for logical-visual decision.
# Look for evidence that the received buffer is either logical Hebrew
# or visual Hebrew.
# The following cases are checked:
# 1) A word longer than 1 letter, ending with a final letter. This is
# an indication that the text is laid out "naturally" since the
# final letter really appears at the end. +1 for logical score.
        # 2) A word longer than 1 letter, ending with a Non-Final letter. In
        #    normal Hebrew, words ending with Kaf, Mem, Nun, Pe or Tsadi
        #    should not end with the Non-Final form of that letter. Exceptions
        #    to this rule are mentioned above in is_non_final(). This is an
        #    indication that the text is laid out backwards. +1 for visual
        #    score.
# 3) A word longer than 1 letter, starting with a final letter. Final
# letters should not appear at the beginning of a word. This is an
# indication that the text is laid out backwards. +1 for visual
# score.
#
        # The visual score and logical score are accumulated throughout the
        # text and are finally checked against each other in
        # get_charset_name(). No checking for final letters in the middle of
        # words is done since that case is not an indication for either
        # Logical or Visual text.
        #
        # We automatically filter out all 7-bit characters (replace them with
        # spaces) so the word boundary detection works properly.
if self.get_state() == eNotMe:
# Both model probers say it's not them. No reason to continue.
return eNotMe
aBuf = self.filter_high_bit_only(aBuf)
for cur in aBuf:
if cur == ' ':
# We stand on a space - a word just ended
if self._mBeforePrev != ' ':
# next-to-last char was not a space so self._mPrev is not a
# 1 letter word
if self.is_final(self._mPrev):
# case (1) [-2:not space][-1:final letter][cur:space]
self._mFinalCharLogicalScore += 1
elif self.is_non_final(self._mPrev):
# case (2) [-2:not space][-1:Non-Final letter][
# cur:space]
self._mFinalCharVisualScore += 1
else:
# Not standing on a space
if ((self._mBeforePrev == ' ') and
(self.is_final(self._mPrev)) and (cur != ' ')):
# case (3) [-2:space][-1:final letter][cur:not space]
self._mFinalCharVisualScore += 1
self._mBeforePrev = self._mPrev
self._mPrev = cur
# Forever detecting, till the end or until both model probers return
# eNotMe (handled above)
return eDetecting
def get_charset_name(self):
# Make the decision: is it Logical or Visual?
# If the final letter score distance is dominant enough, rely on it.
finalsub = self._mFinalCharLogicalScore - self._mFinalCharVisualScore
if finalsub >= MIN_FINAL_CHAR_DISTANCE:
return LOGICAL_HEBREW_NAME
if finalsub <= -MIN_FINAL_CHAR_DISTANCE:
return VISUAL_HEBREW_NAME
# It's not dominant enough, try to rely on the model scores instead.
modelsub = (self._mLogicalProber.get_confidence()
- self._mVisualProber.get_confidence())
if modelsub > MIN_MODEL_DISTANCE:
return LOGICAL_HEBREW_NAME
if modelsub < -MIN_MODEL_DISTANCE:
return VISUAL_HEBREW_NAME
# Still no good, back to final letter distance, maybe it'll save the
# day.
if finalsub < 0.0:
return VISUAL_HEBREW_NAME
# (finalsub > 0 - Logical) or (don't know what to do) default to
# Logical.
return LOGICAL_HEBREW_NAME
def get_state(self):
# Remain active as long as any of the model probers are active.
if (self._mLogicalProber.get_state() == eNotMe) and \
(self._mVisualProber.get_state() == eNotMe):
return eNotMe
return eDetecting
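def _build_hebrew_probers():
    """Minimal wiring sketch (hypothetical helper; in the real library
    chardet's SBCSGroupProber performs this setup). The HebrewProber never
    decides alone: it arbitrates between two SingleByteCharSetProbers that
    share the windows-1255 model, the second reading letter pairs backwards.
    """
    from .sbcharsetprober import SingleByteCharSetProber
    from .langhebrewmodel import Win1255HebrewModel
    hebrew_prober = HebrewProber()
    logical_prober = SingleByteCharSetProber(Win1255HebrewModel,
                                             False, hebrew_prober)
    visual_prober = SingleByteCharSetProber(Win1255HebrewModel,
                                            True, hebrew_prober)
    hebrew_prober.set_model_probers(logical_prober, visual_prober)
    return logical_prober, visual_prober, hebrew_prober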
|
mj10777/QGIS
|
refs/heads/master
|
tests/src/python/test_qgsdatetimestatisticalsummary.py
|
45
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsDateTimeStatisticalSummary.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '07/05/2016'
__copyright__ = 'Copyright 2016, The QGIS Project'
import qgis # NOQA
from qgis.core import (QgsDateTimeStatisticalSummary,
QgsInterval,
NULL
)
from qgis.PyQt.QtCore import QDateTime, QDate, QTime
from qgis.testing import unittest
class PyQgsDateTimeStatisticalSummary(unittest.TestCase):
def testStats(self):
# we test twice, once with values added as a list and once using values
# added one-at-a-time
dates = [QDateTime(QDate(2015, 3, 4), QTime(11, 10, 54)),
QDateTime(QDate(2011, 1, 5), QTime(15, 3, 1)),
QDateTime(QDate(2015, 3, 4), QTime(11, 10, 54)),
QDateTime(QDate(2015, 3, 4), QTime(11, 10, 54)),
QDateTime(QDate(2019, 12, 28), QTime(23, 10, 1)),
QDateTime(),
QDateTime(QDate(1998, 1, 2), QTime(1, 10, 54)),
QDateTime(),
QDateTime(QDate(2011, 1, 5), QTime(11, 10, 54))]
s = QgsDateTimeStatisticalSummary()
self.assertEqual(s.statistics(), QgsDateTimeStatisticalSummary.All)
s.calculate(dates)
s2 = QgsDateTimeStatisticalSummary()
for d in dates:
s2.addValue(d)
s2.finalize()
self.assertEqual(s.count(), 9)
self.assertEqual(s2.count(), 9)
self.assertEqual(s.countDistinct(), 6)
self.assertEqual(s2.countDistinct(), 6)
self.assertEqual(set(s.distinctValues()),
set([QDateTime(QDate(2015, 3, 4), QTime(11, 10, 54)),
QDateTime(QDate(2011, 1, 5), QTime(15, 3, 1)),
QDateTime(QDate(2019, 12, 28), QTime(23, 10, 1)),
QDateTime(),
QDateTime(QDate(1998, 1, 2), QTime(1, 10, 54)),
QDateTime(QDate(2011, 1, 5), QTime(11, 10, 54))]))
self.assertEqual(s2.distinctValues(), s.distinctValues())
self.assertEqual(s.countMissing(), 2)
self.assertEqual(s2.countMissing(), 2)
self.assertEqual(s.min(), QDateTime(QDate(1998, 1, 2), QTime(1, 10, 54)))
self.assertEqual(s2.min(), QDateTime(QDate(1998, 1, 2), QTime(1, 10, 54)))
self.assertEqual(s.max(), QDateTime(QDate(2019, 12, 28), QTime(23, 10, 1)))
self.assertEqual(s2.max(), QDateTime(QDate(2019, 12, 28), QTime(23, 10, 1)))
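        # range is min..max: 1998-01-02 01:10:54 -> 2019-12-28 23:10:01,
        # i.e. 8030 days 21:59:07 == 693871147 seconds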
self.assertEqual(s.range(), QgsInterval(693871147))
self.assertEqual(s2.range(), QgsInterval(693871147))
def testIndividualStats(self):
# tests calculation of statistics one at a time, to make sure statistic calculations are not
# dependent on each other
tests = [{'stat': QgsDateTimeStatisticalSummary.Count, 'expected': 9},
{'stat': QgsDateTimeStatisticalSummary.CountDistinct, 'expected': 6},
{'stat': QgsDateTimeStatisticalSummary.CountMissing, 'expected': 2},
{'stat': QgsDateTimeStatisticalSummary.Min, 'expected': QDateTime(QDate(1998, 1, 2), QTime(1, 10, 54))},
{'stat': QgsDateTimeStatisticalSummary.Max, 'expected': QDateTime(QDate(2019, 12, 28), QTime(23, 10, 1))},
{'stat': QgsDateTimeStatisticalSummary.Range, 'expected': QgsInterval(693871147)},
]
# we test twice, once with values added as a list and once using values
# added one-at-a-time
s = QgsDateTimeStatisticalSummary()
s3 = QgsDateTimeStatisticalSummary()
for t in tests:
# test constructor
s2 = QgsDateTimeStatisticalSummary(t['stat'])
self.assertEqual(s2.statistics(), t['stat'])
s.setStatistics(t['stat'])
self.assertEqual(s.statistics(), t['stat'])
s3.setStatistics(t['stat'])
dates = [QDateTime(QDate(2015, 3, 4), QTime(11, 10, 54)),
QDateTime(QDate(2011, 1, 5), QTime(15, 3, 1)),
QDateTime(QDate(2015, 3, 4), QTime(11, 10, 54)),
QDateTime(QDate(2015, 3, 4), QTime(11, 10, 54)),
QDateTime(QDate(2019, 12, 28), QTime(23, 10, 1)),
QDateTime(),
QDateTime(QDate(1998, 1, 2), QTime(1, 10, 54)),
QDateTime(),
QDateTime(QDate(2011, 1, 5), QTime(11, 10, 54))]
s.calculate(dates)
s3.reset()
for d in dates:
s3.addValue(d)
s3.finalize()
self.assertEqual(s.statistic(t['stat']), t['expected'])
self.assertEqual(s3.statistic(t['stat']), t['expected'])
# display name
self.assertTrue(len(QgsDateTimeStatisticalSummary.displayName(t['stat'])) > 0)
def testVariantStats(self):
""" test with non-datetime values """
s = QgsDateTimeStatisticalSummary()
self.assertEqual(s.statistics(), QgsDateTimeStatisticalSummary.All)
s.calculate([QDateTime(QDate(2015, 3, 4), QTime(11, 10, 54)),
'asdasd',
QDateTime(QDate(2015, 3, 4), QTime(11, 10, 54)),
34,
QDateTime(QDate(2019, 12, 28), QTime(23, 10, 1)),
QDateTime(),
QDateTime(QDate(1998, 1, 2), QTime(1, 10, 54)),
QDateTime(),
QDateTime(QDate(2011, 1, 5), QTime(11, 10, 54))])
self.assertEqual(s.count(), 9)
self.assertEqual(set(s.distinctValues()), set([QDateTime(QDate(2015, 3, 4), QTime(11, 10, 54)),
QDateTime(QDate(2019, 12, 28), QTime(23, 10, 1)),
QDateTime(QDate(1998, 1, 2), QTime(1, 10, 54)),
QDateTime(QDate(2011, 1, 5), QTime(11, 10, 54)),
QDateTime()]))
self.assertEqual(s.countMissing(), 4)
self.assertEqual(s.min(), QDateTime(QDate(1998, 1, 2), QTime(1, 10, 54)))
self.assertEqual(s.max(), QDateTime(QDate(2019, 12, 28), QTime(23, 10, 1)))
self.assertEqual(s.range(), QgsInterval(693871147))
def testDates(self):
""" test with date values """
s = QgsDateTimeStatisticalSummary()
self.assertEqual(s.statistics(), QgsDateTimeStatisticalSummary.All)
s.calculate([QDate(2015, 3, 4),
QDate(2015, 3, 4),
QDate(2019, 12, 28),
QDate(),
QDate(1998, 1, 2),
QDate(),
QDate(2011, 1, 5)])
self.assertEqual(s.count(), 7)
self.assertEqual(set(s.distinctValues()), set([
QDateTime(QDate(2015, 3, 4), QTime()),
QDateTime(QDate(2019, 12, 28), QTime()),
QDateTime(QDate(1998, 1, 2), QTime()),
QDateTime(),
QDateTime(QDate(2011, 1, 5), QTime())]))
self.assertEqual(s.countMissing(), 2)
self.assertEqual(s.min(), QDateTime(QDate(1998, 1, 2), QTime()))
self.assertEqual(s.max(), QDateTime(QDate(2019, 12, 28), QTime()))
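        # dates carry no time component, so the range is a whole number of
        # days: 8030 days * 86400 s/day == 693792000 seconds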
self.assertEqual(s.range(), QgsInterval(693792000))
def testTimes(self):
""" test with time values """
s = QgsDateTimeStatisticalSummary()
self.assertEqual(s.statistics(), QgsDateTimeStatisticalSummary.All)
s.calculate([QTime(11, 3, 4),
QTime(15, 3, 4),
QTime(19, 12, 28),
QTime(),
QTime(8, 1, 2),
QTime(),
QTime(19, 12, 28)])
self.assertEqual(s.count(), 7)
self.assertEqual(s.countDistinct(), 5)
self.assertEqual(s.countMissing(), 2)
self.assertEqual(s.min().time(), QTime(8, 1, 2))
self.assertEqual(s.max().time(), QTime(19, 12, 28))
self.assertEqual(s.statistic(QgsDateTimeStatisticalSummary.Min), QTime(8, 1, 2))
self.assertEqual(s.statistic(QgsDateTimeStatisticalSummary.Max), QTime(19, 12, 28))
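        # 40286 s == 11:11:26, the span from QTime(8, 1, 2) to QTime(19, 12, 28)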
self.assertEqual(s.range(), QgsInterval(40286))
def testMissing(self):
s = QgsDateTimeStatisticalSummary()
s.calculate([NULL,
'not a date'])
self.assertEqual(s.countMissing(), 2)
if __name__ == '__main__':
unittest.main()
|