| code (string, 2-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (1 class) | license (15 classes) | size (int32, 2-1.05M) |
|---|---|---|---|---|---|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
from thumbor.filters import BaseFilter, filter_method
from thumbor.ext.filters import _round_corner
class Filter(BaseFilter):
@filter_method(r'[\d]+(?:\|[\d]+)?', BaseFilter.PositiveNumber, BaseFilter.PositiveNumber, BaseFilter.PositiveNumber, BaseFilter.Boolean)
def round_corner(self, radius, r, g, b, transparent=False):
width, height = self.engine.size
radius_parts = radius.split('|')
a_radius = int(radius_parts[0])
b_radius = int(radius_parts[1]) if len(radius_parts) > 1 else a_radius
if transparent:
self.engine.enable_alpha()
mode, data = self.engine.image_data_as_rgb()
imgdata = _round_corner.apply(
1, mode, a_radius, b_radius, r, g, b,
width, height, data, transparent
)
self.engine.set_image_data(imgdata)
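# Illustrative usage sketch (not part of the original file): thumbor filters are
# applied through the request URL, so a hypothetical request such as
#
#   http://localhost:8888/unsafe/filters:round_corner(20|40,255,255,255)/my-image.jpg
#
# exercises the argument parsing above: "20|40" becomes a_radius=20 and b_radius=40,
# followed by the RGB fill colour; an optional trailing boolean maps to `transparent`.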
|
okor/thumbor
|
thumbor/filters/round_corner.py
|
Python
|
mit
| 1,094
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_cobral_bandit_bith_female_01.iff"
result.attribute_template_id = 9
result.stfName("npc_name","bith_base_female")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
obi-two/Rebelion
|
data/scripts/templates/object/mobile/shared_dressed_cobral_bandit_bith_female_01.py
|
Python
|
mit
| 462
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .proxy_only_resource import ProxyOnlyResource
class TriggeredWebJob(ProxyOnlyResource):
"""Triggered Web Job Information.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param latest_run: Latest job run information.
:type latest_run: ~azure.mgmt.web.models.TriggeredJobRun
:param history_url: History URL.
:type history_url: str
:param scheduler_logs_url: Scheduler Logs URL.
:type scheduler_logs_url: str
:ivar triggered_web_job_name: Job name. Used as job identifier in ARM
resource URI.
:vartype triggered_web_job_name: str
:param run_command: Run command.
:type run_command: str
:param url: Job URL.
:type url: str
:param extra_info_url: Extra Info URL.
:type extra_info_url: str
:param job_type: Job type. Possible values include: 'Continuous',
'Triggered'
:type job_type: str or ~azure.mgmt.web.models.WebJobType
:param error: Error information.
:type error: str
:param using_sdk: Using SDK?
:type using_sdk: bool
:param settings: Job settings.
:type settings: dict[str, object]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'triggered_web_job_name': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'latest_run': {'key': 'properties.latestRun', 'type': 'TriggeredJobRun'},
'history_url': {'key': 'properties.historyUrl', 'type': 'str'},
'scheduler_logs_url': {'key': 'properties.schedulerLogsUrl', 'type': 'str'},
'triggered_web_job_name': {'key': 'properties.name', 'type': 'str'},
'run_command': {'key': 'properties.runCommand', 'type': 'str'},
'url': {'key': 'properties.url', 'type': 'str'},
'extra_info_url': {'key': 'properties.extraInfoUrl', 'type': 'str'},
'job_type': {'key': 'properties.jobType', 'type': 'WebJobType'},
'error': {'key': 'properties.error', 'type': 'str'},
'using_sdk': {'key': 'properties.usingSdk', 'type': 'bool'},
'settings': {'key': 'properties.settings', 'type': '{object}'},
}
def __init__(self, kind=None, latest_run=None, history_url=None, scheduler_logs_url=None, run_command=None, url=None, extra_info_url=None, job_type=None, error=None, using_sdk=None, settings=None):
super(TriggeredWebJob, self).__init__(kind=kind)
self.latest_run = latest_run
self.history_url = history_url
self.scheduler_logs_url = scheduler_logs_url
self.triggered_web_job_name = None
self.run_command = run_command
self.url = url
self.extra_info_url = extra_info_url
self.job_type = job_type
self.error = error
self.using_sdk = using_sdk
self.settings = settings
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-web/azure/mgmt/web/models/triggered_web_job.py
|
Python
|
mit
| 3,707
|
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2016-2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .tektronixDPO4000 import *
class tektronixDPO4054B(tektronixDPO4000):
"Tektronix DPO4054B IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'DPO4054B')
super(tektronixDPO4054B, self).__init__(*args, **kwargs)
self._analog_channel_count = 4
self._digital_channel_count = 0
self._channel_count = self._analog_channel_count + self._digital_channel_count
self._bandwidth = 500e6
self._init_channels()
|
alexforencich/python-ivi
|
ivi/tektronix/tektronixDPO4054B.py
|
Python
|
mit
| 1,650
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import threading
import numpy as np
import tensorflow as tf
from tensorboard.plugins.beholder import colormaps
# pylint: disable=not-context-manager
def global_extrema(arrays):
return min([x.min() for x in arrays]), max([x.max() for x in arrays])
def scale_sections(sections, scaling_scope):
'''
input: unscaled sections.
returns: sections scaled to [0, 255]
'''
new_sections = []
if scaling_scope == 'layer':
for section in sections:
new_sections.append(scale_image_for_display(section))
elif scaling_scope == 'network':
global_min, global_max = global_extrema(sections)
for section in sections:
new_sections.append(scale_image_for_display(section,
global_min,
global_max))
return new_sections
def scale_image_for_display(image, minimum=None, maximum=None):
image = image.astype(float)
minimum = image.min() if minimum is None else minimum
image -= minimum
maximum = image.max() if maximum is None else maximum
if maximum == 0:
return image
else:
image *= 255 / maximum
return image.astype(np.uint8)
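# Hypothetical worked example (added for illustration, not in the original): the
# scaling maps the image's value range onto [0, 255] by subtracting the minimum
# and multiplying by 255 / maximum, e.g.
#
#   scale_image_for_display(np.array([[0., 2., 8.]]))
#   # -> array([[  0,  63, 255]], dtype=uint8)   since 2 * 255 / 8 == 63.75 -> 63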
def pad_to_shape(array, shape, constant=245):
padding = []
for actual_dim, target_dim in zip(array.shape, shape):
start_padding = 0
end_padding = target_dim - actual_dim
padding.append((start_padding, end_padding))
return np.pad(array, padding, mode='constant', constant_values=constant)
def apply_colormap(image, colormap='magma'):
if colormap == 'grayscale':
return image
cm = getattr(colormaps, colormap)
return image if cm is None else cm[image]
# Taken from https://github.com/tensorflow/tensorboard/blob/
# /28f58888ebb22e2db0f4f1f60cd96138ef72b2ef/tensorboard/util.py
# Modified by Chris Anderson to not use the GPU.
class PersistentOpEvaluator(object):
"""Evaluate a fixed TensorFlow graph repeatedly, safely, efficiently.
Extend this class to create a particular kind of op evaluator, like an
image encoder. In `initialize_graph`, create an appropriate TensorFlow
graph with placeholder inputs. In `run`, evaluate this graph and
return its result. This class will manage a singleton graph and
session to preserve memory usage, and will ensure that this graph and
session do not interfere with other concurrent sessions.
A subclass of this class offers a threadsafe, highly parallel Python
entry point for evaluating a particular TensorFlow graph.
Example usage:
class FluxCapacitanceEvaluator(PersistentOpEvaluator):
\"\"\"Compute the flux capacitance required for a system.
Arguments:
x: Available power input, as a `float`, in jigawatts.
Returns:
A `float`, in nanofarads.
\"\"\"
def initialize_graph(self):
self._placeholder = tf.placeholder(some_dtype)
self._op = some_op(self._placeholder)
def run(self, x):
return self._op.eval(feed_dict={self._placeholder: x})
evaluate_flux_capacitance = FluxCapacitanceEvaluator()
for x in xs:
evaluate_flux_capacitance(x)
"""
def __init__(self):
super(PersistentOpEvaluator, self).__init__()
self._session = None
self._initialization_lock = threading.Lock()
def _lazily_initialize(self):
"""Initialize the graph and session, if this has not yet been done."""
with self._initialization_lock:
if self._session:
return
graph = tf.Graph()
with graph.as_default():
self.initialize_graph()
config = tf.ConfigProto(device_count={'GPU': 0})
self._session = tf.Session(graph=graph, config=config)
def initialize_graph(self):
"""Create the TensorFlow graph needed to compute this operation.
This should write ops to the default graph and return `None`.
"""
raise NotImplementedError('Subclasses must implement "initialize_graph".')
def run(self, *args, **kwargs):
"""Evaluate the ops with the given input.
When this function is called, the default session will have the
graph defined by a previous call to `initialize_graph`. This
function should evaluate any ops necessary to compute the result of
the query for the given *args and **kwargs, likely returning the
result of a call to `some_op.eval(...)`.
"""
raise NotImplementedError('Subclasses must implement "run".')
def __call__(self, *args, **kwargs):
self._lazily_initialize()
with self._session.as_default():
return self.run(*args, **kwargs)
class PNGDecoder(PersistentOpEvaluator):
def __init__(self):
super(PNGDecoder, self).__init__()
self._image_placeholder = None
self._decode_op = None
def initialize_graph(self):
self._image_placeholder = tf.placeholder(dtype=tf.string)
self._decode_op = tf.image.decode_png(self._image_placeholder)
# pylint: disable=arguments-differ
def run(self, image):
return self._decode_op.eval(feed_dict={
self._image_placeholder: image,
})
class PNGEncoder(PersistentOpEvaluator):
def __init__(self):
super(PNGEncoder, self).__init__()
self._image_placeholder = None
self._encode_op = None
def initialize_graph(self):
self._image_placeholder = tf.placeholder(dtype=tf.uint8)
self._encode_op = tf.image.encode_png(self._image_placeholder)
# pylint: disable=arguments-differ
def run(self, image):
if len(image.shape) == 2:
image = image.reshape([image.shape[0], image.shape[1], 1])
return self._encode_op.eval(feed_dict={
self._image_placeholder: image,
})
class Resizer(PersistentOpEvaluator):
def __init__(self):
super(Resizer, self).__init__()
self._image_placeholder = None
self._size_placeholder = None
self._resize_op = None
def initialize_graph(self):
self._image_placeholder = tf.placeholder(dtype=tf.float32)
self._size_placeholder = tf.placeholder(dtype=tf.int32)
self._resize_op = tf.image.resize_nearest_neighbor(self._image_placeholder,
self._size_placeholder)
# pylint: disable=arguments-differ
def run(self, image, height, width):
if len(image.shape) == 2:
image = image.reshape([image.shape[0], image.shape[1], 1])
resized = np.squeeze(self._resize_op.eval(feed_dict={
self._image_placeholder: [image],
self._size_placeholder: [height, width]
}))
return resized
decode_png = PNGDecoder()
encode_png = PNGEncoder()
resize = Resizer()
def read_image(filename):
with tf.gfile.Open(filename, 'rb') as image_file:
return np.array(decode_png(image_file.read()))
def write_image(array, filename):
with tf.gfile.Open(filename, 'w') as image_file:
image_file.write(encode_png(array))
def get_image_relative_to_script(filename):
script_directory = os.path.dirname(__file__)
filename = os.path.join(script_directory, 'resources', filename)
return read_image(filename)
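# A short usage sketch (not part of the upstream module), assuming a TF 1.x
# environment in which the evaluators above can build their graphs:
#
#   img = np.random.randint(0, 256, size=(32, 32), dtype=np.uint8)
#   png_bytes = encode_png(img)          # PNGEncoder reshapes 2-D input to HxWx1
#   decoded = decode_png(png_bytes)      # back to an ndarray via tf.image.decode_png
#   small = resize(decoded.astype(np.float32), 16, 16)  # nearest-neighbour resize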
|
ryfeus/lambda-packs
|
Tensorflow_Pandas_Numpy/source3.6/tensorboard/plugins/beholder/im_util.py
|
Python
|
mit
| 7,736
|
from pubnub import utils
from pubnub.endpoints.endpoint import Endpoint
from pubnub.enums import HttpMethod, PNOperationType
from pubnub.errors import PNERR_CHANNEL_OR_GROUP_MISSING
from pubnub.exceptions import PubNubException
class Heartbeat(Endpoint):
# /v2/presence/sub-key/<subscribe_key>/channel/<channel>/heartbeat?uuid=<uuid>
HEARTBEAT_PATH = "/v2/presence/sub-key/%s/channel/%s/heartbeat"
def __init__(self, pubnub):
super(Heartbeat, self).__init__(pubnub)
self._channels = []
self._groups = []
self._state = None
def channels(self, channels):
utils.extend_list(self._channels, channels)
return self
def channel_groups(self, channel_groups):
utils.extend_list(self._groups, channel_groups)
return self
def state(self, state):
self._state = state
return self
def http_method(self):
return HttpMethod.GET
def validate_params(self):
self.validate_subscribe_key()
if len(self._channels) == 0 and len(self._groups) == 0:
raise PubNubException(pn_error=PNERR_CHANNEL_OR_GROUP_MISSING)
def build_path(self):
channels = utils.join_channels(self._channels)
return Heartbeat.HEARTBEAT_PATH % (self.pubnub.config.subscribe_key, channels)
def custom_params(self):
params = {'heartbeat': str(self.pubnub.config.presence_timeout)}
if len(self._groups) > 0:
params['channel-group'] = utils.join_items(self._groups)
if self._state is not None and len(self._state) > 0:
params['state'] = utils.url_write(self._state)
return params
def create_response(self, envelope):
return True
def is_auth_required(self):
return True
def affected_channels(self):
return None
def affected_channels_groups(self):
return None
def request_timeout(self):
return self.pubnub.config.non_subscribe_request_timeout
def connect_timeout(self):
return self.pubnub.config.connect_timeout
def operation_type(self):
return PNOperationType.PNHeartbeatOperation
def name(self):
return "Heartbeat"
|
Haynie-Research-and-Development/jarvis
|
deps/lib/python3.4/site-packages/pubnub/endpoints/presence/heartbeat.py
|
Python
|
gpl-2.0
| 2,207
|
# -*- coding: utf-8 -*-
#
# AWL simulator - Debugging support
#
# Copyright 2014 Michael Buesch <m@bues.ch>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from __future__ import division, absolute_import, print_function, unicode_literals
from awlsim.common.compat import *
import gc as garbageCollector
#garbageCollector.set_debug(garbageCollector.DEBUG_LEAK)
class AwlDebug(object):
@classmethod
def getMemoryLeaks(cls):
garbageCollector.collect()
ret = [ "Memory leaks:", ]
for obj in garbageCollector.garbage:
ret.append("%s: %s" % (str(type(obj)), str(obj)))
return "\n".join(ret)
@classmethod
def printMemoryLeaks(cls):
print(cls.getMemoryLeaks())
|
gion86/awlsim
|
awlsim/common/debug.py
|
Python
|
gpl-2.0
| 1,336
|
"""
The ASTCache module helps to prevent unnecessary re-parsing, which can be
very slow when using pycparser.
Maintains a dictionary of {filenames -> ASTs}
Call get(filename) to get an AST. The file will be parsed if this is the
first request, otherwise the previously generated AST will be returned.
If code is being modified invalidate_ast_for(filename) can be used to
remove a cached AST and force a reparse.
"""
import errno
import os
import pycparser
from pycparser.plyparser import ParseError
import astcache
import cPickle as pickle
from utils import log, CaicosError, deglob_file, project_path, mkdir
filecache = None
memcache = {}
def get(filename, alternateincludepath = None, addparentlinks = True):
"""
Get the AST for the file pointed to by the absolute path filename
If alternateincludepath is provided then a different include path is passed to the
preprocessor before parsing
"""
try:
if filename in memcache:
return memcache[filename]
else:
cachedast = fetch_from_filecache(filename)
if cachedast != None:
memcache[filename] = cachedast
return cachedast
log().info("Parsing " + str(filename))
ast = parse_jamaica_output(filename, alternateincludepath)
if addparentlinks:
add_parent_links(ast)
memcache[filename] = ast
save_to_filecache(filename, ast)
return ast
except ParseError, pe:
raise CaicosError("Parse error in file '" + str(filename) + "'.\n\t" + str(pe.message))
def invalidate_ast_for(filename):
"""
If the filename has been parsed and is in memcache, this cached AST is deleted.
"""
if filename in memcache:
del memcache[filename]
def clear():
"""
Clear the memcache.
"""
memcache.clear()
def fetch_from_filecache(filename):
"""
Checks to see if we have a cached version of the AST in the file cache
If filecache is None then immediately returns None
If no suitable file exists, returns None
Else returns the requested AST.
"""
if filecache != None:
if os.path.isfile(cache_filename(filename)):
sourcefile_mtime = os.stat(filename).st_mtime
cache_mtime = os.stat(cache_filename(filename)).st_mtime
if cache_mtime > sourcefile_mtime:
log().info("Using cached AST for " + str(filename))
return pickle.load(open(cache_filename(filename), 'rb'))
else:
return None
def save_to_filecache(filename, ast):
"""
Save the provided AST (which was generated from the provided filename) to the file cache
"""
if filecache != None:
cdir = os.path.dirname(cache_filename(filename))
try:
os.makedirs(cdir)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(cdir):
pass
else: raise
f = open(cache_filename(filename), 'wb')
pickler = pickle.Pickler(f, protocol=pickle.HIGHEST_PROTOCOL)
pickler.dump(ast)
f.close()
def cache_filename(sourcefile):
"""
Return the filename that should be used in the file cache for the given source file
"""
if sourcefile.startswith(os.sep):
sourcefile = sourcefile[len(os.sep):]
return os.path.join(filecache, sourcefile)
def parse_jamaica_output(filename, includepath = None):
"""
Use pycparser to parse a Jamaica output file.
Because Jamaica's output is pretty complex, cpp is required, and even still
some features have to be removed because it uses GCC extensions unsupported by pycparser.
Returns a pycparser.c_ast or throws ParseError
If includepath is None then the project include files are used for parsing. Else, an absolute
path to alternate includes can be provided.
"""
if '*' in filename:
filename = deglob_file(filename)
cppargs = ['-E',
'-DNDEBUG', #Disable Jamaica debug support
'-U__GNUC__', #Prevents use of __attribute__, will cause an "unsupported compiler" #warning
#'-W#warnings', #And this hides that warning
'-DJAMAICA_NATIVE_TIME_GET_HIGH_RESOLUTION_TIMESTAMP', #These use volatile asm() which is not supported by pycparser
'-DJAMAICA_NATIVE_THREAD_COMPARE_AND_SWAP',
'-D__CAICOS__', #So we can tell if we are being parsed by caicos
r'-I' + project_path("stdlibheaders"), #Override stdlib headers with blank versions (Jamaica builder doesn't use them, but includes them)
]
if includepath == None:
cppargs.append(r'-I' + project_path("projectfiles", "include"))
else:
cppargs.append(r'-I' + str(includepath))
return pycparser.parse_file(filename, use_cpp=True, cpp_path="gcc", cpp_args=cppargs)
def add_parent_links(ast):
"""
Iterate over an AST annotating every node with a link to its parent node.
This adds a 'parent' attribute to the AST nodes and will cause problems if __slots__ is
present on the c_ast.Node class
"""
def recurse(node, parent):
node.parent = parent
for _, c in node.children():
recurse(c, node)
recurse(ast, None)
def activate_cache(path):
"""
Turn on the file cache by providing an absolute path to a directory.
ASTs are pickled here once created and used to speed subsequent runs of caicos.
"""
mkdir(path)
astcache.filecache = path
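# Minimal usage sketch (not part of the module), mirroring the workflow described
# in the module docstring above; paths are placeholders.
#
#   import astcache
#   astcache.activate_cache('/tmp/caicos-astcache')           # optional on-disk cache
#   ast = astcache.get('/path/to/jamaica_output.c')           # parses on first call, cached afterwards
#   astcache.invalidate_ast_for('/path/to/jamaica_output.c')  # force a re-parse next time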
|
juniper-project/fpgas-caicos
|
astcache.py
|
Python
|
gpl-2.0
| 5,014
|
##
# This file is an EasyBuild reciPY as per https://github.com/easybuilders/easybuild
#
# Copyright:: Copyright 2012-2018 Uni.Lu/LCSB, NTUA
# Copyright:: Copyright 2016-2018 Forschungszentrum Juelich
# Authors:: Fotis Georgatos <fotis@cern.ch>
# Authors:: Damian Alvarez <d.alvarez@fz-juelich.de>
# License:: MIT/GPL
# $Id$
#
# This work implements a part of the HPCBIOS project and is a component of the policy:
# http://hpcbios.readthedocs.org/en/latest/HPCBIOS_06-05.html
##
"""
EasyBuild support for installing Totalview, implemented as an easyblock
@author: Fotis Georgatos (Uni.Lu)
"""
import os
import easybuild.tools.environment as env
from easybuild.easyblocks.generic.packedbinary import PackedBinary
from easybuild.framework.easyconfig.types import ensure_iterable_license_specs
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import find_flexlm_license
from easybuild.tools.modules import get_software_root
from easybuild.tools.run import run_cmd
class EB_TotalView(PackedBinary):
"""EasyBlock for TotalView"""
def __init__(self, *args, **kwargs):
"""Initialisation of custom class variables for Totalview"""
super(EB_TotalView, self).__init__(*args, **kwargs)
self.license_file = 'UNKNOWN'
self.license_env_var = 'UNKNOWN'
def configure_step(self):
"""
Handle the license configuration
"""
default_lic_env_var = 'LM_LICENSE_FILE'
license_specs = ensure_iterable_license_specs(self.cfg['license_file'])
lic_specs, self.license_env_var = find_flexlm_license(custom_env_vars=[default_lic_env_var],
lic_specs=license_specs)
if lic_specs:
if self.license_env_var is None:
self.log.info("Using Totalview license specifications from 'license_file': %s", lic_specs)
self.license_env_var = default_lic_env_var
else:
self.log.info("Using Totalview license specifications from %s: %s", self.license_env_var, lic_specs)
self.license_file = os.pathsep.join(lic_specs)
env.setvar(self.license_env_var, self.license_file)
else:
raise EasyBuildError("No viable license specifications found; specify 'license_file' or "+
"define $LM_LICENSE_FILE")
def install_step(self):
"""Custom install procedure for TotalView."""
cmd = "./Install -agree -platform linux-x86-64 -nosymlink -install totalview -directory %s" % self.installdir
run_cmd(cmd)
def sanity_check_step(self):
"""Custom sanity check for TotalView."""
binpath_t = 'toolworks/%s.%s/bin/' % (self.name.lower(), self.version) + 'tv%s'
custom_paths = {
'files': [binpath_t % i for i in ['8', '8cli', 'dbootstrap', 'dsvr', 'script']],
'dirs': [],
}
super(EB_TotalView, self).sanity_check_step(custom_paths=custom_paths)
def make_module_req_guess(self):
"""Specify TotalView custom values for PATH."""
guesses = super(EB_TotalView, self).make_module_req_guess()
prefix = os.path.join('toolworks', '%s.%s' % (self.name.lower(), self.version))
guesses.update({
'PATH': [os.path.join(prefix, 'bin')],
'MANPATH': [os.path.join(prefix, 'man')],
})
return guesses
def make_module_extra(self):
"""Add extra environment variables for license file and anything else."""
txt = super(EB_TotalView, self).make_module_extra()
txt += self.module_generator.prepend_paths(self.license_env_var, [self.license_file], allow_abs=True, expand_relpaths=False)
return txt
|
bartoldeman/easybuild-easyblocks
|
easybuild/easyblocks/t/totalview.py
|
Python
|
gpl-2.0
| 3,766
|
import xml.etree.ElementTree as ET
import requests
import os
giantBombWAPI = 'http://www.giantbomb.com/api/'
def grabImage(id):
_gameImages = requests.get(giantBombWAPI+'game/%s/?api_key=%s&field_list=image&format=xml' % (id, giantBombKEY))
images = (ET.fromstring(_gameImages.text.encode('utf-8')).find('results')).find('image')
if len(images) > 0:
if len(images) < 5:
return images[0].text
else:
return images[5].text
else:
return 'NONE'
def main():
for i in range(16408,41803):
if os.path.isfile('games/g'+str(i+1)+'.xml'):
root = ET.parse('games/g'+str(i+1)+'.xml').getroot()
with open("IDs.txt",'a') as file:
file.write(str(i+1)+ ' ' + root[1].text.encode('utf-8')+'\n')
_image = ET.SubElement(root, 'image')
image = grabImage(i+1)
_image.text = image
tree = ET.ElementTree(root)
f=open('Games/'+str(i+1)+'.xml', 'w')
tree.write(f, encoding='utf-8', xml_declaration=True)
f.close()
#print image
if __name__ == '__main__':
main()
|
kidaa/gamers-galore
|
Backend/grabImages.py
|
Python
|
gpl-2.0
| 992
|
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from numpy import ones
def get_mask_for_gridcells(index, n, m):
"""index is an array of size n x m where should be no mask placed."""
mask = ones((n, m))
for i in index:
mask[i,:] = 0
return mask
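# Hypothetical example (added for illustration): masking out gridcell row 1 of a
# 3 x 2 array of ones.
#
#   get_mask_for_gridcells([1], 3, 2)
#   # -> array([[1., 1.],
#   #           [0., 0.],
#   #           [1., 1.]])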
|
christianurich/VIBe2UrbanSim
|
3rdparty/opus/src/urbansim/household_x_gridcell/variable_functions.py
|
Python
|
gpl-2.0
| 353
|
import os
import sys
from django.conf import settings
from django import template
from webassets.loaders import GlobLoader, LoaderError
try:
set
except NameError:
from sets import Set as set
from django_assets.templatetags.assets import AssetsNode as AssetsNodeOriginal
try:
from django.templatetags.assets import AssetsNode as AssetsNodeMapped
except ImportError:
# Since Django #12295, custom templatetags are no longer mapped into
# the Django namespace. Support both versions.
AssetsNodeMapped = None
AssetsNodeClasses = filter(lambda c: bool(c),
(AssetsNodeOriginal, AssetsNodeMapped))
__all__ = ('DjangoLoader', 'get_django_template_dirs',)
def _shortpath(abspath):
"""Make an absolute path relative to the project's settings module,
which would usually be the project directory.
"""
b = os.path.dirname(os.path.normpath(sys.modules[settings.SETTINGS_MODULE].__file__))
p = os.path.normpath(abspath)
return p[len(os.path.commonprefix([b, p])):]
def uniq(seq):
"""Remove duplicate items, preserve order.
http://www.peterbe.com/plog/uniqifiers-benchmark
"""
seen = set()
seen_add = seen.add
return [x for x in seq if x not in seen and not seen_add(x)]
FILESYSTEM_LOADERS = [
'django.template.loaders.filesystem.load_template_source', # <= 1.1
'django.template.loaders.filesystem.Loader', # > 1.2
]
APPDIR_LOADERS = [
'django.template.loaders.app_directories.load_template_source', # <= 1.1
'django.template.loaders.app_directories.Loader' # > 1.2
]
def get_django_template_dirs(loader_list=None):
"""Build a list of template directories based on configured loaders.
"""
if not loader_list:
loader_list = settings.TEMPLATE_LOADERS
template_dirs = []
for loader in loader_list:
if loader in FILESYSTEM_LOADERS:
template_dirs.extend(settings.TEMPLATE_DIRS)
if loader in APPDIR_LOADERS:
from django.template.loaders.app_directories import app_template_dirs
template_dirs.extend(app_template_dirs)
if isinstance(loader, (list, tuple)) and len(loader) >= 2:
# The cached loader uses the tuple syntax, but simply search all
# tuples for nested loaders; thus possibly support custom ones too.
template_dirs.extend(get_django_template_dirs(loader[1]))
return uniq(template_dirs)
class DjangoLoader(GlobLoader):
"""Parse all the templates of the current Django project, try to find
bundles in active use.
"""
def load_bundles(self):
bundles = []
for template_dir in get_django_template_dirs():
for filename in self.glob_files((template_dir, '*.html'), True):
bundles.extend(self.with_file(filename, self._parse) or [])
return bundles
def _parse(self, filename, contents):
# parse the template for asset nodes
try:
t = template.Template(contents)
except template.TemplateSyntaxError, e:
raise LoaderError('Django parser failed: %s' % e)
else:
result = []
def _recurse_node(node):
# depending on whether the template tag is added to
# builtins, or loaded via {% load %}, it will be
# available in a different module
if node is not None and \
isinstance(node, AssetsNodeClasses):
# try to resolve this node's data; if we fail,
# then it depends on view data and we cannot
# manually rebuild it.
try:
bundle = node.resolve()
except template.VariableDoesNotExist:
raise LoaderError('skipping bundle %s, depends on runtime data' % node.output)
else:
result.append(bundle)
# see Django #7430
for subnode in hasattr(node, 'nodelist') \
and node.nodelist\
or []:
_recurse_node(subnode)
for node in t: # don't move into _recurse_node, ``Template`` has a .nodelist attribute
_recurse_node(node)
return result
|
mozilla/verbatim
|
vendor/lib/python/django_assets/loaders.py
|
Python
|
gpl-2.0
| 4,299
|
#! /usr/bin/env python
# -*- coding: UTF-8 -*-
#----------------------------------------------------------------------------------------------------------------------*
import sys, time, os, json
import makefile, default_build_options
import generic_galgas_makefile
import tool_chain_installation_path
import cross_compiler_download
#----------------------------------------------------------------------------------------------------------------------*
def buildForWin32OnMacOSX (dictionary, jsonFilePath, EXECUTABLE, GOAL, maxParallelJobs, displayCommands) :
#--- Tool chain installation
GCC_VERSION = "7.2.0"
BINUTILS_VERSION = "2.28"
TOOL_CHAIN_NAME = "binutils-" + BINUTILS_VERSION + "-gcc-" + GCC_VERSION + "-for-mingw32"
installDir = tool_chain_installation_path.toolChainInstallationPath ()
TOOL_CHAIN_INSTALL_PATH = installDir + "/" + TOOL_CHAIN_NAME
if not os.path.exists (TOOL_CHAIN_INSTALL_PATH):
cross_compiler_download.downloadToolChain (TOOL_CHAIN_NAME)
#---
gmf = generic_galgas_makefile.GenericGalgasMakefile ()
gmf.mJSONfilePath = jsonFilePath
gmf.mDictionary = dictionary
gmf.mExecutable = EXECUTABLE
gmf.mGoal = GOAL
gmf.mMaxParallelJobs = maxParallelJobs
gmf.mDisplayCommands = displayCommands
gmf.mTargetName = "win32"
gmf.mLinkerOptions = ["-lws2_32", "-lComdlg32"]
gmf.mExecutableSuffix = ".exe"
#---
gmf.mCompilerTool = [TOOL_CHAIN_INSTALL_PATH + "/bin/i586-mingw32-gcc", "-m32", "-D_WIN32_WINNT=0x501"]
gmf.mLinkerTool = [TOOL_CHAIN_INSTALL_PATH + "/bin/i586-mingw32-g++", "-m32", "--enable-auto-import", "-Wl,--gc-sections"]
gmf.mStripTool = [TOOL_CHAIN_INSTALL_PATH + "/bin/i586-mingw32-strip", "--strip-all"]
gmf.mCompilationMessage = "Compiling for Win32"
gmf.mLinkingMessage = "Linking for Win32"
gmf.mStripMessage = "Stripping"
#--- Options for all compilers
gmf.mAllCompilerOptions = default_build_options.allCompilerOptions (["-Wconversion"])
#--- Options for release mode
gmf.mCompilerReleaseOptions = default_build_options.compilerReleaseOptions (["-O2"])
#--- Options for debug mode
gmf.mCompilerDebugOptions = default_build_options.compilerDebugOptions ([])
#--- Options for C compiling (.c extension)
gmf.m_C_CompilerOptions = default_build_options.C_CompilerOptions ([])
#--- Options for C++ compiling (.cpp extension)
gmf.m_Cpp_CompilerOptions = default_build_options.Cpp_CompilerOptions (["-Weffc++", "-Wsign-promo"])
#--- Options for Objective-C compiling (.m extension)
gmf.m_ObjectiveC_CompilerOptions = default_build_options.ObjectiveC_CompilerOptions ([])
#--- Options for Objective-C++ compiling (.mm extension)
gmf.m_ObjectiveCpp_CompilerOptions = default_build_options.ObjectiveCpp_CompilerOptions ([])
#--- Library to use for gmp
gmf.mCrossCompilation = "win32"
#--- Run makefile
gmf.run ()
#----------------------------------------------------------------------------------------------------------------------*
|
TrampolineRTOS/trampoline
|
goil/build/libpm/python-makefiles/mingw32_on_macosx_gcc_tools.py
|
Python
|
gpl-2.0
| 2,932
|
# Special Commands
#
# Author: Balkansoft.BlogSpot.com
# GNU GPL licensed
import os
import re
import nanolp.core as core
class UseCmd(core.Cmd):
"""Processor of <<use...>> command"""
descr = 'Import one LP file to another'
gpath = "use"
infile = '' # path to included file or file-obj (i.e., url-object)
def onpost(self, parser=None, flush=None):
chunk = parser.chunkdict.getbykey(self)
infile = ''.join(self.body)
mnt = self.getarg('mnt', '')
fmt = self.getarg('fmt', '')
self.infile = parser.ensureinput(infile)
if not self.infile:
raise core.ParserError("'use' can not ensure '%s' input file"%infile)
core.prn("using '%s' mounted to '%s'..."%(self.infile, mnt), engine=parser.engine, file='stdout')
inclparser = core.Parser.create_parser(parser.engine, self.infile, fmt=fmt, parent=parser)
inclparser.parsefile(self.infile, flush=False)
parser.importparser(inclparser, mnt)
return core.Cmd.onpost(self, parser=parser, flush=flush)
core.Cmd.register(UseCmd)
################################################################################
class FileCmd(core.Cmd):
"""Processor of <<file...>> command"""
descr = 'Save its content to file'
gpath = "file.*"
outfile = '' # path to output file
def onpost(self, parser=None, flush=None):
if flush:
jpath = self.jpath()
chunk = parser.chunkdict.getbykey(self)
if not parser.expand(jpath):
raise core.ParserError("'%s' can not be expanded"%jpath)
# body of out is output file name
self.outfile = ''.join(self.body)
self.outfile = os.path.join(parser.outdir, self.outfile)
self.outfile = os.path.abspath(self.outfile)
# if self.outfile has relative path, real outdir can be different then
# in parser. If not exists, create it
outdir = os.path.dirname(self.outfile)
if not os.path.exists(outdir):
os.makedirs(outdir)
core.prn("writing to '%s'..."%self.outfile, engine=parser.engine, file='stdout')
core.fwrite23(self.outfile, chunk.tangle)
return core.Cmd.onpost(self, parser=parser, flush=flush)
core.Cmd.register(FileCmd)
################################################################################
class VarsCmd(core.Cmd):
"""Processor of <<vars...>> command"""
descr = 'Create variables dictionary'
gpath = "vars"
def ondefine(self, parser=None, chunktext=None):
"""Add dictionary of vars, body[0] of Cmd will be name of dict
otherwise anonym. dict is used
"""
parser.updatevars(self.body[0] if self.body else '', dict(self.args))
return core.Cmd.ondefine(self, parser=parser, chunktext=None)
core.Cmd.register(VarsCmd)
################################################################################
class OnCmd(core.Cmd):
"""Processor of <<on.CLASS...>> command"""
descr = 'Set event handler'
gpath = 'on.*'
def __ondefine__(self, parser=None, chunktext=None):
"""__ - means out of event-handling (hidden for handlers)"""
handlers = []
classname = self.path[1]
class_ = getattr(core, classname)
gpath = self.getarg('gpath')
if len(self.path) > 2:
# 'on.CLASS.EVENT, gpath:xxx, do:xxx'
event = self.path[2]
functext = self.getarg('do')
func = core.LineExp().parse(functext)
func.setcontext(resolver=parser,
stdargs=core.EventHandler.getargspec(class_, event).args)
params = dict(classname=classname, gpath=gpath, event=event)
handlers.append(core.EventHandler(func, **params))
else:
# 'on.CLASS, gpath:xxx, do.EVENT1:xxx, do.EVENT2:xxx...'
for argname, argvalue in self.args:
if argname.startswith('do.'):
# some handler
event = argname[3:] # 3==len('do.')
functext = argvalue
func = core.LineExp().parse(functext)
func.setcontext(resolver=parser,
stdargs=core.EventHandler.getargspec(class_, event).args)
params = dict(classname=classname, gpath=gpath, event=event)
handlers.append(core.EventHandler(func, **params))
parser.updatehandlers(handlers)
return core.Cmd.ondefine(self, parser=parser, chunktext=None)
core.Cmd.register(OnCmd)
################################################################################
|
bapcyk/nanolp
|
nanolp/commands.py
|
Python
|
gpl-2.0
| 4,688
|
# MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2009, 2012, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Implementing communication with MySQL servers.
"""
import os
import time
import re
import weakref
from .network import MySQLUnixSocket, MySQLTCPSocket
from .constants import (ClientFlag, ServerCmd, CharacterSet, ServerFlag,
flag_is_set)
from .conversion import MySQLConverter
from .protocol import MySQLProtocol
from . import errors
from .utils import (int4store)
from .cursor import (CursorBase, MySQLCursor, MySQLCursorRaw,
MySQLCursorBuffered, MySQLCursorBufferedRaw)
DEFAULT_CONFIGURATION = {
'database': None,
'user': '',
'password': '',
'host': '127.0.0.1',
'port': 3306,
'unix_socket': None,
'use_unicode': True,
'charset': 'utf8',
'collation': None,
'autocommit': False,
'time_zone': None,
'sql_mode': None,
'get_warnings': False,
'raise_on_warnings': False,
'connection_timeout': None,
'client_flags': 0,
'buffered': False,
'raw': False,
'ssl_ca': None,
'ssl_cert': None,
'ssl_key': None,
'passwd': None,
'db': None,
'connect_timeout': None,
'dsn': None
}
class MySQLConnection(object):
"""Connection to a MySQL Server"""
def __init__(self, *args, **kwargs):
self._protocol = None
self._socket = None
self._handshake = None
self._server_version = None
self.converter = None
self._converter_class = None
self._client_flags = ClientFlag.get_default()
self._charset_id = 33
self._sql_mode = None
self._time_zone = None
self._autocommit = False
self._user = ''
self._password = ''
self._database = ''
self._host = '127.0.0.1'
self._port = 3306
self._unix_socket = None
self._client_host = ''
self._client_port = 0
self._ssl = None
self._use_unicode = True
self._get_warnings = False
self._raise_on_warnings = False
self._connection_timeout = None
self._buffered = False
self._unread_result = False
self._have_next_result = False
self._raw = False
if len(kwargs) > 0:
self.connect(**kwargs)
def _get_self(self):
"""Return self for weakref.proxy
This method is used when the original object is needed when using
weakref.proxy.
"""
return self
def _do_handshake(self):
"""Get the handshake from the MySQL server"""
packet = self._socket.recv()
if packet[4] == 255:
raise errors.get_exception(packet)
try:
handshake = self._protocol.parse_handshake(packet)
except Exception as err:
raise errors.InterfaceError(
'Failed parsing handshake; {}'.format(err))
regex_ver = re.compile(b"^(\d{1,2})\.(\d{1,2})\.(\d{1,3})(.*)")
match = regex_ver.match(handshake['server_version_original'])
if not match:
raise errors.InterfaceError("Failed parsing MySQL version")
version = tuple([ int(v) for v in match.groups()[0:3]])
if version < (4, 1):
raise errors.InterfaceError(
"MySQL Version '{}' is not supported.".format(
handshake['server_version_original']))
self._handshake = handshake
self._server_version = version
def _do_auth(self, username=None, password=None, database=None,
client_flags=0, charset=33, ssl_options=None):
"""Authenticate with the MySQL server
"""
if client_flags & ClientFlag.SSL and ssl_options:
packet = self._protocol.make_auth_ssl(charset=charset,
client_flags=client_flags)
self._socket.send(packet)
self._socket.switch_to_ssl(**ssl_options)
packet = self._protocol.make_auth(
seed=self._handshake['scramble'],
username=username, password=password, database=database,
charset=charset, client_flags=client_flags)
self._socket.send(packet)
packet = self._socket.recv()
if packet[4] == 254:
raise errors.NotSupportedError(
"Authentication with old (insecure) passwords "\
"is not supported. For more information, lookup "\
"Password Hashing in the latest MySQL manual")
elif packet[4] == 255:
raise errors.get_exception(packet)
try:
if (not (client_flags & ClientFlag.CONNECT_WITH_DB)
and database):
self.cmd_init_db(database)
except:
raise
return True
def config(self, *args, **kwargs):
"""Configure the MySQL Connection
This method allows you to configure the MySQLConnection instance.
Raises on errors.
"""
config = kwargs.copy()
if 'dsn' in config:
raise errors.NotSupportedError("Data source name is not supported")
# Configure how we handle MySQL warnings
try:
self.get_warnings = config['get_warnings']
del config['get_warnings']
except KeyError:
pass # Leave what was set or default
try:
self.raise_on_warnings = config['raise_on_warnings']
del config['raise_on_warnings']
except KeyError:
pass # Leave what was set or default
# Configure client flags
try:
default = ClientFlag.get_default()
self.set_client_flags(config['client_flags'] or default)
del config['client_flags']
except KeyError:
pass # Missing client_flags-argument is OK
# Configure character set and collation
if ('charset' in config or 'collation' in config):
try:
charset = config['charset']
del config['charset']
except KeyError:
charset = None
try:
collation = config['collation']
del config['collation']
except KeyError:
collation = None
self._charset_id = CharacterSet.get_charset_info(charset,
collation)[0]
# Compatible configuration with other drivers
compat_map = [
# (<other driver argument>,<translates to>)
('db','database'),
('passwd','password'),
('connect_timeout','connection_timeout'),
]
for compat,translate in compat_map:
try:
if translate not in config:
config[translate] = config[compat]
del config[compat]
except KeyError:
pass # Missing compat argument is OK
# Configure login information
if ('user' in config or 'password' in config):
try:
user = config['user']
del config['user']
except KeyError:
user = self._user
try:
password = config['password']
del config['password']
except KeyError:
password = self._password
self.set_login(user, password)
# Check network locations
try:
self._port = int(config['port'])
del config['port']
except KeyError:
pass # Missing port argument is OK
except ValueError:
raise errors.InterfaceError(
"TCP/IP port number should be an integer")
# Other configuration
for key,value in config.items():
try:
DEFAULT_CONFIGURATION[key]
except KeyError:
raise AttributeError("Unsupported argument '{}'".format(key))
# SSL Configuration
if key.startswith('ssl_'):
try:
self._ssl[key.replace('ssl_', '')] = value
except TypeError:
self._ssl = { key.replace('ssl_', ''): value }
else:
attribute = '_' + key
try:
setattr(self, attribute, value.strip())
except AttributeError:
setattr(self, attribute, value)
def _get_connection(self, prtcls=None):
"""Get connection based on configuration
This method will return the appropriated connection object using
the connection parameters.
Returns subclass of MySQLBaseSocket.
"""
conn = None
if self.unix_socket and os.name != 'nt':
conn = MySQLUnixSocket(unix_socket=self.unix_socket)
else:
conn = MySQLTCPSocket(host=self.server_host,
port=self.server_port)
conn.set_connection_timeout(self._connection_timeout)
return conn
def _open_connection(self):
"""Open the connection to the MySQL server
This method sets up and opens the connection to the MySQL server.
Raises on errors.
"""
self._socket = self._get_connection()
self._socket.open_connection()
self._do_handshake()
if self._client_flags & ClientFlag.SSL:
self._socket.set_ssl(self._ssl['ca'], self._ssl['cert'],
self._ssl['key'])
self._do_auth(self._user, self._password,
self._database, self._client_flags, self._charset_id,
self._ssl)
self.set_converter_class(MySQLConverter)
if self._client_flags & ClientFlag.COMPRESS:
self._socket.recv = self._socket.recv_compressed
self._socket.send = self._socket.send_compressed
def _post_connection(self):
"""Executes commands after connection has been established
This method executes commands after the connection has been
established. Some settings like autocommit, character set, and SQL mode
are set using this method.
"""
self.set_charset_collation(self._charset_id)
self.autocommit = self._autocommit
if self._time_zone:
self.time_zone = self._time_zone
if self._sql_mode:
self.sql_mode = self._sql_mode
def connect(self, **kwargs):
"""Connect to the MySQL server
This method sets up the connection to the MySQL server. If no
arguments are given, it will use the already configured or default
values.
"""
if len(kwargs) > 0:
self.config(**kwargs)
self._protocol = MySQLProtocol()
self.disconnect()
self._open_connection()
self._post_connection()
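# Illustrative sketch (not part of the upstream source): the keyword arguments map
# onto DEFAULT_CONFIGURATION above; host and credentials are placeholders.
#
#   cnx = MySQLConnection(user='scott', password='tiger',
#                         host='127.0.0.1', database='test')
#   cnx.cmd_query('SELECT 1')
#   rows, eof = cnx.get_rows()
#   cnx.close()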
def __del__(self):
self.close()
def disconnect(self):
"""Disconnect from the MySQL server
"""
if not self._socket:
return
try:
self.cmd_quit()
self._socket.close_connection()
except errors.Error:
pass # Getting an exception would mean we are disconnected.
close = disconnect
def _send_cmd(self, command, argument=None, packet_number=0):
"""Send a command to the MySQL server
This method sends a command with an optional argument.
Returns a MySQL packet
"""
if self.unread_result:
raise errors.InternalError("Unread result found.")
try:
self._socket.send(self._protocol.make_command(command, argument),
packet_number)
except AttributeError:
raise errors.OperationalError("MySQL Connection not available.")
else:
return self._socket.recv()
def _toggle_have_next_result(self, flags):
"""Toggle whether there more results
This method checks the whether MORE_RESULTS_EXISTS is set in flags.
"""
if flag_is_set(ServerFlag.MORE_RESULTS_EXISTS, flags):
self._have_next_result = True
else:
self._have_next_result = False
def _handle_ok(self, packet):
"""Handle a MySQL OK packet
This method handles a MySQL OK packet. When the packet is found to
be an Error packet, an error will be raised. If the packet is neither
an OK nor an Error packet, errors.InterfaceError will be raised.
Returns a dict()
"""
if packet[4] == 0:
ok = self._protocol.parse_ok(packet)
self._toggle_have_next_result(ok['server_status'])
return ok
elif packet[4] == 255:
raise errors.get_exception(packet)
raise errors.InterfaceError('Expected OK packet')
def _handle_eof(self, packet):
"""Handle a MySQL EOF packet
This method handles a MySQL EOF packet. When the packet is found to
be an Error packet, an error will be raised. If the packet is neither
an OK nor an Error packet, errors.InterfaceError will be raised.
Returns a dict()
"""
if packet[4] == 254:
eof = self._protocol.parse_eof(packet)
self._toggle_have_next_result(eof['status_flag'])
return eof
elif packet[4] == 255:
raise errors.get_exception(packet)
raise errors.InterfaceError('Expected EOF packet')
def _handle_result(self, packet):
"""Handle a MySQL Result
This method handles a MySQL result, for example, after sending the
query command. OK and EOF packets will be handled and returned. If
the packet is an Error packet, an errors.Error-exception will be
raised.
The returned dictionary consists of:
- columns: column information
- eof: the EOF-packet information
Returns a dict()
"""
if not packet or len(packet) < 4:
raise errors.InterfaceError('Empty response')
elif packet[4] == 0:
return self._handle_ok(packet)
elif packet[4] == 254:
return self._handle_eof(packet)
elif packet[4] == 255:
raise errors.get_exception(packet)
# We have a text result set
column_count = self._protocol.parse_column_count(packet)
if not column_count or not isinstance(column_count, int):
raise errors.InterfaceError('Illegal result set.')
columns = [None,]*column_count
for i in range(0, column_count):
columns[i] = self._protocol.parse_column(self._socket.recv())
eof = self._handle_eof(self._socket.recv())
self.unread_result = True
return {'columns': columns, 'eof': eof}
def get_rows(self, count=None):
"""Get all rows returned by the MySQL server
This method gets all rows from the result set after sending, for
example, the query command. The result is a tuple consisting of a
list of rows and the EOF packet.
Returns a tuple()
"""
if not self.unread_result:
raise errors.InternalError("No result set available.")
rows = self._protocol.read_text_result(self._socket, count)
if rows[-1] is not None:
self._toggle_have_next_result(rows[-1]['status_flag'])
self.unread_result = False
return rows
def get_row(self):
"""Get the next rows returned by the MySQL server
This method gets one row from the result set after sending, for
example, the query command. The result is a tuple consisting of the
row and the EOF packet.
If no row was available in the result set, the row data will be None.
Returns a tuple.
"""
(rows, eof) = self.get_rows(count=1)
if len(rows):
return (rows[0], eof)
return (None, eof)
def cmd_init_db(self, database):
"""Change the current database
This method changes the current (default) database by sending the
INIT_DB command. The result is a dictionary containing the OK packet
information.
Returns a dict()
"""
return self._handle_ok(
self._send_cmd(ServerCmd.INIT_DB, database.encode('utf-8')))
def cmd_query(self, query):
"""Send a query to the MySQL server
This method sends the query to the MySQL server and returns the result.
If there was a text result, a tuple will be returned consisting of
the number of columns and a list containing information about these
columns.
When the query doesn't return a text result, the OK or EOF packet
information as dictionary will be returned. In case the result was
an error, exception errors.Error will be raised.
Returns a tuple()
"""
if not isinstance(query, bytes):
query = query.encode('utf-8')
result = self._handle_result(self._send_cmd(ServerCmd.QUERY, query))
if self._have_next_result:
raise errors.InterfaceError(
'Use cmd_query_iter for statements with multiple queries.')
return result
def cmd_query_iter(self, statements):
"""Send one or more statements to the MySQL server
Similar to the cmd_query method, but instead returns a generator
object to iterate through results. It sends the statements to the
MySQL server and through the iterator you can get the results.
statement = 'SELECT 1; INSERT INTO t1 VALUES (); SELECT 2'
for result in cnx.cmd_query_iter(statement):
if 'columns' in result:
columns = result['columns']
rows = cnx.get_rows()
else:
# do something useful with INSERT result
Returns a generator.
"""
if not isinstance(statements, bytes):
statements = statements.encode('utf-8')
# Handle the first query result
yield self._handle_result(self._send_cmd(ServerCmd.QUERY, statements))
# Handle next results, if any
while self._have_next_result:
if self.unread_result:
raise errors.InternalError("Unread result found.")
else:
result = self._handle_result(self._socket.recv())
yield result
def cmd_refresh(self, options):
"""Send the Refresh command to the MySQL server
This method sends the Refresh command to the MySQL server. The options
argument should be a bitwise value using constants.RefreshOption.
Usage example:
RefreshOption = mysql.connector.RefreshOption
refresh = RefreshOption.LOG | RefreshOption.THREADS
cnx.cmd_refresh(refresh)
The result is a dictionary with the OK packet information.
Returns a dict()
"""
return self._handle_ok(
self._send_cmd(ServerCmd.REFRESH, int4store(options)))
def cmd_quit(self):
"""Close the current connection with the server
This method sends the QUIT command to the MySQL server, closing the
current connection. Since no response can be returned to the
client, cmd_quit() will return the packet it sent.
Returns a str()
"""
if self.unread_result:
raise errors.InternalError("Unread result found.")
packet = self._protocol.make_command(ServerCmd.QUIT)
self._socket.send(packet, 0)
return packet
def cmd_shutdown(self):
"""Shut down the MySQL Server
This method sends the SHUTDOWN command to the MySQL server and is only
possible if the current user has SUPER privileges. The result is a
dictionary containing the OK packet information.
Note: Most applications and scripts do not need the SUPER privilege.
Returns a dict()
"""
return self._handle_eof(self._send_cmd(ServerCmd.SHUTDOWN))
def cmd_statistics(self):
"""Send the statistics command to the MySQL Server
This method sends the STATISTICS command to the MySQL server. The
result is a dictionary with various statistical information.
Returns a dict()
"""
if self.unread_result:
raise errors.InternalError("Unread result found.")
packet = self._protocol.make_command(ServerCmd.STATISTICS)
self._socket.send(packet, 0)
return self._protocol.parse_statistics(self._socket.recv())
def cmd_process_info(self):
"""Get the process list of the MySQL Server
This method is a placeholder to notify that the PROCESS_INFO command
is not supported by raising the NotSupportedError. The command
"SHOW PROCESSLIST" should be send using the cmd_query()-method or
using the INFORMATION_SCHEMA database.
Raises NotSupportedError exception
"""
raise errors.NotSupportedError(
"Not implemented. Use SHOW PROCESSLIST or INFORMATION_SCHEMA")
def cmd_process_kill(self, mysql_pid):
"""Kill a MySQL process
This method sends the PROCESS_KILL command to the server along with
the process ID. The result is a dictionary with the OK packet
information.
Returns a dict()
"""
return self._handle_ok(
self._send_cmd(ServerCmd.PROCESS_KILL, int4store(mysql_pid)))
def cmd_debug(self):
"""Send the DEBUG command
This method sends the DEBUG command to the MySQL server, which
requires the MySQL user to have SUPER privilege. The output will go
to the MySQL server error log and the result of this method is a
dictionary with EOF packet information.
Returns a dict()
"""
return self._handle_eof(self._send_cmd(ServerCmd.DEBUG))
def cmd_ping(self):
"""Send the PING command
This method sends the PING command to the MySQL server. It is used to
check if the connection is still valid. The result of this
method is dictionary with OK packet information.
Returns a dict()
"""
return self._handle_ok(self._send_cmd(ServerCmd.PING))
def cmd_change_user(self, username='', password='', database='',
charset=33):
"""Change the current logged in user
This method allows changing the currently logged-in user information.
The result is a dictionary with OK packet information.
Returns a dict()
"""
if self.unread_result:
raise errors.InternalError("Unread result found.")
packet = self._protocol.make_change_user(
seed=self._handshake['scramble'],
username=username, password=password, database=database,
charset=charset, client_flags=self._client_flags)
self._socket.send(packet, 0)
return self._handle_ok(self._socket.recv())
def is_connected(self):
"""Reports whether the connection to MySQL Server is available
This method checks whether the connection to MySQL is available.
It is similar to ping(), but unlike the ping()-method, either True
or False is returned and no exception is raised.
Returns True or False.
"""
try:
self.cmd_ping()
except:
return False # This method does not raise
return True
def reconnect(self, attempts=1, delay=0):
"""Attempt to reconnect to the MySQL server
The argument attempts should be the number of times a reconnect
is tried. The delay argument is the number of seconds to wait between
each retry.
You may want to set the number of attempts higher and use delay when
you expect the MySQL server to be down for maintenance or when you
        expect the network to be temporarily unavailable.
Raises InterfaceError on errors.
"""
counter = 0
while counter != attempts:
counter = counter + 1
try:
self.disconnect()
self.connect()
except Exception as e:
if counter == attempts:
msg = "Can not reconnect to MySQL after {} "\
"attempt(s): {}".format(attempts, str(e))
raise errors.InterfaceError(msg)
if delay > 0:
time.sleep(delay)
def ping(self, reconnect=False, attempts=1, delay=0):
"""Check availability to the MySQL server
When reconnect is set to True, one or more attempts are made to try
to reconnect to the MySQL server using the reconnect()-method.
delay is the number of seconds to wait between each retry.
When the connection is not available, an InterfaceError is raised. Use
the is_connected()-method if you just want to check the connection
without raising an error.
Raises InterfaceError on errors.
"""
try:
self.cmd_ping()
except:
if reconnect:
self.reconnect(attempts=attempts, delay=delay)
else:
raise errors.InterfaceError("Connection to MySQL is"
" not available.")
def set_converter_class(self, convclass):
"""
Set the converter class to be used. This should be a class overloading
methods and members of conversion.MySQLConverter.
"""
charset_name = CharacterSet.get_info(self._charset_id)[0]
self._converter_class = convclass
self.converter = convclass(charset_name, self._use_unicode)
def get_server_version(self):
"""Get the MySQL version
This method returns the MySQL server version as a tuple. If not
        previously connected, it will return None.
Returns a tuple or None.
"""
return self._server_version
def get_server_info(self):
"""Get the original MySQL version information
        This method returns the original MySQL server version as text. If not
previously connected, it will return None.
Returns a string or None.
"""
try:
return self._handshake['server_version_original']
except (TypeError, KeyError):
return None
@property
def connection_id(self):
"""MySQL connection ID"""
try:
return self._handshake['server_threadid']
except KeyError:
return None
def set_login(self, username=None, password=None):
"""Set login information for MySQL
Set the username and/or password for the user connecting to
the MySQL Server.
"""
if username is not None:
self._user = username.strip()
else:
self._user = ''
if password is not None:
self._password = password.strip()
else:
self._password = ''
def set_unicode(self, value=True):
"""Toggle unicode mode
Set whether we return string fields as unicode or not.
Default is True.
"""
self._use_unicode = value
if self.converter:
self.converter.set_unicode(value)
def set_charset_collation(self, charset=None, collation=None):
"""Sets the character set and collation for the current connection
This method sets the character set and collation to be used for
the current connection. The charset argument can be either the
name of a character set as a string, or the numerical equivalent
as defined in constants.CharacterSet.
When the collation is not given, the default will be looked up and
used.
For example, the following will set the collation for the latin1
character set to latin1_general_ci:
            set_charset_collation('latin1', 'latin1_general_ci')
"""
if charset:
if isinstance(charset, int):
self._charset_id = charset
(charset_name, collation_name) = CharacterSet.get_info(
self._charset_id)
elif isinstance(charset, str):
(self._charset_id, charset_name, collation_name) = \
CharacterSet.get_charset_info(charset, collation)
else:
raise ValueError(
"charset should be either integer, string or None")
elif collation:
(self._charset_id, charset_name, collation_name) = \
CharacterSet.get_charset_info(collation=collation)
self._execute_query("SET NAMES '{}' COLLATE '{}'".format(
charset_name, collation_name))
self.converter.set_charset(charset_name)
@property
def charset(self):
"""Returns the character set for current connection
This property returns the character set name of the current connection.
The server is queried when the connection is active. If not connected,
the configured character set name is returned.
Returns a string.
"""
return CharacterSet.get_info(self._charset_id)[0]
@property
def collation(self):
"""Returns the collation for current connection
This property returns the collation name of the current connection.
The server is queried when the connection is active. If not connected,
the configured collation name is returned.
Returns a string.
"""
return CharacterSet.get_charset_info(self._charset_id)[2]
def set_client_flags(self, flags):
"""Set the client flags
The flags-argument can be either an int or a list (or tuple) of
ClientFlag-values. If it is an integer, it will set client_flags
to flags as is.
        If flags is a list (or tuple), each flag will be set, or unset
        when the flag is negative.
set_client_flags([ClientFlag.FOUND_ROWS,-ClientFlag.LONG_FLAG])
        Raises ProgrammingError when the flags argument is not a sequence or
        an integer greater than 0.
Returns self.client_flags
"""
if isinstance(flags, int) and flags > 0:
self._client_flags = flags
elif isinstance(flags,(tuple, list)):
for flag in flags:
if flag < 0:
self._client_flags &= ~abs(flag)
else:
self._client_flags |= flag
else:
raise errors.ProgrammingError(
"set_client_flags expect integer (>0) or set")
return self._client_flags
def isset_client_flag(self, flag):
"""Check if a client flag is set"""
if (self._client_flags & flag) > 0:
return True
return False
@property
def user(self):
"""User used while connecting to MySQL"""
return self._user
@property
def server_host(self):
"""MySQL server IP address or name"""
return self._host
@property
def server_port(self):
"MySQL server TCP/IP port"
return self._port
@property
def unix_socket(self):
"MySQL Unix socket file location"
return self._unix_socket
def _set_unread_result(self, toggle):
"""Set whether there is an unread result
This method is used by cursors to let other cursors know there is
still a result set that needs to be retrieved.
Raises ValueError on errors.
"""
if not isinstance(toggle, bool):
raise ValueError("Expected a boolean type")
self._unread_result = toggle
def _get_unread_result(self):
"""Get whether there is an unread result
This method is used by cursors to check whether another cursor still
needs to retrieve its result set.
Returns True, or False when there is no unread result.
"""
return self._unread_result
unread_result = property(_get_unread_result, _set_unread_result,
doc="Unread result for this MySQL connection")
def set_database(self, value):
"""Set the current database"""
self.cmd_query("USE %s" % value)
def get_database(self):
"""Get the current database"""
return self._info_query("SELECT DATABASE()")[0]
database = property(get_database, set_database,
doc="Current database")
def set_time_zone(self, value):
"""Set the time zone"""
self.cmd_query("SET @@session.time_zone = '{}'".format(value))
self._time_zone = value
def get_time_zone(self):
"""Get the current time zone"""
return self._info_query("SELECT @@session.time_zone")[0]
time_zone = property(get_time_zone, set_time_zone,
doc="time_zone value for current MySQL session")
def set_sql_mode(self, value):
"""Set the SQL mode
This method sets the SQL Mode for the current connection. The value
        argument can be either a string with comma-separated mode names, or
a sequence of mode names.
It is good practice to use the constants class SQLMode:
from mysql.connector.constants import SQLMode
cnx.sql_mode = [SQLMode.NO_ZERO_DATE, SQLMode.REAL_AS_FLOAT]
"""
if isinstance(value, (list, tuple)):
value = ','.join(value)
self.cmd_query("SET @@session.sql_mode = '{}'".format(value))
self._sql_mode = value
def get_sql_mode(self):
"""Get the SQL mode"""
return self._info_query("SELECT @@session.sql_mode")[0]
sql_mode = property(get_sql_mode, set_sql_mode,
doc="sql_mode value for current MySQL session")
def set_autocommit(self, value):
"""Toggle autocommit"""
switch = 'ON' if value else 'OFF'
self._execute_query("SET @@session.autocommit = {}".format(switch))
def get_autocommit(self):
"""Get whether autocommit is on or off"""
value = self._info_query("SELECT @@session.autocommit")[0]
return True if value == 1 else False
autocommit = property(get_autocommit, set_autocommit,
doc="autocommit value for current MySQL session")
def _set_getwarnings(self, toggle):
"""Set whether warnings should be automatically retrieved
The toggle-argument must be a boolean. When True, cursors for this
connection will retrieve information about warnings (if any).
Raises ValueError on error.
"""
if not isinstance(toggle, bool):
raise ValueError("Expected a boolean type")
self._get_warnings = toggle
def _get_getwarnings(self):
"""Get whether this connection retrieves warnings automatically
This method returns whether this connection retrieves warnings
automatically.
Returns True, or False when warnings are not retrieved.
"""
return self._get_warnings
get_warnings = property(_get_getwarnings, _set_getwarnings,
                            doc="Toggle and check whether to retrieve "\
"warnings automatically")
def _set_raise_on_warnings(self, toggle):
"""Set whether warnings raise an error
The toggle-argument must be a boolean. When True, cursors for this
connection will raise an error when MySQL reports warnings.
Raising on warnings implies retrieving warnings automatically. In
other words: warnings will be set to True. If set to False, warnings
will be also set to False.
Raises ValueError on error.
"""
if not isinstance(toggle, bool):
raise ValueError("Expected a boolean type")
self._raise_on_warnings = toggle
self._get_warnings = toggle
def _get_raise_on_warnings(self):
"""Get whether this connection raises an error on warnings
This method returns whether this connection will raise errors when
MySQL reports warnings.
Returns True or False.
"""
return self._raise_on_warnings
raise_on_warnings = property(_get_raise_on_warnings,
_set_raise_on_warnings,
                                 doc="Toggle whether to raise on warnings "\
                                     "(implies retrieving warnings)."
def close(self):
self.disconnect()
def cursor(self, buffered=None, raw=None, cursor_class=None):
"""Instantiates and returns a cursor
By default, MySQLCursor is returned. Depending on the options
        while connecting, a buffered and/or raw cursor is instantiated
instead.
It is possible to also give a custom cursor through the
        cursor_class parameter, but it needs to be a subclass of
mysql.connector.cursor.CursorBase.
Returns a cursor-object
"""
if not self.is_connected():
raise errors.OperationalError("MySQL Connection not available.")
if cursor_class is not None:
if not issubclass(cursor_class, CursorBase):
                raise errors.ProgrammingError(
                    "Cursor class needs to be a subclass of cursor.CursorBase")
return (cursor_class)(self)
buffered = buffered or self._buffered
raw = raw or self._raw
cursor_type = 0
if buffered is True:
cursor_type |= 1
if raw is True:
cursor_type |= 2
types = (
MySQLCursor, # 0
MySQLCursorBuffered,
MySQLCursorRaw,
MySQLCursorBufferedRaw,
)
return (types[cursor_type])(self)
def commit(self):
"""Commit current transaction"""
self._execute_query("COMMIT")
def rollback(self):
"""Rollback current transaction"""
self._execute_query("ROLLBACK")
def _execute_query(self, query):
"""Execute a query
        This method simply calls cmd_query() after checking for an unread
        result. If there is still an unread result, an errors.InternalError
is raised. Otherwise whatever cmd_query() returns is returned.
Returns a dict()
"""
if self._unread_result is True:
raise errors.InternalError("Unread result found.")
self.cmd_query(query)
def _info_query(self, query):
"""Send a query which only returns 1 row"""
cursor = self.cursor(buffered=True)
cursor.execute(query)
return cursor.fetchone()
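# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a minimal example of
# the cursor()/commit()/is_connected()/reconnect() API documented above,
# assuming the usual public entry point mysql.connector.connect(); the host,
# credentials and query below are placeholders.
if __name__ == '__main__':
    import mysql.connector
    cnx = mysql.connector.connect(user='user', password='secret',
                                  host='127.0.0.1', database='test')
    cur = cnx.cursor(buffered=True)      # buffered cursor fetches rows at once
    cur.execute("SELECT DATABASE(), @@session.time_zone")
    print(cur.fetchone())
    cnx.commit()                         # sends COMMIT via _execute_query()
    if not cnx.is_connected():           # never raises, unlike ping()
        cnx.reconnect(attempts=3, delay=1)
    cnx.close()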
|
mitchcapper/mythbox
|
resources/lib/mysql-connector-python/python3/mysql/connector/connection.py
|
Python
|
gpl-2.0
| 39,855
|
"""
Gcodec is a collection of utilities to decode and encode gcode.
To run gcodec, install python 2.x on your machine, which is available from http://www.python.org/download/
Then in the folder which gcodec is in, type 'python' in a shell to run the python interpreter. Finally type 'from gcodec import *' to import this program.
Below is an example of gcodec use. This example is run in a terminal in the folder which contains gcodec and Screw Holder Bottom_export.gcode.
>>> from gcodec import *
>>> getFileText('Screw Holder Bottom_export.gcode')
'G90\nG21\nM103\nM105\nM106\nM110 S60.0\nM111 S30.0\nM108 S210.0\nM104 S235.0\nG1 X0.37 Y-4.07 Z1.9 F60.0\nM101\n
..
many lines of text
..
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.vector3 import Vector3
from fabmetheus_utilities import archive
from fabmetheus_utilities import euclidean
import cStringIO
import math
import os
import sys
import traceback
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def addLineAndNewlineIfNecessary(line, output):
'Add the line and if the line does not end with a newline add a newline.'
output.write(line)
if len(line) < 1:
return
if not line.endswith('\n'):
output.write('\n')
def addLinesToCString(cString, lines):
'Add lines which have something to cStringIO.'
for line in lines:
if line != '':
cString.write(line + '\n')
def getArcDistance(relativeLocation, splitLine):
'Get arc distance.'
halfPlaneLineDistance = 0.5 * abs(relativeLocation.dropAxis())
radius = getDoubleFromCharacterSplitLine('R', splitLine)
if radius == None:
iFloat = getDoubleFromCharacterSplitLine('I', splitLine)
jFloat = getDoubleFromCharacterSplitLine('J', splitLine)
radius = abs(complex(iFloat, jFloat))
angle = 0.0
if radius > 0.0:
halfPlaneLineDistanceOverRadius = halfPlaneLineDistance / radius
if halfPlaneLineDistance < radius:
angle = 2.0 * math.asin(halfPlaneLineDistanceOverRadius)
else:
angle = math.pi * halfPlaneLineDistanceOverRadius
return abs(complex(angle * radius, relativeLocation.z))
def getDoubleAfterFirstLetter(word):
'Get the double value of the word after the first letter.'
return float(word[1 :])
def getDoubleForLetter(letter, splitLine):
	'Get the double value of the word after the first occurrence of the letter in the split line.'
return getDoubleAfterFirstLetter(splitLine[getIndexOfStartingWithSecond(letter, splitLine)])
def getDoubleFromCharacterSplitLine(character, splitLine):
	'Get the double value of the string after the first occurrence of the character in the split line.'
indexOfCharacter = getIndexOfStartingWithSecond(character, splitLine)
if indexOfCharacter < 0:
return None
floatString = splitLine[indexOfCharacter][1 :]
try:
return float(floatString)
except ValueError:
return None
def getDoubleFromCharacterSplitLineValue(character, splitLine, value):
	'Get the double value of the string after the first occurrence of the character in the split line, if it does not exist return the value.'
splitLineFloat = getDoubleFromCharacterSplitLine(character, splitLine)
if splitLineFloat == None:
return value
return splitLineFloat
def getFeedRateMinute(feedRateMinute, splitLine):
'Get the feed rate per minute if the split line has a feed rate.'
indexOfF = getIndexOfStartingWithSecond('F', splitLine)
if indexOfF > 0:
return getDoubleAfterFirstLetter( splitLine[indexOfF] )
return feedRateMinute
def getFirstWord(splitLine):
'Get the first word of a split line.'
if len(splitLine) > 0:
return splitLine[0]
return ''
def getFirstWordFromLine(line):
'Get the first word of a line.'
return getFirstWord(line.split())
def getGcodeFileText(fileName, gcodeText):
	'Get the gcode text from a file if the given gcode text is empty and the file is a gcode file.'
if gcodeText != '':
return gcodeText
if fileName.endswith('.gcode'):
return archive.getFileText(fileName)
return ''
def getIndexOfStartingWithSecond(letter, splitLine):
	'Get the index of the first occurrence of the given letter in the split line, starting with the second word. Return -1 if the letter is not found.'
for wordIndex in xrange( 1, len(splitLine) ):
word = splitLine[ wordIndex ]
firstLetter = word[0]
if firstLetter == letter:
return wordIndex
return - 1
def getLineWithValueString(character, line, splitLine, valueString):
'Get the line with a valueString.'
roundedValueString = character + valueString
indexOfValue = getIndexOfStartingWithSecond(character, splitLine)
if indexOfValue == - 1:
return line + ' ' + roundedValueString
word = splitLine[indexOfValue]
return line.replace(word, roundedValueString)
def getLocationFromSplitLine(oldLocation, splitLine):
'Get the location from the split line.'
if oldLocation == None:
oldLocation = Vector3()
return Vector3(
getDoubleFromCharacterSplitLineValue('X', splitLine, oldLocation.x),
getDoubleFromCharacterSplitLineValue('Y', splitLine, oldLocation.y),
getDoubleFromCharacterSplitLineValue('Z', splitLine, oldLocation.z))
def getSplitLineBeforeBracketSemicolon(line):
'Get the split line before a bracket or semicolon.'
line = line.split(';')[0]
bracketIndex = line.find('(')
if bracketIndex > 0:
return line[: bracketIndex].split()
return line.split()
def getStringFromCharacterSplitLine(character, splitLine):
	'Get the string after the first occurrence of the character in the split line.'
indexOfCharacter = getIndexOfStartingWithSecond(character, splitLine)
if indexOfCharacter < 0:
return None
return splitLine[indexOfCharacter][1 :]
def getWithoutBracketsEqualTab(line):
'Get a string without the greater than sign, the bracket and less than sign, the equal sign or the tab.'
line = line.replace('=', ' ')
line = line.replace('(<', '')
line = line.replace('>', '')
return line.replace('\t', '')
def isProcedureDone(gcodeText, procedure):
'Determine if the procedure has been done on the gcode text.'
if gcodeText == '':
return False
lines = archive.getTextLines(gcodeText)
for line in lines:
withoutBracketsEqualTabQuotes = getWithoutBracketsEqualTab(line).replace('"', '').replace("'", '')
splitLine = getWithoutBracketsEqualTab( withoutBracketsEqualTabQuotes ).split()
firstWord = getFirstWord(splitLine)
if firstWord == 'procedureName':
if splitLine[1].find(procedure) != -1:
return True
elif firstWord == 'extrusionStart':
return False
procedureIndex = line.find(procedure)
if procedureIndex != -1:
if 'procedureName' in splitLine:
nextIndex = splitLine.index('procedureName') + 1
if nextIndex < len(splitLine):
nextWordSplit = splitLine[nextIndex].split(',')
if procedure in nextWordSplit:
return True
return False
def isProcedureDoneOrFileIsEmpty(gcodeText, procedure):
'Determine if the procedure has been done on the gcode text or the file is empty.'
if gcodeText == '':
return True
return isProcedureDone(gcodeText, procedure)
def getFirstWordIndexReverse(firstWord, lines, startIndex):
	'Search the gcode lines in reverse order and return the index of the last line starting with the given first word, or -1 if it is not found.'
for lineIndex in xrange(len(lines) - 1, startIndex - 1, -1):
if firstWord == getFirstWord(getSplitLineBeforeBracketSemicolon(lines[lineIndex])):
return lineIndex
return -1
def isThereAFirstWord(firstWord, lines, startIndex):
	'Determine whether any line from startIndex onward starts with the given first word.'
for lineIndex in xrange(startIndex, len(lines)):
line = lines[lineIndex]
splitLine = getSplitLineBeforeBracketSemicolon(line)
if firstWord == getFirstWord(splitLine):
return True
return False
class BoundingRectangle:
'A class to get the corners of a gcode text.'
def getFromGcodeLines(self, lines, radius):
'Parse gcode text and get the minimum and maximum corners.'
self.cornerMaximum = complex(-987654321.0, -987654321.0)
self.cornerMinimum = complex(987654321.0, 987654321.0)
self.oldLocation = None
self.cornerRadius = complex(radius, radius)
for line in lines:
self.parseCorner(line)
return self
def isPointInside(self, point):
'Determine if the point is inside the bounding rectangle.'
return point.imag >= self.cornerMinimum.imag and point.imag <= self.cornerMaximum.imag and point.real >= self.cornerMinimum.real and point.real <= self.cornerMaximum.real
def parseCorner(self, line):
'Parse a gcode line and use the location to update the bounding corners.'
splitLine = getSplitLineBeforeBracketSemicolon(line)
firstWord = getFirstWord(splitLine)
if firstWord == '(<boundaryPoint>':
locationComplex = getLocationFromSplitLine(None, splitLine).dropAxis()
self.cornerMaximum = euclidean.getMaximum(self.cornerMaximum, locationComplex)
self.cornerMinimum = euclidean.getMinimum(self.cornerMinimum, locationComplex)
elif firstWord == 'G1':
location = getLocationFromSplitLine(self.oldLocation, splitLine)
locationComplex = location.dropAxis()
self.cornerMaximum = euclidean.getMaximum(self.cornerMaximum, locationComplex + self.cornerRadius)
self.cornerMinimum = euclidean.getMinimum(self.cornerMinimum, locationComplex - self.cornerRadius)
self.oldLocation = location
class DistanceFeedRate:
'A class to limit the z feed rate and round values.'
def __init__(self):
'Initialize.'
self.decimalPlacesCarried = 3
self.output = cStringIO.StringIO()
def addGcodeFromFeedRateThreadZ(self, feedRateMinute, thread, travelFeedRateMinute, z):
'Add a thread to the output.'
if len(thread) > 0:
self.addGcodeMovementZWithFeedRate(travelFeedRateMinute, thread[0], z)
else:
print('zero length vertex positions array which was skipped over, this should never happen.')
if len(thread) < 2:
print('thread of only one point in addGcodeFromFeedRateThreadZ in gcodec, this should never happen.')
print(thread)
return
self.addLine('M101') # Turn extruder on.
for point in thread[1 :]:
self.addGcodeMovementZWithFeedRate(feedRateMinute, point, z)
self.addLine('M103') # Turn extruder off.
def addGcodeFromLoop(self, loop, z):
'Add the gcode loop.'
euclidean.addSurroundingLoopBeginning(self, loop, z)
self.addPerimeterBlock(loop, z)
self.addLine('(</boundaryPerimeter>)')
self.addLine('(</nestedRing>)')
def addGcodeFromThreadZ(self, thread, z):
'Add a thread to the output.'
if len(thread) > 0:
self.addGcodeMovementZ(thread[0], z)
else:
print('zero length vertex positions array which was skipped over, this should never happen.')
if len(thread) < 2:
print('thread of only one point in addGcodeFromThreadZ in gcodec, this should never happen.')
print(thread)
return
self.addLine('M101') # Turn extruder on.
for point in thread[1 :]:
self.addGcodeMovementZ(point, z)
self.addLine('M103') # Turn extruder off.
def addGcodeMovementZ(self, point, z):
'Add a movement to the output.'
self.addLine(self.getLinearGcodeMovement(point, z))
def addGcodeMovementZWithFeedRate(self, feedRateMinute, point, z):
'Add a movement to the output.'
self.addLine(self.getLinearGcodeMovementWithFeedRate(feedRateMinute, point, z))
def addLine(self, line):
'Add a line of text and a newline to the output.'
if len(line) > 0:
self.output.write(line + '\n')
def addLines(self, lines):
'Add lines of text to the output.'
addLinesToCString(self.output, lines)
def addLinesSetAbsoluteDistanceMode(self, lines):
'Add lines of text to the output and ensure the absolute mode is set.'
if len(lines) < 1:
return
if len(lines[0]) < 1:
return
absoluteDistanceMode = True
self.addLine('(<alteration>)')
for line in lines:
splitLine = getSplitLineBeforeBracketSemicolon(line)
firstWord = getFirstWord(splitLine)
if firstWord == 'G90':
absoluteDistanceMode = True
elif firstWord == 'G91':
absoluteDistanceMode = False
self.addLine(line)
if not absoluteDistanceMode:
self.addLine('G90')
self.addLine('(</alteration>)')
def addParameter(self, firstWord, parameter):
'Add the parameter.'
self.addLine(firstWord + ' S' + euclidean.getRoundedToThreePlaces(parameter))
def addPerimeterBlock(self, loop, z):
'Add the perimeter gcode block for the loop.'
if len(loop) < 2:
return
if euclidean.isWiddershins(loop): # Indicate that a perimeter is beginning.
self.addLine('(<perimeter> outer )')
else:
self.addLine('(<perimeter> inner )')
self.addGcodeFromThreadZ(loop + [loop[0]], z)
		self.addLine('(</perimeter>)') # Indicate that a perimeter is ending.
def addTagBracketedLine(self, tagName, value):
'Add a begin tag, value and end tag.'
self.addLine('(<%s> %s </%s>)' % (tagName, value, tagName))
def addTagRoundedLine(self, tagName, value):
'Add a begin tag, rounded value and end tag.'
self.addLine('(<%s> %s </%s>)' % (tagName, self.getRounded(value), tagName))
def getBoundaryLine(self, location):
'Get boundary gcode line.'
return '(<boundaryPoint> X%s Y%s Z%s </boundaryPoint>)' % (self.getRounded(location.x), self.getRounded(location.y), self.getRounded(location.z))
def getFirstWordMovement(self, firstWord, location):
'Get the start of the arc line.'
return '%s X%s Y%s Z%s' % (firstWord, self.getRounded(location.x), self.getRounded(location.y), self.getRounded(location.z))
def getLinearGcodeMovement(self, point, z):
'Get a linear gcode movement.'
return 'G1 X%s Y%s Z%s' % ( self.getRounded( point.real ), self.getRounded( point.imag ), self.getRounded(z) )
def getLinearGcodeMovementWithFeedRate(self, feedRateMinute, point, z):
'Get a z limited gcode movement.'
linearGcodeMovement = self.getLinearGcodeMovement(point, z)
if feedRateMinute == None:
return linearGcodeMovement
return linearGcodeMovement + ' F' + self.getRounded(feedRateMinute)
def getLineWithFeedRate(self, feedRateMinute, line, splitLine):
'Get the line with a feed rate.'
return getLineWithValueString('F', line, splitLine, self.getRounded(feedRateMinute))
def getLineWithX(self, line, splitLine, x):
'Get the line with an x.'
return getLineWithValueString('X', line, splitLine, self.getRounded(x))
def getLineWithY(self, line, splitLine, y):
'Get the line with a y.'
return getLineWithValueString('Y', line, splitLine, self.getRounded(y))
def getLineWithZ(self, line, splitLine, z):
'Get the line with a z.'
return getLineWithValueString('Z', line, splitLine, self.getRounded(z))
def getRounded(self, number):
'Get number rounded to the number of carried decimal places as a string.'
return euclidean.getRoundedToPlacesString(self.decimalPlacesCarried, number)
def parseSplitLine(self, firstWord, splitLine):
'Parse gcode split line and store the parameters.'
firstWord = getWithoutBracketsEqualTab(firstWord)
if firstWord == 'decimalPlacesCarried':
self.decimalPlacesCarried = int(splitLine[1])
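# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): decoding one linear
# movement line with the helper functions above; the gcode line is made up.
if __name__ == '__main__':
	exampleLine = 'G1 X0.37 Y-4.07 Z1.9 F60.0 ; travel move'
	exampleSplitLine = getSplitLineBeforeBracketSemicolon(exampleLine)  # comment stripped
	exampleLocation = getLocationFromSplitLine(None, exampleSplitLine)  # Vector3(0.37, -4.07, 1.9)
	exampleFeedRate = getFeedRateMinute(None, exampleSplitLine)         # 60.0
	print('first word: %s' % getFirstWord(exampleSplitLine))
	print('location: %s  feed rate: %s' % (exampleLocation, exampleFeedRate))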
|
natetrue/ReplicatorG
|
skein_engines/skeinforge-40/fabmetheus_utilities/gcodec.py
|
Python
|
gpl-2.0
| 15,033
|
"""
Settings for the current site. This file is heavily based on David Cramer's
post on http://justcramer.com/2011/01/13/settings-in-django/
"""
import os
# Load deploy environment specific settings
DJANGO_DEPLOY_ENV = os.environ.get('DJANGO_DEPLOY_ENV', 'dev')
if DJANGO_DEPLOY_ENV != 'defaults':
module = __import__('siteconfig.deploy_envs.' + DJANGO_DEPLOY_ENV,
globals(), locals(), ['*'])
for k in dir(module):
if k[0] == '_':
continue
locals()[k] = getattr(module, k)
## Remove disabled apps
if 'DISABLED_APPS' in locals():
INSTALLED_APPS = [k for k in INSTALLED_APPS if k not in DISABLED_APPS]
MIDDLEWARE_CLASSES = list(MIDDLEWARE_CLASSES)
DATABASE_ROUTERS = list(DATABASE_ROUTERS)
TEMPLATE_CONTEXT_PROCESSORS = list(TEMPLATE_CONTEXT_PROCESSORS)
for a in DISABLED_APPS:
for x, m in enumerate(MIDDLEWARE_CLASSES):
if m.startswith(a):
MIDDLEWARE_CLASSES.pop(x)
for x, m in enumerate(TEMPLATE_CONTEXT_PROCESSORS):
if m.startswith(a):
TEMPLATE_CONTEXT_PROCESSORS.pop(x)
for x, m in enumerate(DATABASE_ROUTERS):
if m.startswith(a):
DATABASE_ROUTERS.pop(x)
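# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original settings): a deploy-env module
# such as siteconfig/deploy_envs/dev.py is expected to define the names the
# loop above consumes; the values below are purely illustrative.
#
#   INSTALLED_APPS = ['django.contrib.admin', 'debug_toolbar', 'myapp']
#   MIDDLEWARE_CLASSES = ('debug_toolbar.middleware.DebugToolbarMiddleware',)
#   TEMPLATE_CONTEXT_PROCESSORS = ()
#   DATABASE_ROUTERS = []
#   DISABLED_APPS = ['debug_toolbar']   # strips the app and its middleware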
|
brenoarosa/tweend
|
siteconfig/settings.py
|
Python
|
gpl-2.0
| 1,255
|
# Copyright (C) 2014 Equinor ASA, Norway.
#
# The file 'vector_template.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
"""
Typed vectors IntVector, DoubleVector and BoolVector.
This module implements a quite simple typed vector which will grow
transparently as needed. The vector is created with a default value,
which will be used for not explicitly set indices.
vec = IntVector( default_value = 66 )
vec[0] = 10
vec[2] = 10
After the 'vec[2] = 10' statement the vector has grown to contain
three elements. The element vec[1] has not been explicitly assigned by
the user, in that case the implementation has 'filled the hole' with
the default value (i.e. 66 in this case). So the statement
print(vec[1])
will give '66'. The main part of the implementation is in terms of an
"abstract base class" TVector. The TVector class should not be
instantiated directly, instead the child classes IntVector,
DoubleVector or BoolVector should be used.
The C-level has implementations for several fundamental types like
float and size_t not currently implemented in the Python version.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
from cwrap import CFILE, BaseCClass
class VectorTemplate(BaseCClass):
def strided_copy(self, slice_range):
"""
        Will create a new copy according to @slice_range.
        Mainly a support function for slice-based lookup like
v = IntVector()
v[0] = 1
v[1] = 1
v[100] = 100
...
c = v[0:100:10]
        Now 'c' will be an IntVector() instance containing every tenth
element from 'v'.
"""
(start, stop, step) = slice_range.indices(len(self))
if stop > start:
return self._strided_copy(start, stop, step)
else:
return None
def __bool__(self):
"""
Will evaluate to False for empty vector.
"""
if len(self) == 0:
return False
else:
return True
def __nonzero__(self):
return self.__bool__( )
def __eq__(self, other):
return self._equal(other)
def __ne__(self,other):
return not self.__eq__(other)
def first_eq(self, other, offset = 0):
index = self._first_eq(other, offset)
if index <= -2:
raise ValueError("Invalid offset")
return index
def first_neq(self, other, offset = 0):
index = self._first_neq(other, offset)
if index <= -2:
raise ValueError("Invalid offset")
return index
def copy(self):
"""
Create a new copy of the current vector.
"""
new = self._alloc_copy( )
return new
def __irshift__(self,shift):
if shift < 0:
raise ValueError("The shift must be positive")
self._rshift(shift)
return self
def __ilshift__(self,shift):
if shift < 0:
raise ValueError("The shift must be positive")
if shift > len(self):
raise ValueError("The shift is too large %d > %d" % (shift, len(self)))
self._lshift( shift)
return self
def __rshift__(self,shift):
copy = self.copy()
copy >>= shift
return copy
def __lshift__(self,shift):
copy = self.copy()
copy <<= shift
return copy
def __deepcopy__(self, memo):
new = self.copy()
return new
def __init__(self, default_value=0, initial_size=0):
"""
Creates a new TVector instance.
"""
c_pointer = self._alloc(initial_size, default_value)
super(VectorTemplate, self).__init__(c_pointer)
self.element_size = self._element_size()
def __contains__(self , value):
return self._contains( value)
def pop(self):
if len(self) > 0:
return self._pop()
else:
raise ValueError("Trying to pop from empty vector")
def str_data(self, width, index1, index2, fmt):
"""
Helper function for str() method.
"""
data = []
s = ""
for index in range(index1, index2):
data.append(self[index])
for index in range(len(data)):
s += fmt % data[index]
if index % width == (width - 1):
s += "\n"
return s
# The str() method is a verbatim copy of the implementation in
# ecl_kw.py.
def str(self, width=5, max_lines=10, fmt=None):
"""
Return string representation of vector for pretty printing.
The function will return a string consisting of a header, and
then a chunk of data. The data will be formatted in @width
columns, and a maximum of @max_lines lines. If @max_lines is
        not sufficient the first elements in the vector are
represented, a .... continuation line and then the last part
of the vector. If @max_lines is None all of the vector will be
        printed, irrespective of how long it is.
If a value is given for @fmt that is used as format string for
each element, otherwise a type-specific default format is
used. If given the @fmt string should contain spacing between
the elements. The implementation of the builtin method
__str__() is based on this method.
"""
s = ""
lines = len(self) // width
if not fmt:
fmt = self.default_format + " "
if max_lines is None or lines <= max_lines:
s += self.str_data(width, 0, len(self), fmt)
else:
s1 = width * max_lines // 2
s += self.str_data(width, 0, s1, fmt)
s += " .... \n"
s += self.str_data(width, len(self) - s1, len(self), fmt)
return s
def __str__(self):
"""
        Returns the string representation of the vector.
"""
return self.str(max_lines=10, width=5)
def __getitem__(self, index):
"""
Implements read [] operator - @index can be slice instance.
"""
if isinstance(index, int):
length = len(self)
idx = index
if idx < 0:
idx += length
if 0 <= idx < length:
return self._iget(idx)
else:
raise IndexError('Index must be in range %d <= %d < %d.' % (0, index, length))
elif isinstance(index, slice):
return self.strided_copy(index)
else:
raise TypeError("Index should be integer or slice type.")
def __setitem__(self, index, value):
"""
Implements write [] operator - @index must be integer or slice.
"""
ls = len(self)
if isinstance(index, int):
idx = index
if idx < 0:
idx += ls
self._iset(idx, value)
elif isinstance( index, slice ):
for i in range(*index.indices(ls)):
self[i] = value
else:
raise TypeError("Index should be integer type")
##################################################################
# Mathematical operations:
def __IADD(self, delta, add):
"""
Low-level function implementing inplace add.
The __IADD__ function implements the operation:
v += a
The variable which is added, i.e. @delta, can either be of the
same type as the current instance, or a numerical scalar. The
__IADD__ method implements both add and subtract, based on the
boolean flag @add.
The __IADD__ method should not be called directly; but rather
via the __iadd__, __add__ and __radd__ methods which implement
the various addition operation, and the corresponding
subtraction operations: __isub__, __sub__ and __rsub__.
"""
if type(self) == type(delta):
if len(self) == len(delta):
# This is vector + vector operation.
if not add:
delta *= -1
self._inplace_add(delta)
else:
raise ValueError("Incompatible sizes for add self:%d other:%d" % (len(self), len(delta)))
else:
if isinstance(delta, int) or isinstance(delta, float):
if not add:
delta *= -1
self._shift(delta)
else:
raise TypeError("delta has wrong type:%s " % type(delta))
return self
def __iadd__(self, delta):
"""
Implements inplace add. @delta can be vector or scalar.
"""
return self.__IADD(delta, True)
def __isub__(self, delta):
"""
Implements inplace subtract. @delta can be vector or scalar.
"""
return self.__IADD(delta, False)
def __radd__(self, delta):
return self.__add__(delta)
def __add__(self, delta):
"""
Implements add operation - creating a new copy.
b = DoubleVector()
c = DoubleVector() // Or alternatively scalar
....
a = b + c
"""
copy = self._alloc_copy( )
copy += delta
return copy
def __sub__(self, delta):
"""
Implements subtraction - creating new copy.
"""
copy = self._alloc_copy( )
copy -= delta
return copy
def __rsub__(self, delta):
return self.__sub__(delta) * -1
def __imul__(self, factor):
"""
Low-level function implementing inplace multiplication.
The __IMUL__ function implements the operation:
v *= a
The variable which is multiplied in, i.e. @factor, can either
be of the same type as the current instance, or a numerical
scalar. The __IMUL__ method should not be called directly, but
rather via the __mul__, __imul__ and __rmul__ functions.
"""
if type(self) == type(factor):
# This is vector * vector operation.
if len(self) == len(factor):
self._inplace_mul(factor)
else:
raise ValueError("Incompatible sizes for mul self:%d other:%d" % (len(self), len(factor)))
else:
if isinstance(factor, int) or isinstance(factor, float):
self._scale(factor)
else:
raise TypeError("factor has wrong type:%s " % type(factor))
return self
def __mul__(self, factor):
copy = self._alloc_copy( )
copy *= factor
return copy
def __rmul__(self, factor):
return self.__mul__(factor)
def __div__(self, divisor):
if isinstance(divisor, int) or isinstance(divisor, float):
copy = self._alloc_copy()
copy._div(divisor)
return copy
else:
raise TypeError("Divisor has wrong type:%s" % type(divisor))
def __truediv__(self, divisor):
return self.__div__(divisor)
def __idiv__(self, divisor):
return self.__div__(divisor)
def __itruediv__(self, divisor):
return self.__div__(divisor)
# End mathematical operations
#################################################################
    # Essentially implements a = b
def assign(self, value):
"""
Implements assignment of type a = b.
The @value parameter can either be a vector instance, in which
case the content of @value is copied into the current
instance, or a scalar in which case all the elements of the
vector are set to this value:
v1 = IntVector()
v2 = IntVector()
v1[10] = 77
        v2.assign( v1 ) # Now v2 contains identical content to v1
....
v1.assign( 66 ) # Now v1 is a vector of 11 elements - all equal to 66
"""
if type(self) == type(value):
# This is a copy operation
self._memcpy(value)
else:
if isinstance(value, int) or isinstance(value, float):
self._assign(value)
else:
raise TypeError("Value has wrong type")
def __len__(self):
"""
The number of elements in the vector.
"""
return self._size( )
def printf(self, fmt=None, name=None, stream=sys.stdout):
"""
        See also the str() method which returns a string representation
of the vector.
"""
cfile = CFILE(stream)
if not fmt:
fmt = self.default_format
self._fprintf(cfile, name, fmt)
def max(self):
if len(self) > 0:
return self._get_max()
else:
raise IndexError("The vector is empty!")
def min(self):
if len(self) > 0:
return self._get_min()
else:
raise IndexError("The vector is empty!")
def minIndex(self, reverse=False):
"""
@type reverse: bool
@rtype: int
"""
if len(self) > 0:
return self._get_min_index(reverse)
else:
raise IndexError("The vector is empty!")
def maxIndex(self, reverse=False):
"""
@type reverse: bool
@rtype: int
"""
if len(self) > 0:
return self._get_max_index(reverse)
else:
raise IndexError("The vector is empty!")
def append(self, value):
self._append(value)
def deleteBlock(self, index, block_size):
"""
Remove a block of size @block_size starting at @index.
After the removal data will be left shifted.
"""
self._idel_block(index, block_size)
def sort(self, reverse=False):
"""
Sort the vector inplace in increasing numerical order or decreasing order if reverse is True.
@type reverse: bool
"""
if not reverse:
self._sort()
else:
self._rsort()
def clear(self):
self._reset()
def safeGetByIndex(self, index):
return self._safe_iget(index)
def setReadOnly(self, read_only):
self._set_read_only(read_only)
def getReadOnly(self):
return self._get_read_only()
def setDefault(self, value):
self._set_default(value)
def getDefault(self):
return self._get_default()
def free(self):
self._free()
def __repr__(self):
return self._create_repr('size = %d' % len(self))
def permute(self, permutation_vector):
"""
Reorders this vector based on the indexes in permutation_vector.
@type permutation_vector: PermutationVector
"""
self._permute( permutation_vector)
def permutationSort(self, reverse=False):
"""
Returns the permutation vector for sorting of this vector. Vector is not sorted.
@type reverse: bool
        @rtype: PermutationVector
"""
if len(self) > 0:
if not reverse:
return self._sort_perm()
else:
return self._rsort_perm()
return None
def asList(self):
l = [0] * len(self)
for (index,value) in enumerate(self):
l[index] = value
return l
def selectUnique(self):
self._select_unique()
def elementSum(self):
return self._element_sum( )
def getDataPtr(self):
"Low level function which returns a pointer to underlying storage"
# Observe that the get_data_ptr() function is not implemented
# for the TimeVector class.
return self._get_data_ptr()
def countEqual(self , value):
return self._count_equal( value )
def initRange(self , min_value , max_value , delta):
"""
Will fill the vector with the values from min_value to
max_value in steps of delta. The upper limit is guaranteed to
be inclusive, even if it is not commensurable with the delta.
"""
if delta == 0:
raise ValueError("Invalid range")
else:
self._init_range( min_value , max_value , delta )
@classmethod
def create_linear(cls, start_value, end_value, num_values):
vector = cls()
if not vector._init_linear(start_value, end_value, num_values):
raise ValueError("init_linear arguments invalid")
return vector
@classmethod
def createRange(cls , min_value , max_value , delta):
"""
Will create new vector and initialize a range.
"""
vector = cls( )
vector.initRange( min_value , max_value , delta )
return vector
def _strided_copy(self, *_):
raise NotImplementedError()
def _rshift(self, *_):
raise NotImplementedError()
def _lshift(self, *_):
raise NotImplementedError()
def _alloc(self, *_):
raise NotImplementedError()
def _element_size(self, *_):
raise NotImplementedError()
def _contains(self, *_):
raise NotImplementedError()
def _pop(self, *_):
raise NotImplementedError()
def default_format(self, *_):
raise NotImplementedError()
def _iget(self, *_):
raise NotImplementedError()
def _iset(self, *_):
raise NotImplementedError()
def _inplace_add(self, *_):
raise NotImplementedError()
def _shift(self, *_):
raise NotImplementedError()
def _alloc_copy(self, *_):
raise NotImplementedError()
def _inplace_mul(self, *_):
raise NotImplementedError()
def _scale(self, *_):
raise NotImplementedError()
def _memcpy(self, *_):
raise NotImplementedError()
def _assign(self, *_):
raise NotImplementedError()
def _size(self, *_):
raise NotImplementedError()
def _fprintf(self, *_):
raise NotImplementedError()
def _get_max(self, *_):
raise NotImplementedError()
def _get_min(self, *_):
raise NotImplementedError()
def _get_min_index(self, *_):
raise NotImplementedError()
def _get_max_index(self, *_):
raise NotImplementedError()
def _append(self, *_):
raise NotImplementedError()
def _idel_block(self, *_):
raise NotImplementedError()
def _sort(self, *_):
raise NotImplementedError()
def _rsort(self, *_):
raise NotImplementedError()
def _reset(self, *_):
raise NotImplementedError()
def _safe_iget(self, *_):
raise NotImplementedError()
def _set_read_only(self, *_):
raise NotImplementedError()
def _get_read_only(self, *_):
raise NotImplementedError()
def _set_default(self, *_):
raise NotImplementedError()
def _get_default(self, *_):
raise NotImplementedError()
def _free(self, *_):
raise NotImplementedError()
def _permute(self, *_):
raise NotImplementedError()
def _sort_perm(self, *_):
raise NotImplementedError()
def _rsort_perm(self, *_):
raise NotImplementedError()
def _select_unique(self, *_):
raise NotImplementedError()
def _element_sum(self, *_):
raise NotImplementedError()
def _get_data_ptr(self, *_):
raise NotImplementedError()
def _count_equal(self, *_):
raise NotImplementedError()
def _init_range(self, *_):
raise NotImplementedError()
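# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): illustrates the
# grow-with-default, slicing and in-place arithmetic documented above,
# assuming the concrete IntVector subclass is importable from ecl.util.util.
if __name__ == '__main__':
    from ecl.util.util import IntVector
    vec = IntVector(default_value=66)
    vec[0] = 10
    vec[2] = 10                 # vector grows; the hole at index 1 gets 66
    print(vec[1])               # -> 66
    every_other = vec[0:3:2]    # slice lookup goes through strided_copy()
    print(len(every_other))     # -> 2
    vec += 1                    # scalar shift via __iadd__
    print(vec.asList())         # -> [11, 67, 11]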
|
Statoil/libecl
|
python/ecl/util/util/vector_template.py
|
Python
|
gpl-3.0
| 20,059
|
import types
from os import path
from PyQt5.QtWidgets import QFrame, QHeaderView
from PyQt5.QtGui import QPixmap
from gui.view.gen.ui_AppMainWindow import Ui_MainWindow
from golem.core.common import get_golem_path
from mainwindow import MainWindow
class AppMainWindow(object):
def __init__(self):
self.window = MainWindow()
self.ui = Ui_MainWindow()
self.ui.setupUi(self.window)
table = self.ui.taskTableWidget
header = table.horizontalHeader()
header.setStretchLastSection(True)
header.setSectionResizeMode(QHeaderView.Interactive)
def window_resize(instance, event):
column_count = table.columnCount()
column_width = int(table.width() / column_count)
for n in range(column_count):
width = column_width
if n == column_count - 1:
width -= 1 # hide the horizontal scrollbar
table.setColumnWidth(n, width)
super(MainWindow, instance).resizeEvent(event)
self.window.resizeEvent = types.MethodType(window_resize, self.window, MainWindow)
self.ui.previewLabel.setFrameStyle(QFrame.NoFrame)
self.ui.previewLabel.setPixmap(QPixmap(path.join(get_golem_path(), "gui", "view", "nopreview.png")))
self.__new_task_buttons = [
self.ui.showAdvanceNewTaskButton,
self.ui.addResourceButton,
self.ui.saveButton,
self.ui.loadButton,
self.ui.taskTypeComboBox,
]
self.__recount_buttons = [
self.ui.recountBlenderButton,
self.ui.recountButton,
self.ui.recountLuxButton,
self.ui.settingsOkButton,
self.ui.settingsCancelButton,
]
self.__style_sheet = "color: black"
def show(self):
self.window.show()
self.window.resizeEvent(None)
def setEnabled(self, tab_name, enable):
"""
Enable or disable buttons on the 'New task' or 'Provider' tab
        :param tab_name: Tab name. Available values: 'new_task', 'settings' and 'recount'
:param enable: enable if True, disable otherwise
"""
tab_name = tab_name.lower()
if tab_name == 'new_task':
self.__set_enabled(self.__new_task_buttons, enable)
if enable and self.__style_sheet is not None:
self.ui.startTaskButton.setStyleSheet(self.__style_sheet)
elif tab_name == 'settings':
self.ui.settingsOkButton.setEnabled(enable)
self.ui.settingsCancelButton.setEnabled(enable)
elif tab_name == 'recount':
self.__set_enabled(self.__recount_buttons, enable)
def __set_enabled(self, elements, enable):
"""
Enable or disable buttons
:param elements: UI elements to be enabled or disabled
:param enable: enable if True, disable otherwise
"""
for element in elements:
element.setEnabled(enable)
if enable and self.__style_sheet is not None:
element.setStyleSheet(self.__style_sheet)
|
Radagast-red/golem
|
gui/view/appmainwindow.py
|
Python
|
gpl-3.0
| 3,164
|
# -*- coding: utf-8 -*-
"""Copyright (c) 2012 Sergio Gabriel Teves
All rights reserved.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from django.db import models
from django.utils.encoding import smart_unicode
class ExceptionLog(models.Model):
date = models.DateTimeField(auto_now_add=True, db_index=True)
exception = models.TextField(null=True, blank=True)
stacktrace = models.TextField(null=True, blank=True)
request = models.TextField(null=True, blank=True)
def __unicode__(self):
return '%s - %s' % (smart_unicode(self.date),smart_unicode(self.exception))
def __repr__(self):
return smart_unicode(self)
class Meta:
ordering = ('-date',)
|
dahool/vertaal
|
exceptionlogger/models.py
|
Python
|
gpl-3.0
| 1,327
|
import os
import shutil
import re
class PackagePath(object):
def __init__(self, package_name):
self.name = package_name
def generate(self):
return self.name.replace('.', '/')
class FileOperation(object):
def __init__(self, src, dest):
self.src = src
self.dest = dest
def make_directory_if_not_exist(self, iter):
for name in iter:
path = "%s/%s" % (self.dest, name)
try:
os.makedirs(path)
except OSError, e:
if e.errno == 17:
print "%s already exists, ignoring" % path
return self
def copy_file(self, map):
for srcname, destname in map.iteritems():
shutil.copyfile("%s/%s" % (self.src, srcname), "%s/%s" % (self.dest, destname))
return self
def transform(self, pattern, repl, iter):
matcher = re.compile(pattern)
for name in iter:
path = "%s/%s" % (self.dest, name)
scratch = "%s.tmp" % path
with open(path, "r") as src_file:
with open(scratch, "w") as dest_file:
for l in src_file:
dest_file.write(matcher.sub(repl, l))
shutil.move(scratch, path)
return self
if __name__ == '__main__':
import sys
try:
src = os.path.dirname(sys.argv[0])
dest = sys.argv[1]
package = sys.argv[2]
except IndexError:
print "usage: %s <project path> <package name>" % sys.argv[0]
sys.exit(1)
package_path = PackagePath(package).generate()
FileOperation(src, dest).make_directory_if_not_exist((
'test/unit/libs',
'test/unit/src/%s' % package_path
))
FileOperation(src, dest).copy_file({
"custom_rules.template.xml": "custom_rules.xml",
"runner-template.properties": "runner.properties",
"test-unit-build.template.xml": "test/unit/build.xml",
"test-unit-TestRunner.template.java": "test/unit/src/%s/TestRunner.java" % package_path
})
FileOperation(src, dest).transform(
r'^#runner.at=.*$',
'runner.at=%s' % os.path.dirname(sys.argv[0]),
("runner.properties", )
)
FileOperation(src, dest).transform(
r'x\.y\.z', package,
("test/unit/src/%s/TestRunner.java" % package_path, )
)
print """\
Test environment has been set up for %(package)s! Next:
1. Check and fix runner.properties
2. 'ant test-unit-clean' to start unit tests
3a. 'ant test-integ-clean' to start integration tests
3b. 'ant test-func-clean' to start functional tests
3c. 'ant test-accept-clean' to start acceptance tests
(NB. you need to create a test-project beforehand to do these)
""" % dict(package=package, package_path=package_path)
|
taky/lucene
|
etc/runner/bootstrap.py
|
Python
|
gpl-3.0
| 2,792
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import numpy as np
import bpy
from bpy.props import IntProperty, FloatProperty
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode, match_long_repeat
def hilbert(step, n):
def hilbert3(n):
if (n <= 0):
x, y, z = 0, 0, 0
else:
[xo, yo, zo] = hilbert3(n-1)
x = step * .5 * np.array([.5+zo, .5+yo, -.5+yo, -.5-xo, -.5-xo, -.5-yo, .5-yo, .5+zo])
y = step * .5 * np.array([.5+xo, .5+zo, .5+zo, .5+yo, -.5+yo, -.5-zo, -.5-zo, -.5-xo])
z = step * .5 * np.array([.5+yo, -.5+xo, -.5+xo, .5-zo, .5-zo, -.5+xo, -.5+xo, .5-yo])
return [x, y, z]
vx, vy, vz = hilbert3(n)
vx = vx.flatten().tolist()
vy = vy.flatten().tolist()
vz = vz.flatten().tolist()
verts = list(zip(vx, vy, vz))
return verts
class Hilbert3dNode(bpy.types.Node, SverchCustomTreeNode):
''' Hilbert3d line '''
bl_idname = 'Hilbert3dNode'
bl_label = 'Hilbert3d'
bl_icon = 'OUTLINER_OB_EMPTY'
sv_icon = 'SV_HILBERT_3D'
level_: IntProperty(
name='level', description='Level',
default=2, min=1, max=5, update=updateNode)
size_: FloatProperty(
name='size', description='Size',
default=1.0, min=0.1, update=updateNode)
def sv_init(self, context):
self.inputs.new('SvStringsSocket', "Level").prop_name = 'level_'
self.inputs.new('SvStringsSocket', "Size").prop_name = 'size_'
self.outputs.new('SvVerticesSocket', "Vertices")
self.outputs.new('SvStringsSocket', "Edges")
def process(self):
level_socket, size_socket = self.inputs
verts_socket, edges_socket = self.outputs
if verts_socket.is_linked:
Integer = level_socket.sv_get()[0]
Step = size_socket.sv_get()[0]
# make verts
Integer, Step = match_long_repeat((Integer, Step))
verts = []
for lev, siz in zip(Integer, Step):
verts.append(hilbert(siz, int(lev)))
verts_socket.sv_set(verts)
# make associated edge lists
if edges_socket.is_linked:
listEdg = []
for ve in verts:
listEdg.append([(i, i+1) for i in range(len(ve) - 1)])
edges_socket.sv_set(listEdg)
def register():
bpy.utils.register_class(Hilbert3dNode)
def unregister():
bpy.utils.unregister_class(Hilbert3dNode)
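# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original node): the hilbert() helper above
# can be exercised directly; each recursion level multiplies the point count
# by 8, and the edge list simply chains consecutive vertices, mirroring what
# process() builds for the Edges socket.
if __name__ == '__main__':
    example_verts = hilbert(1.0, 2)                    # level 2 -> 8**2 = 64 points
    example_edges = [(i, i + 1) for i in range(len(example_verts) - 1)]
    print(len(example_verts), len(example_edges))      # -> 64 63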
|
DolphinDream/sverchok
|
nodes/generators_extended/hilbert3d.py
|
Python
|
gpl-3.0
| 3,268
|
def test(msgtype, flags):
if flags == 1:
msgtype = 1
elif flags == 2:
msgtype = 2
elif flags == 3:
msgtype = 3
return msgtype
|
zrax/pycdc
|
tests/input/if_elif_else.py
|
Python
|
gpl-3.0
| 166
|
"""
Copyright 2008-2011 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
from . import odict
from . Constants import ADVANCED_PARAM_TAB, DEFAULT_PARAM_TAB
from Element import Element
from Cheetah.Template import Template
from UserDict import UserDict
from itertools import imap
class TemplateArg(UserDict):
"""
A cheetah template argument created from a param.
    The str of this class evaluates to the param's to_code() method.
The use of this class as a dictionary (enum only) will reveal the enum opts.
The __call__ or () method can return the param evaluated to a raw python data type.
"""
def __init__(self, param):
UserDict.__init__(self)
self._param = param
if param.is_enum():
for key in param.get_opt_keys():
self[key] = str(param.get_opt(key))
def __str__(self):
return str(self._param.to_code())
def __call__(self):
return self._param.get_evaluated()
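# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original module): inside a Cheetah
# template each block parameter is wrapped in a TemplateArg, so for a
# hypothetical parameter 'samp_rate' (values purely illustrative):
#   $samp_rate     -> str(arg)  == the param's to_code() string, e.g. '32000'
#   $samp_rate()   -> arg()     == the evaluated Python value, e.g. 32000
#   $type.complex  -> arg[key]  == an enum option's value (enum params only)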
def _get_keys(lst): return [elem.get_key() for elem in lst]
def _get_elem(lst, key):
try: return lst[_get_keys(lst).index(key)]
except ValueError: raise ValueError, 'Key "%s" not found in %s.'%(key, _get_keys(lst))
class Block(Element):
def __init__(self, flow_graph, n):
"""
Make a new block from nested data.
Args:
            flow_graph: the parent element
            n: the nested odict
        Returns:
            a new block
"""
#build the block
Element.__init__(self, flow_graph)
#grab the data
params = n.findall('param')
sources = n.findall('source')
sinks = n.findall('sink')
self._name = n.find('name')
self._key = n.find('key')
self._category = n.find('category') or ''
self._grc_source = n.find('grc_source') or ''
self._block_wrapper_path = n.find('block_wrapper_path')
self._bussify_sink = n.find('bus_sink')
self._bussify_source = n.find('bus_source')
self._var_value = n.find('var_value') or '$value'
# get list of param tabs
n_tabs = n.find('param_tab_order') or None
self._param_tab_labels = n_tabs.findall('tab') if n_tabs is not None else [DEFAULT_PARAM_TAB]
#create the param objects
self._params = list()
#add the id param
self.get_params().append(self.get_parent().get_parent().Param(
block=self,
n=odict({
'name': 'ID',
'key': 'id',
'type': 'id',
})
))
self.get_params().append(self.get_parent().get_parent().Param(
block=self,
n=odict({
'name': 'Enabled',
'key': '_enabled',
'type': 'raw',
'value': 'True',
'hide': 'all',
})
))
for param in imap(lambda n: self.get_parent().get_parent().Param(block=self, n=n), params):
key = param.get_key()
#test against repeated keys
if key in self.get_param_keys():
raise Exception, 'Key "%s" already exists in params'%key
#store the param
self.get_params().append(param)
#create the source objects
self._sources = list()
for source in map(lambda n: self.get_parent().get_parent().Port(block=self, n=n, dir='source'), sources):
key = source.get_key()
#test against repeated keys
if key in self.get_source_keys():
raise Exception, 'Key "%s" already exists in sources'%key
#store the port
self.get_sources().append(source)
self.back_ofthe_bus(self.get_sources())
#create the sink objects
self._sinks = list()
for sink in map(lambda n: self.get_parent().get_parent().Port(block=self, n=n, dir='sink'), sinks):
key = sink.get_key()
#test against repeated keys
if key in self.get_sink_keys():
raise Exception, 'Key "%s" already exists in sinks'%key
#store the port
self.get_sinks().append(sink)
self.back_ofthe_bus(self.get_sinks())
self.current_bus_structure = {'source':'','sink':''};
# Virtual source/sink and pad source/sink blocks are
# indistinguishable from normal GR blocks. Make explicit
# checks for them here since they have no work function or
# buffers to manage.
is_not_virtual_or_pad = ((self._key != "virtual_source") \
and (self._key != "virtual_sink") \
and (self._key != "pad_source") \
and (self._key != "pad_sink"))
if is_not_virtual_or_pad:
self.get_params().append(self.get_parent().get_parent().Param(
block=self,
n=odict({'name': 'Block Alias',
'key': 'alias',
'type': 'string',
'hide': 'part',
'tab': ADVANCED_PARAM_TAB
})
))
if (len(sources) or len(sinks)) and is_not_virtual_or_pad:
self.get_params().append(self.get_parent().get_parent().Param(
block=self,
n=odict({'name': 'Core Affinity',
'key': 'affinity',
'type': 'int_vector',
'hide': 'part',
'tab': ADVANCED_PARAM_TAB
})
))
if len(sources) and is_not_virtual_or_pad:
self.get_params().append(self.get_parent().get_parent().Param(
block=self,
n=odict({'name': 'Min Output Buffer',
'key': 'minoutbuf',
'type': 'int',
'hide': 'part',
'value': '0',
'tab': ADVANCED_PARAM_TAB
})
))
self.get_params().append(self.get_parent().get_parent().Param(
block=self,
n=odict({'name': 'Max Output Buffer',
'key': 'maxoutbuf',
'type': 'int',
'hide': 'part',
'value': '0',
'tab': ADVANCED_PARAM_TAB
})
))
def back_ofthe_bus(self, portlist):
portlist.sort(key=lambda p: p._type == 'bus')
def filter_bus_port(self, ports):
buslist = [p for p in ports if p._type == 'bus']
return buslist or ports
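    # Note (added commentary, not in the original source): when a block has been
    # bussified, filter_bus_port() returns only the bus ports so the GUI draws a
    # single bus connector; otherwise the full port list is returned unchanged.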
def get_enabled(self):
"""
Get the enabled state of the block.
Returns:
true for enabled
"""
try: return eval(self.get_param('_enabled').get_value())
except: return True
def set_enabled(self, enabled):
"""
Set the enabled state of the block.
Args:
enabled: true for enabled
"""
self.get_param('_enabled').set_value(str(enabled))
def __str__(self): return 'Block - %s - %s(%s)'%(self.get_id(), self.get_name(), self.get_key())
def get_id(self): return self.get_param('id').get_value()
def is_block(self): return True
def get_name(self): return self._name
def get_key(self): return self._key
def get_category(self): return self._category
def set_category(self, cat): self._category = cat
def get_doc(self): return ''
def get_ports(self): return self.get_sources() + self.get_sinks()
def get_ports_gui(self): return self.filter_bus_port(self.get_sources()) + self.filter_bus_port(self.get_sinks());
def get_children(self): return self.get_ports() + self.get_params()
def get_children_gui(self): return self.get_ports_gui() + self.get_params()
def get_block_wrapper_path(self): return self._block_wrapper_path
##############################################
# Access Params
##############################################
def get_param_tab_labels(self): return self._param_tab_labels
def get_param_keys(self): return _get_keys(self._params)
def get_param(self, key): return _get_elem(self._params, key)
def get_params(self): return self._params
def has_param(self, key):
try:
_get_elem(self._params, key);
return True;
except:
return False;
##############################################
# Access Sinks
##############################################
def get_sink_keys(self): return _get_keys(self._sinks)
def get_sink(self, key): return _get_elem(self._sinks, key)
def get_sinks(self): return self._sinks
def get_sinks_gui(self): return self.filter_bus_port(self.get_sinks())
##############################################
# Access Sources
##############################################
def get_source_keys(self): return _get_keys(self._sources)
def get_source(self, key): return _get_elem(self._sources, key)
def get_sources(self): return self._sources
def get_sources_gui(self): return self.filter_bus_port(self.get_sources());
def get_connections(self):
return sum([port.get_connections() for port in self.get_ports()], [])
def resolve_dependencies(self, tmpl):
"""
        Resolve a parameter dependency with Cheetah templates.
Args:
tmpl: the string with dependencies
Returns:
the resolved value
"""
tmpl = str(tmpl)
if '$' not in tmpl: return tmpl
n = dict((p.get_key(), TemplateArg(p)) for p in self.get_params())
try:
return str(Template(tmpl, n))
except Exception as err:
return "Template error: %s\n %s" % (tmpl, err)
##############################################
# Controller Modify
##############################################
def type_controller_modify(self, direction):
"""
Change the type controller.
Args:
direction: +1 or -1
Returns:
true for change
"""
changed = False
type_param = None
for param in filter(lambda p: p.is_enum(), self.get_params()):
children = self.get_ports() + self.get_params()
#priority to the type controller
if param.get_key() in ' '.join(map(lambda p: p._type, children)): type_param = param
#use param if type param is unset
if not type_param: type_param = param
if type_param:
#try to increment the enum by direction
try:
keys = type_param.get_option_keys()
old_index = keys.index(type_param.get_value())
new_index = (old_index + direction + len(keys))%len(keys)
type_param.set_value(keys[new_index])
changed = True
except: pass
return changed
def port_controller_modify(self, direction):
"""
Change the port controller.
Args:
direction: +1 or -1
Returns:
true for change
"""
return False
def form_bus_structure(self, direc):
if direc == 'source':
get_p = self.get_sources;
get_p_gui = self.get_sources_gui;
bus_structure = self.get_bus_structure('source');
else:
get_p = self.get_sinks;
get_p_gui = self.get_sinks_gui
bus_structure = self.get_bus_structure('sink');
struct = [range(len(get_p()))];
if True in map(lambda a: isinstance(a.get_nports(), int), get_p()):
structlet = [];
last = 0;
for j in [i.get_nports() for i in get_p() if isinstance(i.get_nports(), int)]:
structlet.extend(map(lambda a: a+last, range(j)));
last = structlet[-1] + 1;
struct = [structlet];
if bus_structure:
struct = bus_structure
self.current_bus_structure[direc] = struct;
return struct
def bussify(self, n, direc):
if direc == 'source':
get_p = self.get_sources;
get_p_gui = self.get_sources_gui;
bus_structure = self.get_bus_structure('source');
else:
get_p = self.get_sinks;
get_p_gui = self.get_sinks_gui
bus_structure = self.get_bus_structure('sink');
for elt in get_p():
for connect in elt.get_connections():
self.get_parent().remove_element(connect);
if (not 'bus' in map(lambda a: a.get_type(), get_p())) and len(get_p()) > 0:
struct = self.form_bus_structure(direc);
self.current_bus_structure[direc] = struct;
if get_p()[0].get_nports():
n['nports'] = str(1);
for i in range(len(struct)):
n['key'] = str(len(get_p()));
n = odict(n);
port = self.get_parent().get_parent().Port(block=self, n=n, dir=direc);
get_p().append(port);
elif 'bus' in map(lambda a: a.get_type(), get_p()):
for elt in get_p_gui():
get_p().remove(elt);
self.current_bus_structure[direc] = ''
##############################################
## Import/Export Methods
##############################################
def export_data(self):
"""
Export this block's params to nested data.
Returns:
a nested data odict
"""
n = odict()
n['key'] = self.get_key()
n['param'] = map(lambda p: p.export_data(), self.get_params())
if 'bus' in map(lambda a: a.get_type(), self.get_sinks()):
n['bus_sink'] = str(1);
if 'bus' in map(lambda a: a.get_type(), self.get_sources()):
n['bus_source'] = str(1);
return n
def import_data(self, n):
"""
Import this block's params from nested data.
Any param keys that do not exist will be ignored.
        Since params can be dynamically created based on another param,
call rewrite, and repeat the load until the params stick.
This call to rewrite will also create any dynamic ports
that are needed for the connections creation phase.
Args:
n: the nested data odict
"""
get_hash = lambda: hash(tuple(map(hash, self.get_params())))
my_hash = 0
while get_hash() != my_hash:
params_n = n.findall('param')
for param_n in params_n:
key = param_n.find('key')
value = param_n.find('value')
#the key must exist in this block's params
if key in self.get_param_keys():
self.get_param(key).set_value(value)
#store hash and call rewrite
my_hash = get_hash()
self.rewrite()
bussinks = n.findall('bus_sink');
if len(bussinks) > 0 and not self._bussify_sink:
self.bussify({'name':'bus','type':'bus'}, 'sink')
elif len(bussinks) > 0:
self.bussify({'name':'bus','type':'bus'}, 'sink')
self.bussify({'name':'bus','type':'bus'}, 'sink')
bussrcs = n.findall('bus_source');
if len(bussrcs) > 0 and not self._bussify_source:
self.bussify({'name':'bus','type':'bus'}, 'source')
elif len(bussrcs) > 0:
self.bussify({'name':'bus','type':'bus'}, 'source')
self.bussify({'name':'bus','type':'bus'}, 'source')
|
courtarro/gnuradio-wg-grc
|
grc/base/Block.py
|
Python
|
gpl-3.0
| 16,569
|
# xyz
# Copyright (C) 2014 xyz developers <admin@localhost.lh> (see AUTHORS)
#
# All rights reserved.
"""
Django settings for xyz project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'jwMD5TMr3VDg2JcvCfk2Ttc85q/4d591V4ILjC/L89WMoFWUv9Sxwg=='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'rest_framework',
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'myapp',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'xyz.urls'
WSGI_APPLICATION = 'xyz.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en'
gettext = lambda s: s
LANGUAGES = (
)
MODELTRANSLATION_DEFAULT_LANGUAGE = 'en'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
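# Django REST framework defaults (descriptive note added for clarity): only admin
# users may access the API, and list endpoints are paginated at 10 items per page.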
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.IsAdminUser',),
'PAGINATE_BY': 10
}
|
4geit-module/4i.language.python.django.restframework
|
tests/single_model/output/xyz/settings.py
|
Python
|
gpl-3.0
| 2,412
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Payment Gateway Setup
:copyright: (c) 2013-2015 by Openlabs Technologies & Consulting (P) Ltd.
:license: BSD, see LICENSE for more details
'''
import time
import sys
import re
import os
import ConfigParser
import unittest
from setuptools import setup, Command
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
class SQLiteTest(Command):
"""
Run the tests on SQLite
"""
description = "Run tests on SQLite"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
if self.distribution.tests_require:
self.distribution.fetch_build_eggs(self.distribution.tests_require)
os.environ['TRYTOND_DATABASE_URI'] = 'sqlite://'
os.environ['DB_NAME'] = ':memory:'
from tests import suite
test_result = unittest.TextTestRunner(verbosity=3).run(suite())
if test_result.wasSuccessful():
sys.exit(0)
sys.exit(-1)
class PostgresTest(Command):
"""
Run the tests on Postgres.
"""
description = "Run tests on Postgresql"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
if self.distribution.tests_require:
self.distribution.fetch_build_eggs(self.distribution.tests_require)
os.environ['TRYTOND_DATABASE_URI'] = 'postgresql://'
os.environ['DB_NAME'] = 'test_' + str(int(time.time()))
from tests import suite
test_result = unittest.TextTestRunner(verbosity=3).run(suite())
if test_result.wasSuccessful():
sys.exit(0)
sys.exit(-1)
config = ConfigParser.ConfigParser()
config.readfp(open('tryton.cfg'))
info = dict(config.items('tryton'))
for key in ('depends', 'extras_depend', 'xml'):
if key in info:
info[key] = info[key].strip().splitlines()
major_version, minor_version, _ = info.get('version', '0.0.1').split('.', 2)
major_version = int(major_version)
minor_version = int(minor_version)
requires = []
MODULE = 'nereid_checkout'
PREFIX = 'trytond'
MODULE2PREFIX = {
'nereid_payment_gateway': 'openlabs',
'sale_payment_gateway': 'openlabs',
'email_queue': 'openlabs',
'sale_confirmation_email': 'openlabs',
}
for dep in info.get('depends', []):
if not re.match(r'(ir|res|webdav)(\W|$)', dep):
requires.append(
'%s_%s >= %s.%s, < %s.%s' % (
MODULE2PREFIX.get(dep, 'trytond'), dep,
major_version, minor_version, major_version, minor_version + 1
)
)
requires.append(
'trytond >= %s.%s, < %s.%s' % (
major_version, minor_version, major_version, minor_version + 1
)
)
setup(
name='%s_%s' % (PREFIX, MODULE),
version=info.get('version', '0.0.1'),
description="Tryton module for Payment Gatway/Merchant Integration",
author="Openlabs Technologies & consulting (P) Limited",
author_email='info@openlabs.co.in',
url='http://www.openlabs.co.in',
package_dir={'trytond.modules.%s' % MODULE: '.'},
packages=[
'trytond.modules.%s' % MODULE,
'trytond.modules.%s.tests' % MODULE,
],
package_data={
'trytond.modules.%s' % MODULE:
info.get('xml', []) +
info.get('translation', []) +
['tryton.cfg', 'locale/*.po', 'tests/*.rst', '*.odt'] +
['view/*.xml'],
},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Plugins',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Tryton',
'Topic :: Office/Business',
],
license='GPL-3',
install_requires=requires,
extras_require={
'docs': ['sphinx', 'sphinx_rtd_theme'],
},
zip_safe=False,
entry_points="""
[trytond.modules]
%s = trytond.modules.%s
""" % (MODULE, MODULE),
test_suite='tests',
test_loader='trytond.test_loader:Loader',
tests_require=[
'openlabs_payment_gateway_authorize_net >= %s.%s, < %s.%s' % (
major_version, minor_version, major_version, minor_version + 1
),
'pycountry',
'mock',
],
cmdclass={
'test': SQLiteTest,
'test_on_postgres': PostgresTest,
},
)
|
aroraumang/nereid-checkout
|
setup.py
|
Python
|
gpl-3.0
| 4,582
|
# -*- coding: UTF-8 -*-
# Copyright (C) 2011 Juan David Ibáñez Palomar <jdavid@itaapy.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Import from the Standard Library
from datetime import datetime
from os.path import abspath
from subprocess import Popen, PIPE
# Import from pygit2
from pygit2 import Repository, GitError
def message_short(commit):
"""Helper function to get the subject line of the commit message.
XXX This code is based on the 'message_short' value that was once
available in libgit2 (and removed by 5ae2f0c0135). It should be removed
once libgit2 gets the feature back, see issue #250 for the discussion:
https://github.com/libgit2/libgit2/pull/250
"""
message = commit.message
message = message.split('\n\n')[0]
message = message.replace('\n', ' ')
return message.rstrip()
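# Illustrative note (added, not from the original source): for a commit message such
# as "Fix crash on startup\n\nLonger explanation...", message_short() returns the
# subject line "Fix crash on startup".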
class Worktree(object):
def __init__(self, path, repo):
self.path = abspath(path) + '/'
self.repo = repo
self.cache = {} # {sha: object}
#######################################################################
# Internal utility functions
#######################################################################
def _call(self, command):
"""Interface to cal git.git for functions not yet implemented using
libgit2.
"""
popen = Popen(command, stdout=PIPE, stderr=PIPE, cwd=self.path)
stdoutdata, stderrdata = popen.communicate()
if popen.returncode != 0:
raise EnvironmentError, (popen.returncode, stderrdata)
return stdoutdata
def _resolve_reference(self, reference):
"""This method returns the SHA the given reference points to. For now
only HEAD is supported.
FIXME This is quick & dirty. TODO Implement references in pygit2 and
use them here.
"""
# Case 1: SHA
if len(reference) == 40:
return reference
# Case 2: reference
reference = self.repo.lookup_reference(reference)
try:
reference = reference.resolve()
except KeyError:
return None
return reference.target
#######################################################################
# External API
#######################################################################
def lookup(self, sha):
"""Return the object by the given SHA. We use a cache to warrant that
two calls with the same SHA will resolve to the same object, so the
'is' operator will work.
"""
cache = self.cache
if sha not in cache:
cache[sha] = self.repo[sha]
return cache[sha]
@property
def index(self):
"""Gives access to the index file. Reloads the index file if it has
been modified in the filesystem.
"""
index = self.repo.index
# Bare repository
if index is None:
raise RuntimeError, 'expected standard repository, not bare'
return index
def git_describe(self):
"""Equivalent to 'git describe', returns a unique but short
identifier for the current commit based on tags.
TODO Implement using libgit2
"""
# Call
command = ['git', 'describe', '--tags', '--long']
try:
data = self._call(command)
except EnvironmentError:
return None
# Parse
tag, n, commit = data.rsplit('-', 2)
return tag, int(n), commit
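    # Illustrative note (added, not from the original source): for a repository whose
    # latest tag is "0.75.1" with 14 commits on top of it, this would return something
    # like ('0.75.1', 14, 'g1a2b3c4'); the tag name here is hypothetical.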
def get_branch_name(self):
"""Returns the name of the current branch.
"""
ref = open('%s/.git/HEAD' % self.path).read().rstrip()
ref = ref.rsplit('/', 1)
return ref[1] if len(ref) == 2 else None
def get_filenames(self):
"""Returns the list of filenames tracked by git.
"""
index = self.index
return [ index[i].path for i in range(len(index)) ]
def get_metadata(self, reference='HEAD'):
"""Resolves the given reference and returns metadata information
about the commit in the form of a dict.
"""
sha = self._resolve_reference(reference)
commit = self.lookup(sha)
parents = commit.parents
author = commit.author
committer = commit.committer
# TODO Use the offset for the author/committer time
return {
'tree': commit.tree.hex,
'parent': parents[0].hex if parents else None,
'author_name': author.name,
'author_email': author.email,
'author_date': datetime.fromtimestamp(author.time),
'committer_name': committer.name,
'committer_email': committer.email,
'committer_date': datetime.fromtimestamp(committer.time),
'message': commit.message,
'message_short': message_short(commit),
}
def open_worktree(path, soft=False):
try:
repo = Repository('%s/.git' % path)
except GitError:
if soft:
return None
raise
return Worktree(path, repo)
|
hforge/itools
|
itools/pkg/git.py
|
Python
|
gpl-3.0
| 5,696
|
# encoding: utf-8
from ckan.tests.legacy import *
import ckan.model as model
class TestRevisionPurge:
@classmethod
def setup_class(self):
model.Session.remove()
CreateTestData.create()
@classmethod
def teardown_class(self):
model.repo.rebuild_db()
def setup(self):
self.pkgname = u'revision-purge-test'
model.repo.new_revision()
self.pkg = model.Package(name=self.pkgname)
self.old_url = u'abc.com'
self.pkg.url = self.old_url
tag1 = model.Tag.by_name(u'russian')
tag2 = model.Tag.by_name(u'tolstoy')
self.pkg.add_tag(tag1)
self.pkg.add_tag(tag2)
model.repo.commit_and_remove()
txn2 = model.repo.new_revision()
pkg = model.Package.by_name(self.pkgname)
newurl = u'blah.com'
pkg.url = newurl
for tag in pkg.get_tags():
pkg.remove_tag(tag)
self.pkgname2 = u'revision-purge-test-2'
self.pkg_new = model.Package(name=self.pkgname2)
model.repo.commit_and_remove()
def teardown(self):
model.Session.remove()
pkg_new = model.Package.by_name(self.pkgname2)
if pkg_new:
pkg_new.purge()
pkg = model.Package.by_name(self.pkgname)
pkg.purge()
model.Session.commit()
model.Session.remove()
def test_1(self):
rev = model.repo.youngest_revision()
model.repo.purge_revision(rev, leave_record=True)
rev = model.repo.youngest_revision()
pkg = model.Package.by_name(self.pkgname)
assert rev.message.startswith('PURGED'), rev.message
assert pkg.url == self.old_url
pkg2 = model.Package.by_name(self.pkgname2)
assert pkg2 is None, 'pkgname2 should no longer exist'
assert len(pkg.get_tags()) == 2
def test_2(self):
rev = model.repo.youngest_revision()
num = rev.id
model.repo.purge_revision(rev, leave_record=True)
rev = model.repo.youngest_revision()
# TODO: should youngest_revision be made purge aware
# (requires state on revision)
assert rev.id == num
def test_purge_first_revision(self):
rev = model.repo.youngest_revision()
num = rev.id
q = model.repo.history()
q = q.order_by(model.Revision.timestamp.desc())
q = q.limit(2)
rev2 = q.all()[1]
model.repo.purge_revision(rev, leave_record=True)
rev = model.repo.youngest_revision()
assert rev.id == num
        # alternatively it should equal num - 2, or be None (if there is no lower revision)
pkg = model.Package.by_name(self.pkgname)
assert len(pkg.all_revisions) == 1
|
NicoVarg99/daf-recipes
|
ckan/ckan/ckan/ckan/tests/legacy/models/test_purge_revision.py
|
Python
|
gpl-3.0
| 2,715
|
#!/usr/bin/python
import sys
port=0
program=""
def usage():
print "Usage: mkjobs.py port program arggroup1 [arggoup2 ...]"
print "port: smallest port to use"
print "arggroupX: program arguments to use"
print "the script will generate all job.sh calls for each arggroup"
print ""
print "Example2: mkjobs.py 12000 bubblesort.exe 1000 > runjobs.sh"
print "Example1: mkjobs.py 13000 bubblesort.exe 100 200 > runjobs.sh"
print ""
print "This program is originally written by me, Jochen Wierum."
print "You can do whatever you want with this code, but I won't take"
print "responsibility for anything."
def mkDineroArgs(
l1dsize="64k", l1dassoc='2',
l2usize="512k", l2uassoc='16',
l3usize="8m", l3uassoc='32',
lsize='64'):
return ['-l1-dsize ' + l1dsize, '-l2-usize ' + l2usize, \
'-l3-usize ' + l3usize, '-l1-dbsize ' + lsize, \
'-l2-ubsize ' + lsize, '-l3-ubsize ' + lsize, \
'-l1-dassoc ' + l1dassoc, '-l2-uassoc ' + l2uassoc, \
'-l3-uassoc ' + l3uassoc]
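# Illustrative note (added, not from the original script): mkDineroArgs(l1dsize='32k')
# keeps every other default and yields flags such as '-l1-dsize 32k', '-l2-usize 512k',
# '-l3-usize 8m', '-l1-dbsize 64', ..., which printCallString later joins into DINERO_ARGS.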
def printCallString(pargs, name, value):
global port
args = {name: value}
print 'qsub -vPORT=' + str(port) \
+ ',PROGRAM=' + program \
+ ',PROGRAM_ARGS=' + pargs \
+ ',DIR=' + pargs + '/' + name \
+ ',FILE=' + value + '.txt,DINERO_ARGS="' + ' '.join(mkDineroArgs(**args)) \
+ '" job.sh'
port = port + 1
def printCallStrings(args):
sizes = ['1k', '2k', '4k', '8k', '16k', '64k', '128k', '256k', '512k']
print "# L1 cache size: " + ', '.join(sizes)
for size in sizes:
printCallString(args, 'l1dsize', size)
sizes = ['64k', '128k', '256k', '512k', '1m', '2m', '4m', '8m', '16m']
print "# L2 cache size: " + ', '.join(sizes)
for size in sizes:
printCallString(args, 'l2usize', size)
sizes = ['512k', '1m', '2m', '4m', '8m', '16m', '32m', '64m', '128m']
print "# L3 cache size: " + ', '.join(sizes)
for size in sizes:
printCallString(args, 'l3usize', size)
sizes = ['1', '2', '3', '4', '5', '6', '7', '8']
print "# L1 associativity: " + ', '.join(sizes)
for size in sizes:
printCallString(args, 'l1dassoc', size)
sizes = ['1', '2', '3', '4', '8', '16', '32', '64', '128']
print "# L2 associativity: " + ', '.join(sizes)
for size in sizes:
printCallString(args, 'l2uassoc', size)
sizes = ['1', '2', '3', '4', '8', '16', '32', '64', '128']
print "# L3 associativity: " + ', '.join(sizes)
for size in sizes:
printCallString(args, 'l3uassoc', size)
sizes = ['8', '16', '32', '64', '128', '256', '512', '1024']
print "# line size: " + ', '.join(sizes)
for size in sizes:
printCallString(args, 'lsize', size)
print len(sys.argv)
if len(sys.argv) < 4:
usage()
sys.exit(1)
else:
port = int(sys.argv[1])
program = sys.argv[2]
for arg in sys.argv[3:]:
printCallStrings(arg)
|
autarchprinceps/Parallele-Systeme
|
u3/a1/src/mkjobs.py
|
Python
|
gpl-3.0
| 2,992
|
#re creating the functionality of the manipulator menu from 2.49
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
bl_info = {
"name": "3d View: Manipulator Menu",
"author": "MichaelW",
"version": (1, 2, 1),
"blender": (2, 61, 0),
"location": "View3D > Ctrl Space ",
"description": "Menu to change the manipulator type and/or disable it",
"wiki_url": "http://wiki.blender.org/index.php/Extensions:2.6/Py/"
"Scripts/3D_interaction/Manipulator_Menu",
"tracker_url": "https://developer.blender.org/T22092",
"category": "3D View"}
import bpy
def main(context):
bpy.context.space_data.manipulator = False
#class VIEW3D_OT_disable_manipulator(bpy.types.Operator):
# """"""
# bl_idname = "VIEW3D_OT_disable_manipulator"
# bl_label = "disable manipulator"
#
# def poll(self, context):
# return context.active_object != None
#
# def execute(self, context):
# main(context)
# return {'FINISHED'}
#
class VIEW3D_MT_ManipulatorMenu(bpy.types.Menu):
bl_label = "ManipulatorType"
def draw(self, context):
layout = self.layout
layout.operator_context = 'INVOKE_REGION_WIN'
props = layout.operator("view3d.enable_manipulator",text ='Translate', icon='MAN_TRANS')
props.translate = True
props = layout.operator("view3d.enable_manipulator",text ='Rotate', icon='MAN_ROT')
props.rotate = True
props = layout.operator("view3d.enable_manipulator",text ='Scale', icon='MAN_SCALE')
props.scale = True
layout.separator()
props = layout.operator("view3d.enable_manipulator",text ='Combo', icon='MAN_SCALE')
props.scale = True
props.rotate = True
props.translate = True
layout.separator()
props = layout.operator("view3d.enable_manipulator",text ='Hide', icon='MAN_SCALE')
props.scale = False
props.rotate = False
props.translate = False
layout.separator()
def register():
bpy.utils.register_module(__name__)
wm = bpy.context.window_manager
km = wm.keyconfigs.addon.keymaps.new(name='3D View Generic', space_type='VIEW_3D')
kmi = km.keymap_items.new('wm.call_menu', 'SPACE', 'PRESS', ctrl=True)
kmi.properties.name = "VIEW3D_MT_ManipulatorMenu"
def unregister():
bpy.utils.unregister_module(__name__)
wm = bpy.context.window_manager
km = wm.keyconfigs.addon.keymaps['3D View Generic']
for kmi in km.keymap_items:
if kmi.idname == 'wm.call_menu':
if kmi.properties.name == "VIEW3D_MT_ManipulatorMenu":
km.keymap_items.remove(kmi)
break
if __name__ == "__main__":
    register()
|
Passtechsoft/TPEAlpGen
|
blender/release/scripts/addons_contrib/space_view3d_manipulator_Menu.py
|
Python
|
gpl-3.0
| 3,458
|
"""Acconnt endpoints"""
from __future__ import absolute_import
from flask import request
from flask_restful import fields, marshal, Resource
from flask_login import login_user
from app import api, db
from models.account import User
from exceptions import ValidationError
user_fields = {
'id': fields.Integer,
'email': fields.String,
'date_created': fields.DateTime,
'date_updated': fields.DateTime,
}
class UserResource(Resource):
"""Define user RESTful endpoints."""
def get(self, user_id):
"""Serve GET requests."""
user = User.query.get_or_404(user_id)
return marshal(user, user_fields)
def post(self):
"""Serve POST requests."""
try:
user = User(
email=request.form.get('email', ''),
password=request.form.get('password', '')
)
except ValidationError as error:
return {'errors': error.message}, 400
db.session.add(user)
db.session.commit()
login_user(user)
return marshal(user, user_fields), 201
api.add_resource(UserResource, '/users', '/users/<int:user_id>')
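# Illustrative client sketch (added, not part of the original module): with the
# resource registered above, a user could be created and fetched roughly like
#   requests.post('http://localhost:5000/users', data={'email': 'a@b.c', 'password': 's3cret'})
#   requests.get('http://localhost:5000/users/1')
# The host, port and credentials are hypothetical placeholders.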
|
TimothyBest/gitplaylist
|
app/views/account.py
|
Python
|
gpl-3.0
| 1,152
|
#!/usr/bin/env python
# coding: utf-8
# # Comparing jktebop and ellc to PHOEBE
#
# In this example script, we'll reproduce Figure 6 from the fitting release paper ([Conroy et al. 2020](http://phoebe-project.org/publications/2020Conroy+)).
#
# <img src="http://phoebe-project.org/images/figures/2020Conroy+_fig6.png" alt="Figure 6" width="800px"/>
# Let's first make sure we have the latest version of PHOEBE 2.3 installed (uncomment this line if running in an online notebook session such as colab).
# In[ ]:
#!pip install -I "phoebe>=2.3,<2.4"
# Next we'll import matplotlib and set some figure options before importing phoebe
# In[1]:
import matplotlib.pyplot as plt
plt.rc('font', family='serif', size=14, serif='STIXGeneral')
plt.rc('mathtext', fontset='stix')
# In[2]:
import phoebe
# In[3]:
def run_comparison_models(b):
b.add_compute('legacy')
b.add_compute('ellc', rv_method='flux-weighted')
# for jktebop, we'll use requiv_max_limit=1.0 to allow the semi-detached case to run
b.add_compute('jktebop', requiv_max_limit=1.0)
b.add_dataset('lc', compute_phases=phoebe.linspace(0,1,1001))
b.add_dataset('rv', compute_phases=phoebe.linspace(0,1,1001))
if True:
b.set_value_all('irrad_method', 'none') # if not using this, then we need to use dynamical RVs for ellc
else:
b.set_value_all('rv_method', kind='ellc', value='dynamical')
b.set_value_all('pblum_method', 'phoebe')
b.set_value_all('ld_mode', 'lookup')
b.set_value_all('ld_func', 'linear')
b.run_compute(kind='phoebe', model='phoebe2_model', ntriangles=3000)
b.run_compute(kind='legacy', model='phoebe1_model', gridsize=60)
b.run_compute(kind='ellc', model='ellc_model')
b.run_compute(kind='jktebop', model='jktebop_model')
return b
# # Detached Case
# In[4]:
b = phoebe.default_binary()
b['period@binary'] = 1.2 # 1.0 case causes some issues with ellc
b['requiv@primary'] = 0.95 # same stars causes issues with RM in legacy
# In[5]:
b = run_comparison_models(b)
# In[6]:
afig, mplfig = b.plot(x='phases', context='model',
ylim={'lc': (2.05,2.15), 'rv': (-100,100)}, xlim={'rv': (-0.25,0.25)},
c={'phoebe2_model': 'blue', 'phoebe1_model': 'green', 'ellc_model': 'orange', 'jktebop_model': 'purple'},
ls={'phoebe2_model': 'solid', 'phoebe1_model': 'dashed', 'ellc_model': 'dotted', 'jktebop_model': '-.'},
legend={'lc': True},
show=True, save='figure_backends_compare.pdf')
# # Semi-Detached Case
# In[7]:
b = phoebe.default_binary(semidetached='secondary')
b.set_value('period@binary', 3.0633292)
b.set_value('q@binary@component',0.24700)
b.set_value('ecc@binary@orbit', 0.0 )
b.set_value('requiv@primary@component', 2.8)
b.set_value('incl@orbit', 83.500)
b.set_value('sma@orbit', 15.9)
b.set_value('teff@primary', 12900.000000)
b.set_value('teff@secondary', 5500.000000)
b.set_value('gravb_bol@primary',1.0)
b.set_value('gravb_bol@secondary',0.32)
b.set_value('irrad_frac_refl_bol@primary',1.0)
b.set_value('irrad_frac_refl_bol@secondary',0.75)
b.set_value('per0@binary@orbit',0.0)
# In[8]:
b = run_comparison_models(b)
# In[9]:
afig, mplfig = b.plot(x='phase', context='model',
compute=['phoebe01', 'legacy01', 'ellc01'],
c={'phoebe2_model': 'blue', 'phoebe1_model': 'green', 'ellc_model': 'orange', 'jktebop_model': 'purple'},
ls={'phoebe2_model': 'solid', 'phoebe1_model': 'dashed', 'ellc_model': 'dotted', 'jktebop_model': '-.'},
legend={'lc': True},
show=True, save='figure_backends_compare_semidetached.pdf')
|
phoebe-project/phoebe2-docs
|
development/examples/backends_compare_legacy_jktebop_ellc.py
|
Python
|
gpl-3.0
| 3,762
|
# -*- coding: utf-8 -*-
############################################################################
# GPL License #
# #
# This file is a SCons (http://www.scons.org/) builder #
# Copyright (c) 2012-14, Philipp Kraus, <philipp.kraus@flashpixx.de> #
# Copyright 2014 Intel Mobile Communications GmbH All Rights Reserved. #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as #
# published by the Free Software Foundation, either version 3 of the #
# License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
############################################################################
# This builder originated from work by Philipp Kraus and flashpixx project
# (see https://github.com/flashpixx). Based on the Unpack.py, it only
# contains changes to allow a complete unpacking of the archive.
# It is assumed that the target represents a file in the archive after it
# is unpacked.
# The Unpack Builder can be used for unpacking archives (eg Zip, TGZ, BZ, ... ).
# The emitter of the Builder reads the archive data and creates a returning
# file list the builder extract the archive. The environment variable
# stores a dictionary "UNPACK" for set different extractions (subdict "EXTRACTOR"):
# {
# PRIORITY => a value for setting the extractor order (lower numbers = extractor is used earlier)
# SUFFIX => defines a list with file suffixes, which should be handled with this extractor
# EXTRACTSUFFIX => suffix of the extract command
# EXTRACTFLAGS => a string parameter for the RUN command for extracting the data
# EXTRACTCMD => full extract command of the builder
# RUN => the main program which will be started (if the parameter is empty, the extractor will be ignored)
# LISTCMD => the listing command for the emitter
# LISTFLAGS => the string options for the RUN command for showing a list of files
# LISTSUFFIX => suffix of the list command
# LISTEXTRACTOR => an optional Python function, that is called on each output line of the
# LISTCMD for extracting file & dir names, the function needs two parameters (first line number,
# second line content) and must return a string with the file / dir path (other value types
# will be ignored)
# }
# Other options in the UNPACK dictionary are:
# STOPONEMPTYFILE => bool variable for stopping if the file has empty size (default True)
# VIWEXTRACTOUTPUT => shows the output messages of the extraction command (default False)
# EXTRACTDIR => path in that the data will be extracted (default #)
#
# A file is handled by the first extractor whose suffix matches; the extractor list can be extended for other file types.
# The order of the extractor dictionary determines the listing & extract commands, e.g. the .tar.gz extension should come
# before .gz, because a tar.gz archive is extracted in one shot.
#
# Under *nix systems these tools are supported: tar, bzip2, gzip, unzip
# Under Windows only 7-Zip (http://www.7-zip.org/) is supported
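#
# Example usage sketch (added, assumed rather than taken from the original build scripts):
#   env = Environment(tools=['default', 'UnpackAll'])
#   env.UnpackAll('extlibs/foo/configure', 'extlibs/foo.tar.gz')
# declares that the target file appears once the archive has been unpacked into EXTRACTDIR;
# the paths above are hypothetical placeholders.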
import subprocess, os
import SCons.Errors, SCons.Warnings, SCons.Util
# enables Scons warning for this builder
class UnpackWarning(SCons.Warnings.Warning) :
pass
SCons.Warnings.enableWarningClass(UnpackWarning)
# extractor function for Tar output
# @param env environment object
# @param count number of returning lines
# @param no number of the output line
# @param i line content
def __fileextractor_nix_tar( env, count, no, i ) :
return i.split()[-1]
# extractor function for GZip output,
# ignore the first line
# @param env environment object
# @param count number of returning lines
# @param no number of the output line
# @param i line content
def __fileextractor_nix_gzip( env, count, no, i ) :
if no == 0 :
return None
return i.split()[-1]
# extractor function for Unzip output,
# ignore the first & last two lines
# @param env environment object
# @param count number of returning lines
# @param no number of the output line
# @param i line content
def __fileextractor_nix_unzip( env, count, no, i ) :
if no < 3 or no >= count - 2 :
return None
return i.split()[-1]
# extractor function for 7-Zip
# @param env environment object
# @param count number of returning lines
# @param no number of the output line
# @param i line content
def __fileextractor_win_7zip( env, count, no, i ) :
item = i.split()
if no > 8 and no < count - 2 :
return item[-1]
return None
# returns the extractor item for handling the source file
# @param source input source file
# @param env environment object
# @return extractor entry or None on non existing
def __getExtractor( source, env ) :
    # we check each unpacker and get the correct list command first, run the command and
    # replace the target filelist with the list values; we sort the extractors by their priority
for unpackername, extractor in sorted(env["UNPACK"]["EXTRACTOR"].iteritems(), key = lambda (k,v) : (v["PRIORITY"],k)):
if not SCons.Util.is_String(extractor["RUN"]) :
raise SCons.Errors.StopError("list command of the unpack builder for [%s] archives is not a string" % (unpackername))
if not len(extractor["RUN"]) :
raise SCons.Errors.StopError("run command of the unpack builder for [%s] archives is not set - can not extract files" % (unpackername))
if not SCons.Util.is_String(extractor["LISTFLAGS"]) :
raise SCons.Errors.StopError("list flags of the unpack builder for [%s] archives is not a string" % (unpackername))
if not SCons.Util.is_String(extractor["LISTCMD"]) :
raise SCons.Errors.StopError("list command of the unpack builder for [%s] archives is not a string" % (unpackername))
if not SCons.Util.is_String(extractor["EXTRACTFLAGS"]) :
raise SCons.Errors.StopError("extract flags of the unpack builder for [%s] archives is not a string" % (unpackername))
if not SCons.Util.is_String(extractor["EXTRACTCMD"]) :
raise SCons.Errors.StopError("extract command of the unpack builder for [%s] archives is not a string" % (unpackername))
# check the source file suffix and if the first is found, run the list command
if not SCons.Util.is_List(extractor["SUFFIX"]) :
raise SCons.Errors.StopError("suffix list of the unpack builder for [%s] archives is not a list" % (unpackername))
for suffix in extractor["SUFFIX"] :
if str(source[0]).lower()[-len(suffix):] == suffix.lower() :
return extractor
return None
# creates the extracter output message
# @param s original message
# @param target target name
# @param source source name
# @param env environment object
def __message( s, target, source, env ) :
print "extract [%s] ..." % (source[0])
# action function for extracting of the data
# @param target target packed file
# @param source extracted files
# @param env environment object
def __action( target, source, env ) :
extractor = __getExtractor(source, env)
if not extractor :
raise SCons.Errors.StopError( "can not find any extractor value for the source file [%s]" % (source[0]) )
extractor_cmd = extractor["EXTRACTCMD"]
# if the extract command is empty, we create an error
if len(extractor_cmd) == 0 :
raise SCons.Errors.StopError( "the extractor command for the source file [%s] is empty" % (source[0]) )
# build it now (we need the shell, because some programs need it)
handle = None
source_path = os.path.realpath(source[0].path)
target_path = os.path.realpath(target[0].path)
cmd = env.subst(extractor_cmd, source=source_path, target=target)
cwd = os.path.dirname(source_path)
if env["UNPACK"]["VIWEXTRACTOUTPUT"] :
handle = subprocess.Popen( cmd, shell=True )
else :
devnull = open(os.devnull, "wb")
handle = subprocess.Popen( cmd, shell=True, stdout=devnull, cwd=cwd)
if handle.wait() <> 0 :
raise SCons.Errors.BuildError( "error running extractor [%s] on the source [%s]" % (cmd, source[0]) )
fhandle = open(target_path, 'a')
try:
os.utime(target_path, None)
finally:
fhandle.close()
# emitter function for getting the files
# within the archive
# @param target target packed file
# @param source extracted files
# @param env environment object
def __emitter( target, source, env ) :
return target, source
# generate function, that adds the builder to the environment
# @param env environment object
def generate( env ) :
# setup environment variable
toolset = {
"STOPONEMPTYFILE" : True,
"VIWEXTRACTOUTPUT" : False,
"EXTRACTDIR" : os.curdir,
"EXTRACTOR" : {
"TARGZ" : {
"PRIORITY" : 0,
"SUFFIX" : [".tar.gz", ".tgz", ".tar.gzip"],
"EXTRACTSUFFIX" : "",
"EXTRACTFLAGS" : "",
"EXTRACTCMD" : "${UNPACK['EXTRACTOR']['TARGZ']['RUN']} ${UNPACK['EXTRACTOR']['TARGZ']['EXTRACTFLAGS']} $SOURCE ${UNPACK['EXTRACTOR']['TARGZ']['EXTRACTSUFFIX']}",
"RUN" : "",
"LISTCMD" : "${UNPACK['EXTRACTOR']['TARGZ']['RUN']} ${UNPACK['EXTRACTOR']['TARGZ']['LISTFLAGS']} $SOURCE ${UNPACK['EXTRACTOR']['TARGZ']['LISTSUFFIX']}",
"LISTSUFFIX" : "",
"LISTFLAGS" : "",
"LISTEXTRACTOR" : None
},
"TARBZ" : {
"PRIORITY" : 0,
"SUFFIX" : [".tar.bz", ".tbz", ".tar.bz2", ".tar.bzip2", ".tar.bzip"],
"EXTRACTSUFFIX" : "",
"EXTRACTFLAGS" : "",
"EXTRACTCMD" : "${UNPACK['EXTRACTOR']['TARBZ']['RUN']} ${UNPACK['EXTRACTOR']['TARBZ']['EXTRACTFLAGS']} $SOURCE ${UNPACK['EXTRACTOR']['TARBZ']['EXTRACTSUFFIX']}",
"RUN" : "",
"LISTCMD" : "${UNPACK['EXTRACTOR']['TARBZ']['RUN']} ${UNPACK['EXTRACTOR']['TARBZ']['LISTFLAGS']} $SOURCE ${UNPACK['EXTRACTOR']['TARBZ']['LISTSUFFIX']}",
"LISTSUFFIX" : "",
"LISTFLAGS" : "",
"LISTEXTRACTOR" : None
},
"BZIP" : {
"PRIORITY" : 1,
"SUFFIX" : [".bz", "bzip", ".bz2", ".bzip2"],
"EXTRACTSUFFIX" : "",
"EXTRACTFLAGS" : "",
"EXTRACTCMD" : "${UNPACK['EXTRACTOR']['BZIP']['RUN']} ${UNPACK['EXTRACTOR']['BZIP']['EXTRACTFLAGS']} $SOURCE ${UNPACK['EXTRACTOR']['BZIP']['EXTRACTSUFFIX']}",
"RUN" : "",
"LISTCMD" : "${UNPACK['EXTRACTOR']['BZIP']['RUN']} ${UNPACK['EXTRACTOR']['BZIP']['LISTFLAGS']} $SOURCE ${UNPACK['EXTRACTOR']['BZIP']['LISTSUFFIX']}",
"LISTSUFFIX" : "",
"LISTFLAGS" : "",
"LISTEXTRACTOR" : None
},
"GZIP" : {
"PRIORITY" : 1,
"SUFFIX" : [".gz", ".gzip"],
"EXTRACTSUFFIX" : "",
"EXTRACTFLAGS" : "",
"EXTRACTCMD" : "${UNPACK['EXTRACTOR']['GZIP']['RUN']} ${UNPACK['EXTRACTOR']['GZIP']['EXTRACTFLAGS']} $SOURCE ${UNPACK['EXTRACTOR']['GZIP']['EXTRACTSUFFIX']}",
"RUN" : "",
"LISTCMD" : "${UNPACK['EXTRACTOR']['GZIP']['RUN']} ${UNPACK['EXTRACTOR']['GZIP']['LISTFLAGS']} $SOURCE ${UNPACK['EXTRACTOR']['GZIP']['LISTSUFFIX']}",
"LISTSUFFIX" : "",
"LISTFLAGS" : "",
"LISTEXTRACTOR" : None
},
"TAR" : {
"PRIORITY" : 1,
"SUFFIX" : [".tar"],
"EXTRACTSUFFIX" : "",
"EXTRACTFLAGS" : "",
"EXTRACTCMD" : "${UNPACK['EXTRACTOR']['TAR']['RUN']} ${UNPACK['EXTRACTOR']['TAR']['EXTRACTFLAGS']} $SOURCE ${UNPACK['EXTRACTOR']['TAR']['EXTRACTSUFFIX']}",
"RUN" : "",
"LISTCMD" : "${UNPACK['EXTRACTOR']['TAR']['RUN']} ${UNPACK['EXTRACTOR']['TAR']['LISTFLAGS']} $SOURCE ${UNPACK['EXTRACTOR']['TAR']['LISTSUFFIX']}",
"LISTSUFFIX" : "",
"LISTFLAGS" : "",
"LISTEXTRACTOR" : None
},
"ZIP" : {
"PRIORITY" : 1,
"SUFFIX" : [".zip"],
"EXTRACTSUFFIX" : "",
"EXTRACTFLAGS" : "",
"EXTRACTCMD" : "${UNPACK['EXTRACTOR']['ZIP']['RUN']} ${UNPACK['EXTRACTOR']['ZIP']['EXTRACTFLAGS']} $SOURCE ${UNPACK['EXTRACTOR']['ZIP']['EXTRACTSUFFIX']}",
"RUN" : "",
"LISTCMD" : "${UNPACK['EXTRACTOR']['ZIP']['RUN']} ${UNPACK['EXTRACTOR']['ZIP']['LISTFLAGS']} $SOURCE ${UNPACK['EXTRACTOR']['ZIP']['LISTSUFFIX']}",
"LISTSUFFIX" : "",
"LISTFLAGS" : "",
"LISTEXTRACTOR" : None
}
}
}
# read tools for Windows system
if env["PLATFORM"] <> "darwin" and "win" in env["PLATFORM"] :
if env.WhereIs("7z") :
toolset["EXTRACTOR"]["TARGZ"]["RUN"] = "7z"
toolset["EXTRACTOR"]["TARGZ"]["LISTEXTRACTOR"] = __fileextractor_win_7zip
toolset["EXTRACTOR"]["TARGZ"]["LISTFLAGS"] = "x"
toolset["EXTRACTOR"]["TARGZ"]["LISTSUFFIX"] = "-so -y | ${UNPACK['EXTRACTOR']['TARGZ']['RUN']} l -sii -ttar -y -so"
toolset["EXTRACTOR"]["TARGZ"]["EXTRACTFLAGS"] = "x"
toolset["EXTRACTOR"]["TARGZ"]["EXTRACTSUFFIX"] = "-so -y | ${UNPACK['EXTRACTOR']['TARGZ']['RUN']} x -sii -ttar -y -oc:${UNPACK['EXTRACTDIR']}"
toolset["EXTRACTOR"]["TARBZ"]["RUN"] = "7z"
toolset["EXTRACTOR"]["TARBZ"]["LISTEXTRACTOR"] = __fileextractor_win_7zip
toolset["EXTRACTOR"]["TARBZ"]["LISTFLAGS"] = "x"
toolset["EXTRACTOR"]["TARBZ"]["LISTSUFFIX"] = "-so -y | ${UNPACK['EXTRACTOR']['TARGZ']['RUN']} l -sii -ttar -y -so"
toolset["EXTRACTOR"]["TARBZ"]["EXTRACTFLAGS"] = "x"
toolset["EXTRACTOR"]["TARBZ"]["EXTRACTSUFFIX"] = "-so -y | ${UNPACK['EXTRACTOR']['TARGZ']['RUN']} x -sii -ttar -y -oc:${UNPACK['EXTRACTDIR']}"
toolset["EXTRACTOR"]["BZIP"]["RUN"] = "7z"
toolset["EXTRACTOR"]["BZIP"]["LISTEXTRACTOR"] = __fileextractor_win_7zip
toolset["EXTRACTOR"]["BZIP"]["LISTFLAGS"] = "l"
toolset["EXTRACTOR"]["BZIP"]["LISTSUFFIX"] = "-y -so"
toolset["EXTRACTOR"]["BZIP"]["EXTRACTFLAGS"] = "x"
toolset["EXTRACTOR"]["BZIP"]["EXTRACTSUFFIX"] = "-y -oc:${UNPACK['EXTRACTDIR']}"
toolset["EXTRACTOR"]["GZIP"]["RUN"] = "7z"
toolset["EXTRACTOR"]["GZIP"]["LISTEXTRACTOR"] = __fileextractor_win_7zip
toolset["EXTRACTOR"]["GZIP"]["LISTFLAGS"] = "l"
toolset["EXTRACTOR"]["GZIP"]["LISTSUFFIX"] = "-y -so"
toolset["EXTRACTOR"]["GZIP"]["EXTRACTFLAGS"] = "x"
toolset["EXTRACTOR"]["GZIP"]["EXTRACTSUFFIX"] = "-y -oc:${UNPACK['EXTRACTDIR']}"
toolset["EXTRACTOR"]["ZIP"]["RUN"] = "7z"
toolset["EXTRACTOR"]["ZIP"]["LISTEXTRACTOR"] = __fileextractor_win_7zip
toolset["EXTRACTOR"]["ZIP"]["LISTFLAGS"] = "l"
toolset["EXTRACTOR"]["ZIP"]["LISTSUFFIX"] = "-y -so"
toolset["EXTRACTOR"]["ZIP"]["EXTRACTFLAGS"] = "x"
toolset["EXTRACTOR"]["ZIP"]["EXTRACTSUFFIX"] = "-y -oc:${UNPACK['EXTRACTDIR']}"
toolset["EXTRACTOR"]["TAR"]["RUN"] = "7z"
toolset["EXTRACTOR"]["TAR"]["LISTEXTRACTOR"] = __fileextractor_win_7zip
toolset["EXTRACTOR"]["TAR"]["LISTFLAGS"] = "l"
toolset["EXTRACTOR"]["TAR"]["LISTSUFFIX"] = "-y -ttar -so"
toolset["EXTRACTOR"]["TAR"]["EXTRACTFLAGS"] = "x"
toolset["EXTRACTOR"]["TAR"]["EXTRACTSUFFIX"] = "-y -ttar -oc:${UNPACK['EXTRACTDIR']}"
# here can add some other Windows tools, that can handle the archive files
# but I don't know which ones can handle all file types
# read the tools on *nix systems and sets the default parameters
elif env["PLATFORM"] in ["darwin", "linux", "posix", "msys"] :
if env.WhereIs("unzip") :
toolset["EXTRACTOR"]["ZIP"]["RUN"] = "unzip"
toolset["EXTRACTOR"]["ZIP"]["LISTEXTRACTOR"] = __fileextractor_nix_unzip
toolset["EXTRACTOR"]["ZIP"]["LISTFLAGS"] = "-l"
toolset["EXTRACTOR"]["ZIP"]["EXTRACTFLAGS"] = "-oqq"
toolset["EXTRACTOR"]["ZIP"]["EXTRACTSUFFIX"] = "-d ${UNPACK['EXTRACTDIR']}"
if env.WhereIs("tar") :
toolset["EXTRACTOR"]["TAR"]["RUN"] = "tar"
toolset["EXTRACTOR"]["TAR"]["LISTEXTRACTOR"] = __fileextractor_nix_tar
toolset["EXTRACTOR"]["TAR"]["LISTFLAGS"] = "tvf"
toolset["EXTRACTOR"]["TAR"]["EXTRACTFLAGS"] = "xf"
toolset["EXTRACTOR"]["TAR"]["EXTRACTSUFFIX"] = "-C ${UNPACK['EXTRACTDIR']}"
toolset["EXTRACTOR"]["TARGZ"]["RUN"] = "tar"
toolset["EXTRACTOR"]["TARGZ"]["LISTEXTRACTOR"] = __fileextractor_nix_tar
toolset["EXTRACTOR"]["TARGZ"]["EXTRACTFLAGS"] = "xfz"
toolset["EXTRACTOR"]["TARGZ"]["LISTFLAGS"] = "tvfz"
toolset["EXTRACTOR"]["TARGZ"]["EXTRACTSUFFIX"] = "-C ${UNPACK['EXTRACTDIR']}"
toolset["EXTRACTOR"]["TARBZ"]["RUN"] = "tar"
toolset["EXTRACTOR"]["TARBZ"]["LISTEXTRACTOR"] = __fileextractor_nix_tar
toolset["EXTRACTOR"]["TARBZ"]["EXTRACTFLAGS"] = "xfj"
toolset["EXTRACTOR"]["TARBZ"]["LISTFLAGS"] = "tvfj"
toolset["EXTRACTOR"]["TARBZ"]["EXTRACTSUFFIX"] = "-C ${UNPACK['EXTRACTDIR']}"
if env.WhereIs("bzip2") :
toolset["EXTRACTOR"]["BZIP"]["RUN"] = "bzip2"
toolset["EXTRACTOR"]["BZIP"]["EXTRACTFLAGS"] = "-df"
if env.WhereIs("gzip") :
toolset["EXTRACTOR"]["GZIP"]["RUN"] = "gzip"
toolset["EXTRACTOR"]["GZIP"]["LISTEXTRACTOR"] = __fileextractor_nix_gzip
toolset["EXTRACTOR"]["GZIP"]["LISTFLAGS"] = "-l"
toolset["EXTRACTOR"]["GZIP"]["EXTRACTFLAGS"] = "-df"
else :
raise SCons.Errors.StopError("Unpack tool detection on this platform [%s] unkown" % (env["PLATFORM"]))
# the target_factory must be a "Entry", because the target list can be files and dirs, so we can not specified the targetfactory explicite
env.Replace(UNPACK = toolset)
env["BUILDERS"]["UnpackAll"] = SCons.Builder.Builder( action = __action, emitter = __emitter, target_factory = SCons.Node.FS.Entry, source_factory = SCons.Node.FS.File, single_source = True, PRINT_CMD_LINE_FUNC = __message )
# existing function of the builder
# @param env environment object
# @return true
def exists(env) :
return 1
|
kadasaikumar/iotivity-1.2.1
|
tools/scons/UnpackAll.py
|
Python
|
gpl-3.0
| 20,133
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Getting Things GNOME! - a personal organizer for the GNOME desktop
# Copyright (c) 2008-2013 - Lionel Dricot & Bertrand Rousseau
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
import os
import uuid
from GTG.backends.backendsignals import BackendSignals
from GTG.backends.genericbackend import GenericBackend
from GTG.backends.periodicimportbackend import PeriodicImportBackend
from GTG.backends.syncengine import SyncEngine, SyncMeme
from GTG.core.task import Task
from GTG.core.translations import _
from GTG.tools.logger import Log
from suds.client import Client
'''
Backend for importing mantis issues in GTG
Dependencies:
* python-suds
'''
class Backend(PeriodicImportBackend):
_general_description = {
GenericBackend.BACKEND_NAME: "backend_mantis",
GenericBackend.BACKEND_HUMAN_NAME: _("MantisBT"),
GenericBackend.BACKEND_AUTHORS: ["Luca Invernizzi", "Alayn Gortazar"],
GenericBackend.BACKEND_TYPE: GenericBackend.TYPE_READONLY,
GenericBackend.BACKEND_DESCRIPTION:
_("This synchronization service lets you import the issues found"
" on Mantis using a prestablished filter called 'gtg'."
" As the issue state changes in Mantis, the GTG task is "
" updated.\n"
"Please note that this is a read only synchronization service,"
" which means that if you open one of the imported tasks and "
" change one of the:\n"
" - title\n"
" - description\n"
" - tags\n"
"Your changes <b>will</b> be reverted when the associated"
" issue is modified. Apart from those, you are free to set "
" any other field (start/due dates, subtasks...): your "
" changes will be preserved. This is useful to add "
" personal annotations to issue"),
}
_static_parameters = {
"period": {
GenericBackend.PARAM_TYPE: GenericBackend.TYPE_INT,
GenericBackend.PARAM_DEFAULT_VALUE: 5, },
"username": {
GenericBackend.PARAM_TYPE: GenericBackend.TYPE_STRING,
GenericBackend.PARAM_DEFAULT_VALUE: 'insert your username', },
"password": {
GenericBackend.PARAM_TYPE: GenericBackend.TYPE_PASSWORD,
GenericBackend.PARAM_DEFAULT_VALUE: '', },
"service-url": {
GenericBackend.PARAM_TYPE: GenericBackend.TYPE_STRING,
GenericBackend.PARAM_DEFAULT_VALUE: 'http://example.com/mantis',
},
"tag-with-project-name": {
GenericBackend.PARAM_TYPE: GenericBackend.TYPE_BOOL,
GenericBackend.PARAM_DEFAULT_VALUE: True},
}
def __init__(self, parameters):
'''
See GenericBackend for an explanation of this function.
Re-loads the saved state of the synchronization
'''
super().__init__(parameters)
# loading the saved state of the synchronization, if any
self.data_path = os.path.join(
'mantis', 'sync_engine-' + self.get_id())
self.sync_engine = self._load_pickled_file(self.data_path,
SyncEngine())
def save_state(self):
'''Saves the state of the synchronization'''
self._store_pickled_file(self.data_path, self.sync_engine)
def do_periodic_import(self):
# Establishing connection
try:
self.cancellation_point()
client = Client('%s/api/soap/mantisconnect.php?wsdl' %
self._parameters['service-url'])
except KeyError:
self.quit(disable=True)
BackendSignals().backend_failed(self.get_id(),
BackendSignals.ERRNO_AUTHENTICATION
)
return
projects = client.service.mc_projects_get_user_accessible(
self._parameters['username'],
self._parameters['password'])
filters = client.service.mc_filter_get(self._parameters['username'],
self._parameters['password'], 0)
# Fetching the issues
self.cancellation_point()
my_issues = []
for filt in filters:
if filt['name'] == 'gtg':
for project in projects:
my_issues = client.service.mc_filter_get_issues(
self._parameters['username'],
self._parameters['password'],
project['id'],
filt['id'], 0, 100)
for issue in my_issues:
self.cancellation_point()
self._process_mantis_issue(issue)
last_issue_list = self.sync_engine.get_all_remote()
new_issue_list = [str(issue['id']) for issue in my_issues]
for issue_link in set(last_issue_list).difference(set(new_issue_list)):
self.cancellation_point()
# we make sure that the other backends are not modifying the task
# set
with self.datastore.get_backend_mutex():
tid = self.sync_engine.get_local_id(issue_link)
self.datastore.request_task_deletion(tid)
try:
self.sync_engine.break_relationship(remote_id=issue_link)
except KeyError:
pass
return
###############################################################################
# Process tasks ###############################################################
###############################################################################
def _process_mantis_issue(self, issue):
'''
        Given an issue object, finds out if it must be synced to a GTG task and,
        if so, it carries out the synchronization (by creating or
        updating a GTG task, or deleting itself if the related task has
        been deleted)
        @param issue: a mantis issue
'''
has_task = self.datastore.has_task
action, tid = self.sync_engine.analyze_remote_id(str(issue['id']),
has_task,
lambda b: True)
Log.debug("processing mantis (%s)" % (action))
if action is None:
return
issue_dic = self._prefetch_issue_data(issue)
# for the rest of the function, no access to issue must be made, so
# that the time of blocking inside the with statements is short.
# To be sure of that, set issue to None
issue = None
with self.datastore.get_backend_mutex():
if action == SyncEngine.ADD:
tid = str(uuid.uuid4())
task = self.datastore.task_factory(tid)
self._populate_task(task, issue_dic)
meme = SyncMeme(
task.get_modified(), issue_dic['modified'], self.get_id())
self.sync_engine.record_relationship(
local_id=tid,
remote_id=str(issue_dic['number']),
meme=meme
)
self.datastore.push_task(task)
elif action == SyncEngine.UPDATE:
task = self.datastore.get_task(tid)
self._populate_task(task, issue_dic)
meme = self.sync_engine.get_meme_from_remote_id(
issue_dic['number'])
meme.set_local_last_modified(task.get_modified())
meme.set_remote_last_modified(issue_dic['modified'])
self.save_state()
def _prefetch_issue_data(self, mantis_issue):
'''
We fetch all the necessary info that we need from the mantis_issue to
populate a task beforehand (these will be used in _populate_task).
@param mantis_issue: a mantis issue
@returns dict: a dictionary containing the relevant issue attributes
'''
issue_dic = {'title': mantis_issue['summary'],
'text': mantis_issue['description'],
'reporter': mantis_issue['reporter'].name,
'modified': mantis_issue['last_updated'],
'project': mantis_issue['project'].name,
'status': mantis_issue['status'].name,
'completed': (mantis_issue['status'].id >= 80),
'number': str(mantis_issue['id'])}
try:
issue_dic['assigned'] = mantis_issue['handler'].name == \
self._parameters['username']
except AttributeError:
issue_dic['assigned'] = False
return issue_dic
def _populate_task(self, task, issue_dic):
'''
Fills a GTG task with the data from a mantis issue.
@param task: a Task
@param issue_dic: a mantis issue
'''
# set task status
if issue_dic["completed"]:
task.set_status(Task.STA_DONE)
else:
task.set_status(Task.STA_ACTIVE)
if task.get_title() != issue_dic['title']:
task.set_title("{} {}: {}".format(
_("Iss."), issue_dic["number"], issue_dic['title']))
text = self._build_issue_text(issue_dic)
if task.get_excerpt() != text:
task.set_text(text)
new_tags = set([])
if self._parameters["tag-with-project-name"]:
new_tags = set(['@' + issue_dic['project']])
current_tags = set(task.get_tags_name())
# add the new ones
for tag in new_tags.difference(current_tags):
task.add_tag(tag)
task.add_remote_id(self.get_id(), issue_dic['number'])
def _build_issue_text(self, issue_dic):
        '''
        Creates the text that describes an issue
        '''
text = _("Reported by: ") + issue_dic["reporter"] + '\n'
text += _("Link to issue: ") + \
self._parameters['service-url'] + '/view.php?id=%s' % \
(issue_dic["number"]) + '\n'
text += '\n' + issue_dic["text"]
return text
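    # Illustrative output of _build_issue_text (all values below are hypothetical):
    #   Reported by: jdoe
    #   Link to issue: https://mantis.example.org/view.php?id=1234
    #
    #   <issue description text>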
|
shtrom/gtg
|
GTG/backends/backend_mantis.py
|
Python
|
gpl-3.0
| 10,900
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Universidade de Aveiro, DETI/IEETA, Bioinformatics Group - http://bioinformatics.ua.pt/
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from emif.settings import jerboa_collection, jerboa_aggregation_collection
from pymongo.errors import OperationFailure
from .parseJerboaFile import *
import json
from .conf_charts import *
from .charts.rule_matcher import *
#import pdb
class PopulationCharacteristic(object):
"""PopulationCharacteristic: This class controls the Jerboa File
"""
def __init__(self, arg=None, type=None):
self.arg = arg
self.type = type
def last_activity(self):
pass
def __to_json(self):
self._json = import_population_characteristics_data()
def revisions(self):
pass
def submit_new_revision(self, user, fingerprint_id, revision, path_file=None):
#path_file = "C:/Users/lbastiao/Projects/TEST_DataProfile_v1.5.6b.txt"
#path_file = "/Volumes/EXT1/Dropbox/MAPi-Dropbox/EMIF/Jerboa/TEST_DataProfile_v1.5.6b.txt"
self._json = import_population_characteristics_data(user, fingerprint_id, revision, filename=path_file)
#print self._json
#f = open('jerboaTmp', 'w')
#f.write(self._json)
#f.close()
json_data = json.loads(self._json)
try:
if len(json_data) > 0:
# Create MONGO record
data_example = jerboa_collection.insert(json_data)
# get last inserted record
#print jerboa_collection.find_one()
print "Success "
except OperationFailure:
print "Failure"
return json_data
def get_variables(self, var, row, fingerprint_id='abcd', revision='-1', filters=[], vars_that_should_exists=[]):
#db.jerboa_files.distinct( 'values.Var' )
# Need to filter by Fingerprint, otherwise, we're trapped.
#pdb.set_trace()
vars_that_should_exists = ['Count']
mrules = RuleMatcher(type=Fingerprint.objects.get(fingerprint_hash=fingerprint_id).questionnaire.id)
__filters = mrules.get_filter(var)
c1 = mrules.get_chart(var)
dict_query = {'fingerprint_id':fingerprint_id, 'revision': revision,
'values.Var': c1.title.var}
# Comparable
#comparable = True
#values_compare = ["M", "F"]
for ve in vars_that_should_exists:
dict_query['values.'+ve] = { "$exists" : True }
for _f in c1.y_axis.static_filters:
dict_query['values.'+_f.key] = _f.value
#print "filters"
#print filters
# Apply filters in the query
dict_query_general=[]
for ve in filters:
#print "ve"
#print ve
if isinstance(filters[ve], list):
#if not "$or" in dict_query:
_or_dict_query = {}
_or_dict_query["$or"] = [ ]
for _aux in filters[ve]:
_or_dict_query2 = {ve: _aux}
_or_dict_query["$or"].append(_or_dict_query2)
dict_query_general.append(_or_dict_query)
else:
dict_query[ve] = filters[ve]
if dict_query_general != []:
dict_query["$and"]= dict_query_general
#print dict_query
values = jerboa_collection.find(dict_query )
results = []
def transform(v, transformation, values):
if not type(v) is list:
y = float(values[v])
new_y = eval(transformation)
values[v] = new_y
else:
for _v in v:
y = float(values[_v])
new_y = eval(transformation)
values[_v] = new_y
#print values[_v]
return values
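        # The 'transformation' string is evaluated with the current value bound to
        # 'y'; e.g. a (hypothetical) transformation string "y * 100" would multiply
        # each value by 100 before it is stored back into the result row.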
values_app = None
for v in values:
if c1.y_axis.transformation != None:
try:
#print "transformation"
values_app = transform(c1.y_axis.var, c1.y_axis.transformation,v[u'values'])
#y = float(v[u'values'][c1.y_axis.var])
#new_y = eval(c1.y_axis.transformation)
#v[u'values'][c1.y_axis.var] = new_y
v[u'values'] = values_app
#print values_app
except:
#raise
print "bastard x error %s, %s " % (c1.y_axis.var, str(v[u'values']))
results.append(v[u'values'])
vorder = c1.x_axis.var
if c1.x_axis.sort_func!=None:
vorder = c1.x_axis.var
results = sorted(results, key=lambda k: eval(c1.x_axis.sort_func))
return results
def get_variables_filter(self, gender=None, name1=None, value1=None, name2=None,
value2=None, var=None):
        #db.jerboa_files.distinct( 'values.Var' )
        # distinct values of the requested field (the 'var' parameter)
        values = jerboa_collection.distinct('values.' + var)
return values
def generic_filter(self, filters):
json_acceptable_string = filters.replace("'", "\"")
#print json_acceptable_string
d = json.loads(json_acceptable_string)
#print d
values = jerboa_collection.find(d)
r = []
for v in values:
v = unicode(v)
r.append(v);
return r
def filters(self, var, fingerprint_id):
# Go to the rule matcher and ask for the filter for that particular case
comp = False
if fingerprint_id=="COMPARE":
comp=True
mrules = RuleMatcher(comp=comp)
filters = mrules.get_filter(var)
chart = mrules.get_chart(var)
#_filter = charts_conf.
# Should check if any special operation, for now, let's assume: NO!
for _filter in filters:
# Generate query
dict_query = {'fingerprint_id':fingerprint_id,
'values.Var': chart.title.var,
}
if comp:
dict_query = {'values.Var': chart.title.var,}
if _filter.key != None:
dict_query['values.' + _filter.key] = _filter.name
#print _filter
#print _filter.value
#print dict_query
if comp:
print dict_query
print 'values.' + _filter.value
values = jerboa_aggregation_collection.find( dict_query ).distinct('values.' + _filter.value )#
else:
values = jerboa_collection.find( dict_query ).distinct('values.' + _filter.value )#
values = sorted(values)
#values = jerboa_collection.find( dict_query ).distinct('values.' + _filter.value )
#print values
_filter.values = values
return filters
    def get_var(self):
        # Distinct variable names present in the Jerboa collection.
        values = jerboa_collection.distinct('values.Var')
        return values
def get_xs(self):
pass
def get_x_y(self):
pass
def get_settings(self):
cc = ConfCharts()
return cc.get_main_settings(type=self.type)
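# Hypothetical usage sketch (names and values are illustrative only):
#   pc = PopulationCharacteristic(type='patients')
#   settings = pc.get_settings()
#   rows = pc.get_variables('AgeGroup', None, fingerprint_id='abcd', revision='1')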
|
bioinformatics-ua/catalogue
|
emif/population_characteristics/services.py
|
Python
|
gpl-3.0
| 8,764
|
# -*- coding: utf-8 -*-
from __future__ import (unicode_literals, division, absolute_import, print_function)
__license__ = 'GPL 3'
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
import random
import re
from contextlib import closing
from lxml import html
from PyQt4.Qt import QUrl
from calibre import browser
from calibre.gui2 import open_url
from calibre.gui2.store import StorePlugin
from calibre.gui2.store.search_result import SearchResult
class AmazonKindleStore(StorePlugin):
search_url = 'http://www.amazon.com/s/?url=search-alias%3Ddigital-text&field-keywords='
details_url = 'http://amazon.com/dp/'
drm_search_text = u'Simultaneous Device Usage'
drm_free_text = u'Unlimited'
def open(self, parent=None, detail_item=None, external=False):
'''
Amazon comes with a number of difficulties.
        QWebView has major issues with Amazon.com. The largest of these
        issues is that it simply doesn't work on a number of pages.
        When connecting to a number of parts of Amazon.com (Kindle library
for instance) QNetworkAccessManager fails to connect with a
NetworkError of 399 - ProtocolFailure. The strange thing is,
when I check QNetworkRequest.HttpStatusCodeAttribute when the
399 error is returned the status code is 200 (Ok). However, once
the QNetworkAccessManager decides there was a NetworkError it
does not download the page from Amazon. So I can't even set the
HTML in the QWebView myself.
There is http://bugreports.qt.nokia.com/browse/QTWEBKIT-259 an
open bug about the issue but it is not correct. We can set the
useragent (Arora does) to something else and the above issue
will persist. This http://developer.qt.nokia.com/forums/viewthread/793
gives a bit more information about the issue but as of now (27/Feb/2011)
there is no solution or work around.
        We cannot change the linkDelegationPolicy to allow us to avoid
        QNetworkAccessManager because it only works for links. Forms aren't
included so the same issue persists on any part of the site (login)
that use a form to load a new page.
Using an aStore was evaluated but I've decided against using it.
There are three major issues with an aStore. Because checkout is
handled by sending the user to Amazon we can't put it in a QWebView.
If we're sending the user to Amazon sending them there directly is
nicer. Also, we cannot put the aStore in a QWebView and let it open the
redirection the users default browser because the cookies with the
shopping cart won't transfer.
Another issue with the aStore is how it handles the referral. It only
        counts the referral for the items in the shopping cart / the item
that directed the user to Amazon. Kindle books do not use the shopping
cart and send the user directly to Amazon for the purchase. In this
instance we would only get referral credit for the one book that the
aStore directs to Amazon that the user buys. Any other purchases we
won't get credit for.
The last issue with the aStore is performance. Even though it's an
        Amazon site it's slow. So much slower than Amazon.com that it makes
me not want to browse books using it. The look and feel are lesser
issues. So is the fact that it almost seems like the purchase is
with calibre. This can cause some support issues because we can't
do much for issues with Amazon.com purchase hiccups.
Another option that was evaluated was the Product Advertising API.
The reasons against this are complexity. It would take a lot of work
to basically re-create Amazon.com within calibre. The Product
Advertising API is also designed with being run on a server not
        in an app. The signing keys would have to be made available to every
calibre user which means bad things could be done with our account.
The Product Advertising API also assumes the same browser for easy
shopping cart transfer to Amazon. With QWebView not working and there
not being an easy way to transfer cookies between a QWebView and the
users default browser this won't work well.
We could create our own website on the calibre server and create an
Amazon Product Advertising API store. However, this goes back to the
        complexity argument. Why spend the time recreating Amazon.com?
The final and largest issue against using the Product Advertising API
is the Efficiency Guidelines:
"Each account used to access the Product Advertising API will be allowed
an initial usage limit of 2,000 requests per hour. Each account will
receive an additional 500 requests per hour (up to a maximum of 25,000
requests per hour) for every $1 of shipped item revenue driven per hour
in a trailing 30-day period. Usage thresholds are recalculated daily based
on revenue performance."
With over two million users a limit of 2,000 request per hour could
render our store unusable for no other reason than Amazon rate
limiting our traffic.
The best (I use the term lightly here) solution is to open Amazon.com
in the users default browser and set the affiliate id as part of the url.
'''
aff_id = {'tag': 'josbl0e-cpb-20'}
# Use Kovid's affiliate id 30% of the time.
if random.randint(1, 10) in (1, 2, 3):
aff_id['tag'] = 'calibrebs-20'
store_link = 'http://www.amazon.com/Kindle-eBooks/b/?ie=UTF&node=1286228011&ref_=%(tag)s&ref=%(tag)s&tag=%(tag)s&linkCode=ur2&camp=1789&creative=390957' % aff_id
if detail_item:
aff_id['asin'] = detail_item
store_link = 'http://www.amazon.com/dp/%(asin)s/?tag=%(tag)s' % aff_id
open_url(QUrl(store_link))
def search(self, query, max_results=10, timeout=60):
url = self.search_url + query.encode('ascii', 'backslashreplace').replace('%', '%25').replace('\\x', '%').replace(' ', '+')
br = browser()
counter = max_results
with closing(br.open(url, timeout=timeout)) as f:
doc = html.fromstring(f.read().decode('latin-1', 'replace'))
# Amazon has two results pages.
is_shot = doc.xpath('boolean(//div[@id="shotgunMainResults"])')
# Horizontal grid of books. Search "Paolo Bacigalupi"
if is_shot:
data_xpath = '//div[contains(@class, "result")]'
format_xpath = './/div[@class="productTitle"]//text()'
asin_xpath = './/div[@class="productTitle"]//a'
cover_xpath = './/div[@class="productTitle"]//img/@src'
title_xpath = './/div[@class="productTitle"]/a//text()'
price_xpath = './/div[@class="newPrice"]/span/text()'
# Vertical list of books.
else:
# New style list. Search "Paolo Bacigalupi"
if doc.xpath('boolean(//div[@class="image"])'):
data_xpath = '//div[contains(@class, "results")]//div[contains(@class, "result")]'
format_xpath = './/span[@class="binding"]//text()'
asin_xpath = './/div[@class="image"]/a[1]'
cover_xpath = './/img[@class="productImage"]/@src'
title_xpath = './/a[@class="title"]/text()'
price_xpath = './/span[contains(@class, "price")]/text()'
# Old style list. Search "martin"
else:
data_xpath = '//div[contains(@class, "result")]'
format_xpath = './/span[@class="format"]//text()'
asin_xpath = './/div[@class="productImage"]/a[1]'
cover_xpath = './/div[@class="productImage"]//img/@src'
title_xpath = './/div[@class="productTitle"]/a/text()'
price_xpath = './/div[@class="newPrice"]//span//text()'
for data in doc.xpath(data_xpath):
if counter <= 0:
break
# Even though we are searching digital-text only Amazon will still
                # put in results for non Kindle books (author pages). So we need
# to explicitly check if the item is a Kindle book and ignore it
# if it isn't.
format = ''.join(data.xpath(format_xpath))
if 'kindle' not in format.lower():
continue
# We must have an asin otherwise we can't easily reference the
# book later.
asin_href = None
asin_a = data.xpath(asin_xpath)
if asin_a:
asin_href = asin_a[0].get('href', '')
m = re.search(r'/dp/(?P<asin>.+?)(/|$)', asin_href)
if m:
asin = m.group('asin')
else:
continue
else:
continue
cover_url = ''.join(data.xpath(cover_xpath))
title = ''.join(data.xpath(title_xpath))
price = ''.join(data.xpath(price_xpath))
if is_shot:
author = format.split(' by ')[-1]
else:
author = ''.join(data.xpath('.//span[@class="ptBrand"]/text()'))
author = author.split('by ')[-1]
counter -= 1
s = SearchResult()
s.cover_url = cover_url.strip()
s.title = title.strip()
s.author = author.strip()
s.price = price.strip()
s.detail_item = asin.strip()
s.formats = 'Kindle'
yield s
def get_details(self, search_result, timeout):
url = self.details_url
br = browser()
with closing(br.open(url + search_result.detail_item, timeout=timeout)) as nf:
idata = html.fromstring(nf.read())
if idata.xpath('boolean(//div[@class="content"]//li/b[contains(text(), "' +
self.drm_search_text + '")])'):
if idata.xpath('boolean(//div[@class="content"]//li[contains(., "' +
self.drm_free_text + '") and contains(b, "' +
self.drm_search_text + '")])'):
search_result.drm = SearchResult.DRM_UNLOCKED
else:
search_result.drm = SearchResult.DRM_UNKNOWN
else:
search_result.drm = SearchResult.DRM_LOCKED
return True
|
yeyanchao/calibre
|
src/calibre/gui2/store/stores/amazon_plugin.py
|
Python
|
gpl-3.0
| 10,978
|
from ImageScripter import *
from elan import *
Viewer.Start()
Viewer.settingswheel.Click()
Viewer.settingspasswordpage.WaitContinue(threshold = .98)
Viewer.one.Click()
Viewer.two.Click()
Viewer.three.Click()
Viewer.four.Click()
sleep(3)
try:
Viewer.settingsmenu2.WaitContinue(seconds=10)
except:
Viewer.settingsmenuwithvideoandclimate.WaitContinue()
print("Change Settings Passcode")
Say("Change Settings Passcode")
Viewer.changesettingspassword.Click()
Viewer.changesettingspasscodepage.WaitContinue()
Viewer.backarrow.Click()
################# Manage Users Tab
Viewer.manageusers.WaitContinue(threshold = .96)
Viewer.manageusers.Click(threshold = .96)
Viewer.manageuserspage.WaitContinue()
Viewer.add.Click(threshold = .96)
Viewer.cursorpassword.RealClick()
Viewer.Press('enter')
Viewer.newuser.Click(threshold = .96)
SleepCount(3)
Viewer.rename.Click()#GOOD
Viewer.Press('backspace')
sleep(1)
Viewer.Type('N')
sleep(1)
Viewer.Type('e')
sleep(1)
Viewer.Type('w')
sleep(1)
Viewer.Type(' ')
sleep(1)
Viewer.Type('U')
sleep(1)
Viewer.Type('s')
sleep(1)
Viewer.Type('e')
sleep(1)
Viewer.Type('r')
sleep(1)
Press('enter')
#Viewer.Press('enter')
#Viewer.delete.Click()
Viewer.deleteold.Click()
Viewer.admin.Click()
Viewer.settings.Click()
Viewer.adminsettingspage.WaitContinue()
Viewer.backarrow.Click()
Viewer.manageuserspage.WaitContinue()
Viewer.backarrow.Click()
####################### Intercom Global Settings Menu
Viewer.intercomglobalsettings.Click()
Viewer.intercomglobalsettingspage.WaitContinue()
################## Media Zone Paging Tab
Viewer.mediazonepaging.Click()
Viewer.mediazonepagingpage.WaitContinue()
Viewer.backarrow.Click()
############ Intercom Sound Settings Tab
Viewer.intercomsoundsettings.Click()
Viewer.intercomsoundsettingspage.WaitContinue()
Viewer.backarrow.Click()
############# Door Bell Settings Tab
Viewer.doorbellsettings.Click()
Viewer.add.Click()
Viewer.cursorpassword.RealClick()
Viewer.Press('enter')
Viewer.doorbell.Click()
Viewer.settings.Click()
Viewer.soundinputoutputs.WaitContinue(threshold = .96)
Viewer.backarrow.Click()
Viewer.doorbell.Click()
SleepCount(3)
#Viewer.rename.WaitContinue()#GOOD
Viewer.rename2.Click()#GOOD
Viewer.Press('backspace')
for i in 'Door Bell':
Viewer.Type(i)
Press('enter')
Viewer.delete.Click()
Viewer.backarrow.Click()
############### Door Station Dial Plans
Viewer.doorstationdialplans.Click()
Viewer.doorstationdialplanspage.WaitContinue()
Viewer.add.Click()
Viewer.cursorpassword.RealClick()
Viewer.Press('enter')
Viewer.newdialplan.Click()
Viewer.settings.Click()
Viewer.newdialplansettingspage.WaitContinue()
Viewer.backarrow.Click()
Viewer.newdialplan.Click()
Viewer.rename2.Click()#GOOD
Viewer.Press('backspace')
Viewer.Type('New Dial Plan')
Viewer.Press('enter')
Viewer.delete.Click()
Viewer.doorstationdialplanspage.WaitContinue()
Viewer.backarrow.Click()
############### Assign Dial Plans
Viewer.assigndialplans.Click()
Viewer.backarrow.Click()
Viewer.backarrow.Click()
Viewer.tvchannelgroups.Click()
Viewer.tvchannelgroupspage.WaitContinue()
Viewer.backarrow.Click()
Viewer.configurethisstation.Click()
Viewer.configurethisstationpage.WaitContinue()
Viewer.registration.Click()
Viewer.backarrow.Click()
Viewer.themes.Click()
Viewer.backarrow.Click()
Viewer.codecs.Click()
Viewer.codecspage.WaitContinue()
Viewer.backarrow.Click()
Viewer.backarrow.Click()
try:
Viewer.touchscreensecurity.Click()
Viewer.backarrow.Click()
except:
pass
Viewer.managealarms.Click()
Viewer.backarrow.Click()
Viewer.backarrow.Click()
sleep(5)
Viewer.elanlogo.Click()
if Viewer.bigelan.Exists == True:
pass
else:
Viewer.shudder.Click()
Viewer.CloseAndClean()
Configurator.Reset()
|
kenshay/ImageScript
|
ProgramData/SystemFiles/Python/Lib/site-packages/elan/Pools/Quick_Tests/2____Viewer_Settings_Unconfigured_Check_Pages___.py
|
Python
|
gpl-3.0
| 3,706
|
# -*- coding: utf-8 -*-
# lstm language model
#
# Author: Igor
import time
import numpy as np
import tensorflow as tf
flags = tf.app.flags
# define flags
flags.DEFINE_float('init_scale', 0.1,
'the initial scale of the weights')
flags.DEFINE_float('learning_rate', 1.0,
'the initial value of the learning rate')
flags.DEFINE_float('max_grad_norm', 2,
'the maximum permissible norm of the gradient')
flags.DEFINE_integer('num_layers', 2,
                     'the number of LSTM layers')
flags.DEFINE_integer('num_steps', 10,
                     'the number of unrolled steps of LSTM')
flags.DEFINE_integer('hidden_size', 200,
                     'the number of LSTM units')
flags.DEFINE_integer('max_epoch', 10,
                     'the number of epochs trained with the initial learning rate')
flags.DEFINE_integer('max_max_epoch', 50,
                     'the total number of epochs for training')
flags.DEFINE_float('keep_prob', 1.0,
'the probability of keeping weights in the dropout layer')
flags.DEFINE_float('lr_decay', 0.7,
'the decay of the learning rate for each epoch after "max_epoch"')
flags.DEFINE_integer('batch_size', 50,
                     'the batch size')
flags.DEFINE_integer('vocab_size', 10000, 'the vocab size')
flags.DEFINE_integer('statistics_interval', 5,
'Print statistics every n seconds')
flags.DEFINE_integer('summary_interval', 5,
                     "Save training summary to file every n seconds (rounded "
                     "up to statistics interval).")
flags.DEFINE_integer('checkpoint_interval', 600,
                     "Checkpoint the model (i.e. save the parameters) every n "
                     "seconds (rounded up to statistics interval).")
flags.DEFINE_string('data_path', 'data/', 'data directory')
FLAGS = flags.FLAGS
class Options(object):
"""
Options used by Language Model with LSTM
"""
def __init__(self):
'''
Model options
:return:
'''
self.init_scale = FLAGS.init_scale
self.learning_rate = FLAGS.learning_rate
self.max_grad_norm = FLAGS.max_grad_norm
self.num_layers = FLAGS.num_layers
self.num_steps = FLAGS.num_steps
self.hidden_size = FLAGS.hidden_size
self.max_epoch = FLAGS.max_epoch
self.max_max_epoch = FLAGS.max_max_epoch
self.keep_prob = FLAGS.keep_prob
self.lr_decay = FLAGS.lr_decay
self.batch_size = FLAGS.batch_size
self.vocab_size = FLAGS.vocab_size
self.summary_interval = FLAGS.summary_interval
self.checkpoint_interval = FLAGS.checkpoint_interval
class LanguageLSTM():
'''
    LSTM language model
'''
def __init__(self, config, session, is_training):
        '''
        Initialize the model
        :param config: model configuration options
        :param session: tensorflow session
        :param is_training: whether the model is being built for training
        '''
self._config = config
self._session = session
self._is_traing = is_training
# self._initial_state = None
# self._lr = None
# self._train_op = None
# self._final_state = None
# self._cost = None
# self.summary_writer = None
def inference(self):
        '''
        Build the LSTM graph and return the output logits
        '''
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(
self._config.hidden_size, forget_bias=0.0)
if self._is_traing and self._config.keep_prob < 1:
lstm_cell = tf.nn.rnn_cell.DropoutWrapper(
lstm_cell,
output_keep_prob=self._config.keep_prob
)
cell = tf.nn.rnn_cell.MultiRNNCell(
[lstm_cell] * self._config.num_layers)
self._initial_state = cell.zero_state(
self._config.batch_size,
tf.float32
)
with tf.device('/cpu:0'):
self.embedding = tf.get_variable(
'embedding',
[self._config.vocab_size, self._config.hidden_size]
)
inputs = tf.nn.embedding_lookup(self.embedding, self._input_data)
if self._is_traing and self._config.keep_prob < 1:
inputs = tf.nn.dropout(inputs,
self._config.keep_prob)
outputs = []
state = self._initial_state
with tf.variable_scope("LSTM"):
for time_step in range(self._config.num_steps):
if time_step > 0:
tf.get_variable_scope().reuse_variables()
(cell_output, state) = cell(inputs[:, time_step, :], state)
outputs.append(cell_output)
output = tf.reshape(
tf.concat(1, outputs), [-1, self._config.hidden_size])
softmax_w = tf.get_variable("softmax_w",
[self._config.hidden_size,
self._config.vocab_size])
softmax_b = tf.get_variable("softmax_b",
[self._config.vocab_size])
logits = tf.matmul(output, softmax_w) + softmax_b
self._final_state = state
return logits
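    # Shape note: the logits returned by inference() are
    # [batch_size * num_steps, vocab_size]; loss() flattens the targets the
    # same way before computing the sequence loss.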
def loss(self, logits):
        '''
        Compute the sequence loss
        :param logits: output logits returned by inference()
        '''
        loss = tf.nn.seq2seq.sequence_loss_by_example(  # per-example loss
[logits],
[tf.reshape(self._targets, [-1])],
[tf.ones(
[self._config.batch_size * self._config.num_steps])])
return loss
def optimize(self, loss):
self._cost = cost = tf.reduce_sum(loss) / self._config.batch_size
self._lr = tf.Variable(0.0, trainable=False)
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars),
self._config.max_grad_norm)
        optimizer = tf.train.GradientDescentOptimizer(self._lr)  # gradient descent
        self._train_op = train_op = optimizer.apply_gradients(zip(grads, tvars))  # apply the updates
return train_op
def assign_lr(self, lr_value):
self._session.run(tf.assign(self.lr, lr_value))
def run_epoch(self, data, reader, summary_op, verbose=False):
        '''
        Runs the model on the given data
        :param data: input data
        :param reader: data reader providing the batch iterator
        :param summary_op: merged summary operation
        :param verbose: whether to print progress
        :return: perplexity over the processed data
        '''
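        # The perplexity reported below is exp(accumulated cost / number of
        # unrolled time steps processed), i.e. the usual per-word perplexity
        # for this batching scheme.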
epoch_size = (len(data) // self._config.batch_size - 1) // self._config.num_steps
start_time = time.time()
costs = 0.0
iters = 0
state = self._initial_state.eval()
for step, (x, y) in enumerate(reader.iterator(data, self._config.batch_size, self._config.num_steps)):
feed_dict = {self.input_data: x, self.targets: y, self.initial_state: state}
cost, state, _ = self._session.run([self.cost, self.final_state, self.train_op],
feed_dict)
costs += cost
iters += self._config.num_steps
if verbose and step % (epoch_size // 10) == 10:
print("%.3f ; perplexity: %.3f ; speed: %.0f wps" %
(step * 1.0 / epoch_size, np.exp(costs / iters),
iters * self._config.batch_size / (time.time() - start_time)))
print("Summary Wrtier")
summary_str = self._session.run(summary_op, feed_dict=feed_dict)
self.summary_writer.add_summary(summary_str, step)
return np.exp(costs / iters)
def train(self, data, reader):
if not FLAGS.data_path:
raise ValueError("Must set --data_path to data directory")
# self.build_graph()
with tf.Graph().as_default(), tf.Session() as session:
self._session = session
            # initialize all variables
initializer = tf.random_normal_initializer(
-self._config.init_scale, self._config.init_scale)
self._input_data = tf.placeholder(
tf.int32, [config.batch_size, config.num_steps])
self._targets = tf.placeholder(
tf.int32, [config.batch_size, config.num_steps])
            # inference
logits = self.inference()
            # compute the loss
loss = self.loss(logits)
            # optimization
self.optimize(loss)
summary_op = tf.merge_all_summaries()
# saver = tf.train.Saver()
self.summary_writer = tf.train.SummaryWriter(FLAGS.data_path,
graph_def=self._session.graph_def)
tf.initialize_all_variables().run()
for i in range(self._config.max_max_epoch):
lr_decay = self._config.lr_decay ** max(i - self._config.max_epoch, 0.0)
self.assign_lr(self._config.learning_rate * lr_decay)
print("Epoch: %d Learning rate: %.3f" % (i + 1,
self._session.run(self.lr)))
train_perplexity = self.run_epoch(data, reader, summary_op, verbose=True)
def load(self):
'''
        Load the model
'''
pass
@property
def input_data(self):
return self._input_data
@property
def targets(self):
return self._targets
@property
def initial_state(self):
return self._initial_state
@property
def cost(self):
return self._cost
@property
def final_state(self):
return self._final_state
@property
def lr(self):
return self._lr
@property
def train_op(self):
return self._train_op
if __name__ == '__main__':
# from TensorFlow.word_rnn import reader
#
# train = test.ptb_raw_data('data/')[0]
# config = Options()
# session = tf.Session()
#
# lstm = LanguageLSTM(config, session, True)
# lstm.train(data=train, reader=test)
pass
|
IgorWang/MachineLearningPracticer
|
tensorgo/lstm/lstm.py
|
Python
|
gpl-3.0
| 9,957
|
# -*- coding: utf-8 -*-
# Copyright (C) 2010 by RoboLab - University of Extremadura
#
# This file is part of RoboComp
#
# RoboComp is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RoboComp is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RoboComp. If not, see <http://www.gnu.org/licenses/>.
#
import Ice, sys, math, traceback
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
class C(QWidget):
def __init__(self, endpoint, modules):
QWidget.__init__(self)
self.ic = Ice.initialize(sys.argv)
self.mods = modules
print ('Endpoint', endpoint)
self.prx = self.ic.stringToProxy(endpoint)
self.proxy = self.mods['RoboCompRoimant'].RoimantPrx.checkedCast(self.prx)
self.pyrList = []
for level in range(4): self.pyrList.append(None)
self.job()
def job(self):
#output = self.proxy.getBothPyramidsAndLeftROIList()
output = self.proxy.getBothPyramidsRGBAndLeftROIList()
for level in range(4):
self.pyrList[level] = output[0][level]
def paintEvent(self, event=None):
painter = QPainter(self)
painter.setRenderHint(QPainter.Antialiasing, True)
xPos = -160
yPos = self.height()
for level in range(len(self.pyrList)):
xPos = xPos + 160/(2**level)
yPos = yPos - 240/(2**level)
qimage = QImage(self.pyrList[level], 320/(2**level), 240/(2**level), QImage.Format_RGB888);
#qimage = QImage(self.pyrList[level], 320/(2**level), 240/(2**level), QImage.Format_Indexed8)
#for i in range(256):
#qimage.setColor(i, QColor(i,i,i).rgb())
painter.drawImage(QPointF(xPos, yPos), qimage)
painter.end()
painter = None
|
robocomp/robocomp
|
tools/rcmonitor/examples/pyramidRoi.py
|
Python
|
gpl-3.0
| 2,085
|
#!/usr/bin/env python
#################################################################
# Create configurations programmatically.
# This allows us to test features in combination,
# as well as in isolation.
#################################################################
import yaml
import sys
from subprocess import call
#################################################################
# 'Constants' shared by all test runs
#################################################################
MAX_AGENTS = 10000
MAX_ANALYSIS_STEPS = 10000  # Increase for longer test simulation duration
ANALYSIS_STEP_SAVE_POINT = MAX_ANALYSIS_STEPS * 3/4 # For saving tests, when should we save to disk?
MAX_REAL_TIME = "hour" # Increase for longer test time allowance
RAND_TIME_INCR = False
RUN_SH_FLAGS = "--debug-std --stacktrace"
#################################################################
# Create configurations programmatically.
#
# This allows us to test features in combination,
# as well as in isolation.
#################################################################
class Configuration:
def __init__(self,
follow_model,
use_tweeting,
use_retweeting,
use_follow,
use_tweet_follow,
use_unfollow,
use_followback,
use_hashtags,
use_add_rate):
self.follow_model = follow_model
self.use_tweeting = use_tweeting
self.use_retweeting = use_retweeting
self.use_follow = use_follow
self.use_tweet_follow = use_tweet_follow
self.use_unfollow = use_unfollow
self.use_followback = use_followback
self.use_hashtags = use_hashtags
self.use_add_rate = use_add_rate
self.initial_agents = MAX_AGENTS if not use_add_rate else 0
def description(self):
tweet = "+Tweets: on" if self.use_tweeting else "-Tweets: OFF"
retweet = "+Retweets: on" if self.use_retweeting else "-Retweets: OFF"
unfollow = "+Unfollows: on" if self.use_unfollow else "-Unfollows: OFF"
followback = "+Followback: on" if self.use_followback else "-Retweets: OFF"
hashtags = "+Hashtags: on" if self.use_hashtags else "-Hashtags: OFF"
add_rate = "+Add Rate: on" if self.use_add_rate else "-Add Rate: OFF"
return "[Follow-model=%s, %s %s %s %s, Initial=%d, Max=%d]" % (
self.follow_model,
tweet, retweet, followback, add_rate,
self.initial_agents, MAX_AGENTS
)
FOLLOW_MODELS = ['barabasi', 'random', 'preferential', 'agent', 'preferential-agent', 'hashtag', 'twitter', 'none']
configurations = []
# Saving is implemented by saving numbers into 'observables'
# and comparing the corresponding numbers when implementing saving
for follow_model in FOLLOW_MODELS:
for use_tweeting in [True, False]:
for use_retweeting in [False,True]:
for use_tweet_follow in [False,True]:
for use_follow in [False,True]:
for use_unfollow in [False,True]:
for use_followback in [False,True]:
for use_hashtags in [False,True]:
for use_add_rate in [False,True]:
configurations.append(
Configuration(
follow_model,
use_tweeting,
use_retweeting,
use_follow,
use_tweet_follow,
use_unfollow,
use_followback,
use_hashtags,
use_add_rate
)
)
#for config in configurations:
# print(config.description())
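# With 8 follow models and 8 independent boolean switches, the loops above
# enumerate 8 * 2**8 = 2048 test configurations.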
#################################################################
# Convert configurations to tests.
#################################################################
def config_to_yaml(C):
follow_model = C.follow_model
if follow_model == 'barabasi' or follow_model == 'none':
follow_model = 'random'
use_hashtag_probability = 1 if C.use_hashtags else 0
add_rate = 0.1 if C.use_add_rate else 0
follow_weight = 0 if C.follow_model == 'none' else 5
    retweet_rate = 0.05 if C.use_retweeting else 0  # assumed non-zero transmission rate when retweeting is enabled
tweet_follow_probability = 0.5 if C.use_tweet_follow else 0
followback_probability = 0.44 if C.use_followback else 0
return {
"analysis": {
"initial_agents": C.initial_agents,
"max_agents": MAX_AGENTS,
"max_time": "unlimited",
"max_real_time": MAX_REAL_TIME,
"max_analysis_steps": MAX_ANALYSIS_STEPS,
"enable_interactive_mode": False,
"enable_lua_hooks": True,
# The TEST_INTERACT.lua file gives extra control over the test behaviour.
# Specifically, the file allows for Lua to save and load the network
"lua_script": "tests/TEST_INTERACT.lua",
"use_barabasi": (C.follow_model == 'barabasi'),
"use_random_time_increment": RAND_TIME_INCR,
"use_followback": C.use_followback,
"follow_model": follow_model,
"model_weights": {
"random": 0.2,
"preferential": 0.2,
"agent": 0.2,
"preferential_agent": 0.2,
"twitter_suggest": 0.2,
"hashtag": 0.2 if C.use_hashtags else 0
},
"stage1_unfollow": C.use_unfollow,
"unfollow_tweet_rate": 0.1,
"use_hashtag_probability": use_hashtag_probability,
},
"rates": {
"add": {
"function": "constant",
"value": add_rate
}
},
"preference_classes": [
{
"name": "StandardPref",
"tweet_transmission": {
"plain": {
"Standard": retweet_rate,
"Celebrity": retweet_rate,
"else": retweet_rate
},
"different_ideology": {
"Standard": retweet_rate,
"Celebrity": retweet_rate,
"else": retweet_rate
},
"same_ideology": {
"Standard": retweet_rate,
"Celebrity": retweet_rate,
"else": retweet_rate
},
"humorous": {
"Standard": retweet_rate,
"Celebrity": retweet_rate,
"else": retweet_rate
}
},
"follow_reaction_prob": tweet_follow_probability
},
],
"agents": [
{
"name": "Standard",
"weights": {
"add": 80,
"follow": 5,
"tweet_type": {
"ideological": 1,
"plain": 1,
"musical": 1,
"humorous": 1
}
},
"followback_probability": followback_probability,
"hashtag_follow_options": {
"care_about_region": C.use_hashtags,
"care_about_ideology": C.use_hashtags
},
"rates": {
"follow": {
"function": "constant",
"value": 0.1 if C.use_follow else 0
},
"tweet": {
"function": "constant",
"value": 0.01 if C.use_tweeting else 0
}
}
}
]
}
#################################################################
# Run tests, and gather statistics.
#################################################################
# Clear old logs
call(["bash", "clean_tests.sh"])
def run_config_as_test(test_id, C):
dict = config_to_yaml(C)
text = yaml.dump(dict)
f = open('TEST.yaml', 'w')
f.write(text)
f.close()
code = call(["bash", "wrapper.sh", test_id, C.description(), 'TEST.yaml', RUN_SH_FLAGS, str(ANALYSIS_STEP_SAVE_POINT)])
num = 1
print("Running " + str(len(configurations)) + " tests.")
for config in configurations:
run_config_as_test("Test_" + str(num), config)
num += 1
|
hashkat/hashkat
|
tests/run_tests.py
|
Python
|
gpl-3.0
| 8,374
|
"""PySoundFile is an audio library based on libsndfile, CFFI and NumPy.
Sound files can be read or written directly using the functions
:func:`read` and :func:`write`.
To read a sound file in a block-wise fashion, use :func:`blocks`.
Alternatively, sound files can be opened as :class:`SoundFile` objects.
For further information, see http://pysoundfile.readthedocs.org/.
"""
__version__ = "0.9.0"
import os as _os
import sys as _sys
from cffi import FFI as _FFI
from os import SEEK_SET, SEEK_CUR, SEEK_END
try:
_unicode = unicode # doesn't exist in Python 3.x
except NameError:
_unicode = str
_ffi = _FFI()
_ffi.cdef("""
enum
{
SF_FORMAT_SUBMASK = 0x0000FFFF,
SF_FORMAT_TYPEMASK = 0x0FFF0000,
SF_FORMAT_ENDMASK = 0x30000000
} ;
enum
{
SFC_GET_LIB_VERSION = 0x1000,
SFC_GET_LOG_INFO = 0x1001,
SFC_GET_FORMAT_INFO = 0x1028,
SFC_GET_FORMAT_MAJOR_COUNT = 0x1030,
SFC_GET_FORMAT_MAJOR = 0x1031,
SFC_GET_FORMAT_SUBTYPE_COUNT = 0x1032,
SFC_GET_FORMAT_SUBTYPE = 0x1033,
SFC_FILE_TRUNCATE = 0x1080,
SFC_SET_CLIPPING = 0x10C0,
SFC_SET_SCALE_FLOAT_INT_READ = 0x1014,
SFC_SET_SCALE_INT_FLOAT_WRITE = 0x1015,
} ;
enum
{
SF_FALSE = 0,
SF_TRUE = 1,
/* Modes for opening files. */
SFM_READ = 0x10,
SFM_WRITE = 0x20,
SFM_RDWR = 0x30,
} ;
typedef int64_t sf_count_t ;
typedef struct SNDFILE_tag SNDFILE ;
typedef struct SF_INFO
{
sf_count_t frames ; /* Used to be called samples. Changed to avoid confusion. */
int samplerate ;
int channels ;
int format ;
int sections ;
int seekable ;
} SF_INFO ;
SNDFILE* sf_open (const char *path, int mode, SF_INFO *sfinfo) ;
int sf_format_check (const SF_INFO *info) ;
sf_count_t sf_seek (SNDFILE *sndfile, sf_count_t frames, int whence) ;
int sf_command (SNDFILE *sndfile, int cmd, void *data, int datasize) ;
int sf_error (SNDFILE *sndfile) ;
const char* sf_strerror (SNDFILE *sndfile) ;
const char* sf_error_number (int errnum) ;
int sf_perror (SNDFILE *sndfile) ;
int sf_error_str (SNDFILE *sndfile, char* str, size_t len) ;
int sf_close (SNDFILE *sndfile) ;
void sf_write_sync (SNDFILE *sndfile) ;
sf_count_t sf_read_short (SNDFILE *sndfile, short *ptr, sf_count_t items) ;
sf_count_t sf_read_int (SNDFILE *sndfile, int *ptr, sf_count_t items) ;
sf_count_t sf_read_float (SNDFILE *sndfile, float *ptr, sf_count_t items) ;
sf_count_t sf_read_double (SNDFILE *sndfile, double *ptr, sf_count_t items) ;
/* Note: Data ptr argument types are declared as void* here in order to
avoid an implicit cast warning. (gh183). */
sf_count_t sf_readf_short (SNDFILE *sndfile, void *ptr, sf_count_t frames) ;
sf_count_t sf_readf_int (SNDFILE *sndfile, void *ptr, sf_count_t frames) ;
sf_count_t sf_readf_float (SNDFILE *sndfile, void *ptr, sf_count_t frames) ;
sf_count_t sf_readf_double (SNDFILE *sndfile, void *ptr, sf_count_t frames) ;
sf_count_t sf_write_short (SNDFILE *sndfile, short *ptr, sf_count_t items) ;
sf_count_t sf_write_int (SNDFILE *sndfile, int *ptr, sf_count_t items) ;
sf_count_t sf_write_float (SNDFILE *sndfile, float *ptr, sf_count_t items) ;
sf_count_t sf_write_double (SNDFILE *sndfile, double *ptr, sf_count_t items) ;
/* Note: The argument types were changed to void* in order to allow
writing bytes in SoundFile.buffer_write() */
sf_count_t sf_writef_short (SNDFILE *sndfile, void *ptr, sf_count_t frames) ;
sf_count_t sf_writef_int (SNDFILE *sndfile, void *ptr, sf_count_t frames) ;
sf_count_t sf_writef_float (SNDFILE *sndfile, void *ptr, sf_count_t frames) ;
sf_count_t sf_writef_double (SNDFILE *sndfile, void *ptr, sf_count_t frames) ;
sf_count_t sf_read_raw (SNDFILE *sndfile, void *ptr, sf_count_t bytes) ;
sf_count_t sf_write_raw (SNDFILE *sndfile, void *ptr, sf_count_t bytes) ;
const char* sf_get_string (SNDFILE *sndfile, int str_type) ;
int sf_set_string (SNDFILE *sndfile, int str_type, const char* str) ;
const char * sf_version_string (void) ;
typedef sf_count_t (*sf_vio_get_filelen) (void *user_data) ;
typedef sf_count_t (*sf_vio_seek) (sf_count_t offset, int whence, void *user_data) ;
typedef sf_count_t (*sf_vio_read) (void *ptr, sf_count_t count, void *user_data) ;
typedef sf_count_t (*sf_vio_write) (const void *ptr, sf_count_t count, void *user_data) ;
typedef sf_count_t (*sf_vio_tell) (void *user_data) ;
typedef struct SF_VIRTUAL_IO
{ sf_count_t (*get_filelen) (void *user_data) ;
sf_count_t (*seek) (sf_count_t offset, int whence, void *user_data) ;
sf_count_t (*read) (void *ptr, sf_count_t count, void *user_data) ;
sf_count_t (*write) (const void *ptr, sf_count_t count, void *user_data) ;
sf_count_t (*tell) (void *user_data) ;
} SF_VIRTUAL_IO ;
SNDFILE* sf_open_virtual (SF_VIRTUAL_IO *sfvirtual, int mode, SF_INFO *sfinfo, void *user_data) ;
SNDFILE* sf_open_fd (int fd, int mode, SF_INFO *sfinfo, int close_desc) ;
typedef struct SF_FORMAT_INFO
{
int format ;
const char* name ;
const char* extension ;
} SF_FORMAT_INFO ;
""")
if _sys.platform == 'win32':
_ffi.cdef("""
SNDFILE* sf_wchar_open (LPCWSTR wpath, int mode, SF_INFO *sfinfo) ;
""")
_str_types = {
'title': 0x01,
'copyright': 0x02,
'software': 0x03,
'artist': 0x04,
'comment': 0x05,
'date': 0x06,
'album': 0x07,
'license': 0x08,
'tracknumber': 0x09,
'genre': 0x10,
}
_formats = {
'WAV': 0x010000, # Microsoft WAV format (little endian default).
'AIFF': 0x020000, # Apple/SGI AIFF format (big endian).
'AU': 0x030000, # Sun/NeXT AU format (big endian).
'RAW': 0x040000, # RAW PCM data.
'PAF': 0x050000, # Ensoniq PARIS file format.
'SVX': 0x060000, # Amiga IFF / SVX8 / SV16 format.
'NIST': 0x070000, # Sphere NIST format.
'VOC': 0x080000, # VOC files.
'IRCAM': 0x0A0000, # Berkeley/IRCAM/CARL
'W64': 0x0B0000, # Sonic Foundry's 64 bit RIFF/WAV
'MAT4': 0x0C0000, # Matlab (tm) V4.2 / GNU Octave 2.0
'MAT5': 0x0D0000, # Matlab (tm) V5.0 / GNU Octave 2.1
'PVF': 0x0E0000, # Portable Voice Format
'XI': 0x0F0000, # Fasttracker 2 Extended Instrument
'HTK': 0x100000, # HMM Tool Kit format
'SDS': 0x110000, # Midi Sample Dump Standard
'AVR': 0x120000, # Audio Visual Research
'WAVEX': 0x130000, # MS WAVE with WAVEFORMATEX
'SD2': 0x160000, # Sound Designer 2
'FLAC': 0x170000, # FLAC lossless file format
'CAF': 0x180000, # Core Audio File format
'WVE': 0x190000, # Psion WVE format
'OGG': 0x200000, # Xiph OGG container
'MPC2K': 0x210000, # Akai MPC 2000 sampler
'RF64': 0x220000, # RF64 WAV file
}
_subtypes = {
'PCM_S8': 0x0001, # Signed 8 bit data
'PCM_16': 0x0002, # Signed 16 bit data
'PCM_24': 0x0003, # Signed 24 bit data
'PCM_32': 0x0004, # Signed 32 bit data
'PCM_U8': 0x0005, # Unsigned 8 bit data (WAV and RAW only)
'FLOAT': 0x0006, # 32 bit float data
'DOUBLE': 0x0007, # 64 bit float data
'ULAW': 0x0010, # U-Law encoded.
'ALAW': 0x0011, # A-Law encoded.
'IMA_ADPCM': 0x0012, # IMA ADPCM.
'MS_ADPCM': 0x0013, # Microsoft ADPCM.
'GSM610': 0x0020, # GSM 6.10 encoding.
'VOX_ADPCM': 0x0021, # OKI / Dialogix ADPCM
'G721_32': 0x0030, # 32kbs G721 ADPCM encoding.
'G723_24': 0x0031, # 24kbs G723 ADPCM encoding.
'G723_40': 0x0032, # 40kbs G723 ADPCM encoding.
'DWVW_12': 0x0040, # 12 bit Delta Width Variable Word encoding.
'DWVW_16': 0x0041, # 16 bit Delta Width Variable Word encoding.
'DWVW_24': 0x0042, # 24 bit Delta Width Variable Word encoding.
'DWVW_N': 0x0043, # N bit Delta Width Variable Word encoding.
'DPCM_8': 0x0050, # 8 bit differential PCM (XI only)
'DPCM_16': 0x0051, # 16 bit differential PCM (XI only)
'VORBIS': 0x0060, # Xiph Vorbis encoding.
'ALAC_16': 0x0070, # Apple Lossless Audio Codec (16 bit).
'ALAC_20': 0x0071, # Apple Lossless Audio Codec (20 bit).
'ALAC_24': 0x0072, # Apple Lossless Audio Codec (24 bit).
'ALAC_32': 0x0073, # Apple Lossless Audio Codec (32 bit).
}
_endians = {
'FILE': 0x00000000, # Default file endian-ness.
'LITTLE': 0x10000000, # Force little endian-ness.
'BIG': 0x20000000, # Force big endian-ness.
'CPU': 0x30000000, # Force CPU endian-ness.
}
# libsndfile doesn't specify default subtypes, these are somehow arbitrary:
_default_subtypes = {
'WAV': 'PCM_16',
'AIFF': 'PCM_16',
'AU': 'PCM_16',
# 'RAW': # subtype must be explicit!
'PAF': 'PCM_16',
'SVX': 'PCM_16',
'NIST': 'PCM_16',
'VOC': 'PCM_16',
'IRCAM': 'PCM_16',
'W64': 'PCM_16',
'MAT4': 'DOUBLE',
'MAT5': 'DOUBLE',
'PVF': 'PCM_16',
'XI': 'DPCM_16',
'HTK': 'PCM_16',
'SDS': 'PCM_16',
'AVR': 'PCM_16',
'WAVEX': 'PCM_16',
'SD2': 'PCM_16',
'FLAC': 'PCM_16',
'CAF': 'PCM_16',
'WVE': 'ALAW',
'OGG': 'VORBIS',
'MPC2K': 'PCM_16',
'RF64': 'PCM_16',
}
_ffi_types = {
'float64': 'double',
'float32': 'float',
'int32': 'int',
'int16': 'short'
}
try:
_snd = _ffi.dlopen('sndfile')
except OSError:
if _sys.platform == 'darwin':
_libname = 'libsndfile.dylib'
elif _sys.platform == 'win32':
from platform import architecture as _architecture
_libname = 'libsndfile' + _architecture()[0] + '.dll'
else:
raise
_snd = _ffi.dlopen(_os.path.join(
_os.path.dirname(_os.path.abspath(__file__)),
'_soundfile_data', _libname))
__libsndfile_version__ = _ffi.string(_snd.sf_version_string()).decode('utf-8', 'replace')
if __libsndfile_version__.startswith('libsndfile-'):
__libsndfile_version__ = __libsndfile_version__[len('libsndfile-'):]
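# Minimal usage sketch (file names here are hypothetical):
#   import soundfile as sf
#   data, samplerate = sf.read('existing_file.wav')   # whole file as a NumPy array
#   sf.write('copy.wav', data, samplerate)            # write it back out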
def read(file, frames=-1, start=0, stop=None, dtype='float64', always_2d=False,
fill_value=None, out=None, samplerate=None, channels=None,
format=None, subtype=None, endian=None, closefd=True):
"""Provide audio data from a sound file as NumPy array.
By default, the whole file is read from the beginning, but the
position to start reading can be specified with `start` and the
number of frames to read can be specified with `frames`.
Alternatively, a range can be specified with `start` and `stop`.
If there is less data left in the file than requested, the rest of
the frames are filled with `fill_value`.
If no `fill_value` is specified, a smaller array is returned.
Parameters
----------
file : str or int or file-like object
The file to read from. See :class:`SoundFile` for details.
frames : int, optional
The number of frames to read. If `frames` is negative, the whole
rest of the file is read. Not allowed if `stop` is given.
start : int, optional
Where to start reading. A negative value counts from the end.
stop : int, optional
The index after the last frame to be read. A negative value
counts from the end. Not allowed if `frames` is given.
dtype : {'float64', 'float32', 'int32', 'int16'}, optional
Data type of the returned array, by default ``'float64'``.
Floating point audio data is typically in the range from
``-1.0`` to ``1.0``. Integer data is in the range from
``-2**15`` to ``2**15-1`` for ``'int16'`` and from ``-2**31`` to
``2**31-1`` for ``'int32'``.
.. note:: Reading int values from a float file will *not*
scale the data to [-1.0, 1.0). If the file contains
``np.array([42.6], dtype='float32')``, you will read
``np.array([43], dtype='int32')`` for ``dtype='int32'``.
Returns
-------
audiodata : numpy.ndarray or type(out)
A two-dimensional NumPy array is returned, where the channels
are stored along the first dimension, i.e. as columns.
If the sound file has only one channel, a one-dimensional array
is returned. Use ``always_2d=True`` to return a two-dimensional
array anyway.
If `out` was specified, it is returned. If `out` has more
frames than available in the file (or if `frames` is smaller
than the length of `out`) and no `fill_value` is given, then
only a part of `out` is overwritten and a view containing all
valid frames is returned.
samplerate : int
The sample rate of the audio file.
Other Parameters
----------------
always_2d : bool, optional
By default, reading a mono sound file will return a
one-dimensional array. With ``always_2d=True``, audio data is
always returned as a two-dimensional array, even if the audio
file has only one channel.
fill_value : float, optional
If more frames are requested than available in the file, the
        rest of the output is filled with `fill_value`. If
`fill_value` is not specified, a smaller array is returned.
out : numpy.ndarray or subclass, optional
If `out` is specified, the data is written into the given array
instead of creating a new array. In this case, the arguments
`dtype` and `always_2d` are silently ignored! If `frames` is
not given, it is obtained from the length of `out`.
samplerate, channels, format, subtype, endian, closefd
See :class:`SoundFile`.
Examples
--------
>>> import soundfile as sf
>>> data, samplerate = sf.read('stereo_file.wav')
>>> data
array([[ 0.71329652, 0.06294799],
[-0.26450912, -0.38874483],
...
[ 0.67398441, -0.11516333]])
>>> samplerate
44100
"""
with SoundFile(file, 'r', samplerate, channels,
subtype, endian, format, closefd) as f:
frames = f._prepare_read(start, stop, frames)
data = f.read(frames, dtype, always_2d, fill_value, out)
return data, f.samplerate
def write(file, data, samplerate, subtype=None, endian=None, format=None,
closefd=True):
"""Write data to a sound file.
.. note:: If `file` exists, it will be truncated and overwritten!
Parameters
----------
file : str or int or file-like object
The file to write to. See :class:`SoundFile` for details.
data : array_like
The data to write. Usually two-dimensional (channels x frames),
but one-dimensional `data` can be used for mono files.
Only the data types ``'float64'``, ``'float32'``, ``'int32'``
and ``'int16'`` are supported.
.. note:: The data type of `data` does **not** select the data
type of the written file. Audio data will be
converted to the given `subtype`. Writing int values
to a float file will *not* scale the values to
[-1.0, 1.0). If you write the value ``np.array([42],
dtype='int32')``, to a ``subtype='FLOAT'`` file, the
file will then contain ``np.array([42.],
dtype='float32')``.
samplerate : int
The sample rate of the audio data.
subtype : str, optional
See :func:`default_subtype` for the default value and
:func:`available_subtypes` for all possible values.
Other Parameters
----------------
format, endian, closefd
See :class:`SoundFile`.
Examples
--------
Write 10 frames of random data to a new file:
>>> import numpy as np
>>> import soundfile as sf
>>> sf.write('stereo_file.wav', np.random.randn(10, 2), 44100, 'PCM_24')
"""
import numpy as np
data = np.asarray(data)
if data.ndim == 1:
channels = 1
else:
channels = data.shape[1]
with SoundFile(file, 'w', samplerate, channels,
subtype, endian, format, closefd) as f:
f.write(data)
def blocks(file, blocksize=None, overlap=0, frames=-1, start=0, stop=None,
dtype='float64', always_2d=False, fill_value=None, out=None,
samplerate=None, channels=None,
format=None, subtype=None, endian=None, closefd=True):
"""Return a generator for block-wise reading.
By default, iteration starts at the beginning and stops at the end
of the file. Use `start` to start at a later position and `frames`
or `stop` to stop earlier.
If you stop iterating over the generator before it's exhausted,
the sound file is not closed. This is normally not a problem
because the file is opened in read-only mode. To close the file
properly, the generator's ``close()`` method can be called.
Parameters
----------
file : str or int or file-like object
The file to read from. See :class:`SoundFile` for details.
blocksize : int
The number of frames to read per block.
Either this or `out` must be given.
overlap : int, optional
The number of frames to rewind between each block.
Yields
------
numpy.ndarray or type(out)
Blocks of audio data.
If `out` was given, and the requested frames are not an integer
multiple of the length of `out`, and no `fill_value` was given,
the last block will be a smaller view into `out`.
Other Parameters
----------------
frames, start, stop
See :func:`read`.
dtype : {'float64', 'float32', 'int32', 'int16'}, optional
See :func:`read`.
always_2d, fill_value, out
See :func:`read`.
samplerate, channels, format, subtype, endian, closefd
See :class:`SoundFile`.
Examples
--------
>>> import soundfile as sf
>>> for block in sf.blocks('stereo_file.wav', blocksize=1024):
>>> pass # do something with 'block'
"""
with SoundFile(file, 'r', samplerate, channels,
subtype, endian, format, closefd) as f:
frames = f._prepare_read(start, stop, frames)
for block in f.blocks(blocksize, overlap, frames,
dtype, always_2d, fill_value, out):
yield block
class _SoundFileInfo(object):
"""Information about a SoundFile"""
def __init__(self, file, verbose):
self.verbose = verbose
with SoundFile(file) as f:
self.name = f.name
self.samplerate = f.samplerate
self.channels = f.channels
self.frames = len(f)
self.duration = self.frames/f.samplerate
self.format = f.format
self.subtype = f.subtype
self.endian = f.endian
self.format_info = f.format_info
self.subtype_info = f.subtype_info
self.sections = f.sections
self.extra_info = f.extra_info
@property
def _duration_str(self):
hours, rest = divmod(self.duration, 3600)
minutes, seconds = divmod(rest, 60)
if hours >= 1:
duration = "{0:.0g}:{1:02.0g}:{2:05.3f} h".format(hours, minutes, seconds)
elif minutes >= 1:
duration = "{0:02.0g}:{1:05.3f} min".format(minutes, seconds)
else:
duration = "{0:.3f} s".format(seconds)
return duration
def __repr__(self):
info = "\n".join(
["{0.name}",
"samplerate: {0.samplerate} Hz",
"channels: {0.channels}",
"duration: {0._duration_str}",
"format: {0.format_info} [{0.format}]",
"subtype: {0.subtype_info} [{0.subtype}]"])
if self.verbose:
info += "\n".join(
["\nendian: {0.endian}",
"sections: {0.sections}",
"frames: {0.frames}",
'extra_info: """',
' {1}"""'])
indented_extra_info = ("\n"+" "*4).join(self.extra_info.split("\n"))
return info.format(self, indented_extra_info)
def info(file, verbose=False):
"""Returns an object with information about a SoundFile.
Parameters
----------
verbose : bool
Whether to print additional information.
"""
return _SoundFileInfo(file, verbose)
def available_formats():
"""Return a dictionary of available major formats.
Examples
--------
>>> import soundfile as sf
>>> sf.available_formats()
{'FLAC': 'FLAC (FLAC Lossless Audio Codec)',
'OGG': 'OGG (OGG Container format)',
'WAV': 'WAV (Microsoft)',
'AIFF': 'AIFF (Apple/SGI)',
...
'WAVEX': 'WAVEX (Microsoft)',
'RAW': 'RAW (header-less)',
'MAT5': 'MAT5 (GNU Octave 2.1 / Matlab 5.0)'}
"""
return dict(_available_formats_helper(_snd.SFC_GET_FORMAT_MAJOR_COUNT,
_snd.SFC_GET_FORMAT_MAJOR))
def available_subtypes(format=None):
"""Return a dictionary of available subtypes.
Parameters
----------
format : str
If given, only compatible subtypes are returned.
Examples
--------
>>> import soundfile as sf
>>> sf.available_subtypes('FLAC')
{'PCM_24': 'Signed 24 bit PCM',
'PCM_16': 'Signed 16 bit PCM',
'PCM_S8': 'Signed 8 bit PCM'}
"""
subtypes = _available_formats_helper(_snd.SFC_GET_FORMAT_SUBTYPE_COUNT,
_snd.SFC_GET_FORMAT_SUBTYPE)
return dict((subtype, name) for subtype, name in subtypes
if format is None or check_format(format, subtype))
def check_format(format, subtype=None, endian=None):
"""Check if the combination of format/subtype/endian is valid.
Examples
--------
>>> import soundfile as sf
>>> sf.check_format('WAV', 'PCM_24')
True
>>> sf.check_format('FLAC', 'VORBIS')
False
"""
try:
return bool(_format_int(format, subtype, endian))
except (ValueError, TypeError):
return False
def default_subtype(format):
"""Return the default subtype for a given format.
Examples
--------
>>> import soundfile as sf
>>> sf.default_subtype('WAV')
'PCM_16'
>>> sf.default_subtype('MAT5')
'DOUBLE'
"""
_check_format(format)
return _default_subtypes.get(format.upper())
class SoundFile(object):
"""A sound file.
    For more documentation see the __init__() docstring (which is also
    used for the online documentation at http://pysoundfile.readthedocs.org/).
"""
def __init__(self, file, mode='r', samplerate=None, channels=None,
subtype=None, endian=None, format=None, closefd=True):
"""Open a sound file.
If a file is opened with `mode` ``'r'`` (the default) or
``'r+'``, no sample rate, channels or file format need to be
given because the information is obtained from the file. An
exception is the ``'RAW'`` data format, which always requires
these data points.
File formats consist of three case-insensitive strings:
* a *major format* which is by default obtained from the
extension of the file name (if known) and which can be
forced with the format argument (e.g. ``format='WAVEX'``).
* a *subtype*, e.g. ``'PCM_24'``. Most major formats have a
default subtype which is used if no subtype is specified.
* an *endian-ness*, which doesn't have to be specified at all in
most cases.
A :class:`SoundFile` object is a *context manager*, which means
if used in a "with" statement, :meth:`.close` is automatically
called when reaching the end of the code block inside the "with"
statement.
Parameters
----------
file : str or int or file-like object
The file to open. This can be a file name, a file
descriptor or a Python file object (or a similar object with
the methods ``read()``/``readinto()``, ``write()``,
``seek()`` and ``tell()``).
mode : {'r', 'r+', 'w', 'w+', 'x', 'x+'}, optional
Open mode. Has to begin with one of these three characters:
``'r'`` for reading, ``'w'`` for writing (truncates `file`)
or ``'x'`` for writing (raises an error if `file` already
exists). Additionally, it may contain ``'+'`` to open
`file` for both reading and writing.
The character ``'b'`` for *binary mode* is implied because
all sound files have to be opened in this mode.
If `file` is a file descriptor or a file-like object,
``'w'`` doesn't truncate and ``'x'`` doesn't raise an error.
samplerate : int
The sample rate of the file. If `mode` contains ``'r'``,
this is obtained from the file (except for ``'RAW'`` files).
channels : int
The number of channels of the file.
If `mode` contains ``'r'``, this is obtained from the file
(except for ``'RAW'`` files).
subtype : str, sometimes optional
The subtype of the sound file. If `mode` contains ``'r'``,
this is obtained from the file (except for ``'RAW'``
files), if not, the default value depends on the selected
`format` (see :func:`default_subtype`).
See :func:`available_subtypes` for all possible subtypes for
a given `format`.
endian : {'FILE', 'LITTLE', 'BIG', 'CPU'}, sometimes optional
The endian-ness of the sound file. If `mode` contains
``'r'``, this is obtained from the file (except for
``'RAW'`` files), if not, the default value is ``'FILE'``,
which is correct in most cases.
format : str, sometimes optional
The major format of the sound file. If `mode` contains
``'r'``, this is obtained from the file (except for
``'RAW'`` files), if not, the default value is determined
from the file extension. See :func:`available_formats` for
all possible values.
closefd : bool, optional
Whether to close the file descriptor on :meth:`.close`. Only
applicable if the `file` argument is a file descriptor.
Examples
--------
>>> from soundfile import SoundFile
Open an existing file for reading:
>>> myfile = SoundFile('existing_file.wav')
>>> # do something with myfile
>>> myfile.close()
Create a new sound file for reading and writing using a with
statement:
>>> with SoundFile('new_file.wav', 'x+', 44100, 2) as myfile:
>>> # do something with myfile
>>> # ...
>>> assert not myfile.closed
>>> # myfile.close() is called automatically at the end
>>> assert myfile.closed
"""
self._name = file
if mode is None:
mode = getattr(file, 'mode', None)
mode_int = _check_mode(mode)
self._mode = mode
self._info = _create_info_struct(file, mode, samplerate, channels,
format, subtype, endian)
self._file = self._open(file, mode_int, closefd)
if set(mode).issuperset('r+') and self.seekable():
# Move write position to 0 (like in Python file objects)
self.seek(0)
_snd.sf_command(self._file, _snd.SFC_SET_CLIPPING, _ffi.NULL,
_snd.SF_TRUE)
name = property(lambda self: self._name)
"""The file name of the sound file."""
mode = property(lambda self: self._mode)
"""The open mode the sound file was opened with."""
samplerate = property(lambda self: self._info.samplerate)
"""The sample rate of the sound file."""
channels = property(lambda self: self._info.channels)
"""The number of channels in the sound file."""
format = property(
lambda self: _format_str(self._info.format & _snd.SF_FORMAT_TYPEMASK))
"""The major format of the sound file."""
subtype = property(
lambda self: _format_str(self._info.format & _snd.SF_FORMAT_SUBMASK))
"""The subtype of data in the the sound file."""
endian = property(
lambda self: _format_str(self._info.format & _snd.SF_FORMAT_ENDMASK))
"""The endian-ness of the data in the sound file."""
format_info = property(
lambda self: _format_info(self._info.format &
_snd.SF_FORMAT_TYPEMASK)[1])
"""A description of the major format of the sound file."""
subtype_info = property(
lambda self: _format_info(self._info.format &
_snd.SF_FORMAT_SUBMASK)[1])
"""A description of the subtype of the sound file."""
sections = property(lambda self: self._info.sections)
"""The number of sections of the sound file."""
closed = property(lambda self: self._file is None)
"""Whether the sound file is closed or not."""
_errorcode = property(lambda self: _snd.sf_error(self._file))
"""A pending sndfile error code."""
@property
def extra_info(self):
"""Retrieve the log string generated when opening the file."""
info = _ffi.new("char[]", 2**14)
_snd.sf_command(self._file, _snd.SFC_GET_LOG_INFO,
info, _ffi.sizeof(info))
return _ffi.string(info).decode('utf-8', 'replace')
# avoid confusion if something goes wrong before assigning self._file:
_file = None
def __repr__(self):
return ("SoundFile({0.name!r}, mode={0.mode!r}, "
"samplerate={0.samplerate}, channels={0.channels}, "
"format={0.format!r}, subtype={0.subtype!r}, "
"endian={0.endian!r})".format(self))
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def __setattr__(self, name, value):
"""Write text meta-data in the sound file through properties."""
if name in _str_types:
self._check_if_closed()
err = _snd.sf_set_string(self._file, _str_types[name],
value.encode())
_error_check(err)
else:
object.__setattr__(self, name, value)
def __getattr__(self, name):
"""Read text meta-data in the sound file through properties."""
if name in _str_types:
self._check_if_closed()
data = _snd.sf_get_string(self._file, _str_types[name])
return _ffi.string(data).decode('utf-8', 'replace') if data else ""
else:
raise AttributeError(
"'SoundFile' object has no attribute {0!r}".format(name))
def __len__(self):
return self._info.frames
def seekable(self):
"""Return True if the file supports seeking."""
return self._info.seekable == _snd.SF_TRUE
def seek(self, frames, whence=SEEK_SET):
"""Set the read/write position.
Parameters
----------
frames : int
The frame index or offset to seek.
whence : {SEEK_SET, SEEK_CUR, SEEK_END}, optional
By default (``whence=SEEK_SET``), `frames` are counted from
the beginning of the file.
``whence=SEEK_CUR`` seeks from the current position
(positive and negative values are allowed for `frames`).
``whence=SEEK_END`` seeks from the end (use negative value
for `frames`).
Returns
-------
int
The new absolute read/write position in frames.
Examples
--------
>>> from soundfile import SoundFile, SEEK_END
>>> myfile = SoundFile('stereo_file.wav')
Seek to the beginning of the file:
>>> myfile.seek(0)
0
Seek to the end of the file:
>>> myfile.seek(0, SEEK_END)
44100 # this is the file length
"""
self._check_if_closed()
position = _snd.sf_seek(self._file, frames, whence)
_error_check(self._errorcode)
return position
def tell(self):
"""Return the current read/write position."""
return self.seek(0, SEEK_CUR)
def read(self, frames=-1, dtype='float64', always_2d=False,
fill_value=None, out=None):
"""Read from the file and return data as NumPy array.
Reads the given number of frames in the given data format
starting at the current read/write position. This advances the
read/write position by the same number of frames.
By default, all frames from the current read/write position to
the end of the file are returned.
Use :meth:`.seek` to move the current read/write position.
Parameters
----------
frames : int, optional
The number of frames to read. If ``frames < 0``, the whole
rest of the file is read.
dtype : {'float64', 'float32', 'int32', 'int16'}, optional
Data type of the returned array, by default ``'float64'``.
Floating point audio data is typically in the range from
``-1.0`` to ``1.0``. Integer data is in the range from
``-2**15`` to ``2**15-1`` for ``'int16'`` and from
``-2**31`` to ``2**31-1`` for ``'int32'``.
.. note:: Reading int values from a float file will *not*
scale the data to [-1.0, 1.0). If the file contains
``np.array([42.6], dtype='float32')``, you will read
``np.array([43], dtype='int32')`` for
``dtype='int32'``.
Returns
-------
audiodata : numpy.ndarray or type(out)
A two-dimensional NumPy array is returned, where the
channels are stored along the first dimension, i.e. as
columns. If the sound file has only one channel, a
one-dimensional array is returned. Use ``always_2d=True``
to return a two-dimensional array anyway.
If `out` was specified, it is returned. If `out` has more
frames than available in the file (or if `frames` is
smaller than the length of `out`) and no `fill_value` is
given, then only a part of `out` is overwritten and a view
containing all valid frames is returned.
Other Parameters
----------------
always_2d : bool, optional
By default, reading a mono sound file will return a
one-dimensional array. With ``always_2d=True``, audio data
is always returned as a two-dimensional array, even if the
audio file has only one channel.
fill_value : float, optional
If more frames are requested than available in the file,
the rest of the output is filled with `fill_value`. If
`fill_value` is not specified, a smaller array is
returned.
out : numpy.ndarray or subclass, optional
If `out` is specified, the data is written into the given
array instead of creating a new array. In this case, the
arguments `dtype` and `always_2d` are silently ignored! If
`frames` is not given, it is obtained from the length of
`out`.
Examples
--------
>>> from soundfile import SoundFile
>>> myfile = SoundFile('stereo_file.wav')
Reading 3 frames from a stereo file:
>>> myfile.read(3)
array([[ 0.71329652, 0.06294799],
[-0.26450912, -0.38874483],
[ 0.67398441, -0.11516333]])
>>> myfile.close()
See Also
--------
buffer_read, .write
"""
if out is None:
frames = self._check_frames(frames, fill_value)
out = self._create_empty_array(frames, always_2d, dtype)
else:
if frames < 0 or frames > len(out):
frames = len(out)
frames = self._array_io('read', out, frames)
if len(out) > frames:
if fill_value is None:
out = out[:frames]
else:
out[frames:] = fill_value
return out
def buffer_read(self, frames=-1, ctype=None, dtype=None):
"""Read from the file and return data as buffer object.
Reads the given number of `frames` in the given data format
starting at the current read/write position. This advances the
read/write position by the same number of frames.
By default, all frames from the current read/write position to
the end of the file are returned.
Use :meth:`.seek` to move the current read/write position.
Parameters
----------
frames : int, optional
The number of frames to read. If `frames < 0`, the whole
rest of the file is read.
dtype : {'float64', 'float32', 'int32', 'int16'}
Audio data will be converted to the given data type.
Returns
-------
buffer
A buffer containing the read data.
See Also
--------
buffer_read_into, .read, buffer_write
"""
frames = self._check_frames(frames, fill_value=None)
dtype = self._ctype_is_deprecated(ctype, dtype)
ctype = self._check_dtype(dtype)
cdata = _ffi.new(ctype + '[]', frames * self.channels)
read_frames = self._cdata_io('read', cdata, ctype, frames)
assert read_frames == frames
return _ffi.buffer(cdata)
def buffer_read_into(self, buffer, ctype=None, dtype=None):
"""Read from the file into a given buffer object.
Fills the given `buffer` with frames in the given data format
starting at the current read/write position (which can be
changed with :meth:`.seek`) until the buffer is full or the end
of the file is reached. This advances the read/write position
by the number of frames that were read.
Parameters
----------
buffer : writable buffer
Audio frames from the file are written to this buffer.
dtype : {'float64', 'float32', 'int32', 'int16'}
The data type of `buffer`.
Returns
-------
int
The number of frames that were read from the file.
This can be less than the size of `buffer`.
The rest of the buffer is not filled with meaningful data.
See Also
--------
buffer_read, .read
"""
dtype = self._ctype_is_deprecated(ctype, dtype)
ctype = self._check_dtype(dtype)
cdata, frames = self._check_buffer(buffer, ctype)
frames = self._cdata_io('read', cdata, ctype, frames)
return frames
def write(self, data):
"""Write audio data from a NumPy array to the file.
Writes a number of frames at the read/write position to the
file. This also advances the read/write position by the same
number of frames and enlarges the file if necessary.
Note that writing int values to a float file will *not* scale
the values to [-1.0, 1.0). If you write the value
``np.array([42], dtype='int32')``, to a ``subtype='FLOAT'``
file, the file will then contain ``np.array([42.],
dtype='float32')``.
Parameters
----------
data : array_like
The data to write. Usually two-dimensional (channels x
frames), but one-dimensional `data` can be used for mono
files. Only the data types ``'float64'``, ``'float32'``,
``'int32'`` and ``'int16'`` are supported.
.. note:: The data type of `data` does **not** select the
data type of the written file. Audio data will be
converted to the given `subtype`. Writing int values
to a float file will *not* scale the values to
[-1.0, 1.0). If you write the value ``np.array([42],
dtype='int32')``, to a ``subtype='FLOAT'`` file, the
file will then contain ``np.array([42.],
dtype='float32')``.
Examples
--------
>>> import numpy as np
>>> from soundfile import SoundFile
>>> myfile = SoundFile('stereo_file.wav')
Write 10 frames of random data to a new file:
>>> with SoundFile('stereo_file.wav', 'w', 44100, 2, 'PCM_24') as f:
>>> f.write(np.random.randn(10, 2))
See Also
--------
buffer_write, .read
"""
import numpy as np
# no copy is made if data has already the correct memory layout:
data = np.ascontiguousarray(data)
written = self._array_io('write', data, len(data))
assert written == len(data)
self._update_len(written)
def buffer_write(self, data, ctype=None, dtype=None):
"""Write audio data from a buffer/bytes object to the file.
Writes the contents of `data` to the file at the current
read/write position.
This also advances the read/write position by the number of
frames that were written and enlarges the file if necessary.
Parameters
----------
data : buffer or bytes
A buffer or bytes object containing the audio data to be
written.
dtype : {'float64', 'float32', 'int32', 'int16'}
The data type of the audio data stored in `data`.
See Also
--------
.write, buffer_read
"""
dtype = self._ctype_is_deprecated(ctype, dtype)
ctype = self._check_dtype(dtype)
cdata, frames = self._check_buffer(data, ctype)
written = self._cdata_io('write', cdata, ctype, frames)
assert written == frames
self._update_len(written)
def blocks(self, blocksize=None, overlap=0, frames=-1, dtype='float64',
always_2d=False, fill_value=None, out=None):
"""Return a generator for block-wise reading.
By default, the generator yields blocks of the given
`blocksize` (using a given `overlap`) until the end of the file
is reached; `frames` can be used to stop earlier.
Parameters
----------
blocksize : int
The number of frames to read per block. Either this or `out`
must be given.
overlap : int, optional
The number of frames to rewind between each block.
frames : int, optional
The number of frames to read.
If ``frames < 0``, the file is read until the end.
dtype : {'float64', 'float32', 'int32', 'int16'}, optional
See :meth:`.read`.
Yields
------
numpy.ndarray or type(out)
Blocks of audio data.
If `out` was given, and the requested frames are not an
integer multiple of the length of `out`, and no
`fill_value` was given, the last block will be a smaller
view into `out`.
Other Parameters
----------------
always_2d, fill_value, out
See :meth:`.read`.
Examples
--------
>>> from soundfile import SoundFile
>>> with SoundFile('stereo_file.wav') as f:
>>> for block in f.blocks(blocksize=1024):
>>> pass # do something with 'block'
"""
if 'r' not in self.mode and '+' not in self.mode:
raise RuntimeError("blocks() is not allowed in write-only mode")
if overlap != 0 and not self.seekable():
raise ValueError("overlap is only allowed for seekable files")
if out is None:
if blocksize is None:
raise TypeError("One of {blocksize, out} must be specified")
else:
if blocksize is not None:
raise TypeError(
"Only one of {blocksize, out} may be specified")
blocksize = len(out)
frames = self._check_frames(frames, fill_value)
while frames > 0:
if frames < blocksize:
if fill_value is not None and out is None:
out = self._create_empty_array(blocksize, always_2d, dtype)
blocksize = frames
block = self.read(blocksize, dtype, always_2d, fill_value, out)
frames -= blocksize
if frames > 0 and self.seekable():
self.seek(-overlap, SEEK_CUR)
frames += overlap
yield block
def truncate(self, frames=None):
"""Truncate the file to a given number of frames.
After this command, the read/write position will be at the new
end of the file.
Parameters
----------
frames : int, optional
Only the data before `frames` is kept, the rest is deleted.
If not specified, the current read/write position is used.
"""
if frames is None:
frames = self.tell()
err = _snd.sf_command(self._file, _snd.SFC_FILE_TRUNCATE,
_ffi.new("sf_count_t*", frames),
_ffi.sizeof("sf_count_t"))
if err:
raise RuntimeError("Error truncating the file")
self._info.frames = frames
def flush(self):
"""Write unwritten data to the file system.
Data written with :meth:`.write` is not immediately written to
the file system but buffered in memory to be written at a later
time. Calling :meth:`.flush` makes sure that all changes are
actually written to the file system.
This has no effect on files opened in read-only mode.
"""
self._check_if_closed()
_snd.sf_write_sync(self._file)
def close(self):
"""Close the file. Can be called multiple times."""
if not self.closed:
# be sure to flush data to disk before closing the file
self.flush()
err = _snd.sf_close(self._file)
self._file = None
_error_check(err)
def _open(self, file, mode_int, closefd):
"""Call the appropriate sf_open*() function from libsndfile."""
if isinstance(file, (_unicode, bytes)):
if _os.path.isfile(file):
if 'x' in self.mode:
raise OSError("File exists: {0!r}".format(self.name))
elif set(self.mode).issuperset('w+'):
# truncate the file, because SFM_RDWR doesn't:
_os.close(_os.open(file, _os.O_WRONLY | _os.O_TRUNC))
openfunction = _snd.sf_open
if isinstance(file, _unicode):
if _sys.platform == 'win32':
openfunction = _snd.sf_wchar_open
else:
file = file.encode(_sys.getfilesystemencoding())
file_ptr = openfunction(file, mode_int, self._info)
elif isinstance(file, int):
file_ptr = _snd.sf_open_fd(file, mode_int, self._info, closefd)
elif _has_virtual_io_attrs(file, mode_int):
file_ptr = _snd.sf_open_virtual(self._init_virtual_io(file),
mode_int, self._info, _ffi.NULL)
else:
raise TypeError("Invalid file: {0!r}".format(self.name))
_error_check(_snd.sf_error(file_ptr),
"Error opening {0!r}: ".format(self.name))
if mode_int == _snd.SFM_WRITE:
# Due to a bug in libsndfile version <= 1.0.25, frames != 0
# when opening a named pipe in SFM_WRITE mode.
# See http://github.com/erikd/libsndfile/issues/77.
self._info.frames = 0
# This is not necessary for "normal" files (because
# frames == 0 in this case), but it doesn't hurt, either.
return file_ptr
def _init_virtual_io(self, file):
"""Initialize callback functions for sf_open_virtual()."""
@_ffi.callback("sf_vio_get_filelen")
def vio_get_filelen(user_data):
curr = file.tell()
file.seek(0, SEEK_END)
size = file.tell()
file.seek(curr, SEEK_SET)
return size
@_ffi.callback("sf_vio_seek")
def vio_seek(offset, whence, user_data):
file.seek(offset, whence)
return file.tell()
@_ffi.callback("sf_vio_read")
def vio_read(ptr, count, user_data):
# first try readinto(), if not available fall back to read()
try:
buf = _ffi.buffer(ptr, count)
data_read = file.readinto(buf)
except AttributeError:
data = file.read(count)
data_read = len(data)
buf = _ffi.buffer(ptr, data_read)
buf[0:data_read] = data
return data_read
@_ffi.callback("sf_vio_write")
def vio_write(ptr, count, user_data):
buf = _ffi.buffer(ptr, count)
data = buf[:]
written = file.write(data)
# write() returns None for file objects in Python <= 2.7:
if written is None:
written = count
return written
@_ffi.callback("sf_vio_tell")
def vio_tell(user_data):
return file.tell()
# Note: the callback functions must be kept alive!
self._virtual_io = {'get_filelen': vio_get_filelen,
'seek': vio_seek,
'read': vio_read,
'write': vio_write,
'tell': vio_tell}
return _ffi.new("SF_VIRTUAL_IO*", self._virtual_io)
def _getAttributeNames(self):
"""Return all attributes used in __setattr__ and __getattr__.
This is useful for auto-completion (e.g. IPython).
"""
return _str_types
def _check_if_closed(self):
"""Check if the file is closed and raise an error if it is.
This should be used in every method that uses self._file.
"""
if self.closed:
raise RuntimeError("I/O operation on closed file")
def _check_frames(self, frames, fill_value):
"""Reduce frames to no more than are available in the file."""
if self.seekable():
remaining_frames = len(self) - self.tell()
if frames < 0 or (frames > remaining_frames and
fill_value is None):
frames = remaining_frames
elif frames < 0:
raise ValueError("frames must be specified for non-seekable files")
return frames
def _check_buffer(self, data, ctype):
"""Convert buffer to cdata and check for valid size."""
assert ctype in _ffi_types.values()
if not isinstance(data, bytes):
data = _ffi.from_buffer(data)
frames, remainder = divmod(len(data),
self.channels * _ffi.sizeof(ctype))
if remainder:
raise ValueError("Data size must be a multiple of frame size")
return data, frames
def _create_empty_array(self, frames, always_2d, dtype):
"""Create an empty array with appropriate shape."""
import numpy as np
if always_2d or self.channels > 1:
shape = frames, self.channels
else:
shape = frames,
return np.empty(shape, dtype, order='C')
def _check_dtype(self, dtype):
"""Check if dtype string is valid and return ctype string."""
try:
return _ffi_types[dtype]
except KeyError:
raise ValueError("dtype must be one of {0!r}".format(
sorted(_ffi_types.keys())))
def _ctype_is_deprecated(self, ctype, dtype):
"""Show warning if ctype is used instead of dtype.
At some point, ctype arguments shall be removed and the
corresponding dtype arguments shall lose their default value.
"""
if ctype is not None:
from warnings import warn
warn('ctype is deprecated; use dtype instead', Warning)
if dtype is not None:
raise TypeError('Use dtype instead of ctype')
for k, v in _ffi_types.items():
if v == ctype:
return k
return dtype
def _array_io(self, action, array, frames):
"""Check array and call low-level IO function."""
if (array.ndim not in (1, 2) or
array.ndim == 1 and self.channels != 1 or
array.ndim == 2 and array.shape[1] != self.channels):
raise ValueError("Invalid shape: {0!r}".format(array.shape))
if not array.flags.c_contiguous:
raise ValueError("Data must be C-contiguous")
ctype = self._check_dtype(array.dtype.name)
assert array.dtype.itemsize == _ffi.sizeof(ctype)
cdata = _ffi.cast(ctype + '*', array.__array_interface__['data'][0])
return self._cdata_io(action, cdata, ctype, frames)
def _cdata_io(self, action, data, ctype, frames):
"""Call one of libsndfile's read/write functions."""
assert ctype in _ffi_types.values()
self._check_if_closed()
if self.seekable():
curr = self.tell()
func = getattr(_snd, 'sf_' + action + 'f_' + ctype)
frames = func(self._file, data, frames)
_error_check(self._errorcode)
if self.seekable():
self.seek(curr + frames, SEEK_SET) # Update read & write position
return frames
def _update_len(self, written):
"""Update len(self) after writing."""
if self.seekable():
curr = self.tell()
self._info.frames = self.seek(0, SEEK_END)
self.seek(curr, SEEK_SET)
else:
self._info.frames += written
def _prepare_read(self, start, stop, frames):
"""Seek to start frame and calculate length."""
if start != 0 and not self.seekable():
raise ValueError("start is only allowed for seekable files")
if frames >= 0 and stop is not None:
raise TypeError("Only one of {frames, stop} may be used")
start, stop, _ = slice(start, stop).indices(len(self))
if stop < start:
stop = start
if frames < 0:
frames = stop - start
if self.seekable():
self.seek(start, SEEK_SET)
return frames
def _error_check(err, prefix=""):
"""Pretty-print a numerical error code if there is an error."""
if err != 0:
err_str = _snd.sf_error_number(err)
raise RuntimeError(prefix + _ffi.string(err_str).decode('utf-8', 'replace'))
def _format_int(format, subtype, endian):
"""Return numeric ID for given format|subtype|endian combo."""
result = _check_format(format)
if subtype is None:
subtype = default_subtype(format)
if subtype is None:
raise TypeError(
"No default subtype for major format {0!r}".format(format))
elif not isinstance(subtype, (_unicode, str)):
raise TypeError("Invalid subtype: {0!r}".format(subtype))
try:
result |= _subtypes[subtype.upper()]
except KeyError:
raise ValueError("Unknown subtype: {0!r}".format(subtype))
if endian is None:
endian = 'FILE'
elif not isinstance(endian, (_unicode, str)):
raise TypeError("Invalid endian-ness: {0!r}".format(endian))
try:
result |= _endians[endian.upper()]
except KeyError:
raise ValueError("Unknown endian-ness: {0!r}".format(endian))
info = _ffi.new("SF_INFO*")
info.format = result
info.channels = 1
if _snd.sf_format_check(info) == _snd.SF_FALSE:
raise ValueError(
"Invalid combination of format, subtype and endian")
return result
def _check_mode(mode):
"""Check if mode is valid and return its integer representation."""
if not isinstance(mode, (_unicode, str)):
raise TypeError("Invalid mode: {0!r}".format(mode))
mode_set = set(mode)
if mode_set.difference('xrwb+') or len(mode) > len(mode_set):
raise ValueError("Invalid mode: {0!r}".format(mode))
if len(mode_set.intersection('xrw')) != 1:
raise ValueError("mode must contain exactly one of 'xrw'")
if '+' in mode_set:
mode_int = _snd.SFM_RDWR
elif 'r' in mode_set:
mode_int = _snd.SFM_READ
else:
mode_int = _snd.SFM_WRITE
return mode_int
def _create_info_struct(file, mode, samplerate, channels,
format, subtype, endian):
"""Check arguments and create SF_INFO struct."""
original_format = format
if format is None:
format = _get_format_from_filename(file, mode)
assert isinstance(format, (_unicode, str))
else:
_check_format(format)
info = _ffi.new("SF_INFO*")
if 'r' not in mode or format.upper() == 'RAW':
if samplerate is None:
raise TypeError("samplerate must be specified")
info.samplerate = samplerate
if channels is None:
raise TypeError("channels must be specified")
info.channels = channels
info.format = _format_int(format, subtype, endian)
else:
if any(arg is not None for arg in (
samplerate, channels, original_format, subtype, endian)):
raise TypeError("Not allowed for existing files (except 'RAW'): "
"samplerate, channels, format, subtype, endian")
return info
def _get_format_from_filename(file, mode):
"""Return a format string obtained from file (or file.name).
If file already exists (= read mode), an empty string is returned on
error. If not, an exception is raised.
The return type will always be str or unicode (even if
file/file.name is a bytes object).
"""
format = ''
file = getattr(file, 'name', file)
try:
# This raises an exception if file is not a (Unicode/byte) string:
format = _os.path.splitext(file)[-1][1:]
# Convert bytes to unicode (raises AttributeError on Python 3 str):
format = format.decode('utf-8', 'replace')
except Exception:
pass
if format.upper() not in _formats and 'r' not in mode:
raise TypeError("No format specified and unable to get format from "
"file extension: {0!r}".format(file))
return format
def _format_str(format_int):
"""Return the string representation of a given numeric format."""
for dictionary in _formats, _subtypes, _endians:
for k, v in dictionary.items():
if v == format_int:
return k
else:
return 'n/a'
def _format_info(format_int, format_flag=_snd.SFC_GET_FORMAT_INFO):
"""Return the ID and short description of a given format."""
format_info = _ffi.new("SF_FORMAT_INFO*")
format_info.format = format_int
_snd.sf_command(_ffi.NULL, format_flag, format_info,
_ffi.sizeof("SF_FORMAT_INFO"))
name = format_info.name
return (_format_str(format_info.format),
_ffi.string(name).decode('utf-8', 'replace') if name else "")
def _available_formats_helper(count_flag, format_flag):
"""Helper for available_formats() and available_subtypes()."""
count = _ffi.new("int*")
_snd.sf_command(_ffi.NULL, count_flag, count, _ffi.sizeof("int"))
for format_int in range(count[0]):
yield _format_info(format_int, format_flag)
def _check_format(format_str):
"""Check if `format_str` is valid and return format ID."""
if not isinstance(format_str, (_unicode, str)):
raise TypeError("Invalid format: {0!r}".format(format_str))
try:
format_int = _formats[format_str.upper()]
except KeyError:
raise ValueError("Unknown format: {0!r}".format(format_str))
return format_int
def _has_virtual_io_attrs(file, mode_int):
"""Check if file has all the necessary attributes for virtual IO."""
readonly = mode_int == _snd.SFM_READ
writeonly = mode_int == _snd.SFM_WRITE
return all([
hasattr(file, 'seek'),
hasattr(file, 'tell'),
hasattr(file, 'write') or readonly,
hasattr(file, 'read') or hasattr(file, 'readinto') or writeonly,
])
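# Minimal usage sketch for the virtual I/O path above: any object providing
# seek()/tell()/read()/write() can be passed in place of a file name. This
# assumes NumPy is installed and that libsndfile was built with WAV support.
def _example_virtual_io_roundtrip():
    import io
    import numpy as np
    buf = io.BytesIO()
    # The format must be given explicitly because a BytesIO has no file name:
    with SoundFile(buf, 'w', samplerate=44100, channels=1,
                   format='WAV', subtype='PCM_16') as f:
        f.write(np.zeros(1024))
    buf.seek(0)
    with SoundFile(buf) as f:
        data = f.read()
    return data.shape  # (1024,) for this mono example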
|
frodo821/RePRo
|
python/modules/soundfile.py
|
Python
|
gpl-3.0
| 60,956
|
# -*- coding: utf-8 -*-
from collections import defaultdict
from functools import partial
from itertools import combinations, chain
import json
import math
import networkx as nx
from rest_framework.decorators import api_view
from typing import Any, DefaultDict, Dict, Iterator, List, Optional, Set, Tuple, Union
from django.db import connection
from django.http import HttpRequest, JsonResponse
from catmaid.models import UserRole
from catmaid.control.authentication import requires_user_role
from catmaid.control.common import get_request_list
from catmaid.control.skeleton import _neuronnames
def _next_circle(skeleton_set:Set, relations, cursor, allowed_connector_ids=None) -> DefaultDict:
""" Return a dictionary of skeleton IDs in the skeleton_set vs a dictionary of connected skeletons vs how many connections."""
cursor.execute(f'''
SELECT tc1.skeleton_id, tc1.relation_id, tc2.skeleton_id
FROM treenode_connector tc1,
treenode_connector tc2
WHERE tc1.skeleton_id = ANY(%(skeleton_ids)s::bigint[])
AND tc1.connector_id = tc2.connector_id
AND tc1.skeleton_id != tc2.skeleton_id
AND tc1.relation_id != tc2.relation_id
AND tc1.relation_id = ANY(%(allowed_relation_ids)s::bigint[])
AND tc2.relation_id = ANY(%(allowed_relation_ids)s::bigint[])
{'AND tc1.connector_id = ANY(%(allowed_c_ids)s::bigint[])' if allowed_connector_ids else ''}
''', {
'skeleton_ids': list(skeleton_set),
'allowed_relation_ids': [relations['presynaptic_to'], relations['postsynaptic_to']],
'allowed_c_ids': allowed_connector_ids,
})
connections:DefaultDict = defaultdict(partial(defaultdict, partial(defaultdict, int)))
for row in cursor.fetchall():
connections[row[0]][row[1]][row[2]] += 1
return connections
def _relations(cursor, project_id:Union[int,str]) -> Dict:
cursor.execute("SELECT relation_name, id FROM relation WHERE project_id = %s AND (relation_name = 'presynaptic_to' OR relation_name = 'postsynaptic_to')" % int(project_id))
return dict(cursor.fetchall())
def _clean_mins(request:HttpRequest, cursor, project_id:Union[int,str]) -> Tuple[Dict, Any]:
min_pre: float = int(request.POST.get('min_pre', -1))
min_post: float = int(request.POST.get('min_post', -1))
if -1 == min_pre and -1 == min_post:
raise Exception("Can't grow: not retrieving any pre or post.")
if -1 == min_pre:
min_pre = float('inf')
if -1 == min_post:
min_post = float('inf')
relations = _relations(cursor, project_id)
mins = {}
mins[relations['presynaptic_to']] = min_post # inverted: all postsynaptic to the set
mins[relations['postsynaptic_to']] = min_pre # inverted: all presynaptic to the set
return mins, relations
@api_view(['POST'])
@requires_user_role(UserRole.Browse)
def circles_of_hell(request:HttpRequest, project_id) -> JsonResponse:
""" Given a set of one or more skeleton IDs, find all skeletons that connect
them (n_circles=1), or that connect to others that connect them (n_circles=2), etc.
Returns a list of unique skeleton IDs that exclude the ones provided as argument.
---
parameters:
- name: skeleton_ids[]
description: IDs of the skeletons to start expanding from.
required: true
type: array
items:
type: integer
paramType: form
- name: n_circles
description: (Optional) The numbers of recursive expansions.
required: false
defaultValue: 1
type: integer
paramType: form
- name: allowed_connector_ids[]
description: (Optional) IDs of connector nodes that are allowed to be used for expansion.
required: false
type: array
items:
type: integer
paramType: form
"""
n_circles = int(request.POST.get('n_circles', 1))
if n_circles < 1:
raise Exception("Requires at least one circle.")
first_circle = set(get_request_list(request.POST, 'skeleton_ids', map_fn=int))
if not first_circle:
raise Exception("No skeletons were provided.")
cursor = connection.cursor()
mins, relations = _clean_mins(request, cursor, int(project_id))
allowed_connector_ids = get_request_list(request.POST, 'allowed_connector_ids', None)
current_circle = first_circle
all_circles = first_circle
while n_circles > 0 and current_circle:
n_circles -= 1
connections = _next_circle(current_circle, relations, cursor, allowed_connector_ids)
next_circle = set(skID for c in connections.values() \
for relationID, cs in c.items() \
for skID, count in cs.items() if count >= mins[relationID])
current_circle = next_circle - all_circles
all_circles = all_circles.union(next_circle)
skeleton_ids = tuple(all_circles - first_circle)
return JsonResponse([skeleton_ids, _neuronnames(skeleton_ids, project_id)], safe=False)
@requires_user_role(UserRole.Browse)
def find_directed_paths(request:HttpRequest, project_id=None) -> JsonResponse:
""" Given a set of two or more skeleton IDs, find directed paths of connected neurons between them, for a maximum inner path length as given (i.e. origin and destination not counted). A directed path means that all edges are of the same kind, e.g. presynaptic_to. """
sources = set(int(v) for k,v in request.POST.items() if k.startswith('sources['))
targets = set(int(v) for k,v in request.POST.items() if k.startswith('targets['))
if len(sources) < 1 or len(targets) < 1:
raise Exception('Need at least one skeleton ID for both sources and targets to find directed paths!')
path_length = int(request.POST.get('path_length', 2))
cursor = connection.cursor()
min:Union[int,float] = int(request.POST.get('min_synapses', -1))
if -1 == min:
min = float('inf')
relations = _relations(cursor, project_id)
def next_level(skids, rel1, rel2):
cursor.execute('''
SELECT tc1.skeleton_id, tc2.skeleton_id
FROM treenode_connector tc1,
treenode_connector tc2
WHERE tc1.skeleton_id in (%s)
AND tc1.connector_id = tc2.connector_id
AND tc1.skeleton_id != tc2.skeleton_id
AND tc1.relation_id = %s
AND tc2.relation_id = %s
GROUP BY tc1.skeleton_id, tc2.skeleton_id
HAVING count(*) >= %s
''' % (','.join(str(skid) for skid in skids),
rel1,
rel2,
min))
return cursor.fetchall()
# bidirectional search
i = 0
middle = path_length / 2
s1 = sources
t1 = targets
graph = nx.DiGraph()
pre = relations['presynaptic_to']
post = relations['postsynaptic_to']
while i <= middle:
if 0 == len(s1):
break
s2 = set()
for pre_skid, post_skid in next_level(s1, pre, post):
graph.add_edge(pre_skid, post_skid)
if post_skid not in s1:
s2.add(post_skid)
s1 = s2
i += 1
if i < middle and len(t1) > 0:
t2 = set()
for post_skid, pre_skid in next_level(t1, post, pre):
graph.add_edge(pre_skid, post_skid)
if pre_skid not in t1:
t2.add(pre_skid)
t1 = t2
# Nodes will not be in the graph if they didn't have further connections,
# as happens, for example, for placeholder skeletons at unmerged postsynaptic sites.
all_paths = []
for source in sources:
if graph.has_node(source):
for target in targets:
if graph.has_node(target):
# The cutoff is the maximum number of hops, not the number of vertices in the path, hence -1:
for path in nx.all_simple_paths(graph, source, target, cutoff=(path_length -1)):
all_paths.append(path)
return JsonResponse(all_paths, safe=False)
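# Toy sketch (no database involved) of how the `cutoff` argument used above
# counts hops rather than vertices: a path a -> b -> c has two edges, so it
# is only found with cutoff >= 2, i.e. a path_length of 3 vertices.
def _example_simple_paths_cutoff():
    g = nx.DiGraph()
    g.add_edges_from([('a', 'b'), ('b', 'c'), ('a', 'c')])
    one_hop = list(nx.all_simple_paths(g, 'a', 'c', cutoff=1))
    two_hops = list(nx.all_simple_paths(g, 'a', 'c', cutoff=2))
    # one_hop contains only ['a', 'c']; two_hops also contains ['a', 'b', 'c'].
    return one_hop, two_hops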
@requires_user_role(UserRole.Browse)
def find_directed_path_skeletons(request:HttpRequest, project_id=None) -> JsonResponse:
""" Given a set of two or more skeleton Ids, find directed paths of connected neurons between them, for a maximum inner path length as given (i.e. origin and destination not counted), and return the nodes of those paths, including the provided source and target nodes.
Conceptually identical to find_directed_paths but far more performant. """
origin_skids = set(int(v) for k,v in request.POST.items() if k.startswith('sources['))
target_skids = set(int(v) for k,v in request.POST.items() if k.startswith('targets['))
if len(origin_skids) < 1 or len(target_skids) < 1:
raise Exception('Need at least one skeleton ID for both sources and targets to find directed paths!')
max_n_hops = int(request.POST.get('n_hops', 2))
min_synapses:Union[int,float] = int(request.POST.get('min_synapses', -1))
if -1 == min_synapses:
min_synapses = float('inf')
cursor = connection.cursor()
relations = _relations(cursor, project_id)
def fetch_adjacent(cursor, skids, relation1, relation2, min_synapses) -> Iterator[Any]:
""" Return the list of skids one hop away from the given skids. """
cursor.execute("""
SELECT tc2.skeleton_id
FROM treenode_connector tc1,
treenode_connector tc2
WHERE tc1.project_id = %(project_id)s
AND tc1.skeleton_id = ANY (%(skeleton_ids)s::bigint[])
AND tc1.connector_id = tc2.connector_id
AND tc1.skeleton_id != tc2.skeleton_id
AND tc1.relation_id = %(relation_1)s
AND tc2.relation_id = %(relation_2)s
GROUP BY tc1.skeleton_id, tc2.skeleton_id
HAVING count(*) >= %(min_synapses)s
""", {
'project_id': int(project_id),
'skeleton_ids': [int(skid) for skid in skids],
'relation_1': int(relation1),
'relation_2': int(relation2),
'min_synapses': float(min_synapses),
})
return chain.from_iterable(cursor.fetchall())
pre = relations['presynaptic_to']
post = relations['postsynaptic_to']
def fetch_fronts(cursor, skids, max_n_hops, relation1, relation2, min_synapses) -> List[Set]:
fronts = [set(skids)]
for n_hops in range(1, max_n_hops):
adjacent = set(fetch_adjacent(cursor, fronts[-1], relation1, relation2, min_synapses))
for front in fronts:
adjacent -= front
if len(adjacent) > 0:
fronts.append(adjacent)
else:
break
# Fill in the rest
while len(fronts) < max_n_hops:
fronts.append(set())
return fronts
origin_fronts = fetch_fronts(cursor, origin_skids, max_n_hops, pre, post, min_synapses)
target_fronts = fetch_fronts(cursor, target_skids, max_n_hops, post, pre, min_synapses)
skeleton_ids = origin_fronts[0].union(target_fronts[0])
for i in range(1, max_n_hops):
skeleton_ids = skeleton_ids.union(origin_fronts[i].intersection(target_fronts[max_n_hops -i]))
return JsonResponse(tuple(skeleton_ids), safe=False)
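# Toy sketch (no database involved) of the breadth-first "fronts" built by
# fetch_fronts() above: fronts[i] holds the nodes first reached after exactly
# i hops from the seed set, here following a plain adjacency mapping.
def _example_fronts(adjacency, seeds, max_n_hops):
    fronts = [set(seeds)]
    for _ in range(1, max_n_hops):
        adjacent = set(n for node in fronts[-1]
                       for n in adjacency.get(node, ()))
        for front in fronts:
            adjacent -= front  # keep only nodes not seen in earlier fronts
        fronts.append(adjacent)
    return fronts
# For example, _example_fronts({1: [2], 2: [3]}, [1], 3) returns
# [{1}, {2}, {3}]; origin and target fronts built this way are intersected
# above to keep only skeletons lying on sufficiently short directed paths.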
|
tomka/CATMAID
|
django/applications/catmaid/control/circles.py
|
Python
|
gpl-3.0
| 11,322
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class QueryInterSmsIsoInfoRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Dysmsapi', '2017-05-25', 'QueryInterSmsIsoInfo')
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_CountryName(self):
return self.get_query_params().get('CountryName')
def set_CountryName(self,CountryName):
self.add_query_param('CountryName',CountryName)
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
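# Hypothetical usage sketch: requests of this kind are normally sent through
# an AcsClient from aliyunsdkcore. The credentials and region below are
# placeholders, not real values.
def _example_query_iso_info(country_name,
                            access_key_id='<access-key-id>',
                            access_key_secret='<access-key-secret>',
                            region_id='cn-hangzhou'):
    from aliyunsdkcore.client import AcsClient
    client = AcsClient(access_key_id, access_key_secret, region_id)
    request = QueryInterSmsIsoInfoRequest()
    request.set_CountryName(country_name)
    return client.do_action_with_exception(request)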
|
stormtrader/gw_trade
|
dysms_python/build/lib/aliyunsdkdysmsapi/request/v20170525/QueryInterSmsIsoInfoRequest.py
|
Python
|
gpl-3.0
| 1,725
|
#!/usr/bin/env python3
#
# Copyright 2012-2013 BrewPi/Elco Jacobs.
# Copyright 2015 Andrew Errington
#
# This file is part of BrewPi.
#
# BrewPi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BrewPi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BrewPi. If not, see <http://www.gnu.org/licenses/>.
#
import displayLCD as display
from constants import *
# Not sure where to put this at the moment. It's a utility
# to get our IP address.
# http://stackoverflow.com/a/24196955
import socket
import fcntl
import struct
def get_ip_address(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(), 0x8915, # SIOCGIFADDR
struct.pack(b'256s', bytes(ifname[:15], 'utf-8'))
)[20:24])
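# Usage sketch for get_ip_address(): the ioctl raises OSError when an
# interface is missing or has no address, so callers wrap it in try/except
# (as showStartupPage() does below). The interface names here are examples.
def _example_first_ip_address(interfaces=('wlan0', 'eth0')):
    for ifname in interfaces:
        try:
            return get_ip_address(ifname)
        except OSError:
            continue
    return ''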
def init():
# turn on the buzzer for a short time
# initialise the hardware
# initialise the rotary encoder
pass
def showStartupPage(port_name):
LCD.clear()
LCD.println("Fuscus Controller")
LCD.println(" version 0.1.0")
LCD.println(port_name)
try:
eth0_addr = get_ip_address('eth0')
except:
eth0_addr = ''
try:
wlan0_addr = get_ip_address('wlan0')
except:
wlan0_addr = ''
if wlan0_addr != '':
LCD.println(wlan0_addr)
elif eth0_addr != '':
LCD.println(eth0_addr)
else:
LCD.println('No network')
LCD.backlight(BACKLIGHT_BRIGHT_LEVEL)
LCD.update()
# Return value is how long to leave startup page (in seconds)
return 5
def showControllerPage():
LCD.clear()
display.printStationaryText()
display.printState()
LCD.update()
def update():
# update the lcd for the chamber being displayed
display.printState()
display.printAllTemperatures()
display.printMode()
# display.updateBacklight();
LCD.update()
def ticks():
# Do UI housekeeping
if (encoder.pushed):
# rotaryEncoder.resetPushed();
while encoder.pushed: # Wait for button to be released
pass
menu.pickSettingToChange()
|
andrewerrington/fuscus
|
fuscus/ui.py
|
Python
|
gpl-3.0
| 2,521
|
"""
Test dedup subcommand
"""
import filecmp
import logging
import sys
from os import path
from bioy_pkg import main
from __init__ import TestBase, TestCaseSuppressOutput, datadir as datadir
log = logging.getLogger(__name__)
class TestDedup(TestBase, TestCaseSuppressOutput):
def main(self, arguments):
main(['dedup'] + arguments)
log_info = 'bioy dedup {}'
datadir = path.join(datadir, 'dedup')
fa_in = path.join(datadir, 'seqs.fasta.bz2')
split_info = path.join(datadir, 'split_info.csv.bz2')
def test01(self):
"""
Test basic usage with no split_info
"""
datadir = self.datadir
outdir = self.mkoutdir()
this_test = sys._getframe().f_code.co_name
dedup_out = path.join(outdir, 'dedup.fasta.bz2')
reference = path.join(datadir, this_test, 'dedup.fasta.bz2')
args = ['--out', dedup_out, self.fa_in]
log.info(self.log_info.format(' '.join(map(str, args))))
self.main(args)
self.assertTrue(filecmp.cmp(reference, dedup_out))
def test02(self):
"""
Test primary and secondary group species and tax_id
"""
datadir = self.datadir
outdir = self.mkoutdir()
this_test = sys._getframe().f_code.co_name
dedup_out = path.join(outdir, 'dedup.fasta.bz2')
reference = path.join(datadir, this_test, 'dedup.fasta.bz2')
args = ['--primary-group', 'species', '--secondary-group', 'tax_id',
'--split-info', self.split_info, '--out', dedup_out, self.fa_in]
log.info(self.log_info.format(' '.join(map(str, args))))
self.main(args)
self.assertTrue(filecmp.cmp(reference, dedup_out))
def test03(self):
"""
Test primary and secondary group phylum and tax_id
"""
datadir = self.datadir
outdir = self.mkoutdir()
this_test = sys._getframe().f_code.co_name
dedup_out = path.join(outdir, 'dedup.fasta.bz2')
reference = path.join(datadir, this_test, 'dedup.fasta.bz2')
args = ['--primary-group', 'phylum', '--secondary-group', 'tax_id',
'--split-info', self.split_info, '--out', dedup_out, self.fa_in]
log.info(self.log_info.format(' '.join(map(str, args))))
self.main(args)
self.assertTrue(filecmp.cmp(reference, dedup_out))
def test04(self):
"""
Test weights out with primary and secondary group species and tax_id
"""
datadir = self.datadir
outdir = self.mkoutdir()
this_test = sys._getframe().f_code.co_name
dedup_out = path.join(outdir, 'dedup.fasta.bz2')
weights_out = path.join(outdir, 'weights.fasta.bz2')
dedup_ref = path.join(datadir, this_test, 'dedup.fasta.bz2')
weights_ref = path.join(datadir, this_test, 'weights.fasta.bz2')
args = ['--primary-group', 'species', '--secondary-group', 'tax_id',
'--out-weights', weights_out,
'--split-info', self.split_info, '--out', dedup_out,
self.fa_in]
log.info(self.log_info.format(' '.join(map(str, args))))
self.main(args)
self.assertTrue(filecmp.cmp(dedup_ref, dedup_out))
self.assertTrue(filecmp.cmp(weights_ref, weights_out))
def test05(self):
"""
Test weights out with no split-info and primary and secondary group species and tax_id
"""
datadir = self.datadir
outdir = self.mkoutdir()
this_test = sys._getframe().f_code.co_name
dedup_out = path.join(outdir, 'dedup.fasta.bz2')
weights_out = path.join(outdir, 'weights.fasta.bz2')
dedup_ref = path.join(datadir, this_test, 'dedup.fasta.bz2')
weights_ref = path.join(datadir, this_test, 'weights.fasta.bz2')
args = ['--primary-group', 'species', '--secondary-group', 'tax_id',
'--out-weights', weights_out, '--out', dedup_out, self.fa_in]
log.info(self.log_info.format(' '.join(map(str, args))))
self.main(args)
self.assertTrue(filecmp.cmp(dedup_ref, dedup_out))
self.assertTrue(filecmp.cmp(weights_ref, weights_out))
def test06(self):
"""
Test map out with no split-info and primary and secondary group species and tax_id
"""
datadir = self.datadir
outdir = self.mkoutdir()
this_test = sys._getframe().f_code.co_name
dedup_out = path.join(outdir, 'dedup.fasta.bz2')
map_out = path.join(outdir, 'map.fasta.bz2')
dedup_ref = path.join(datadir, this_test, 'dedup.fasta.bz2')
map_ref = path.join(datadir, this_test, 'map.fasta.bz2')
args = ['--primary-group', 'species', '--secondary-group', 'tax_id',
'--out-map', map_out, '--out', dedup_out, self.fa_in]
log.info(self.log_info.format(' '.join(map(str, args))))
self.main(args)
self.assertTrue(filecmp.cmp(dedup_ref, dedup_out))
self.assertTrue(filecmp.cmp(map_ref, map_out))
def test07(self):
"""
Test everything together with primary and secondary group species and tax_id
"""
datadir = self.datadir
outdir = self.mkoutdir()
this_test = sys._getframe().f_code.co_name
dedup_out = path.join(outdir, 'dedup.fasta.bz2')
map_out = path.join(outdir, 'map.fasta.bz2')
weights_out = path.join(outdir, 'weights.fasta.bz2')
dedup_ref = path.join(datadir, this_test, 'dedup.fasta.bz2')
map_ref = path.join(datadir, this_test, 'map.fasta.bz2')
weights_ref = path.join(datadir, this_test, 'weights.fasta.bz2')
args = ['--primary-group', 'species', '--secondary-group', 'tax_id',
'--split-info', self.split_info,
'--out-weights', weights_out,
'--out-map', map_out,
'--out', dedup_out, self.fa_in]
log.info(self.log_info.format(' '.join(map(str, args))))
self.main(args)
self.assertTrue(filecmp.cmp(dedup_ref, dedup_out))
self.assertTrue(filecmp.cmp(map_ref, map_out))
self.assertTrue(filecmp.cmp(weights_ref, weights_out))
|
nhoffman/bioy
|
tests/test_dedup.py
|
Python
|
gpl-3.0
| 6,245
|
# -*- coding: utf-8 -*-
import re
from module.plugins.Crypter import Crypter
from module.common.json_layer import json_loads
from time import time
class MultiuploadCom(Crypter):
__name__ = "MultiuploadCom"
__type__ = "crypter"
__pattern__ = r"http://(?:www\.)?multiupload.com/(\w+)"
__version__ = "0.01"
__description__ = """MultiUpload.com crypter"""
__config__ = [("preferedHoster", "str", "Prefered hoster list (bar-separated) ", "multiupload"),
("ignoredHoster", "str", "Ignored hoster list (bar-separated) ", "")]
__author_name__ = ("zoidberg")
__author_mail__ = ("zoidberg@mujmail.cz")
ML_LINK_PATTERN = r'<div id="downloadbutton_" style=""><a href="([^"]+)"'
def decrypt(self, pyfile):
self.html = self.load(pyfile.url)
found = re.search(self.ML_LINK_PATTERN, self.html)
ml_url = found.group(1) if found else None
json_list = json_loads(self.load("http://multiupload.com/progress/", get = {
"d": re.search(self.__pattern__, pyfile.url).group(1),
"r": str(int(time()*1000))
}))
new_links = []
prefered_set = map(lambda s: s.lower().split('.')[0], set(self.getConfig("preferedHoster").split('|')))
if ml_url and 'multiupload' in prefered_set: new_links.append(ml_url)
for link in json_list:
if link['service'].lower() in prefered_set and int(link['status']) and not int(link['deleted']):
url = self.getLocation(link['url'])
if url: new_links.append(url)
if not new_links:
ignored_set = map(lambda s: s.lower().split('.')[0], set(self.getConfig("ignoredHoster").split('|')))
if 'multiupload' not in ignored_set: new_links.append(ml_url)
for link in json_list:
if link['service'].lower() not in ignored_set and int(link['status']) and not int(link['deleted']):
url = self.getLocation(link['url'])
if url: new_links.append(url)
if new_links:
self.core.files.addLinks(new_links, self.pyfile.package().id)
else:
self.fail('Could not extract any links')
def getLocation(self, url):
header = self.load(url, just_header = True)
return header['location'] if "location" in header else None
|
fener06/pyload
|
module/plugins/crypter/MultiuploadCom.py
|
Python
|
gpl-3.0
| 2,521
|
import os
import subprocess
import platform
from .sourcefile import SourceFile
class Git(object):
def __init__(self, repo_root, url_base):
self.root = os.path.abspath(repo_root)
self.git = Git.get_func(repo_root)
self.url_base = url_base
@staticmethod
def get_func(repo_path):
def git(cmd, *args):
full_cmd = ["git", cmd] + list(args)
try:
return subprocess.check_output(full_cmd, cwd=repo_path, stderr=subprocess.STDOUT)
except Exception as e:
if platform.uname()[0] == "Windows" and isinstance(e, WindowsError):
full_cmd[0] = "git.bat"
return subprocess.check_output(full_cmd, cwd=repo_path, stderr=subprocess.STDOUT)
else:
raise
return git
@classmethod
def for_path(cls, path, url_base):
git = Git.get_func(path)
try:
return cls(git("rev-parse", "--show-toplevel").rstrip(), url_base)
except subprocess.CalledProcessError:
return None
def _local_changes(self):
changes = {}
cmd = ["status", "-z", "--ignore-submodules=all"]
data = self.git(*cmd)
if data == "":
return changes
rename_data = None
for entry in data.split("\0")[:-1]:
if rename_data is not None:
status, rel_path = entry.split(" ")
if status[0] == "R":
rename_data = (rel_path, status)
else:
changes[rel_path] = (status, None)
else:
rel_path = entry
changes[rel_path] = rename_data
rename_data = None
return changes
def _show_file(self, path):
path = os.path.relpath(os.path.abspath(path), self.root)
return self.git("show", "HEAD:%s" % path)
def __iter__(self):
cmd = ["ls-tree", "-r", "-z", "HEAD"]
local_changes = self._local_changes()
for result in self.git(*cmd).split("\0")[:-1]:
rel_path = result.split("\t")[-1]
hash = result.split()[2]
if not os.path.isdir(os.path.join(self.root, rel_path)):
if rel_path in local_changes:
contents = self._show_file(rel_path)
else:
contents = None
yield SourceFile(self.root,
rel_path,
self.url_base,
hash,
contents=contents)
class FileSystem(object):
def __init__(self, root, url_base):
self.root = root
self.url_base = url_base
from gitignore import gitignore
self.path_filter = gitignore.PathFilter(self.root, extras=[".git/"])
def __iter__(self):
paths = self.get_paths()
for path in paths:
yield SourceFile(self.root, path, self.url_base)
def get_paths(self):
for dirpath, dirnames, filenames in os.walk(self.root):
for filename in filenames:
path = os.path.relpath(os.path.join(dirpath, filename), self.root)
if self.path_filter(path):
yield path
dirnames[:] = [item for item in dirnames if self.path_filter(
os.path.relpath(os.path.join(dirpath, item), self.root) + "/")]
|
sadmansk/servo
|
tests/wpt/web-platform-tests/tools/manifest/vcs.py
|
Python
|
mpl-2.0
| 3,482
|
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from nose.tools import assert_equal, assert_true
from selenic.util import Result, Condition
step_matcher("re")
@when(ur"(?P<step>the user opens the typeahead popup"
ur"(?P<where> at the end of the title text)?)")
@given("(?P<step>that a typeahead popup is open)")
def step_impl(context, step, where=None):
driver = context.driver
util = context.util
if step.startswith("that a typeahead"):
element, ph = driver.execute_script("""
var ph = document.querySelector(".p>._placeholder");
return [ph.parentNode, ph];
""")
ActionChains(driver) \
.click(ph) \
.perform()
# ph.click()
elif where:
element = driver.execute_script("""
var title = document.querySelector("._real.title");
var text;
var child = title.firstChild;
while (child && !text) {
if (child.nodeType === Node.TEXT_NODE)
text = child;
child = child.nextSibling;
}
wed_editor.caretManager.setCaret(text, text.length);
return title;
""")
else:
element = None
util.ctrl_equivalent_x("/")
context.context_menu_for = element
context.execute_steps(u"""
When the user clicks the choice named "Test typeahead"
""")
def check(driver):
ret = driver.execute_script("""
var input = document.querySelector(
'.wed-typeahead-popup input.tt-input');
if (!input)
return [false, "cannot find input element"];
return [document.activeElement === input, "input not focused"];
""")
return Result(ret[0], ret[1])
result = Condition(util, check).wait()
assert_true(result, result.payload)
@then("the typeahead popup is not visible")
def step_impl(context):
util = context.util
util.wait(
lambda driver:
len(driver.find_elements_by_class_name("wed-typeahead-popup")) == 0)
@then("the typeahead popup's action (?P<is_>is|is not) performed")
def step_impl(context, is_):
util = context.util
element = context.context_menu_for
expected = "Test 0" if is_ == "is" else ""
assert_equal(util.get_text_excluding_children(element), expected)
@when(ur"the user clicks the first typeahead choice")
def step_impl(context):
util = context.util
element = util.find_element(
(By.CSS_SELECTOR, ".wed-typeahead-popup .tt-menu .tt-suggestion"))
element.click()
@when(ur"the user clicks outside the typeahead")
def step_impl(context):
util = context.util
title = util.find_element((By.CLASS_NAME, "title"))
ActionChains(context.driver) \
.click(title) \
.perform()
@then(ur"the typeahead popup's choice list has a vertical scrollbar")
def step_impl(context):
def check(driver):
return driver.execute_script("""
var menu = document.getElementsByClassName("tt-menu")[0];
var menu_style = window.getComputedStyle(menu);
var left_border =
Number(menu_style.getPropertyValue("border-left-width")
.replace("px", ""));
var right_border =
Number(menu_style.getPropertyValue("border-right-width")
.replace("px", ""));
return menu.clientWidth <
menu.offsetWidth - left_border - right_border;
""")
context.util.wait(check)
@then(ur"the typeahead popup is visible and completely inside the window")
def step_impl(context):
util = context.util
popup = util.find_element((By.CLASS_NAME, "wed-typeahead-popup"))
assert_true(util.completely_visible_to_user(popup),
"menu is completely visible")
@then(ur'the typeahead popup overflows the editor pane')
def step_impl(context):
driver = context.driver
assert_true(driver.execute_script("""
var dropdown = document.querySelector(".wed-typeahead-popup .tt-menu");
var scroller = document.getElementsByClassName("wed-scroller")[0];
var rect = dropdown.getBoundingClientRect();
var scroller_rect = scroller.getBoundingClientRect();
return rect.bottom > scroller_rect.bottom;
"""), "the typeahead should overflow the editor pane")
@when(ur"the user clicks the last visible completion")
def step_impl(context):
driver = context.driver
x, y = driver.execute_script("""
var dropdown = document.querySelector(".wed-typeahead-popup .tt-menu");
var rect = dropdown.getBoundingClientRect();
return [rect.left + rect.width / 2, rect.bottom - 5];
""")
ActionChains(driver) \
.move_to_element_with_offset(context.origin_object, x, y) \
.click() \
.perform()
@then(ur"dump caret position")
def step_impl(context):
driver = context.driver
print(driver.execute_script("""
var caret = wed_editor.caretManager.caret;
if (!caret)
return "no caret!"
return [caret.node.innerHTML, caret.offset];
"""))
print("")
@then(ur"the typeahead popup shows suggestions")
def step_impl(context):
util = context.util
assert_true(len(util.find_elements(
(By.CSS_SELECTOR, ".wed-typeahead-popup .tt-menu .tt-suggestion"))))
|
mangalam-research/wed
|
selenium_test/steps/typeahead_popup.py
|
Python
|
mpl-2.0
| 5,266
|
import os
import re
from pkg_resources import get_distribution
import mardor
def test_version_in_setuppy():
dist = get_distribution('mar')
assert mardor.version_str == dist.version
assert ".".join(str(_) for _ in mardor.version) == dist.version
def test_version_in_changelog():
dist = get_distribution('mar')
here = os.path.abspath(os.path.dirname(__file__))
changelog_path = os.path.join(here, '..', 'CHANGELOG.rst')
changelog = open(changelog_path).read()
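    # The released version must appear at the start of a line in
    # CHANGELOG.rst; re.M makes ^ match at each line boundary.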
assert re.search('^{}'.format(re.escape(mardor.version_str)), changelog,
re.M)
|
catlee/build-mar
|
tests/test_version.py
|
Python
|
mpl-2.0
| 596
|
from cStringIO import StringIO
from datetime import date, datetime, timedelta
import json
import time
from xml.sax.saxutils import escape
import mock
from nose.tools import eq_, ok_
from nose.plugins.attrib import attr
from django.conf import settings
from django.core.exceptions import ValidationError
from django.test.utils import override_settings
from constance import config
from waffle.models import Switch
from kuma.core.exceptions import ProgrammingError
from kuma.core.tests import override_constance_settings, KumaTestCase
from kuma.users.tests import UserTestCase, user
from . import (document, revision, doc_rev, normalize_html,
create_template_test_users, create_topical_parents_docs)
from .. import tasks
from ..constants import REDIRECT_CONTENT
from ..exceptions import (PageMoveError,
DocumentRenderedContentNotAvailable,
DocumentRenderingInProgress)
from ..jobs import DocumentZoneStackJob
from ..models import (Document, Revision, RevisionIP, DocumentZone,
TaggedDocument)
def _objects_eq(manager, list_):
"""Assert that the objects contained by `manager` are those in `list_`."""
eq_(set(manager.all()), set(list_))
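# Helper: build an approved revision whose content is wiki redirect markup
# pointing at `redirect_to`.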
def redirect_rev(title, redirect_to):
return revision(
document=document(title=title, save=True),
content='REDIRECT [[%s]]' % redirect_to,
is_approved=True,
save=True)
class DocumentTests(UserTestCase):
"""Tests for the Document model"""
@attr('bug875349')
def test_json_data(self):
# Set up a doc with tags
doc, rev = doc_rev('Sample document')
doc.save()
expected_tags = sorted(['foo', 'bar', 'baz'])
expected_review_tags = sorted(['tech', 'editorial'])
doc.tags.set(*expected_tags)
doc.current_revision.review_tags.set(*expected_review_tags)
# Create a translation with some tags
de_doc = document(parent=doc, locale='de', save=True)
revision(document=de_doc, save=True)
expected_l10n_tags = ['inprogress']
de_doc.current_revision.localization_tags.set(*expected_l10n_tags)
de_doc.tags.set(*expected_tags)
de_doc.current_revision.review_tags.set(*expected_review_tags)
# Ensure the doc's json field is empty at first
eq_(None, doc.json)
# Get JSON data for the doc, and ensure the doc's json field is now
# properly populated.
data = doc.get_json_data()
eq_(json.dumps(data), doc.json)
# Load up another copy of the doc from the DB, and check json
saved_doc = Document.objects.get(pk=doc.pk)
eq_(json.dumps(data), saved_doc.json)
# Check the fields stored in JSON of the English doc
# (the fields are created in build_json_data in models.py)
eq_(doc.title, data['title'])
eq_(doc.title, data['label'])
eq_(doc.get_absolute_url(), data['url'])
eq_(doc.id, data['id'])
eq_(doc.slug, data['slug'])
result_tags = sorted([str(x) for x in data['tags']])
eq_(expected_tags, result_tags)
result_review_tags = sorted([str(x) for x in data['review_tags']])
eq_(expected_review_tags, result_review_tags)
eq_(doc.locale, data['locale'])
eq_(doc.current_revision.summary, data['summary'])
eq_(doc.modified.isoformat(), data['modified'])
eq_(doc.current_revision.created.isoformat(), data['last_edit'])
# Check fields of translated doc
ok_('translations' in data)
eq_(de_doc.locale, data['translations'][0]['locale'])
result_l10n_tags = sorted([str(x) for x
in data['translations'][0]['localization_tags']])
eq_(expected_l10n_tags, result_l10n_tags)
result_tags = sorted([str(x) for x in data['translations'][0]['tags']])
eq_(expected_tags, result_tags)
result_review_tags = sorted([str(x) for x
in data['translations'][0]['review_tags']])
eq_(expected_review_tags, result_review_tags)
eq_(de_doc.current_revision.summary, data['translations'][0]['summary'])
eq_(de_doc.title, data['translations'][0]['title'])
def test_document_is_template(self):
"""is_template stays in sync with the title"""
d = document(title='test')
d.save()
assert not d.is_template
d.slug = 'Template:test'
d.save()
assert d.is_template
d.slug = 'Back-to-document'
d.save()
assert not d.is_template
def test_error_on_delete(self):
"""Ensure error-on-delete is only thrown when waffle switch active"""
switch = Switch.objects.create(name='wiki_error_on_delete')
for active in (True, False):
switch.active = active
switch.save()
d = document()
d.save()
try:
d.delete()
if active:
self.fail('Exception on delete when active')
except Exception:
if not active:
self.fail('No exception on delete when not active')
def test_delete_tagged_document(self):
"""Make sure deleting a tagged doc deletes its tag relationships."""
# TODO: Move to wherever the tests for TaggableMixin are.
# This works because Django's delete() sees the `tags` many-to-many
# field (actually a manager) and follows the reference.
d = document()
d.save()
d.tags.add('grape')
eq_(1, TaggedDocument.objects.count())
d.delete()
eq_(0, TaggedDocument.objects.count())
def _test_m2m_inheritance(self, enum_class, attr, direct_attr):
"""Test a descriptor's handling of parent delegation."""
parent = document()
child = document(parent=parent, title='Some Other Title')
e1 = enum_class(item_id=1)
parent.save()
# Make sure child sees stuff set on parent:
getattr(parent, attr).add(e1)
_objects_eq(getattr(child, attr), [e1])
# Make sure parent sees stuff set on child:
child.save()
e2 = enum_class(item_id=2)
getattr(child, attr).add(e2)
_objects_eq(getattr(parent, attr), [e1, e2])
# Assert the data are attached to the parent, not the child:
_objects_eq(getattr(parent, direct_attr), [e1, e2])
_objects_eq(getattr(child, direct_attr), [])
def test_category_inheritance(self):
"""A document's categories must always be those of its parent."""
some_category = Document.CATEGORIES[1][0]
other_category = Document.CATEGORIES[0][0]
# Notice if somebody ever changes the default on the category field,
# which would invalidate our test:
assert some_category != document().category
parent = document(category=some_category)
parent.save()
child = document(parent=parent, locale='de')
child.save()
# Make sure child sees stuff set on parent:
eq_(some_category, child.category)
        # Child's category should revert to the parent's on save:
child.category = other_category
child.save()
eq_(some_category, child.category)
# Changing the parent category should change the child's:
parent.category = other_category
parent.save()
eq_(other_category,
parent.translations.get(locale=child.locale).category)
def _test_int_sets_and_descriptors(self, enum_class, attr):
"""Test our lightweight int sets & descriptors' getting and setting."""
d = document()
d.save()
_objects_eq(getattr(d, attr), [])
i1 = enum_class(item_id=1)
getattr(d, attr).add(i1)
_objects_eq(getattr(d, attr), [i1])
i2 = enum_class(item_id=2)
getattr(d, attr).add(i2)
_objects_eq(getattr(d, attr), [i1, i2])
def test_only_localizable_allowed_children(self):
"""You can't have children for a non-localizable document."""
# Make English rev:
en_doc = document(is_localizable=False)
en_doc.save()
# Make Deutsch translation:
de_doc = document(parent=en_doc, locale='de')
self.assertRaises(ValidationError, de_doc.save)
def test_cannot_make_non_localizable_if_children(self):
"""You can't make a document non-localizable if it has children."""
# Make English rev:
en_doc = document(is_localizable=True)
en_doc.save()
# Make Deutsch translation:
de_doc = document(parent=en_doc, locale='de')
de_doc.save()
en_doc.is_localizable = False
self.assertRaises(ValidationError, en_doc.save)
def test_non_english_implies_nonlocalizable(self):
d = document(is_localizable=True, locale='de')
d.save()
assert not d.is_localizable
def test_validate_category_on_save(self):
"""Make sure invalid categories can't be saved.
Invalid categories cause errors when viewing documents.
"""
d = document(category=9999)
self.assertRaises(ValidationError, d.save)
def test_new_doc_does_not_update_categories(self):
"""Make sure that creating a new document doesn't change the
category of all the other documents."""
d1 = document(category=10)
d1.save()
assert d1.pk
d2 = document(category=00)
assert not d2.pk
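        # d2 is deliberately left unsaved (it has no pk yet); cleaning its
        # category must not touch the category stored on other documents.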
d2._clean_category()
d1prime = Document.objects.get(pk=d1.pk)
eq_(10, d1prime.category)
@attr('doc_translations')
def test_other_translations(self):
"""
parent doc should list all docs for which it is parent
A child doc should list all its parent's docs, excluding itself, and
including its parent
"""
parent = document(locale=settings.WIKI_DEFAULT_LANGUAGE, title='test',
save=True)
enfant = document(locale='fr', title='le test', parent=parent,
save=True)
bambino = document(locale='es', title='el test', parent=parent,
save=True)
        children = (Document.objects.filter(parent=parent)
                                    .order_by('locale')
                                    .values_list('pk', flat=True))
eq_(list(children),
list(parent.other_translations.values_list('pk', flat=True)))
enfant_translation_pks = enfant.other_translations.values_list('pk', flat=True)
ok_(parent.pk in enfant_translation_pks)
ok_(bambino.pk in enfant_translation_pks)
eq_(False, enfant.pk in enfant_translation_pks)
def test_topical_parents(self):
d1, d2 = create_topical_parents_docs()
ok_(d2.parents == [d1])
d3 = document(title='Smell accessibility')
d3.parent_topic = d2
d3.save()
ok_(d3.parents == [d1, d2])
@attr('redirect')
def test_redirect_url_allows_site_url(self):
href = "%s/en-US/Mozilla" % settings.SITE_URL
title = "Mozilla"
html = REDIRECT_CONTENT % {'href': href, 'title': title}
d = document(is_redirect=True, html=html)
eq_(href, d.redirect_url())
@attr('redirect')
def test_redirect_url_allows_domain_relative_url(self):
href = "/en-US/Mozilla"
title = "Mozilla"
html = REDIRECT_CONTENT % {'href': href, 'title': title}
d = document(is_redirect=True, html=html)
eq_(href, d.redirect_url())
@attr('redirect')
def test_redirect_url_rejects_protocol_relative_url(self):
href = "//evilsite.com"
title = "Mozilla"
html = REDIRECT_CONTENT % {'href': href, 'title': title}
d = document(is_redirect=True, html=html)
eq_(None, d.redirect_url())
@attr('bug1082034')
@attr('redirect')
def test_redirect_url_works_for_home_path(self):
href = "/"
title = "Mozilla"
html = REDIRECT_CONTENT % {'href': href, 'title': title}
d = document(is_redirect=True, html=html)
eq_(href, d.redirect_url())
class PermissionTests(KumaTestCase):
def setUp(self):
"""Set up the permissions, groups, and users needed for the tests"""
super(PermissionTests, self).setUp()
(self.perms, self.groups, self.users, self.superuser) = (
create_template_test_users())
def test_template_permissions(self):
msg = ('should not', 'should')
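        # Each trial pairs an expected outcome with a user: plain slugs are
        # open to everyone, while Template: slugs depend on the user's
        # specific add/change permission for the operation being tested.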
for is_add in (True, False):
slug_trials = (
('test_for_%s', (
(True, self.superuser),
(True, self.users['none']),
(True, self.users['all']),
(True, self.users['add']),
(True, self.users['change']),
)),
('Template:test_for_%s', (
(True, self.superuser),
(False, self.users['none']),
(True, self.users['all']),
(is_add, self.users['add']),
(not is_add, self.users['change']),
))
)
for slug_tmpl, trials in slug_trials:
for expected, trial_user in trials:
slug = slug_tmpl % trial_user.username
if is_add:
eq_(expected,
Document.objects.allows_add_by(trial_user, slug),
'User %s %s able to create %s' % (
trial_user, msg[expected], slug))
else:
doc = document(slug=slug, title=slug)
eq_(expected,
doc.allows_revision_by(trial_user),
'User %s %s able to revise %s' % (
trial_user, msg[expected], slug))
eq_(expected,
doc.allows_editing_by(trial_user),
'User %s %s able to edit %s' % (
trial_user, msg[expected], slug))
class DocumentTestsWithFixture(UserTestCase):
"""Document tests which need the users fixture"""
def test_redirect_document_non_redirect(self):
"""Assert redirect_document on non-redirects returns None."""
eq_(None, document().redirect_document())
def test_redirect_document_external_redirect(self):
"""Assert redirects to external pages return None."""
eq_(None, revision(content='REDIRECT [http://example.com]',
is_approved=True,
save=True).document.redirect_document())
def test_redirect_document_nonexistent(self):
"""Assert redirects to non-existent pages return None."""
eq_(None, revision(content='REDIRECT [[kersmoo]]',
is_approved=True,
save=True).document.redirect_document())
def test_default_topic_parents_for_translation(self):
"""A translated document with no topic parent should by default use
the translation of its translation parent's topic parent."""
orig_pt = document(locale=settings.WIKI_DEFAULT_LANGUAGE,
title='test section',
save=True)
orig = document(locale=settings.WIKI_DEFAULT_LANGUAGE, title='test',
parent_topic=orig_pt, save=True)
trans_pt = document(locale='fr', title='le test section',
parent=orig_pt, save=True)
trans = document(locale='fr', title='le test',
parent=orig, save=True)
ok_(trans.parent_topic)
eq_(trans.parent_topic.pk, trans_pt.pk)
def test_default_topic_with_stub_creation(self):
orig_pt = document(locale=settings.WIKI_DEFAULT_LANGUAGE,
title='test section',
save=True)
orig = document(locale=settings.WIKI_DEFAULT_LANGUAGE, title='test',
parent_topic=orig_pt, save=True)
trans = document(locale='fr', title='le test',
parent=orig, save=True)
# There should be a translation topic parent
trans_pt = trans.parent_topic
ok_(trans_pt)
# The locale of the topic parent should match the new translation
eq_(trans.locale, trans_pt.locale)
# But, the translation's topic parent must *not* be the translation
# parent's topic parent
ok_(trans_pt.pk != orig_pt.pk)
# Still, since the topic parent is an autocreated stub, it shares its
# title with the original.
eq_(trans_pt.title, orig_pt.title)
# Oh, and it should point to the original parent topic as its
# translation parent
eq_(trans_pt.parent, orig_pt)
def test_default_topic_with_path_gaps(self):
# Build a path of docs in en-US
orig_path = ('MDN', 'web', 'CSS', 'properties', 'banana', 'leaf')
docs, doc = [], None
for title in orig_path:
doc = document(locale=settings.WIKI_DEFAULT_LANGUAGE, title=title,
parent_topic=doc, save=True)
revision(document=doc, title=title, save=True)
docs.append(doc)
# Translate, but leave gaps for stubs
trans_0 = document(locale='fr', title='le MDN',
parent=docs[0], save=True)
revision(document=trans_0, title='le MDN', tags="LeTest!", save=True)
trans_2 = document(locale='fr', title='le CSS',
parent=docs[2], save=True)
revision(document=trans_2, title='le CSS', tags="LeTest!", save=True)
trans_5 = document(locale='fr', title='le leaf',
parent=docs[5], save=True)
        revision(document=trans_5, title='le leaf', tags="LeTest!", save=True)
# Make sure trans_2 got the right parent
eq_(trans_2.parents[0].pk, trans_0.pk)
# Ensure the translated parents and stubs appear properly in the path
parents_5 = trans_5.parents
eq_(parents_5[0].pk, trans_0.pk)
eq_(parents_5[1].locale, trans_5.locale)
eq_(parents_5[1].title, docs[1].title)
ok_(parents_5[1].current_revision.pk != docs[1].current_revision.pk)
eq_(parents_5[2].pk, trans_2.pk)
eq_(parents_5[3].locale, trans_5.locale)
eq_(parents_5[3].title, docs[3].title)
ok_(parents_5[3].current_revision.pk != docs[3].current_revision.pk)
eq_(parents_5[4].locale, trans_5.locale)
eq_(parents_5[4].title, docs[4].title)
ok_(parents_5[4].current_revision.pk != docs[4].current_revision.pk)
for p in parents_5:
ok_(p.current_revision)
if p.pk not in (trans_0.pk, trans_2.pk, trans_5.pk):
ok_('NeedsTranslation' in p.current_revision.tags)
ok_('TopicStub' in p.current_revision.tags)
ok_(p.current_revision.localization_in_progress)
def test_repair_breadcrumbs(self):
english_top = document(locale=settings.WIKI_DEFAULT_LANGUAGE,
title='English top',
save=True)
english_mid = document(locale=settings.WIKI_DEFAULT_LANGUAGE,
title='English mid',
parent_topic=english_top,
save=True)
english_bottom = document(locale=settings.WIKI_DEFAULT_LANGUAGE,
title='English bottom',
parent_topic=english_mid,
save=True)
french_top = document(locale='fr',
title='French top',
parent=english_top,
save=True)
french_mid = document(locale='fr',
parent=english_mid,
parent_topic=english_mid,
save=True)
french_bottom = document(locale='fr',
parent=english_bottom,
parent_topic=english_bottom,
save=True)
french_bottom.repair_breadcrumbs()
french_bottom_fixed = Document.objects.get(locale='fr',
title=french_bottom.title)
eq_(french_mid.id, french_bottom_fixed.parent_topic.id)
eq_(french_top.id, french_bottom_fixed.parent_topic.parent_topic.id)
def test_code_sample_extraction(self):
"""Make sure sample extraction works from the model.
This is a smaller version of the test from test_content.py"""
sample_html = u'<p class="foo">Hello world!</p>'
sample_css = u'.foo p { color: red; }'
sample_js = u'window.alert("Hi there!");'
doc_src = u"""
<p>This is a page. Deal with it.</p>
<ul id="s2" class="code-sample">
<li><pre class="brush: html">%s</pre></li>
<li><pre class="brush: css">%s</pre></li>
<li><pre class="brush: js">%s</pre></li>
</ul>
<p>More content shows up here.</p>
""" % (escape(sample_html), escape(sample_css), escape(sample_js))
d1, r1 = doc_rev(doc_src)
result = d1.extract_code_sample('s2')
eq_(sample_html.strip(), result['html'].strip())
eq_(sample_css.strip(), result['css'].strip())
eq_(sample_js.strip(), result['js'].strip())
class TaggedDocumentTests(UserTestCase):
"""Tests for tags in Documents and Revisions"""
@attr('tags')
def test_revision_tags(self):
"""Change tags on Document by creating Revisions"""
d, _ = doc_rev('Sample document')
eq_(0, Document.objects.filter(tags__name='foo').count())
eq_(0, Document.objects.filter(tags__name='alpha').count())
r = revision(document=d, content='Update to document',
is_approved=True, tags="foo, bar, baz")
r.save()
eq_(1, Document.objects.filter(tags__name='foo').count())
eq_(0, Document.objects.filter(tags__name='alpha').count())
r = revision(document=d, content='Another update',
is_approved=True, tags="alpha, beta, gamma")
r.save()
eq_(0, Document.objects.filter(tags__name='foo').count())
eq_(1, Document.objects.filter(tags__name='alpha').count())
class RevisionTests(UserTestCase):
"""Tests for the Revision model"""
def test_approved_revision_updates_html(self):
"""Creating an approved revision updates document.html"""
d, _ = doc_rev('Replace document html')
assert 'Replace document html' in d.html, \
'"Replace document html" not in %s' % d.html
# Creating another approved revision replaces it again
r = revision(document=d, content='Replace html again',
is_approved=True)
r.save()
assert 'Replace html again' in d.html, \
'"Replace html again" not in %s' % d.html
def test_unapproved_revision_not_updates_html(self):
"""Creating an unapproved revision does not update document.html"""
d, _ = doc_rev('Here to stay')
assert 'Here to stay' in d.html, '"Here to stay" not in %s' % d.html
# Creating another approved revision keeps initial content
r = revision(document=d, content='Fail to replace html',
is_approved=False)
r.save()
assert 'Here to stay' in d.html, '"Here to stay" not in %s' % d.html
def test_revision_unicode(self):
"""Revision containing unicode characters is saved successfully."""
        content = u'Firefox informa\xe7\xf5es \u30d8\u30eb'
        _, r = doc_rev(content)
        eq_(content, r.content)
def test_save_bad_based_on(self):
"""Saving a Revision with a bad based_on value raises an error."""
r = revision()
r.based_on = revision() # Revision of some other unrelated Document
self.assertRaises(ProgrammingError, r.save)
def test_correct_based_on_to_none(self):
"""Assure Revision.clean() changes a bad based_on value to None when
there is no current_revision of the English document."""
r = revision()
r.based_on = revision() # Revision of some other unrelated Document
self.assertRaises(ValidationError, r.clean)
eq_(None, r.based_on)
def test_correct_based_on_to_current_revision(self):
"""Assure Revision.clean() defaults based_on value to the English
doc's current_revision when there is one."""
# Make English rev:
en_rev = revision(is_approved=True)
en_rev.save()
# Make Deutsch translation:
de_doc = document(parent=en_rev.document, locale='de')
de_doc.save()
de_rev = revision(document=de_doc)
# Set based_on to a de rev to simulate fixing broken translation source
de_rev.based_on = de_rev
de_rev.clean()
eq_(en_rev.document.current_revision, de_rev.based_on)
def test_previous(self):
"""Revision.previous should return this revision's document's
most recent approved revision."""
rev = revision(is_approved=True, save=True)
eq_(None, rev.previous)
# wait a second so next revision is a different datetime
time.sleep(1)
next_rev = revision(document=rev.document, content="Updated",
is_approved=True)
next_rev.save()
eq_(rev, next_rev.previous)
time.sleep(1)
last_rev = revision(document=rev.document, content="Finally",
is_approved=True)
last_rev.save()
eq_(next_rev, last_rev.previous)
@attr('toc')
def test_show_toc(self):
"""Setting toc_depth appropriately affects the Document's
show_toc property."""
d, r = doc_rev('Toggle table of contents.')
assert (r.toc_depth != 0)
assert d.show_toc
r = revision(document=d, content=r.content, toc_depth=0,
is_approved=True)
r.save()
assert not d.show_toc
r = revision(document=d, content=r.content, toc_depth=1,
is_approved=True)
r.save()
assert d.show_toc
def test_revert(self):
"""Reverting to a specific revision."""
d, r = doc_rev('Test reverting')
old_id = r.id
time.sleep(1)
revision(document=d,
title='Test reverting',
content='An edit to revert',
comment='This edit gets reverted',
is_approved=True)
r.save()
time.sleep(1)
reverted = d.revert(r, r.creator)
ok_('Revert to' in reverted.comment)
ok_('Test reverting' == reverted.content)
ok_(old_id != reverted.id)
def test_revert_review_tags(self):
d, r = doc_rev('Test reverting with review tags')
r.review_tags.set('technical')
time.sleep(1)
r2 = revision(document=d, title='Test reverting with review tags',
content='An edit to revert',
comment='This edit gets reverted',
is_approved=True)
r2.save()
r2.review_tags.set('editorial')
reverted = d.revert(r, r.creator)
reverted_tags = [t.name for t in reverted.review_tags.all()]
ok_('technical' in reverted_tags)
ok_('editorial' not in reverted_tags)
class GetCurrentOrLatestRevisionTests(UserTestCase):
"""Tests for current_or_latest_revision."""
def test_single_approved(self):
"""Get approved revision."""
rev = revision(is_approved=True, save=True)
eq_(rev, rev.document.current_or_latest_revision())
def test_multiple_approved(self):
"""When multiple approved revisions exist, return the most recent."""
r1 = revision(is_approved=True, save=True)
r2 = revision(is_approved=True, save=True, document=r1.document)
eq_(r2, r2.document.current_or_latest_revision())
def test_latest(self):
"""Return latest revision when no current exists."""
r1 = revision(is_approved=False, save=True,
created=datetime.now() - timedelta(days=1))
r2 = revision(is_approved=False, save=True, document=r1.document)
eq_(r2, r1.document.current_or_latest_revision())
class DumpAndLoadJsonTests(UserTestCase):
def test_roundtrip(self):
# Create some documents and revisions here, rather than use a fixture
d1, r1 = doc_rev('Doc 1')
d2, r2 = doc_rev('Doc 2')
d3, r3 = doc_rev('Doc 3')
d4, r4 = doc_rev('Doc 4')
d5, r5 = doc_rev('Doc 5')
# Since this happens in dev sometimes, break a doc by deleting its
# current revision and leaving it with none.
d5.current_revision = None
d5.save()
r5.delete()
# The same creator will be used for all the revs, so let's also get a
# non-creator user for the upload.
creator = r1.creator
uploader = self.user_model.objects.exclude(pk=creator.id).all()[0]
# Count docs (with revisions) and revisions in DB
doc_cnt_db = (Document.objects
.filter(current_revision__isnull=False)
.count())
rev_cnt_db = (Revision.objects.count())
# Do the dump, capture it, parse the JSON
fin = StringIO()
Document.objects.dump_json(Document.objects.all(), fin)
data_json = fin.getvalue()
data = json.loads(data_json)
# No objects should come with non-null primary keys
for x in data:
ok_(not x['pk'])
# Count the documents in JSON vs the DB
doc_cnt_json = len([x for x in data if x['model'] == 'wiki.document'])
eq_(doc_cnt_db, doc_cnt_json,
"DB and JSON document counts should match")
# Count the revisions in JSON vs the DB
rev_cnt_json = len([x for x in data if x['model'] == 'wiki.revision'])
eq_(rev_cnt_db, rev_cnt_json,
"DB and JSON revision counts should match")
# For good measure, ensure no documents missing revisions in the dump.
doc_no_rev = (Document.objects
.filter(current_revision__isnull=True))[0]
no_rev_cnt = len([x for x in data
if x['model'] == 'wiki.document' and
x['fields']['slug'] == doc_no_rev.slug and
x['fields']['locale'] == doc_no_rev.locale])
eq_(0, no_rev_cnt,
"There should be no document exported without revision")
# Upload the data as JSON, assert that all objects were loaded
loaded_cnt = Document.objects.load_json(uploader, StringIO(data_json))
eq_(len(data), loaded_cnt)
# Ensure the current revisions of the documents have changed, and that
# the creator matches the uploader.
for d_orig in (d1, d2, d3, d4):
d_curr = Document.objects.get(pk=d_orig.pk)
eq_(2, d_curr.revisions.count())
ok_(d_orig.current_revision.id != d_curr.current_revision.id)
ok_(d_orig.current_revision.creator_id !=
d_curr.current_revision.creator_id)
eq_(uploader.id, d_curr.current_revision.creator_id)
# Everyone out of the pool!
Document.objects.all().delete()
Revision.objects.all().delete()
# Try reloading the data on an empty DB
loaded_cnt = Document.objects.load_json(uploader, StringIO(data_json))
eq_(len(data), loaded_cnt)
        # Count docs (with revisions) and revisions in DB. The imported objects
        # should have been doc/rev pairs.
eq_(loaded_cnt / 2, Document.objects.count())
eq_(loaded_cnt / 2, Revision.objects.count())
# The originals should be gone, now.
for d_orig in (d1, d2, d3, d4):
# The original primary key should have gone away.
try:
d_curr = Document.objects.get(pk=d_orig.pk)
self.fail("This should have been an error")
except Document.DoesNotExist:
pass
# Should be able to fetch document with the original natural key
key = d_orig.natural_key()
d_curr = Document.objects.get_by_natural_key(*key)
eq_(1, d_curr.revisions.count())
eq_(uploader.id, d_curr.current_revision.creator_id)
class DeferredRenderingTests(UserTestCase):
def setUp(self):
super(DeferredRenderingTests, self).setUp()
self.rendered_content = 'THIS IS RENDERED'
self.raw_content = 'THIS IS NOT RENDERED CONTENT'
self.d1, self.r1 = doc_rev('Doc 1')
config.KUMA_DOCUMENT_RENDER_TIMEOUT = 600.0
config.KUMA_DOCUMENT_FORCE_DEFERRED_TIMEOUT = 7.0
def tearDown(self):
super(DeferredRenderingTests, self).tearDown()
self.d1.delete()
def test_rendering_fields(self):
"""Defaults for model fields related to rendering should work as
expected"""
ok_(not self.d1.rendered_html)
ok_(not self.d1.defer_rendering)
ok_(not self.d1.is_rendering_scheduled)
ok_(not self.d1.is_rendering_in_progress)
@override_constance_settings(KUMASCRIPT_TIMEOUT=1.0)
@mock.patch('kuma.wiki.kumascript.get')
def test_get_rendered(self, mock_kumascript_get):
"""get_rendered() should return rendered content when available,
attempt a render() when it's not"""
mock_kumascript_get.return_value = (self.rendered_content, None)
# First, try getting the rendered version of a document. It should
# trigger a call to kumascript.
ok_(not self.d1.rendered_html)
ok_(not self.d1.render_started_at)
ok_(not self.d1.last_rendered_at)
result_rendered, _ = self.d1.get_rendered(None, 'http://testserver/')
ok_(mock_kumascript_get.called)
eq_(self.rendered_content, result_rendered)
eq_(self.rendered_content, self.d1.rendered_html)
# Next, get a fresh copy of the document and try getting a rendering.
# It should *not* call out to kumascript, because the rendered content
# should be in the DB.
d1_fresh = Document.objects.get(pk=self.d1.pk)
eq_(self.rendered_content, d1_fresh.rendered_html)
ok_(d1_fresh.render_started_at)
ok_(d1_fresh.last_rendered_at)
mock_kumascript_get.called = False
result_rendered, _ = d1_fresh.get_rendered(None, 'http://testserver/')
ok_(not mock_kumascript_get.called)
eq_(self.rendered_content, result_rendered)
@attr('bug875349')
@override_constance_settings(KUMASCRIPT_TIMEOUT=1.0)
@override_settings(CELERY_ALWAYS_EAGER=True)
@mock.patch('kuma.wiki.kumascript.get')
def test_build_json_on_render(self, mock_kumascript_get):
"""
A document's json field is refreshed on render(), but not on save()
"""
mock_kumascript_get.return_value = (self.rendered_content, None)
# Initially empty json field should be filled in after render()
eq_(self.d1.json, None)
self.d1.render()
# reloading from db to get the updates done in the celery task
self.d1 = Document.objects.get(pk=self.d1.pk)
ok_(self.d1.json is not None)
time.sleep(1.0) # Small clock-tick to age the results.
# Change the doc title, saving does not actually change the json field.
self.d1.title = "New title"
self.d1.save()
ok_(self.d1.title != self.d1.get_json_data()['title'])
self.d1 = Document.objects.get(pk=self.d1.pk)
# However, rendering refreshes the json field.
self.d1.render()
self.d1 = Document.objects.get(pk=self.d1.pk)
eq_(self.d1.title, self.d1.get_json_data()['title'])
        # If we logically delete a document after changing its title,
        # the json blob is not updated.
deleted_title = 'Deleted title'
self.d1.title = deleted_title
self.d1.save()
self.d1.delete()
self.d1.render()
self.d1 = Document.objects.get(pk=self.d1.pk)
ok_(deleted_title != self.d1.get_json_data()['title'])
@mock.patch('kuma.wiki.kumascript.get')
@override_settings(CELERY_ALWAYS_EAGER=True)
def test_get_summary(self, mock_kumascript_get):
"""
        get_summary() should use the rendered content when it is available
"""
config.KUMASCRIPT_TIMEOUT = 1.0
mock_kumascript_get.return_value = ('<p>summary!</p>', None)
ok_(not self.d1.rendered_html)
result_summary = self.d1.get_summary()
ok_(not mock_kumascript_get.called)
ok_(not self.d1.rendered_html)
self.d1.render()
ok_(self.d1.rendered_html)
ok_(mock_kumascript_get.called)
result_summary = self.d1.get_summary()
eq_("summary!", result_summary)
config.KUMASCRIPT_TIMEOUT = 0.0
@mock.patch('kuma.wiki.kumascript.get')
def test_one_render_at_a_time(self, mock_kumascript_get):
"""Only one in-progress rendering should be allowed for a Document"""
mock_kumascript_get.return_value = (self.rendered_content, None)
self.d1.render_started_at = datetime.now()
self.d1.save()
try:
self.d1.render('', 'http://testserver/')
self.fail("An attempt to render while another appears to be in "
"progress should be disallowed")
except DocumentRenderingInProgress:
pass
@mock.patch('kuma.wiki.kumascript.get')
def test_render_timeout(self, mock_kumascript_get):
"""
A rendering that has taken too long is no longer considered in progress
"""
mock_kumascript_get.return_value = (self.rendered_content, None)
timeout = 5.0
config.KUMA_DOCUMENT_RENDER_TIMEOUT = timeout
self.d1.render_started_at = (datetime.now() -
timedelta(seconds=timeout + 1))
self.d1.save()
try:
self.d1.render('', 'http://testserver/')
except DocumentRenderingInProgress:
self.fail("A timed-out rendering should not be considered as "
"still in progress")
@mock.patch('kuma.wiki.kumascript.get')
def test_long_render_sets_deferred(self, mock_kumascript_get):
"""A rendering that takes more than a desired response time marks the
document as in need of deferred rendering in the future."""
config.KUMASCRIPT_TIMEOUT = 1.0
rendered_content = self.rendered_content
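        # The mocked kumascript.get sleeps for one second, so the first
        # render (2.0s deferral threshold) stays immediate while the second
        # (0.5s threshold) exceeds the limit and marks the document for
        # deferred rendering.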
def my_kumascript_get(self, cache_control, base_url, timeout):
time.sleep(1.0)
return (rendered_content, None)
mock_kumascript_get.side_effect = my_kumascript_get
config.KUMA_DOCUMENT_FORCE_DEFERRED_TIMEOUT = 2.0
self.d1.render('', 'http://testserver/')
ok_(not self.d1.defer_rendering)
config.KUMA_DOCUMENT_FORCE_DEFERRED_TIMEOUT = 0.5
self.d1.render('', 'http://testserver/')
ok_(self.d1.defer_rendering)
config.KUMASCRIPT_TIMEOUT = 0.0
@mock.patch('kuma.wiki.kumascript.get')
@mock.patch.object(tasks.render_document, 'delay')
def test_schedule_rendering(self, mock_render_document_delay,
mock_kumascript_get):
mock_kumascript_get.return_value = (self.rendered_content, None)
# Scheduling for a non-deferred render should happen on the spot.
self.d1.defer_rendering = False
self.d1.save()
ok_(not self.d1.render_scheduled_at)
ok_(not self.d1.last_rendered_at)
self.d1.schedule_rendering(None, 'http://testserver/')
ok_(self.d1.render_scheduled_at)
ok_(self.d1.last_rendered_at)
ok_(not mock_render_document_delay.called)
ok_(not self.d1.is_rendering_scheduled)
# Reset the significant fields and try a deferred render.
self.d1.last_rendered_at = None
self.d1.render_started_at = None
self.d1.render_scheduled_at = None
self.d1.defer_rendering = True
self.d1.save()
# Scheduling for a deferred render should result in a queued task.
self.d1.schedule_rendering(None, 'http://testserver/')
ok_(self.d1.render_scheduled_at)
ok_(not self.d1.last_rendered_at)
ok_(mock_render_document_delay.called)
# And, since our mock delay() doesn't actually queue a task, this
# document should appear to be scheduled for a pending render not yet
# in progress.
ok_(self.d1.is_rendering_scheduled)
ok_(not self.d1.is_rendering_in_progress)
@mock.patch('kuma.wiki.kumascript.get')
@mock.patch.object(tasks.render_document, 'delay')
def test_deferred_vs_immediate_rendering(self, mock_render_document_delay,
mock_kumascript_get):
mock_kumascript_get.return_value = (self.rendered_content, None)
switch = Switch.objects.create(name='wiki_force_immediate_rendering')
# When defer_rendering == False, the rendering should be immediate.
switch.active = False
switch.save()
self.d1.rendered_html = ''
self.d1.defer_rendering = False
self.d1.save()
result_rendered, _ = self.d1.get_rendered(None, 'http://testserver/')
ok_(not mock_render_document_delay.called)
# When defer_rendering == True but the waffle switch forces immediate,
# the rendering should be immediate.
switch.active = True
switch.save()
self.d1.rendered_html = ''
self.d1.defer_rendering = True
self.d1.save()
result_rendered, _ = self.d1.get_rendered(None, 'http://testserver/')
ok_(not mock_render_document_delay.called)
# When defer_rendering == True, the rendering should be deferred and an
# exception raised if the content is blank.
switch.active = False
switch.save()
self.d1.rendered_html = ''
self.d1.defer_rendering = True
self.d1.save()
try:
result_rendered, _ = self.d1.get_rendered(None, 'http://testserver/')
self.fail("We should have gotten a "
"DocumentRenderedContentNotAvailable exception")
except DocumentRenderedContentNotAvailable:
pass
ok_(mock_render_document_delay.called)
@mock.patch('kuma.wiki.kumascript.get')
def test_errors_stored_correctly(self, mock_kumascript_get):
errors = [
{'level': 'error', 'message': 'This is a fake error',
'args': ['FakeError']},
]
mock_kumascript_get.return_value = (self.rendered_content, errors)
r_rendered, r_errors = self.d1.get_rendered(None, 'http://testserver/')
ok_(errors, r_errors)
class RenderExpiresTests(UserTestCase):
"""Tests for max-age and automatic document rebuild"""
def test_find_stale_documents(self):
now = datetime.now()
# Fresh
d1 = document(title='Aged 1')
d1.render_expires = now + timedelta(seconds=100)
d1.save()
# Stale, exactly now
d2 = document(title='Aged 2')
d2.render_expires = now
d2.save()
# Stale, a little while ago
d3 = document(title='Aged 3')
d3.render_expires = now - timedelta(seconds=100)
d3.save()
stale_docs = Document.objects.get_by_stale_rendering()
eq_(sorted([d2.pk, d3.pk]),
sorted([x.pk for x in stale_docs]))
@override_constance_settings(KUMASCRIPT_TIMEOUT=1.0)
@mock.patch('kuma.wiki.kumascript.get')
def test_update_expires_with_max_age(self, mock_kumascript_get):
mock_kumascript_get.return_value = ('MOCK CONTENT', None)
max_age = 1000
now = datetime.now()
d1 = document(title='Aged 1')
d1.render_max_age = max_age
d1.save()
d1.render()
# HACK: Exact time comparisons suck, because execution time.
later = now + timedelta(seconds=max_age)
ok_(d1.render_expires > later - timedelta(seconds=1))
ok_(d1.render_expires < later + timedelta(seconds=1))
@override_constance_settings(KUMASCRIPT_TIMEOUT=1.0)
@mock.patch('kuma.wiki.kumascript.get')
def test_update_expires_without_max_age(self, mock_kumascript_get):
mock_kumascript_get.return_value = ('MOCK CONTENT', None)
now = datetime.now()
d1 = document(title='Aged 1')
d1.render_expires = now - timedelta(seconds=100)
d1.save()
d1.render()
ok_(not d1.render_expires)
@override_constance_settings(KUMASCRIPT_TIMEOUT=1.0)
@mock.patch('kuma.wiki.kumascript.get')
@mock.patch.object(tasks.render_document, 'delay')
def test_render_stale(self, mock_render_document_delay,
mock_kumascript_get):
mock_kumascript_get.return_value = ('MOCK CONTENT', None)
now = datetime.now()
earlier = now - timedelta(seconds=1000)
d1 = document(title='Aged 3')
d1.last_rendered_at = earlier
d1.render_expires = now - timedelta(seconds=100)
d1.save()
tasks.render_stale_documents()
d1_fresh = Document.objects.get(pk=d1.pk)
ok_(not mock_render_document_delay.called)
ok_(d1_fresh.last_rendered_at > earlier)
class PageMoveTests(UserTestCase):
"""Tests for page-moving and associated functionality."""
@attr('move')
def test_children_simple(self):
"""A basic tree with two direct children and no sub-trees on
either."""
d1 = document(title='Parent', save=True)
d2 = document(title='Child', save=True)
d2.parent_topic = d1
d2.save()
d3 = document(title='Another child', save=True)
d3.parent_topic = d1
d3.save()
eq_([d2, d3], d1.get_descendants())
def test_get_descendants_limited(self):
"""Tests limiting of descendant levels"""
def _make_doc(title, parent=None):
doc = document(title=title, save=True)
if parent:
doc.parent_topic = parent
doc.save()
return doc
parent = _make_doc('Parent')
child1 = _make_doc('Child 1', parent)
child2 = _make_doc('Child 2', parent)
grandchild = _make_doc('GrandChild 1', child1)
_make_doc('Great GrandChild 1', grandchild)
# Test descendant counts
eq_(len(parent.get_descendants()), 4) # All
eq_(len(parent.get_descendants(1)), 2)
eq_(len(parent.get_descendants(2)), 3)
eq_(len(parent.get_descendants(0)), 0)
eq_(len(child2.get_descendants(10)), 0)
eq_(len(grandchild.get_descendants(4)), 1)
@attr('move')
def test_children_complex(self):
"""A slightly more complex tree, with multiple children, some
of which do/don't have their own children."""
top = document(title='Parent', save=True)
c1 = document(title='Child 1', save=True)
c1.parent_topic = top
c1.save()
gc1 = document(title='Child of child 1', save=True)
gc1.parent_topic = c1
gc1.save()
c2 = document(title='Child 2', save=True)
c2.parent_topic = top
c2.save()
gc2 = document(title='Child of child 2', save=True)
gc2.parent_topic = c2
gc2.save()
gc3 = document(title='Another child of child 2', save=True)
gc3.parent_topic = c2
gc3.save()
ggc1 = document(title='Child of the second child of child 2',
save=True)
ggc1.parent_topic = gc3
ggc1.save()
ok_([c1, gc1, c2, gc2, gc3, ggc1] == top.get_descendants())
@attr('move')
def test_circular_dependency(self):
"""Make sure we can detect potential circular dependencies in
parent/child relationships."""
# Test detection at one level removed.
parent = document(title='Parent of circular-dependency document')
child = document(title='Document with circular dependency')
child.parent_topic = parent
child.save()
ok_(child.is_child_of(parent))
# And at two levels removed.
grandparent = document(title='Grandparent of '
'circular-dependency document')
parent.parent_topic = grandparent
child.save()
ok_(child.is_child_of(grandparent))
@attr('move')
def test_move_tree(self):
"""Moving a tree of documents does the correct thing"""
# Simple multi-level tree:
#
# - top
# - child1
# - child2
# - grandchild
top = revision(title='Top-level parent for tree moves',
slug='first-level/parent',
is_approved=True,
save=True)
old_top_id = top.id
top_doc = top.document
child1 = revision(title='First child of tree-move parent',
slug='first-level/second-level/child1',
is_approved=True,
save=True)
old_child1_id = child1.id
child1_doc = child1.document
child1_doc.parent_topic = top_doc
child1_doc.save()
child2 = revision(title='Second child of tree-move parent',
slug='first-level/second-level/child2',
is_approved=True,
save=True)
old_child2_id = child2.id
child2_doc = child2.document
child2_doc.parent_topic = top_doc
child2.save()
grandchild = revision(title='Child of second child of tree-move parent',
slug='first-level/second-level/third-level/grandchild',
is_approved=True,
save=True)
old_grandchild_id = grandchild.id
grandchild_doc = grandchild.document
grandchild_doc.parent_topic = child2_doc
grandchild_doc.save()
revision(title='New Top-level bucket for tree moves',
slug='new-prefix',
is_approved=True,
save=True)
revision(title='New first-level parent for tree moves',
slug='new-prefix/first-level',
is_approved=True,
save=True)
# Now we do a simple move: inserting a prefix that needs to be
# inherited by the whole tree.
top_doc._move_tree('new-prefix/first-level/parent')
# And for each document verify three things:
#
# 1. The new slug is correct.
# 2. A new revision was created when the page moved.
# 3. A redirect was created.
moved_top = Document.objects.get(pk=top_doc.id)
eq_('new-prefix/first-level/parent',
moved_top.current_revision.slug)
ok_(old_top_id != moved_top.current_revision.id)
ok_(moved_top.current_revision.slug in
Document.objects.get(slug='first-level/parent').redirect_url())
moved_child1 = Document.objects.get(pk=child1_doc.id)
eq_('new-prefix/first-level/parent/child1',
moved_child1.current_revision.slug)
ok_(old_child1_id != moved_child1.current_revision.id)
ok_(moved_child1.current_revision.slug in
Document.objects.get(
slug='first-level/second-level/child1'
).redirect_url())
moved_child2 = Document.objects.get(pk=child2_doc.id)
eq_('new-prefix/first-level/parent/child2',
moved_child2.current_revision.slug)
ok_(old_child2_id != moved_child2.current_revision.id)
ok_(moved_child2.current_revision.slug in
Document.objects.get(
slug='first-level/second-level/child2'
).redirect_url())
moved_grandchild = Document.objects.get(pk=grandchild_doc.id)
eq_('new-prefix/first-level/parent/child2/grandchild',
moved_grandchild.current_revision.slug)
ok_(old_grandchild_id != moved_grandchild.current_revision.id)
ok_(moved_grandchild.current_revision.slug in
Document.objects.get(
slug='first-level/second-level/third-level/grandchild'
).redirect_url())
@attr('move')
def test_conflicts(self):
top = revision(title='Test page-move conflict detection',
slug='test-move-conflict-detection',
is_approved=True,
save=True)
top_doc = top.document
child = revision(title='Child of conflict detection test',
slug='move-tests/conflict-child',
is_approved=True,
save=True)
child_doc = child.document
child_doc.parent_topic = top_doc
child_doc.save()
# We should find the conflict if it's at the slug the document
# will move to.
top_conflict = revision(title='Conflicting document for move conflict detection',
slug='moved/test-move-conflict-detection',
is_approved=True,
save=True)
eq_([top_conflict.document],
top_doc._tree_conflicts('moved/test-move-conflict-detection'))
# Or if it will involve a child document.
child_conflict = revision(title='Conflicting child for move conflict detection',
slug='moved/test-move-conflict-detection/conflict-child',
is_approved=True,
save=True)
eq_([top_conflict.document, child_conflict.document],
top_doc._tree_conflicts('moved/test-move-conflict-detection'))
# But a redirect should not trigger a conflict.
revision(title='Conflicting document for move conflict detection',
slug='moved/test-move-conflict-detection',
content='REDIRECT <a class="redirect" href="/foo">Foo</a>',
document=top_conflict.document,
is_approved=True,
save=True)
eq_([child_conflict.document],
top_doc._tree_conflicts('moved/test-move-conflict-detection'))
@attr('move')
def test_additional_conflicts(self):
top = revision(title='WebRTC',
slug='WebRTC',
content='WebRTC',
is_approved=True,
save=True)
top_doc = top.document
child1 = revision(title='WebRTC Introduction',
slug='WebRTC/WebRTC_Introduction',
content='WebRTC Introduction',
is_approved=True,
save=True)
child1_doc = child1.document
child1_doc.parent_topic = top_doc
child1_doc.save()
child2 = revision(title='Taking webcam photos',
slug='WebRTC/Taking_webcam_photos',
is_approved=True,
save=True)
child2_doc = child2.document
child2_doc.parent_topic = top_doc
child2_doc.save()
eq_([],
top_doc._tree_conflicts('NativeRTC'))
@attr('move')
def test_preserve_tags(self):
tags = "'moving', 'tests'"
rev = revision(title='Test page-move tag preservation',
slug='page-move-tags',
tags=tags,
is_approved=True,
save=True)
rev.review_tags.set('technical')
rev = Revision.objects.get(pk=rev.id)
revision(title='New Top-level parent for tree moves',
slug='new-top',
is_approved=True,
save=True)
doc = rev.document
doc._move_tree('new-top/page-move-tags')
moved_doc = Document.objects.get(pk=doc.id)
new_rev = moved_doc.current_revision
eq_(tags, new_rev.tags)
eq_(['technical'],
[str(tag) for tag in new_rev.review_tags.all()])
@attr('move')
def test_move_tree_breadcrumbs(self):
"""Moving a tree of documents under an existing doc updates breadcrumbs"""
grandpa = revision(title='Top-level parent for breadcrumb move',
slug='grandpa', is_approved=True, save=True)
grandpa_doc = grandpa.document
dad = revision(title='Mid-level parent for breadcrumb move',
slug='grandpa/dad', is_approved=True, save=True)
dad_doc = dad.document
dad_doc.parent_topic = grandpa_doc
dad_doc.save()
son = revision(title='Bottom-level child for breadcrumb move',
slug='grandpa/dad/son', is_approved=True, save=True)
son_doc = son.document
son_doc.parent_topic = dad_doc
son_doc.save()
grandma = revision(title='Top-level parent for breadcrumb move',
slug='grandma', is_approved=True, save=True)
grandma_doc = grandma.document
mom = revision(title='Mid-level parent for breadcrumb move',
slug='grandma/mom', is_approved=True, save=True)
mom_doc = mom.document
mom_doc.parent_topic = grandma_doc
mom_doc.save()
daughter = revision(title='Bottom-level child for breadcrumb move',
slug='grandma/mom/daughter',
is_approved=True,
save=True)
daughter_doc = daughter.document
daughter_doc.parent_topic = mom_doc
daughter_doc.save()
# move grandma under grandpa
grandma_doc._move_tree('grandpa/grandma')
# assert the parent_topics are correctly rooted at grandpa
# note we have to refetch these to see any DB changes.
grandma_moved = Document.objects.get(locale=grandma_doc.locale,
slug='grandpa/grandma')
ok_(grandma_moved.parent_topic == grandpa_doc)
mom_moved = Document.objects.get(locale=mom_doc.locale,
slug='grandpa/grandma/mom')
ok_(mom_moved.parent_topic == grandma_moved)
@attr('move')
def test_move_tree_no_new_parent(self):
"""Moving a tree to a slug that doesn't exist throws error."""
rev = revision(title='doc to move',
slug='doc1', is_approved=True, save=True)
doc = rev.document
        # Catch the move failure and assert in the else clause so the ok_()
        # failure is not swallowed by the exception handler.
        try:
            doc._move_tree('slug-that-doesnt-exist/doc1')
        except Exception:
            pass
        else:
            ok_(False, "Moving page under non-existing doc should error.")
@attr('move')
@attr('top')
def test_move_top_level_docs(self):
"""Moving a top document to a new slug location"""
page_to_move_title = 'Page Move Root'
page_to_move_slug = 'Page_Move_Root'
page_child_slug = 'Page_Move_Root/Page_Move_Child'
page_moved_slug = 'Page_Move_Root_Moved'
page_child_moved_slug = 'Page_Move_Root_Moved/Page_Move_Child'
page_to_move_doc = document(title=page_to_move_title,
slug=page_to_move_slug,
save=True)
rev = revision(document=page_to_move_doc,
title=page_to_move_title,
slug=page_to_move_slug,
save=True)
page_to_move_doc.current_revision = rev
page_to_move_doc.save()
page_child = revision(title='child', slug=page_child_slug,
is_approved=True, save=True)
page_child_doc = page_child.document
page_child_doc.parent_topic = page_to_move_doc
page_child_doc.save()
# move page to new slug
new_title = page_to_move_title + ' Moved'
page_to_move_doc._move_tree(page_moved_slug, user=None,
title=new_title)
page_to_move_doc = Document.objects.get(slug=page_to_move_slug)
page_moved_doc = Document.objects.get(slug=page_moved_slug)
page_child_doc = Document.objects.get(slug=page_child_slug)
page_child_moved_doc = Document.objects.get(slug=page_child_moved_slug)
ok_('REDIRECT' in page_to_move_doc.html)
ok_(page_moved_slug in page_to_move_doc.html)
ok_(new_title in page_to_move_doc.html)
ok_(page_moved_doc)
ok_('REDIRECT' in page_child_doc.html)
ok_(page_moved_slug in page_child_doc.html)
ok_(page_child_moved_doc)
# TODO: Fix this assertion?
# eq_('admin', page_moved_doc.current_revision.creator.username)
@attr('move')
def test_mid_move(self):
root_title = 'Root'
root_slug = 'Root'
child_title = 'Child'
child_slug = 'Root/Child'
moved_child_slug = 'DiffChild'
grandchild_title = 'Grandchild'
grandchild_slug = 'Root/Child/Grandchild'
moved_grandchild_slug = 'DiffChild/Grandchild'
root_doc = document(title=root_title,
slug=root_slug,
save=True)
rev = revision(document=root_doc,
title=root_title,
slug=root_slug,
save=True)
root_doc.current_revision = rev
root_doc.save()
child = revision(title=child_title, slug=child_slug,
is_approved=True, save=True)
child_doc = child.document
child_doc.parent_topic = root_doc
child_doc.save()
grandchild = revision(title=grandchild_title,
slug=grandchild_slug,
is_approved=True, save=True)
grandchild_doc = grandchild.document
grandchild_doc.parent_topic = child_doc
grandchild_doc.save()
child_doc._move_tree(moved_child_slug)
redirected_child = Document.objects.get(slug=child_slug)
Document.objects.get(slug=moved_child_slug)
ok_('REDIRECT' in redirected_child.html)
ok_(moved_child_slug in redirected_child.html)
redirected_grandchild = Document.objects.get(slug=grandchild_doc.slug)
Document.objects.get(slug=moved_grandchild_slug)
ok_('REDIRECT' in redirected_grandchild.html)
ok_(moved_grandchild_slug in redirected_grandchild.html)
@attr('move')
def test_move_special(self):
root_slug = 'User:foo'
child_slug = '%s/child' % root_slug
new_root_slug = 'User:foobar'
special_root = document(title='User:foo',
slug=root_slug,
save=True)
revision(document=special_root,
title=special_root.title,
slug=root_slug,
save=True)
special_child = document(title='User:foo child',
slug=child_slug,
save=True)
revision(document=special_child,
title=special_child.title,
slug=child_slug,
save=True)
special_child.parent_topic = special_root
special_child.save()
original_root_id = special_root.id
original_child_id = special_child.id
# First move, to new slug.
special_root._move_tree(new_root_slug)
# Appropriate redirects were left behind.
root_redirect = Document.objects.get(locale=special_root.locale,
slug=root_slug)
ok_(root_redirect.is_redirect)
root_redirect_id = root_redirect.id
child_redirect = Document.objects.get(locale=special_child.locale,
slug=child_slug)
ok_(child_redirect.is_redirect)
child_redirect_id = child_redirect.id
# Moved documents still have the same IDs.
moved_root = Document.objects.get(locale=special_root.locale,
slug=new_root_slug)
eq_(original_root_id, moved_root.id)
moved_child = Document.objects.get(locale=special_child.locale,
slug='%s/child' % new_root_slug)
eq_(original_child_id, moved_child.id)
# Second move, back to original slug.
moved_root._move_tree(root_slug)
# Once again we left redirects behind.
root_second_redirect = Document.objects.get(locale=special_root.locale,
slug=new_root_slug)
ok_(root_second_redirect.is_redirect)
child_second_redirect = Document.objects.get(locale=special_child.locale,
slug='%s/child' % new_root_slug)
ok_(child_second_redirect.is_redirect)
# The documents at the original URLs aren't redirects anymore.
rerooted_root = Document.objects.get(locale=special_root.locale,
slug=root_slug)
ok_(not rerooted_root.is_redirect)
rerooted_child = Document.objects.get(locale=special_child.locale,
slug=child_slug)
ok_(not rerooted_child.is_redirect)
# The redirects created in the first move no longer exist in the DB.
self.assertRaises(Document.DoesNotExist,
Document.objects.get,
id=root_redirect_id)
self.assertRaises(Document.DoesNotExist,
Document.objects.get,
id=child_redirect_id)
def test_fail_message(self):
"""
When page move fails in moving one of the children, it
generates an informative exception message explaining which
child document failed.
"""
top = revision(title='Test page-move error messaging',
slug='test-move-error-messaging',
is_approved=True,
save=True)
top_doc = top.document
child = revision(title='Child to test page-move error messaging',
slug='test-move-error-messaging/child',
is_approved=True,
save=True)
child_doc = child.document
child_doc.parent_topic = top_doc
child_doc.save()
grandchild = revision(title='Grandchild to test page-move error handling',
slug='test-move-error-messaging/child/grandchild',
is_approved=True,
save=True)
grandchild_doc = grandchild.document
grandchild_doc.parent_topic = child_doc
grandchild_doc.save()
revision(title='Conflict page for page-move error handling',
slug='test-move-error-messaging/moved/grandchild',
is_approved=True,
save=True)
# TODO: Someday when we're on Python 2.7, we can use
# assertRaisesRegexp. Until then, we have to manually catch
# and inspect the exception.
try:
child_doc._move_tree('test-move-error-messaging/moved')
except PageMoveError as e:
err_strings = [
'with id %s' % grandchild_doc.id,
'https://developer.mozilla.org/%s/docs/%s' % (grandchild_doc.locale,
grandchild_doc.slug),
"Exception type: <type 'exceptions.Exception'>",
'Exception message: Requested move would overwrite a non-redirect page.',
'in _move_tree',
'in _move_conflicts',
'raise Exception("Requested move would overwrite a non-redirect page.")',
]
for s in err_strings:
ok_(s in e.args[0])
class DocumentZoneTests(UserTestCase):
"""Tests for content zones in topic hierarchies"""
def test_find_roots(self):
"""Ensure sub pages can find the content zone root"""
root_rev = revision(title='ZoneRoot', slug='ZoneRoot',
content='This is the Zone Root',
is_approved=True, save=True)
root_doc = root_rev.document
middle_rev = revision(title='Zonemiddle', slug='Zonemiddle',
content='This is the Zone middle',
is_approved=True, save=True)
middle_doc = middle_rev.document
middle_doc.parent_topic = root_doc
middle_doc.save()
sub_rev = revision(title='SubPage', slug='SubPage',
content='This is a subpage',
is_approved=True, save=True)
sub_doc = sub_rev.document
sub_doc.parent_topic = middle_doc
sub_doc.save()
sub_sub_rev = revision(title='SubSubPage', slug='SubSubPage',
content='This is a subsubpage',
is_approved=True, save=True)
sub_sub_doc = sub_sub_rev.document
sub_sub_doc.parent_topic = sub_doc
sub_sub_doc.save()
other_rev = revision(title='otherPage', slug='otherPage',
content='This is an otherpage',
is_approved=True, save=True)
other_doc = other_rev.document
root_zone = DocumentZone(document=root_doc)
root_zone.save()
middle_zone = DocumentZone(document=middle_doc)
middle_zone.save()
eq_(self.get_zone_stack(root_doc)[0], root_zone)
eq_(self.get_zone_stack(middle_doc)[0], middle_zone)
eq_(self.get_zone_stack(sub_doc)[0], middle_zone)
eq_(0, len(self.get_zone_stack(other_doc)))
zone_stack = self.get_zone_stack(sub_sub_doc)
eq_(zone_stack[0], middle_zone)
eq_(zone_stack[1], root_zone)
def get_zone_stack(self, doc):
return DocumentZoneStackJob().get(doc.pk)
class DocumentContributorsTests(UserTestCase):
def test_get_contributors(self):
contrib_1 = user(save=True)
revision_1 = revision(creator=contrib_1, save=True)
self.assertIn(contrib_1, revision_1.document.get_contributors())
def test_get_contributors_inactive_or_banned(self):
contrib_2 = user(save=True)
contrib_3 = user(is_active=False, save=True)
contrib_4 = user(save=True)
contrib_4.bans.create(by=contrib_3, reason='because reasons')
revision_2 = revision(creator=contrib_2, save=True)
revision(creator=contrib_3, document=revision_2.document, save=True)
revision(creator=contrib_4, document=revision_2.document, save=True)
contributors = revision_2.document.get_contributors()
self.assertIn(contrib_2, contributors)
self.assertNotIn(contrib_3, contributors)
self.assertNotIn(contrib_4, contributors)
class DocumentParsingTests(UserTestCase):
"""Tests exercising content parsing methods"""
def test_get_section_content(self):
src = """
<h2>Foo</h2>
<p>Bar</p>
<h3 id="Quick_Links">Quick Links</h3>
<p>Foo, yay</p>
<h2>Baz</h2>
<p>Baz</p>
"""
expected = """
<p>Foo, yay</p>
"""
r = revision(title='Document with sections',
slug='document-with-sections',
content=src,
is_approved=True, save=True)
d = r.document
result = d.get_section_content('Quick_Links')
eq_(normalize_html(expected), normalize_html(result))
def test_cached_content_fields(self):
src = """
<h2>First</h2>
<p>This is a document</p>
<h3 id="Quick_Links">Quick Links</h3>
<p>Foo, yay</p>
<h3 id="Subnav">Subnav</h3>
<p>Bar, yay</p>
<h2>Second</h2>
<p>Another section</p>
<a href="/en-US/docs/document-with-sections">Existing link</a>
<a href="/en-US/docs/does-not-exist">New link</a>
"""
body = """
<h2 id="First">First</h2>
<p>This is a document</p>
<!-- -->
<!-- -->
<h2 id="Second">Second</h2>
<p>Another section</p>
<a href="/en-US/docs/document-with-sections">Existing link</a>
<a class="new" href="/en-US/docs/does-not-exist">New link</a>
"""
quick_links = """
<p>Foo, yay</p>
"""
subnav = """
<p>Bar, yay</p>
"""
r = revision(title='Document with sections',
slug='document-with-sections',
content=src,
is_approved=True, save=True)
d = r.document
eq_(normalize_html(body),
normalize_html(d.get_body_html()))
eq_(normalize_html(quick_links),
normalize_html(d.get_quick_links_html()))
eq_(normalize_html(subnav),
normalize_html(d.get_zone_subnav_local_html()))
def test_bug_982174(self):
"""Ensure build_json_data uses rendered HTML when available to extract
sections generated by KumaScript (bug 982174)"""
r = revision(title='Document with sections',
slug='document-with-sections',
is_approved=True, save=True)
d = r.document
# Save document with un-rendered content
d.html = """
<h2>Section 1</h2>
<p>Foo</p>
{{ h2_macro('Section 2') }}
<p>Bar</p>
<h2>Section 3</h2>
<p>Foo</p>
"""
d.save()
json_data = d.build_json_data()
expected_sections = [
{'id': 'Section_1', 'title': 'Section 1'},
{'id': 'Section_3', 'title': 'Section 3'}
]
eq_(expected_sections, json_data['sections'])
# Simulate kumascript rendering by updating rendered_html
d.rendered_html = """
<h2>Section 1</h2>
<p>Foo</p>
<h2>Section 2</h2>
<p>Bar</p>
<h2>Section 3</h2>
<p>Foo</p>
"""
d.save()
json_data = d.build_json_data()
expected_sections = [
{'id': 'Section_1', 'title': 'Section 1'},
{'id': 'Section_2', 'title': 'Section 2'},
{'id': 'Section_3', 'title': 'Section 3'}
]
eq_(expected_sections, json_data['sections'])
class RevisionIPTests(UserTestCase):
def test_delete_older_than_default_30_days(self):
old_date = date.today() - timedelta(days=31)
r = revision(created=old_date, save=True)
RevisionIP.objects.create(revision=r, ip='127.0.0.1').save()
eq_(1, RevisionIP.objects.all().count())
RevisionIP.objects.delete_old()
eq_(0, RevisionIP.objects.all().count())
def test_delete_older_than_days_argument(self):
rev_date = date.today() - timedelta(days=5)
r = revision(created=rev_date, save=True)
RevisionIP.objects.create(revision=r, ip='127.0.0.1').save()
eq_(1, RevisionIP.objects.all().count())
RevisionIP.objects.delete_old(days=4)
eq_(0, RevisionIP.objects.all().count())
def test_delete_older_than_only_deletes_older_than(self):
oldest_date = date.today() - timedelta(days=31)
r1 = revision(created=oldest_date, save=True)
RevisionIP.objects.create(revision=r1, ip='127.0.0.1').save()
old_date = date.today() - timedelta(days=29)
r1 = revision(created=old_date, save=True)
RevisionIP.objects.create(revision=r1, ip='127.0.0.1').save()
now_date = date.today()
r2 = revision(created=now_date, save=True)
RevisionIP.objects.create(revision=r2, ip='127.0.0.1').save()
eq_(3, RevisionIP.objects.all().count())
RevisionIP.objects.delete_old()
eq_(2, RevisionIP.objects.all().count())
|
davehunt/kuma
|
kuma/wiki/tests/test_models.py
|
Python
|
mpl-2.0
| 76,986
|
"""
Start flow task, instantiate new flow process
"""
import functools
from django.db import transaction
from django.core.urlresolvers import reverse
from django.conf.urls import url
from ..activation import Activation, StartViewActivation, STATUS
from ..exceptions import FlowRuntimeError
from . import base
def flow_start_view():
"""
    Decorator for start views; creates and initializes the start activation.
    Expects a view with the signature `(request, activation, **kwargs)`,
    or a CBV whose dispatch implements StartViewActivation, in which case the
    view is called with `(request, **kwargs)`.
    The decorated view itself is invoked as `(request, flow_cls, flow_task, **kwargs)`.
"""
class StartViewDecorator(object):
def __init__(self, func, activation=None):
self.func = func
self.activation = activation
functools.update_wrapper(self, func)
def __call__(self, request, flow_cls, flow_task, **kwargs):
if self.activation:
self.activation.initialize(flow_task, None)
with transaction.atomic():
return self.func(request, **kwargs)
else:
activation = flow_task.activation_cls()
activation.initialize(flow_task, None)
with transaction.atomic():
return self.func(request, activation, **kwargs)
def __get__(self, instance, instancetype):
"""
            If we decorate a method on a CBV that implements the StartViewActivation interface,
no custom activation is required.
"""
if instance is None:
return self
func = self.func.__get__(instance, type)
activation = instance if isinstance(instance, StartViewActivation) else None
return self.__class__(func, activation=activation)
return StartViewDecorator
class ManagedStartViewActivation(StartViewActivation):
"""
Tracks task statistics in activation form
"""
management_form_cls = None
def __init__(self, **kwargs):
super(ManagedStartViewActivation, self).__init__(**kwargs)
self.management_form = None
self.management_form_cls = kwargs.pop('management_form_cls', None)
def get_management_form_cls(self):
if self.management_form_cls:
return self.management_form_cls
else:
return self.flow_cls.management_form_cls
@Activation.status.super()
def prepare(self, data=None, user=None):
super(ManagedStartViewActivation, self).prepare.original()
self.task.owner = user
management_form_cls = self.get_management_form_cls()
self.management_form = management_form_cls(data=data, instance=self.task)
if data:
if not self.management_form.is_valid():
raise FlowRuntimeError('Activation metadata is broken {}'.format(self.management_form.errors))
self.task = self.management_form.save(commit=False)
def has_perm(self, user):
return self.flow_task.can_execute(user)
class BaseStart(base.TaskDescriptionMixin,
base.NextNodeMixin,
base.DetailsViewMixin,
base.UndoViewMixin,
base.CancelViewMixin,
base.Event,
base.ViewArgsMixin):
"""
Base class for Start Process Views
"""
task_type = 'START'
activation_cls = ManagedStartViewActivation
def __init__(self, view_or_cls=None, **kwargs):
"""
Accepts view callable or CBV View class with view kwargs,
if CBV view implements StartActivation, it used as activation_cls
"""
self._view, self._view_cls, self._view_args = None, None, None
if isinstance(view_or_cls, type):
self._view_cls = view_or_cls
if issubclass(view_or_cls, StartViewActivation):
kwargs.setdefault('activation_cls', view_or_cls)
else:
self._view = view_or_cls
super(BaseStart, self).__init__(view_or_cls=view_or_cls, **kwargs)
@property
def view(self):
if not self._view:
if not self._view_cls:
from viewflow.views import StartProcessView
return StartProcessView.as_view()
else:
self._view = self._view_cls.as_view(**self._view_args)
return self._view
return self._view
def urls(self):
urls = super(BaseStart, self).urls()
urls.append(
url(r'^{}/$'.format(self.name), self.view, {'flow_task': self}, name=self.name))
return urls
class Start(base.PermissionMixin,
base.ActivateNextMixin,
BaseStart):
"""
Start process event
Example::
start = flow.Start(StartView, fields=["some_process_field"]) \\
.Available(lambda user: user.is_super_user) \\
.Activate(this.first_start)
In case of function based view::
start = flow.Start(start_process)
@flow_start_view()
def start_process(request, activation):
if not activation.has_perm(request.user):
raise PermissionDenied
activation.prepare(request.POST or None)
form = SomeForm(request.POST or None)
if form.is_valid():
form.save()
activation.done()
return redirect('/')
return render(request, {'activation': activation, 'form': form})
    Be sure to include `{{ activation.management_form }}` inside the template, to properly
    track when the task was started and other task performance statistics::
<form method="POST">
{{ form }}
{{ activation.management_form }}
<button type="submit"/>
</form>
"""
def Available(self, owner=None, **owner_kwargs):
"""
        Make the process start action available to the user.
        Accepts user lookup kwargs or a callable predicate :: User -> bool::
.Available(username='employee')
.Available(lambda user: user.is_super_user)
"""
if owner:
self._owner = owner
else:
self._owner = owner_kwargs
return self
def get_task_url(self, task, url_type='guess', **kwargs):
if url_type in ['execute', 'guess']:
if 'user' in kwargs and self.can_execute(kwargs['user'], task):
url_name = '{}:{}'.format(self.flow_cls.instance.namespace, self.name)
return reverse(url_name)
return super(Start, self).get_task_url(task, url_type=url_type, **kwargs)
def can_execute(self, user, task=None):
if task and task.status != STATUS.NEW:
return False
from django.contrib.auth import get_user_model
if self._owner:
if callable(self._owner):
return self._owner(user)
else:
owner = get_user_model()._default_manager.get(**self._owner)
return owner == user
elif self._owner_permission:
if callable(self._owner_permission) and self._owner_permission(user):
return True
obj = None
if self._owner_permission_obj:
if callable(self._owner_permission_obj):
obj = self._owner_permission_obj()
else:
obj = self._owner_permission_obj
return user.has_perm(self._owner_permission, obj=obj)
else:
"""
No restriction
"""
return True
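# --- Usage sketch (not part of this module) --------------------------------
# A minimal, hypothetical flow class wiring a Start node into a process, based
# on the docstring examples above. The `Flow`, `End` and `this` names reflect
# common viewflow usage and are assumptions here; exact import paths can vary
# between viewflow versions.
#
#     from viewflow import flow
#     from viewflow.base import Flow, this
#
#     class ShipmentFlow(Flow):
#         start = (flow.Start(start_process)
#                  .Available(lambda user: user.is_staff)
#                  .Activate(this.end))
#         end = flow.End()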
|
codingjoe/viewflow
|
viewflow/flow/start_view.py
|
Python
|
agpl-3.0
| 7,680
|
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from openerp import models, fields, api, exceptions, _
class StockProductionLot(models.Model):
_inherit = 'stock.production.lot'
def _get_locked_value(self):
settings_obj = self.env['stock.config.settings']
config = settings_obj.search([], limit=1, order='id DESC')
return config.group_lot_default_locked
locked = fields.Boolean(string='Locked', default=_get_locked_value,
readonly=True)
@api.multi
def button_lock(self):
stock_quant_obj = self.env['stock.quant']
for lot in self:
cond = [('lot_id', '=', lot.id),
('reservation_id', '!=', False)]
for quant in stock_quant_obj.search(cond):
if quant.reservation_id.state not in ('cancel', 'done'):
raise exceptions.Warning(
                        _('Error!: Found stock movements for lot: "%s" with'
' location destination type in virtual/company')
% (lot.name))
return self.write({'locked': True})
@api.multi
def button_unlock(self):
return self.write({'locked': False})
|
StefanRijnhart/odoomrp-wip
|
mrp_lock_lot/models/mrp_production_lot.py
|
Python
|
agpl-3.0
| 1,431
|
#!/usr/bin/python3
import requests
import json
import sys
misp_key = "" # MISP API key
misp_url = "" # MISP URL
misp_cachefile = "/home/misp/misp-snmp/misp-snmp.cache" # Cache file to store statistics data
# Cache file needs to be writable by the user of your SNMP daemon user
# Add a crontab to update the cache with
# */30 * * * * misp /home/misp/misp-snmp/misp-monitor.py update
# Add to SNMP configuration
# extend misp-workers /home/misp/misp-snmp/misp-snmp-monitor.py workers
# extend misp-jobs /home/misp/misp-snmp/misp-snmp-monitor.py jobs
# extend misp-stats /home/misp/misp-snmp/misp-snmp-monitor.py stats
# extend misp-users /home/misp/misp-snmp/misp-snmp-monitor.py users
misp_fail_data = -1
misp_verifycert = False
misp_useragent = "MISP SNMP"
if not misp_verifycert:
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
headers = {'Authorization': '{misp_key}'.format(misp_key=misp_key), 'Accept': 'application/json', 'content-type': 'application/json', 'User-Agent': '{misp_useragent}'.format(misp_useragent=misp_useragent)}
def get_worker_status():
workers_ok = 0
workers_dead = 0
try:
res = requests.get("{misp_url}/servers/getWorkers".format(misp_url=misp_url), headers=headers, verify=misp_verifycert).json()
for el in res:
worker = res.get(el)
if type(worker) is dict:
if 'ok' in worker:
if worker.get('ok') is True:
workers_ok += len(worker.get('workers'))
else:
workers_dead += 1
except AttributeError:
workers_ok = misp_fail_data
workers_dead = misp_fail_data
print("{}\n{}".format(workers_ok, workers_dead))
def get_job_count():
res = requests.get("{misp_url}/servers/getWorkers".format(misp_url=misp_url), headers=headers, verify=misp_verifycert).json()
jobs = 0
try:
for el in res:
worker = res.get(el)
if type(worker) is dict:
if 'jobCount' in worker:
jobs = int(worker.get('jobCount'))
except AttributeError:
jobs = misp_fail_data
print("{}".format(jobs))
def update_cache():
res = requests.get("{misp_url}/users/statistics.json".format(misp_url=misp_url), headers=headers, verify=misp_verifycert).json()
events = 0
attributes = 0
users = 0
orgs = 0
try:
stats = res.get('stats')
events = stats.get('event_count_month')
attributes = stats.get('attribute_count_month')
users = stats.get('user_count')
orgs = stats.get('org_count')
except AttributeError:
events = misp_fail_data
attributes = misp_fail_data
users = misp_fail_data
orgs = misp_fail_data
cache = {}
cache['events'] = events
cache['attributes'] = attributes
cache['users'] = users
cache['orgs'] = orgs
with open(misp_cachefile, 'w') as outfile:
json.dump(cache, outfile)
def get_data_stats_cached():
with open(misp_cachefile) as json_file:
cache = json.load(json_file)
print("{}\n{}".format(cache['events'], cache['attributes']))
def get_data_users_cached():
with open(misp_cachefile) as json_file:
cache = json.load(json_file)
print("{}\n{}".format(cache['users'], cache['orgs']))
if sys.argv[1] == "jobs":
get_job_count()
elif sys.argv[1] == "workers":
get_worker_status()
elif sys.argv[1] == "stats":
get_data_stats_cached()
elif sys.argv[1] == "users":
get_data_users_cached()
elif sys.argv[1] == "update":
update_cache()
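# --- Reading the values over SNMP (illustrative, not part of this script) ---
# With the "extend" lines from the header comment in snmpd.conf and the
# crontab entry updating the cache, the output of each mode can be read via
# the standard NET-SNMP extend MIB; the community string and host below are
# placeholders:
#
#     snmpwalk -v2c -c public localhost 'NET-SNMP-EXTEND-MIB::nsExtendOutLine."misp-stats"'
#
# which should return one line per value printed above (events, then attributes).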
|
MISP/MISP
|
tools/misp-snmp/misp-snmp-monitor.py
|
Python
|
agpl-3.0
| 3,668
|
# coding=utf-8
from dependencies.dependency import getToolByName
from lims.jsonapi import get_include_fields
from lims import bikaMessageFactory as _
from lims.utils import t, dicts_to_dict
from lims.interfaces import IAnalysis, IResultOutOfRange, IJSONReadExtender
from lims.interfaces import IFieldIcons
from lims.utils import to_utf8
from lims.utils import dicts_to_dict
from dependencies.dependency import adapts, getAdapters
from dependencies.dependency import implements
class ResultOutOfRangeIcons(object):
"""An icon provider for Analyses: Result field out-of-range alerts
"""
def __init__(self, context):
self.context = context
def __call__(self, result=None, **kwargs):
translate = self.context.translate
path = '++resource++bika.lims.images'
alerts = {}
# We look for IResultOutOfRange adapters for this object
for name, adapter in getAdapters((self.context, ), IResultOutOfRange):
ret = adapter(result)
if not ret:
continue
spec = ret["spec_values"]
rngstr = "{0} {1}, {2} {3}".format(
t(_("min")), str(spec['min']),
t(_("max")), str(spec['max']))
if ret["out_of_range"]:
if ret["acceptable"]:
message = "{0} ({1})".format(
t(_('Result in shoulder range')),
rngstr
)
icon = path + '/warning.png'
else:
message = "{0} ({1})".format(
t(_('Result out of range')),
rngstr
)
icon = path + '/exclamation.png'
alerts[self.context.UID()] = [
{
'icon': icon,
'msg': message,
'field': 'Result',
},
]
break
return alerts
class ResultOutOfRange(object):
"""Check if results are within tolerated values
"""
def __init__(self, context):
self.context = context
def __call__(self, result=None, specification=None):
workflow = getToolByName(self.context, 'portal_workflow')
astate = workflow.getInfoFor(self.context, 'review_state')
if astate == 'retracted':
return None
result = result is not None and str(result) or self.context.getResult()
if result == '':
return None
# if analysis result is not a number, then we assume in range:
try:
result = float(str(result))
except ValueError:
return None
# The spec is found in the parent AR's ResultsRange field.
if not specification:
rr = dicts_to_dict(self.context.aq_parent.getResultsRange(), 'keyword')
specification = rr.get(self.context.getKeyword(), None)
# No specs available, assume in range:
if not specification:
return None
outofrange, acceptable = \
self.isOutOfRange(result,
specification.get('min', ''),
specification.get('max', ''),
specification.get('error', ''))
return {
'out_of_range': outofrange,
'acceptable': acceptable,
'spec_values': specification
}
def isOutOfShoulderRange(self, result, Min, Max, error):
# check if in 'shoulder' range - out of range, but in acceptable error
spec_min = None
spec_max = None
try:
result = float(result)
except:
            # non-numeric result: cannot be in the shoulder range
            return False
try:
spec_min = float(Min)
except:
spec_min = None
try:
error = float(error)
except:
error = 0
try:
spec_max = float(Max)
except:
spec_max = None
error_amount = (result / 100) * error
error_min = result - error_amount
error_max = result + error_amount
if (spec_min and result < spec_min and error_max >= spec_min) \
or (spec_max and result > spec_max and error_min <= spec_max):
return True
# Default: in range
return False
def isOutOfRange(self, result, Min, Max, error):
spec_min = None
spec_max = None
try:
result = float(result)
except:
return False, False
try:
spec_min = float(Min)
except:
spec_min = None
try:
error = float(error)
except:
error = 0
try:
spec_max = float(Max)
except:
spec_max = None
if (spec_min is None and spec_max is None):
if self.isOutOfShoulderRange(result, Min, Max, error):
return True, True
else:
return False, False # No Min and Max values defined
elif spec_min is not None and spec_max is not None and spec_min <= result <= spec_max:
if self.isOutOfShoulderRange(result, Min, Max, error):
return True, True
else:
return False, False # Min and Max values defined
elif spec_min is not None and spec_max is None and spec_min <= result:
if self.isOutOfShoulderRange(result, Min, Max, error):
return True, True
else:
return False, False # Max value not defined
elif spec_min is None and spec_max is not None and spec_max >= result:
if self.isOutOfShoulderRange(result, Min, Max, error):
return True, True
else:
return False, False # Min value not defined
if self.isOutOfShoulderRange(result, Min, Max, error):
return True, True
return True, False
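# Worked example for the shoulder-range logic above (illustrative numbers
# only): with Min='', Max=10 and error=10 (percent), a result of 10.5 lies
# outside the spec, but its error window 10.5 +/- 1.05 = [9.45, 11.55] still
# reaches Max=10, so isOutOfRange() returns (True, True): out of range yet
# acceptable, and ResultOutOfRangeIcons renders the "Result in shoulder
# range" warning instead of the out-of-range exclamation icon.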
class JSONReadExtender(object):
"""- Adds the specification from Analysis Request to Analysis in JSON response
"""
implements(IJSONReadExtender)
adapts(IAnalysis)
def __init__(self, context):
self.context = context
def analysis_specification(self):
ar = self.context.aq_parent
rr = dicts_to_dict(ar.getResultsRange(),'keyword')
return rr[self.context.getService().getKeyword()]
def __call__(self, request, data):
self.request = request
self.include_fields = get_include_fields(request)
if not self.include_fields or "specification" in self.include_fields:
data['specification'] = self.analysis_specification()
return data
|
yasir1brahim/OLiMS
|
lims/browser/analysis.py
|
Python
|
agpl-3.0
| 6,765
|
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from .detail import LoginAsUserView, UserDetailView
from .list import UserListView
from .password import UserChangePasswordView, UserResetPasswordView
from .permissions import UserChangePermissionsView
__all__ = [
"UserListView",
"UserDetailView",
"UserChangePasswordView",
"UserResetPasswordView",
"UserChangePermissionsView",
"LoginAsUserView"
]
|
shawnadelic/shuup
|
shuup/admin/modules/users/views/__init__.py
|
Python
|
agpl-3.0
| 593
|
import time
from datetime import timedelta
from unittest.mock import patch, Mock
from superdesk.utc import utcnow
from superdesk.tests import TestCase
from superdesk.storage import AmazonMediaStorage
from superdesk.storage.amazon_media_storage import _guess_extension
class AmazonMediaStorageTestCase(TestCase):
def setUp(self):
self.amazon = AmazonMediaStorage(self.app)
# Patch config with defaults
p = patch.dict(self.app.config, {
'AMAZON_SECRET_ACCESS_KEY': None,
'AMAZON_CONTAINER_NAME': 'acname',
'AMAZON_REGION': 'us-east-1',
'AMAZON_S3_SUBFOLDER': '',
'MEDIA_PREFIX': 'https://acname.s3-us-east-1.amazonaws.com'
})
p.start()
self.addCleanup(p.stop)
def test_media_url(self):
filename = 'test'
# automatic version is set on 15mins granularity.
mins_granularity = int(int(time.strftime('%M')) / 4) * 4
time_id = '%s%s' % (time.strftime('%Y%m%d%H%m'), mins_granularity)
media_id = self.amazon.media_id(filename)
self.assertEqual('%s/%s' % (time_id, filename), media_id)
self.assertEqual(
self.amazon.url_for_media(media_id),
'https://acname.s3-us-east-1.amazonaws.com/%s' % media_id
)
sub = 'test-sub'
settings = {
'AMAZON_S3_SUBFOLDER': sub,
'MEDIA_PREFIX': 'https://acname.s3-us-east-1.amazonaws.com/' + sub
}
with patch.dict(self.app.config, settings):
media_id = self.amazon.media_id(filename)
self.assertEqual('%s/%s' % (time_id, filename), media_id)
path = '%s/%s' % (sub, media_id)
self.assertEqual(
self.amazon.url_for_media(media_id),
'https://acname.s3-us-east-1.amazonaws.com/%s' % path
)
with patch.object(self.amazon, 'client') as s3:
self.amazon.get(media_id)
self.assertTrue(s3.get_object.called)
self.assertEqual(
s3.get_object.call_args[1],
dict(Bucket='acname', Key=path)
)
def test_put_and_delete(self):
"""Test amazon if configured.
If the environment variables have a Amazon secret key set then assume
that we can attempt to put and delete into s3
:return:
"""
if self.app.config['AMAZON_SECRET_ACCESS_KEY']:
id = self.amazon.put('test', content_type='text/plain')
self.assertIsNot(id, None)
self.assertTrue(self.amazon.exists(id))
fromS3 = self.amazon.get(id)
self.assertEqual(fromS3.read().decode('utf-8'), 'test')
self.amazon.delete(id)
self.assertFalse(self.amazon.exists(id))
else:
self.assertTrue(True)
def test_put_into_folder(self):
data = b'test data'
folder = 's3test'
filename = 'abc123.zip'
content_type = 'text/plain'
self.amazon.client.put_object = Mock()
self.amazon.media_id = Mock(return_value=filename)
self.amazon._check_exists = Mock(return_value=False)
self.amazon.put(data, filename, content_type, folder=folder)
kwargs = {
'Key': '{}/{}'.format(folder, filename),
'Body': data,
'Bucket': 'acname',
'ContentType': content_type,
}
self.amazon.client.put_object.assert_called_once_with(**kwargs)
def test_find_folder(self):
self.amazon.client = Mock()
# Mock getting list of files from Amazon, first request returns a file, second request returns empty list
self.amazon.client.list_objects = Mock(side_effect=[
{'Contents': [{
'Key': 'gridtest/abcd1234',
'LastModified': utcnow() - timedelta(minutes=30),
'Size': 500,
'ETag': 'abcd1234'
}]},
{'Contents': []}
])
folder = 'gridtest'
self.amazon.find(folder=folder)
call_arg_list = [({
'Bucket': 'acname',
'Marker': '',
'MaxKeys': 1000,
'Prefix': '{}/'.format(folder)
},), ({
'Bucket': 'acname',
'Marker': 'gridtest/abcd1234',
'MaxKeys': 1000,
'Prefix': '{}/'.format(folder)
},)]
# We test the call_args_list as self.amazon.client.list_objects would have been called twice
self.assertEqual(self.amazon.client.list_objects.call_count, 2)
self.assertEqual(self.amazon.client.list_objects.call_args_list, call_arg_list)
def test_guess_extension(self):
self.assertEqual('.jpg', _guess_extension('image/jpeg'))
self.assertEqual('.png', _guess_extension('image/png'))
self.assertEqual('.mp3', _guess_extension('audio/mp3'))
self.assertEqual('.mp3', _guess_extension('audio/mpeg'))
self.assertEqual('.flac', _guess_extension('audio/flac'))
self.assertEqual('.mp4', _guess_extension('video/mp4'))
# leave empty when there is no extension
self.assertEqual('', _guess_extension('audio/foo'))
|
mdhaman/superdesk-core
|
tests/storage/amazon_media_storage_test.py
|
Python
|
agpl-3.0
| 5,227
|
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2019 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from rest_framework import status
from rest_framework.reverse import reverse
from rest_framework.test import APITestCase
from base.tests.factories.user import UserFactory
from webservices.api.views.user import CurrentUser
from rest_framework import authentication
class CurrentUserTestCase(APITestCase):
@classmethod
def setUpTestData(cls):
cls.user = UserFactory()
cls.url = reverse(CurrentUser.name)
def setUp(self):
self.client.force_authenticate(user=self.user)
def test_auth_token_without_credentials(self):
self.client.force_authenticate(user=None)
response = self.client.get(self.url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_auth_token_method_not_allowed(self):
methods_not_allowed = ['post', 'delete', 'put', 'patch']
for method in methods_not_allowed:
response = getattr(self.client, method)(self.url)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_ensure_authentication_classes_have_session(self):
self.assertIn(authentication.SessionAuthentication, CurrentUser.authentication_classes)
|
uclouvain/OSIS-Louvain
|
webservices/tests/api/views/test_user.py
|
Python
|
agpl-3.0
| 2,479
|
# nif.py - functions for handling Portuguese VAT numbers
# coding: utf-8
#
# Copyright (C) 2012, 2013 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""NIF (Número de identificação fiscal, Portuguese VAT number).
The NIF (Número de identificação fiscal, NIPC, Número de Identificação de
Pessoa Colectiva) is used for VAT purposes. It is a 9-digit number with a
simple checksum.
>>> validate('PT 501 964 843')
'501964843'
>>> validate('PT 501 964 842') # invalid check digits
Traceback (most recent call last):
...
InvalidChecksum: ...
"""
from stdnum.exceptions import *
from stdnum.util import clean
def compact(number):
"""Convert the number to the minimal representation. This strips the
number of any valid separators and removes surrounding whitespace."""
number = clean(number, ' -.').upper().strip()
if number.startswith('PT'):
number = number[2:]
return number
def calc_check_digit(number):
"""Calculate the check digit. The number passed should not have the
check digit included."""
s = sum((9 - i) * int(n) for i, n in enumerate(number))
return str((11 - s) % 11 % 10)
def validate(number):
"""Checks to see if the number provided is a valid VAT number. This
checks the length, formatting and check digit."""
number = compact(number)
if not number.isdigit() or number[0] == '0':
raise InvalidFormat()
if len(number) != 9:
raise InvalidLength()
if calc_check_digit(number[:-1]) != number[-1]:
raise InvalidChecksum()
return number
def is_valid(number):
"""Checks to see if the number provided is a valid VAT number. This
checks the length, formatting and check digit."""
try:
return bool(validate(number))
except ValidationError:
return False
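# Worked example (digits taken from the doctest above, no new data):
# compact('PT 501 964 843') -> '501964843'; calc_check_digit('50196484')
# computes s = 9*5 + 8*0 + 7*1 + 6*9 + 5*6 + 4*4 + 3*8 + 2*4 = 184 and
# (11 - 184) % 11 % 10 = 3, which matches the final digit, so validate()
# returns the compacted number and is_valid() returns True.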
|
holvi/python-stdnum
|
stdnum/pt/nif.py
|
Python
|
lgpl-2.1
| 2,504
|
# -*- coding: utf-8 -*-
#
# Licensed under the terms of the Qwt License
# Copyright (c) 2002 Uwe Rathmann, for the original C++ code
# Copyright (c) 2015 Pierre Raybaut, for the Python translation/optimization
# (see LICENSE file for more details)
from qwt.interval import QwtInterval
from qwt.painter import QwtPainter
from qwt.qt.QtGui import QPolygonF, QPalette
from qwt.qt.QtCore import QRectF, Qt
def qwtDrawBox(p, rect, pal, lw):
if lw > 0.:
if rect.width() == 0.:
p.setPen(pal.dark().color())
p.drawLine(rect.topLeft(), rect.bottomLeft())
return
if rect.height() == 0.:
p.setPen(pal.dark().color())
p.drawLine(rect.topLeft(), rect.topRight())
return
lw = min([lw, rect.height()/2.-1.])
lw = min([lw, rect.width()/2.-1.])
outerRect = rect.adjusted(0, 0, 1, 1)
polygon = QPolygonF(outerRect)
if outerRect.width() > 2*lw and outerRect.height() > 2*lw:
innerRect = outerRect.adjusted(lw, lw, -lw, -lw)
polygon = polygon.subtracted(innerRect)
p.setPen(Qt.NoPen)
p.setBrush(pal.dark())
p.drawPolygon(polygon)
windowRect = rect.adjusted(lw, lw, -lw+1, -lw+1)
if windowRect.isValid():
p.fillRect(windowRect, pal.window())
def qwtDrawPanel(painter, rect, pal, lw):
if lw > 0.:
if rect.width() == 0.:
painter.setPen(pal.window().color())
painter.drawLine(rect.topLeft(), rect.bottomLeft())
return
if rect.height() == 0.:
painter.setPen(pal.window().color())
painter.drawLine(rect.topLeft(), rect.topRight())
return
lw = min([lw, rect.height()/2.-1.])
lw = min([lw, rect.width()/2.-1.])
outerRect = rect.adjusted(0, 0, 1, 1)
innerRect = outerRect.adjusted(lw, lw, -lw, -lw)
lines = [QPolygonF(), QPolygonF()]
lines[0] += outerRect.bottomLeft()
lines[0] += outerRect.topLeft()
lines[0] += outerRect.topRight()
lines[0] += innerRect.topRight()
lines[0] += innerRect.topLeft()
lines[0] += innerRect.bottomLeft()
lines[1] += outerRect.topRight()
lines[1] += outerRect.bottomRight()
lines[1] += outerRect.bottomLeft()
lines[1] += innerRect.bottomLeft()
lines[1] += innerRect.bottomRight()
lines[1] += innerRect.topRight()
painter.setPen(Qt.NoPen)
painter.setBrush(pal.light())
painter.drawPolygon(lines[0])
painter.setBrush(pal.dark())
painter.drawPolygon(lines[1])
painter.fillRect(rect.adjusted(lw, lw, -lw+1, -lw+1), pal.window())
class QwtColumnSymbol_PrivateData(object):
def __init__(self):
self.style = QwtColumnSymbol.Box
self.frameStyle = QwtColumnSymbol.Raised
self.lineWidth = 2
self.palette = QPalette(Qt.gray)
class QwtColumnSymbol(object):
# enum Style
NoStyle = -1
Box = 0
UserStyle = 1000
# enum FrameStyle
NoFrame, Plain, Raised = list(range(3))
def __init__(self, style):
self.__data = QwtColumnSymbol_PrivateData()
self.__data.style = style
def setStyle(self, style):
self.__data.style = style
def style(self):
return self.__data.style
def setPalette(self, palette):
self.__data.palette = palette
def palette(self):
return self.__data.palette
def setFrameStyle(self, frameStyle):
self.__data.frameStyle = frameStyle
def frameStyle(self):
return self.__data.frameStyle
def setLineWidth(self, width):
self.__data.lineWidth = width
def lineWidth(self):
return self.__data.lineWidth
def draw(self, painter, rect):
painter.save()
if self.__data.style == QwtColumnSymbol.Box:
self.drawBox(painter, rect)
painter.restore()
def drawBox(self, painter, rect):
r = rect.toRect()
if QwtPainter().roundingAlignment(painter):
r.setLeft(round(r.left()))
r.setRight(round(r.right()))
r.setTop(round(r.top()))
r.setBottom(round(r.bottom()))
if self.__data.frameStyle == QwtColumnSymbol.Raised:
qwtDrawPanel(painter, r, self.__data.palette, self.__data.lineWidth)
elif self.__data.frameStyle == QwtColumnSymbol.Plain:
qwtDrawBox(painter, r, self.__data.palette, self.__data.lineWidth)
else:
painter.fillRect(r, self.__data.palette.window())
class QwtColumnRect(object):
# enum Direction
LeftToRight, RightToLeft, BottomToTop, TopToBottom = list(range(4))
def __init__(self):
self.hInterval = QwtInterval()
self.vInterval = QwtInterval()
self.direction = 0
def toRect(self):
r = QRectF(self.hInterval.minValue(), self.vInterval.minValue(),
self.hInterval.maxValue()-self.hInterval.minValue(),
self.vInterval.maxValue()-self.vInterval.minValue())
r = r.normalized()
if self.hInterval.borderFlags() & QwtInterval.ExcludeMinimum:
r.adjust(1, 0, 0, 0)
if self.hInterval.borderFlags() & QwtInterval.ExcludeMaximum:
r.adjust(0, 0, -1, 0)
if self.vInterval.borderFlags() & QwtInterval.ExcludeMinimum:
r.adjust(0, 1, 0, 0)
if self.vInterval.borderFlags() & QwtInterval.ExcludeMaximum:
r.adjust(0, 0, 0, -1)
return r
def orientation(self):
if self.direction in (self.LeftToRight, self.RightToLeft):
return Qt.Horizontal
return Qt.Vertical
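# Usage sketch (hypothetical; assumes an active QPainter `painter` on some
# paint device, which is not shown here):
#     symbol = QwtColumnSymbol(QwtColumnSymbol.Box)
#     symbol.setFrameStyle(QwtColumnSymbol.Raised)
#     symbol.setLineWidth(2)
#     symbol.setPalette(QPalette(Qt.darkBlue))
#     symbol.draw(painter, QRectF(10., 10., 20., 100.))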
|
mindw/python-qwt
|
qwt/column_symbol.py
|
Python
|
lgpl-2.1
| 5,918
|
# -*- encoding: utf-8 -*-
import sys
import unittest
from PyQt4 import QtGui
import pilasengine
class TestHabilidades(unittest.TestCase):
app = QtGui.QApplication(sys.argv)
def setUp(self):
self.pilas = pilasengine.iniciar()
def testPuedeCrearHabilidad(self):
habilidad = self.pilas.habilidades.Habilidad(self.pilas)
self.assertTrue(habilidad, 'Puede crear habilidad')
def testNoPuedeRepetirHabilidad(self):
actor = self.pilas.actores.Aceituna()
actor.aprender(self.pilas.habilidades.Habilidad)
actor.aprender(self.pilas.habilidades.Habilidad)
self.assertEquals(len(actor._habilidades), 1,
'No puede Repetir la habilidad')
def testPuedeIniciarHabilidad(self):
actor = self.pilas.actores.Aceituna()
actor.aprender(self.pilas.habilidades.Habilidad)
self.assertTrue(actor.habilidades.Habilidad.iniciar,
'Puede iniciar habilidad')
def testPuedeActualizarHabilidad(self):
actor = self.pilas.actores.Aceituna()
actor.aprender(self.pilas.habilidades.Habilidad)
self.assertTrue(actor.habilidades.Habilidad.actualizar,
'Puede actualizar habilidad')
def testPuedeEliminarHabilidad(self):
actor = self.pilas.actores.Aceituna()
actor.aprender(self.pilas.habilidades.Habilidad)
actor.habilidades.Habilidad.eliminar()
self.assertEquals(actor._habilidades, list(), 'Puede eliminar habilidad')
def testPuedeCrearHabilidadPersonalizada(self):
class MiHabilidad(pilasengine.habilidades.Habilidad):
def actualizar(self):
pass
actor = self.pilas.actores.Aceituna()
actor.aprender(MiHabilidad)
self.assertEquals(1, len(actor._habilidades),
'Pude aprender habilidad personalizada')
def testFallaConHabilidadInvalida(self):
class MiHabilidadInvalida():
def actualizar(self):
pass
actor = self.pilas.actores.Aceituna()
with self.assertRaises(Exception):
actor.aprender(MiHabilidadInvalida)
def testPuedeAprenderHabilidadesUsandoStrings(self):
actor = self.pilas.actores.Aceituna()
actor.aprender('arrastrable')
self.assertTrue(actor.habilidades.Arrastrable.actualizar, 'Puede acceder a la nueva habilidad')
def testPuedenAprenderADisparar(self):
actor = self.pilas.actores.Aceituna(0, 0)
actor.aprender(self.pilas.habilidades.Disparar,
#municion=self.municion,
angulo_salida_disparo=90,
frecuencia_de_disparo=6,
distancia=5,
escala=1)
self.assertTrue(actor.disparar, "Tiene el método disparar")
def testPuedeAprenderHabilidadPersonalizadaUsandoStrings(self):
class MiHabilidad(pilasengine.habilidades.Habilidad):
def actualizar(self):
pass
actor = self.pilas.actores.Aceituna()
self.pilas.habilidades.vincular(MiHabilidad)
actor.aprender('mihabilidad')
self.assertEquals(1, len(actor._habilidades), 'Pude aprender habilidad personalizada desde string')
def testPuedeReportarErroresAlAprenderHabilidadesIncorrectamente(self):
actor = self.pilas.actores.Aceituna()
with self.assertRaises(NameError):
actor.aprender('arrastrablen12')
with self.assertRaises(NameError):
actor.aprender('')
def testPuedeReportarErrorerAlAprenderADispararIncorrectamente(self):
actor = self.pilas.actores.Aceituna()
with self.assertRaises(NameError):
actor.aprender('disparar', municion="unActorQueNoExiste")
with self.assertRaises(TypeError):
actor.aprender('disparar', municion=self)
with self.assertRaises(TypeError):
actor.aprender('disparar', municion=12313)
def testPuedeAprenderADispararUnActor(self):
actor = self.pilas.actores.Aceituna()
actor.aprender('disparar', municion="Aceituna")
actor.aprender('disparar', municion="Mono")
actor.aprender('disparar', municion="Caja")
if __name__ == '__main__':
unittest.main()
|
fsalamero/pilas
|
pilasengine/tests/test_habilidades.py
|
Python
|
lgpl-3.0
| 4,320
|
import os, logging
__author__ = "Moritz Wade"
__contact__ = "wade@zib.de"
__copyright__ = "Zuse Institute Berlin 2010"
def removeFilesOfTypeFromDir(dir, type):
"""
Remove all files of the given type in the given directory.
File type is determined by its suffix.
"""
for file in os.listdir(dir):
if file.endswith("." + type):
name = os.path.join(dir, file)
try: os.remove(name)
except:
logging.error("File %s could not be removed." & name)
def removeFileFromDir(dir, name):
"""
Delete the given file in the given directory.
"""
for file in os.listdir(dir):
if file.endswith(name):
name = os.path.join(dir, file)
try: os.remove(name)
except:
logging.error("File %s could not be removed." & name)
def renameFile(dir, oldName, newName):
"""
Helper method to rename a file.
"""
oldPath = os.path.join(dir, oldName)
newPath = os.path.join(dir, newName)
os.rename(oldPath, newPath)
def getHomeDir():
"""
Returns the home directory. OS independent (hopefully ;)).
"""
home = os.getenv('USERPROFILE') or os.getenv('HOME')
return home
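# Usage sketch (hypothetical paths, purely illustrative):
#     removeFilesOfTypeFromDir("/tmp/myapp", "log")  # drop all *.log files
#     renameFile("/tmp/myapp", "report_old.txt", "report.txt")
#     print(getHomeDir())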
|
gabrielmueller/aljebra-topo
|
src/basics/helpers/filehelpers.py
|
Python
|
lgpl-3.0
| 1,189
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Rakuten NLP Project
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Distributed representation models with category vectors ("CV-DM" and "CV-DBoW" models), using either
hierarchical softmax or negative sampling [1]_ [2]_ [3]_ [4]_.
The algorithms for training word vectors were originally ported from C package https://code.google.com/p/word2vec/
and extended with additional functionality and optimization implemented in Cython [5]_.
.. [1] Junki Marui, and Masato Hagiwara. Category2Vec: 単語・段落・カテゴリに対するベクトル分散表現. 言語処理学会第21回年次大会(NLP2015).
.. [2] Quoc Le, and Tomas Mikolov. Distributed Representations of Sentence and Documents. In Proceedings of ICML 2014.
.. [3] Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. Efficient Estimation of Word Representations
in Vector Space. In Proceedings of Workshop at ICLR, 2013.
.. [4] Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg Corrado, and Jeffrey Dean. Distributed Representations
of Words and Phrases and their Compositionality. In Proceedings of NIPS, 2013.
.. [5] Radim Rehurek, Optimizing word2vec in gensim, http://radimrehurek.com/2013/09/word2vec-in-python-part-two-optimizing/
"""
import logging
import sys
import os
import heapq
import time
from copy import deepcopy
import threading
try:
from queue import Queue
except ImportError:
from Queue import Queue
try:
from cat2vec_bind import train_from_job, train_cat_vec, catvec_sim, sentvec_sim, catsentvec_sim_concat, catsentvec_sim_sum, init_pairtable, FAST_VERSION, IS_DOUBLE, ADAM_BETA1, ADAM_BETA2
except ImportError:
# failed... fall back to plain numpy (20-80x slower training than the above)
from cat2vec_pp import train_from_job, train_cat_vec, catvec_sim, sentvec_sim, catsentvec_sim_concat, catsentvec_sim_sum, init_pairtable, FAST_VERSION, IS_DOUBLE, ADAM_BETA1, ADAM_BETA2
if IS_DOUBLE:
from numpy import float64 as REAL
else:
from numpy import float32 as REAL
from numpy import exp, dot, zeros, outer, random, dtype, get_include, amax,\
uint32, seterr, array, uint8, vstack, argsort, fromstring, sqrt, newaxis, ndarray, empty, sum as np_sum
from numpy.linalg import norm as np_norm
logger = logging.getLogger("cat2vec")
import utils, matutils # utility fnc for pickling, common scipy operations etc
from six import iteritems, itervalues, string_types
from six.moves import xrange
from word2vec import Word2Vec, Vocab
from multiprocessing import cpu_count
from argparse import ArgumentParser
CAT2VEC_VERSION = "0.01"
class Category2Vec(utils.SaveLoad):
def __init__(self, sentences, model_file=None, size=200, alpha=0.025, window=5, min_count = 5,
sample=0, seed=1, workers=16, min_alpha=0.0001, model="cb", hs=1, negative=0, cbow_mean=0,
iteration=1, word_learn=1, init_adjust=True, update_mode = 0, normalize_each_epoch = False):
self.sg = 1 if model == "sg" or model == "dbow" else 0
self.table = None # for negative sampling --> this needs a lot of RAM! consider setting back to None before saving
self.alpha = float(alpha)
self.window = int(window)
self.seed = seed
self.sample = sample
self.workers = workers
self.min_alpha = min_alpha
self.hs = hs
self.negative = negative
self.cbow_mean = int(cbow_mean)
self.iteration = iteration
self.word_learn = int(word_learn)
self.cat_learn = 1
self.layer1_size = size
self.min_count = min_count
self.sent_no_hash = {} # mapping sent_id to index of self.sents
self.sent_id_list = [] # mapping sent_no to sent_id
self.cat_no_hash = {} # mapping cat_id to index of self.cats
self.cat_id_list = [] # mapping cat_no to cat_id
self.sane_vec_len = 100000 # for sanity check
self.sane_max_sim10 = 0.9 # for sanity check
self.init_adjust = init_adjust # for adjustment of initialization
self.update_mode = update_mode # 0:SGD, 1: AdaGrad, 2:AdaDelta, 3:ADAM
self.normalize_each_epoch = normalize_each_epoch # normalize vectors after each epoch
if sentences:
if model_file:
self.w2v = Word2Vec.load(model_file)
self.vocab = self.w2v.vocab
self.layer1_size = self.w2v.layer1_size
self.build_vec(sentences, has_vocab = True)
else:
self.word_learn = 1
self.w2v = Word2Vec(None, self.layer1_size, self.alpha, self.window, self.min_count, self.sample, self.seed, self.workers, self.min_alpha, self.sg, self.hs, self.negative, self.cbow_mean)
self.build_vec(sentences, has_vocab = False)
self.train_iteration(sentences, iteration=iteration)
def build_vec(self, sentences, has_vocab = False):
"""Reset all projection weights to an initial (untrained) state, but keep the existing vocabulary."""
if not has_vocab :
logger.info("build vocabulary and")
logger.info("resetting vectors")
random.seed(self.seed)
sentence_no, vocab = -1, {}
total_words = 0
self.sents_len = 0 #the num of sentence ids
self.total_sents = 0 #the num of sentences
self.cat_len = 0 #the num of category ids
sent_cat_hash = {} #hash table for sent_no and cat_no
for sentence_no, sent_tuple in enumerate(sentences):
if sentence_no % 10000 == 0:
logger.info("PROGRESS: at sentence #%i, processed %i words and %i word types" %
(sentence_no, total_words, len(vocab)))
sentence = sent_tuple[0]
for word in sentence:
total_words += 1
if word in vocab:
vocab[word].count += 1
else:
vocab[word] = Vocab(count=1)
sent_id = sent_tuple[1]
cat_id = sent_tuple[2]
self.total_sents += 1
if not self.cat_no_hash.has_key(cat_id):
self.cat_no_hash[cat_id] = self.cat_len
self.cat_id_list.append(cat_id)
self.cat_len += 1
if not self.sent_no_hash.has_key(sent_id):
self.sent_no_hash[sent_id] = self.sents_len
self.sent_id_list.append(sent_id)
self.sents_len += 1
sent_cat = str(self.sent_no_hash[sent_id])+" "+str(self.cat_no_hash[cat_id])
sent_cat_hash.setdefault(sent_cat,0)
sent_cat_hash[sent_cat] += 1
logger.info("collected %i word types from a corpus of %i words and %i sentences(ident:%i) with %i categories" %
(len(vocab), total_words, self.total_sents, self.sents_len, self.cat_len))
self.build_vocab(vocab)
self.sents = matutils.zeros_aligned((self.sents_len, self.layer1_size), dtype=REAL)
self.cats = matutils.zeros_aligned((self.cat_len, self.layer1_size), dtype=REAL)
# randomize weights vector by vector, rather than materializing a huge random matrix in RAM at once
self.reset_weights()
# make sent_cat_pair
self.sent_cat_pair = empty((len(sent_cat_hash),2), dtype=uint32)
self.pair_len = len(sent_cat_hash)
idx = 0
for sent_cat in sent_cat_hash.keys():
tpl = sent_cat.split(" ")
self.sent_cat_pair[idx][0] = uint32(tpl[0])
self.sent_cat_pair[idx][1] = uint32(tpl[1])
idx += 1
#sort by cat_no, sent_no in place
self.sent_cat_pair.view('u4,u4').sort(order=['f1','f0'], axis=0)
def build_vocab(self, vocab):
# assign a unique index to each word
self.w2v.vocab, self.w2v.index2word = {}, []
for word, v in iteritems(vocab):
if v.count >= self.w2v.min_count:
v.index = len(self.w2v.vocab)
self.w2v.index2word.append(word)
self.w2v.vocab[word] = v
logger.info("total %i word types after removing those with count<%s" % (len(self.w2v.vocab), self.w2v.min_count))
if self.hs:
# add info about each word's Huffman encoding
self.w2v.create_binary_tree()
if self.negative:
# build the table for drawing random words (for negative sampling)
self.w2v.make_table()
# precalculate downsampling thresholds
self.w2v.precalc_sampling()
self.w2v.reset_weights()
self.vocab = self.w2v.vocab
# initialization adjustment
if self.init_adjust:
self.w2v.syn0 *= sqrt(self.layer1_size)
if self.hs: self.w2v.syn1 *= sqrt(self.layer1_size)
if self.negative: self.w2v.syn1neg *= sqrt(self.layer1_size)
def reset_weights(self):
if self.init_adjust:
denom = sqrt(self.layer1_size)
else:
denom = self.layer1_size
for idx in xrange(self.sents_len):
self.sents[idx] = (random.rand(self.layer1_size) - 0.5) / denom
for idx in xrange(self.cat_len):
self.cats[idx] = (random.rand(self.layer1_size) - 0.5) / denom
# gradients for vectors
self.syn0_grad = self.init_grad_weight(len(self.w2v.vocab))
self.syn1_grad = self.init_grad_weight(len(self.w2v.vocab)) if self.hs > 0 else zeros(0, dtype=REAL)
self.syn1neg_grad = self.init_grad_weight(len(self.w2v.vocab)) if self.negative > 0 else zeros(0, dtype=REAL)
self.sents_grad = self.init_grad_weight(self.sents_len)
self.cats_grad = self.init_grad_weight(self.cat_len)
self.pairnorm = None
def init_grad_weight(self, length):
grad_size = 0
if self.update_mode == 1:
grad_size = self.layer1_size
elif self.update_mode == 2:
grad_size = 2 * self.layer1_size
elif self.update_mode == 3:
grad_size = 2 * self.layer1_size + 3
grad = matutils.zeros_aligned((length, grad_size), dtype=REAL)
if self.update_mode == 3:
grad[:,grad_size - 3] = ADAM_BETA1
grad[:,grad_size - 2] = ADAM_BETA1
grad[:,grad_size - 1] = ADAM_BETA2
return grad
def train_iteration(self, sentences, iteration=None):
if not iteration:
iteration = self.iteration
i = 0
while i < iteration:
logger.info("-------------iteration:%i-------------" % (i+1))
self.train(sentences)
(flag, warn_str) = self.sanity_check()
if self.normalize_each_epoch:
logger.info("normalize vectors")
self.normalize_vectors()
if not flag :
logger.info("Warning: %s" % warn_str)
i += 1
def train(self, sentences, total_words=None, word_count=0, sent_count=0, chunksize=100):
"""
Update the model's neural weights from a sequence of sentences (can be a once-only generator stream).
Each sentence must be a list of unicode strings.
"""
logger.info("training model with %i workers on %i sentences and %i features, "
"using 'skipgram'=%s 'hierarchical softmax'=%s 'subsample'=%s and 'negative sampling'=%s" %
(self.workers, self.sents_len, self.layer1_size, self.sg, self.hs, self.sample, self.negative))
if not self.vocab:
raise RuntimeError("you must first build vocabulary before training the model")
start, next_report = time.time(), [1.0]
word_count = [word_count]
sent_count = [sent_count]
total_words = total_words or sum(v.count * v.sample_probability for v in itervalues(self.vocab))
total_sents = self.total_sents #it's now different from self.sents_len
jobs = Queue(maxsize=2 * self.workers) # buffer ahead only a limited number of jobs.. this is the reason we can't simply use ThreadPool :(
lock = threading.Lock() # for shared state (=number of words trained so far, log reports...)
def worker_train():
"""Train the model, lifting lists of sentences from the jobs queue."""
work = matutils.zeros_aligned(self.layer1_size + 8, dtype=REAL) # each thread must have its own work memory
neu1 = matutils.zeros_aligned(self.layer1_size + 8, dtype=REAL)
while True:
job = jobs.get()
if job is None: # data finished, exit
break
# update the learning rate before every job
if self.update_mode == 0:
alpha = max(self.min_alpha, self.alpha * (1 - 1.0 * word_count[0] / total_words))
else:
alpha = self.alpha
job_words = train_from_job(self, job, alpha, work, neu1)
with lock:
word_count[0] += job_words
sent_count[0] += chunksize
elapsed = time.time() - start
if elapsed >= next_report[0]:
logger.info("PROGRESS: at %.2f%% sents, alpha %.05f, %.0f words/s" %
(100.0 * sent_count[0] / total_sents, alpha, word_count[0] / elapsed if elapsed else 0.0))
next_report[0] = elapsed + 1.0 # don't flood the log, wait at least a second between progress reports
workers = [threading.Thread(target=worker_train) for _ in xrange(self.workers)]
for thread in workers:
thread.daemon = True # make interrupting the process with ctrl+c easier
thread.start()
def prepare_sentences():
for sent_tuple in sentences:
sentence = sent_tuple[0]
sent_id = sent_tuple[1]
cat_id = sent_tuple[2]
sent_no = self.sent_no_hash[sent_id]
cat_no = self.cat_no_hash[cat_id]
sampled = [self.vocab.get(word, None) for word in sentence
if word in self.vocab and (self.vocab[word].sample_probability >= 1.0 or self.vocab[word].sample_probability >= random.random_sample())]
yield (cat_no, sent_no, sampled)
# convert input strings to Vocab objects (eliding OOV/downsampled words), and start filling the jobs queue
for job_no, job in enumerate(utils.grouper(prepare_sentences(), chunksize)):
logger.debug("putting job #%i in the queue, qsize=%i" % (job_no, jobs.qsize()))
jobs.put(job)
logger.info("reached the end of input; waiting to finish %i outstanding jobs" % jobs.qsize())
for _ in xrange(self.workers):
jobs.put(None) # give the workers heads up that they can finish -- no more work!
for thread in workers:
thread.join()
elapsed = time.time() - start
logger.info("training on %i words took %.1fs, %.0f words/s" %
(word_count[0], elapsed, word_count[0] / elapsed if elapsed else 0.0))
return word_count[0]
def sanity_check(self):
veclens = empty(self.cat_len, dtype=REAL)
for i in xrange(self.cat_len):
veclens[i] = np_norm(self.cats[i])
max_len = amax(veclens)
logger.info("max vector length: %f" % max_len)
if max_len > self.sane_vec_len:
return False, "insane max vector length > %f" % (self.sane_vec_len)
if self.sg:
return True, None
rand_indices = random.randint(len(self.w2v.vocab),size=10)
sim_top10_avg = 0
for idx in rand_indices:
w = self.w2v.index2word[idx]
sim_words = self.w2v.most_similar(positive=[w],topn=10)
sim_top10_avg += sim_words[9][1]
sim_top10_avg /= len(rand_indices)
logger.info("average similarity: %f"% sim_top10_avg)
if sim_top10_avg > self.sane_max_sim10:
return False, "insane average similarity > %f" % (self.sane_max_sim10)
return True, None
def normalize_vectors(self):
for i in xrange(self.w2v.syn0.shape[0]):
self.w2v.syn0[i, :] /= sqrt((self.w2v.syn0[i, :] ** 2).sum(-1))
if self.hs:
for i in xrange(self.w2v.syn1.shape[0]):
self.w2v.syn1[i, :] /= sqrt((self.w2v.syn1[i, :] ** 2).sum(-1))
if self.negative:
for i in xrange(self.w2v.syn1neg.shape[0]):
self.w2v.syn1neg[i, :] /= sqrt((self.w2v.syn1neg[i, :] ** 2).sum(-1))
for i in xrange(self.sents.shape[0]):
self.sents[i, :] /= sqrt((self.sents[i, :] ** 2).sum(-1))
for i in xrange(self.cats.shape[0]):
self.cats[i, :] /= sqrt((self.cats[i, :] ** 2).sum(-1))
def init_pairnorm(self):
# avoid initializing from multiple threads
lock = threading.Lock()
with lock:
if getattr(self, 'pairnorm', None) is not None: return
self.pairnorm = matutils.zeros_aligned((self.pair_len, self.layer1_size), dtype=REAL)
init_pairtable(self)
def train_single_sent_id(self, sentences, iteration, work=None, neu1=None, sent_vec=None, cat_vec=None):
if work is None: work = matutils.zeros_aligned(self.layer1_size + 8, dtype=REAL)
if neu1 is None: neu1 = matutils.zeros_aligned(self.layer1_size + 8, dtype=REAL)
sent_grad = self.init_grad_weight(1)
cat_grad = self.init_grad_weight(1)
if sent_vec is None:
sent_vec = matutils.zeros_aligned(self.layer1_size, dtype=REAL)
if self.init_adjust:
denom = sqrt(self.layer1_size)
else:
denom = self.layer1_size
sent_vec[:] = (random.rand(self.layer1_size).astype(REAL) - 0.5) / denom
if cat_vec is None:
cat_vec = matutils.zeros_aligned(self.layer1_size, dtype=REAL)
self.cat_learn = 0
for i in range(iteration):
alpha = max(self.min_alpha, self.alpha * (1 - 1.0 * i / iteration)) if self.update_mode == 0 else self.alpha
for sentence in sentences:
sampled = [self.vocab.get(word, None) for word in sentence]
train_cat_vec(self, sent_vec, cat_vec, sampled, alpha, work, neu1, sent_grad, cat_grad)
return sent_vec, cat_vec
def infer(self, sentences, iteration=5, k=1, work=None, neu1=None):
self.init_pairnorm()
sent_vec, cat_vec = self.train_single_sent_id(sentences, iteration, work, neu1)
neighbors = self.most_similar_catsent(sent_vec, cat_vec, k, ident_cat = True)
cat_ids = []
sent_ids = []
similarity = []
for neighbor in neighbors:
cat_id = neighbor[2]
sent_ids.append(neighbor[0])
cat_ids.append(cat_id)
similarity.append(neighbor[1])
sent_vec += cat_vec
cat_vec = deepcopy(self.cats[self.cat_no_hash[cat_ids[0]]])
sent_vec -= cat_vec
return sent_vec, cat_vec, cat_ids, sent_ids, similarity
def infer_sent(self, sentences, cat_id, iteration=5, k=1, work=None, neu1=None):
cat_vec = self.cats[cat_id]
self.cat_learn = 0
sent_vec, cat_vec = self.train_single_sent_id(sentences, iteration, work, neu1, None, cat_vec)
neighbors = self.most_similar_sentence(sent_vec, k)
sent_ids = []
similarity = []
for neighbor in neighbors:
sent_ids.append(neighbor[0])
similarity.append(neighbor[1])
return sent_vec, sent_ids, similarity
def most_similar_sentence(self, vec, num):
sims = empty(self.sents_len,dtype=REAL)
sentvec_sim(self,vec,num,sims)
nearest = []
topN = argsort(sims)[::-1][0:num]
for top_sent in topN:
sent_id = self.sent_id_list[top_sent]
nearest.append((sent_id,float(sims[top_sent])))
return nearest
def most_similar_category(self, vec, num):
sims = empty(self.cat_len,dtype=REAL)
catvec_sim(self,vec,num,sims)
nearest = []
topN = argsort(sims)[::-1][0:num]
for top_cand in topN:
cat_id = self.cat_id_list[top_cand]
nearest.append((cat_id,float(sims[top_cand])))
return nearest
def most_similar_catsent_concat(self, svec, cvec, num, sent2cat):
self.sent2cat = sent2cat
sims = zeros(self.sents_len,dtype=REAL)
catsentvec_sim_concat(self, svec, cvec, num, sims)
nearest = []
topN = argsort(sims)[::-1][0:num]
for top_cand in topN:
sent_id = self.sent_id_list[top_cand]
nearest.append((sent_id,float(sims[top_cand])))
return nearest
def most_similar_catsent(self, svec, cvec, num, ident_cat = False):
sims = zeros(self.pair_len, dtype=REAL)
catsentvec_sim_sum(self, svec, cvec, sims)
nearest = []
cat_ids = {}
neighbors = argsort(sims)[::-1]
for top_cand in neighbors:
(sent_no, cat_no) = self.sent_cat_pair[top_cand]
sent_id = self.sent_id_list[sent_no]
cat_id = self.cat_id_list[cat_no]
if not ident_cat or not cat_ids.has_key(cat_id):
cat_ids[cat_id] = 1
nearest.append((sent_id,float(sims[top_cand]),cat_id))
if len(nearest) == num: break
return nearest
def save(self, fname, separately=None, sep_limit=10 * 1024**2, ignore=["pairnorm"]):
ignore.append("w2v")
self.w2v.save(fname+"_w2v", separately, sep_limit)
super(Category2Vec, self).save(fname, separately, sep_limit, ignore)
def save_sent2vec_format(self, fname):
"""
Store sentence vectors
"""
logger.info("storing %sx%s projection weights into %s" % (self.sents_len, self.layer1_size, fname))
assert (self.sents_len, self.layer1_size) == self.sents.shape
with utils.smart_open(fname, 'wb') as fout:
fout.write(utils.to_utf8("#sents_len: %d\n#size:%d\n" % self.sents.shape))
fout.write(utils.to_utf8("#sg:%d\n#hs:%d\n#negative:%d\n#cbow_mean:%d\n" % (self.sg,self.hs,self.negative,self.cbow_mean)))
for sent_id in self.sent_no_hash.keys():
row = self.sents[self.sent_no_hash[sent_id]]
fout.write(utils.to_utf8("%s\t%s\n" % (sent_id, ' '.join("%f" % val for val in row))))
def save_cat2vec_format(self, fname):
"""
Store cat vectors
"""
logger.info("storing %sx%s projection weights into %s" % (self.cat_len, self.layer1_size, fname))
assert (self.cat_len, self.layer1_size) == self.cats.shape
with utils.smart_open(fname, 'wb') as fout:
fout.write(utils.to_utf8("#cats_len: %d\n#size:%d\n" % self.cats.shape))
fout.write(utils.to_utf8("#sg:%d\n#hs:%d\n#negative:%d\n#cbow_mean:%d\n" % (self.sg,self.hs,self.negative,self.cbow_mean)))
for cat_id in self.cat_no_hash.keys():
row = self.cats[self.cat_no_hash[cat_id]]
fout.write(utils.to_utf8("%s\t%s\n" % (cat_id, ' '.join("%f" % val for val in row))))
@classmethod
def load(cls, fname, mmap=None):
model = super(Category2Vec, cls).load(fname, mmap)
if os.path.isfile(fname+"_w2v"):
model.w2v = Word2Vec.load(fname+"_w2v", mmap)
model.vocab = model.w2v.vocab
return model
@classmethod
def load_cat2vec_format(cls, cat_model=None, sent_model=None, word_model=None):
"""
Load sentence vectors
"""
model = Category2Vec(None)
count = 0
if cat_model:
logger.info("loading %s object(cat) from %s" % (cls.__name__, cat_model))
for line in open(cat_model,"r"):
line = line.rstrip()
if count == 0:
info = line.split()
model.cat_len = int(info[0])
model.layer1_size = int(info[1])
model.sg = int(info[2])
model.hs = int(info[3])
model.negative = int(info[4])
model.cbow_mean = int(info[5])
model.cats = empty((model.cat_len, model.layer1_size), dtype=REAL)
model.cat_no_hash = {}
model.cat_id_list = []
else:
idx = count - 1
row = line.split("\t")
cat_id = utils.to_unicode(row[0])
model.cat_no_hash[cat_id] = idx
model.cat_id_list.append(cat_id)
vals = row[1].split()
for j in xrange(model.layer1_size):
model.cats[idx][j] = float(vals[j])
count += 1
count = 0
if sent_model:
logger.info("loading %s object(sentence) from %s" % (cls.__name__, sent_model))
for line in open(sent_model,"r"):
line = line.rstrip()
if count == 0:
info = line.split()
model.sents_len = int(info[0])
model.sents = empty((model.sents_len, model.layer1_size), dtype=REAL)
model.sent_no_hash = {}
model.sent_id_list = []
else:
idx = count - 1
row = line.split("\t")
sent_id = utils.to_unicode(row[0])
model.sent_no_hash[sent_id] = idx
model.sent_id_list.append(sent_id)
vals = row[1].split()
for j in xrange(model.layer1_size):
model.sents[idx][j] = float(vals[j])
count += 1
if word_model:
logger.info("loading word2vec from %s" % word_model)
model.w2v = Word2Vec.load(word_model)
model.vocab = model.w2v.vocab
return model
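    # Minimal usage sketch (file names are hypothetical). Each vector file is
    # expected to start with a line of whitespace-separated counts and
    # hyperparameters, followed by one "<id>\t<v1> <v2> ..." row per item:
    #
    #   model = Category2Vec.load_cat2vec_format(cat_model="cats.vec",
    #                                            sent_model="sents.vec",
    #                                            word_model="words.w2v")
    #   model.cat_vec_similarity(cat_id1, cat_id2)  # ids present in cats.vec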
@classmethod
def arg_parser(cls):
parser = ArgumentParser(description="Category2Vec ver." + CAT2VEC_VERSION)
parser.set_defaults(model="cb")
parser.set_defaults(hs=0)
parser.set_defaults(neg=0)
parser.set_defaults(sample=0)
parser.set_defaults(alpha=0.025)
parser.set_defaults(dim=200)
parser.set_defaults(iteration=20)
parser.set_defaults(thread=cpu_count())
parser.set_defaults(update=0)
parser.set_defaults(norm=False)
parser.add_argument("--version", action="version", version="Category2Vec version: " + CAT2VEC_VERSION)
parser.add_argument("-m", "--model", dest="model", type=str, help="specify model(cb for cbow/dm, sg for skip-gram/dbow)")
parser.add_argument("--hs", dest="hs", type=int, help="hierarchical softmax 0:disable 1:enable")
parser.add_argument("--neg", dest="neg", type=int, help="negative sampling 0:disable >=1:number of sampling")
parser.add_argument("-s", "--sample", dest="sample", type=float, help="subsampling")
parser.add_argument("-a", "--alpha", dest="alpha", type=float, help="(initial) learning rate")
parser.add_argument("-d", "--dim", dest="dim", type=int, help="dimension")
parser.add_argument("-i", "--iteration", dest="iteration", type=int, help="iterations / epochs")
parser.add_argument("-t", "--thread", dest="thread", type=int, help="threads")
parser.add_argument("-u", "--update", dest="update", type=int, help="update mode 0:SGD(default) 1:AdaGrad 2:AdaDelta 3:ADAM")
parser.add_argument("-o", "--outdir", dest="outdir", type=str, help="output directory")
parser.add_argument('-n', "--normalize", dest="norm", action='store_true')
parser.add_argument("--train", nargs="+", help="training file(s)")
return parser
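    # identifier() builds a filename-friendly string encoding the main
    # hyperparameters (dimension, model type, hs/negative, alpha, iterations,
    # normalization flag and update rule); main() uses it to name saved models.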
def identifier(self):
name = "cat%d" % (self.layer1_size)
if self.sg:
name += "_sg"
else:
name += "_cb"
if self.hs:
name += "_hs"
else:
name += "_neg%d" % self.negative
name += "_a%g" % self.alpha
name += "_it%d" % self.iteration
if self.normalize_each_epoch:
name += "_n"
if self.update_mode == 0:
name += "_sgd"
elif self.update_mode == 1:
name += "_adagrad"
elif self.update_mode == 2:
name += "_adadel"
elif self.update_mode == 3:
name += "_adam"
return name
def sent_vec_similarity(self, sent_id1, sent_id2):
"""
        Compute cosine similarity between two sentences. sent_id1 and sent_id2
        are sentence IDs from the training file.
Example::
>>> trained_model.sent_vec_similarity(sent_id1, sent_id1)
1.0
>>> trained_model.sent_vec_similarity(sent_id1, sent_id3)
0.73
"""
return dot(matutils.unitvec(self.sents[self.sent_no_hash[sent_id1]]), matutils.unitvec(self.sents[self.sent_no_hash[sent_id2]]))
def cat_vec_similarity(self, cat_id1, cat_id2):
"""
        Compute cosine similarity between two categories. cat_id1 and cat_id2
        are category IDs from the training file.
Example::
>>> trained_model.cat_vec_similarity(cat_id1, cat_id1)
1.0
>>> trained_model.cat_vec_similarity(cat_id1, cat_id3)
0.73
"""
return dot(matutils.unitvec(self.cats[self.cat_no_hash[cat_id1]]), matutils.unitvec(self.cats[self.cat_no_hash[cat_id2]]))
# Example: ./cat2vec.py --train <training file(s)> -d 200 -i 20 -o <output dir>
if __name__ == "__main__":
import re
from sentences import CatSentence
logging.basicConfig(format='%(asctime)s : %(threadName)s : %(levelname)s : %(message)s', level=logging.INFO)
logging.info("running %s" % " ".join(sys.argv))
logging.info("using optimization %s" % FAST_VERSION)
# check and process cmdline input
program = os.path.basename(sys.argv[0])
if len(sys.argv) < 2:
print(globals()['__doc__'] % locals())
sys.exit(1)
parser = Category2Vec.arg_parser()
parser.add_argument("--split", dest="split", action="store_true", help="use this option for split training data", default=False)
args = parser.parse_args()
seterr(all='raise') # don't ignore numpy errors
input_file = args.train[0]
p_dir = re.compile("^.*/")
basename = p_dir.sub("",input_file)
if args.outdir:
outdir = args.outdir
else:
m = p_dir.search(input_file)
outdir = m.group(0) if m else ""
logging.info("save to %s%s_{model_id}.model" % (outdir, basename))
if args.split and len(args.train) > 1:
input_file = args.train
model = Category2Vec(CatSentence(input_file, split=args.split), iteration=args.iteration, model=args.model, hs = args.hs, negative = args.neg, workers = args.thread, alpha=args.alpha, size=args.dim, update_mode = args.update, normalize_each_epoch = args.norm)
model.save("%s%s_%s.model" % (outdir, basename, model.identifier()))
program = os.path.basename(sys.argv[0])
logging.info("finished running %s" % program)
|
pvthuy/category2vec
|
cat2vec.py
|
Python
|
lgpl-3.0
| 31,440
|
# Copyright 2011-2013, Damian Johnson
# See LICENSE for licensing information
"""
Functions to aid library logging. The default logging
:data:`~stem.util.log.Runlevel` is usually NOTICE and above.
**Stem users are more than welcome to listen for stem events, but these
functions are not being vended to our users. They may change in the future, use
them at your own risk.**
**Module Overview:**
::
get_logger - provides the stem's Logger instance
logging_level - converts a runlevel to its logging number
escape - escapes special characters in a message in preparation for logging
log - logs a message at the given runlevel
log_once - logs a message, deduplicating if it has already been logged
trace - logs a message at the TRACE runlevel
debug - logs a message at the DEBUG runlevel
info - logs a message at the INFO runlevel
notice - logs a message at the NOTICE runlevel
warn - logs a message at the WARN runlevel
error - logs a message at the ERROR runlevel
LogBuffer - Buffers logged events so they can be iterated over.
|- is_empty - checks if there's events in our buffer
+- __iter__ - iterates over and removes the buffered events
log_to_stdout - reports further logged events to stdout
.. data:: Runlevel (enum)
Enumeration for logging runlevels.
========== ===========
Runlevel Description
========== ===========
**ERROR** critical issue occurred, the user needs to be notified
**WARN** non-critical issue occurred that the user should be aware of
**NOTICE** information that is helpful to the user
**INFO** high level library activity
**DEBUG** low level library activity
**TRACE** request/reply logging
========== ===========
"""
import logging
import stem.prereq
import stem.util.enum
import stem.util.str_tools
# Logging runlevels. These are *very* commonly used so including shorter
# aliases (so they can be referenced as log.DEBUG, log.WARN, etc).
Runlevel = stem.util.enum.UppercaseEnum("TRACE", "DEBUG", "INFO", "NOTICE", "WARN", "ERROR")
TRACE, DEBUG, INFO, NOTICE, WARN, ERR = list(Runlevel)
# mapping of runlevels to the logger module's values, TRACE and NOTICE aren't
# built into the module
LOG_VALUES = {
Runlevel.TRACE: logging.DEBUG - 5,
Runlevel.DEBUG: logging.DEBUG,
Runlevel.INFO: logging.INFO,
Runlevel.NOTICE: logging.INFO + 5,
Runlevel.WARN: logging.WARN,
Runlevel.ERROR: logging.ERROR,
}
logging.addLevelName(LOG_VALUES[TRACE], "TRACE")
logging.addLevelName(LOG_VALUES[NOTICE], "NOTICE")
LOGGER = logging.getLogger("stem")
LOGGER.setLevel(LOG_VALUES[TRACE])
# There's some messages that we don't want to log more than once. This set has
# the messages IDs that we've logged which fall into this category.
DEDUPLICATION_MESSAGE_IDS = set()
# Adds a default nullhandler for the stem logger, suppressing the 'No handlers
# could be found for logger "stem"' warning as per...
# http://docs.python.org/release/3.1.3/library/logging.html#configuring-logging-for-a-library
class _NullHandler(logging.Handler):
def emit(self, record):
pass
if not LOGGER.handlers:
LOGGER.addHandler(_NullHandler())
def get_logger():
"""
Provides the stem logger.
:return: **logging.Logger** for stem
"""
return LOGGER
def logging_level(runlevel):
"""
Translates a runlevel into the value expected by the logging module.
:param stem.util.log.Runlevel runlevel: runlevel to be returned, no logging if **None**
"""
if runlevel:
return LOG_VALUES[runlevel]
else:
return logging.FATAL + 5
def escape(message):
"""
Escapes specific sequences for logging (newlines, tabs, carriage returns). If
the input is **bytes** then this converts it to **unicode** under python 3.x.
:param str message: string to be escaped
:returns: str that is escaped
"""
if stem.prereq.is_python_3():
message = stem.util.str_tools._to_unicode(message)
for pattern, replacement in (("\n", "\\n"), ("\r", "\\r"), ("\t", "\\t")):
message = message.replace(pattern, replacement)
return message
def log(runlevel, message):
"""
Logs a message at the given runlevel.
:param stem.util.log.Runlevel runlevel: runlevel to log the message at, logging is skipped if **None**
:param str message: message to be logged
"""
if runlevel:
LOGGER.log(LOG_VALUES[runlevel], message)
def log_once(message_id, runlevel, message):
"""
Logs a message at the given runlevel. If a message with this ID has already
been logged then this is a no-op.
:param str message_id: unique message identifier to deduplicate on
:param stem.util.log.Runlevel runlevel: runlevel to log the message at, logging is skipped if **None**
:param str message: message to be logged
:returns: **True** if we log the message, **False** otherwise
"""
if not runlevel or message_id in DEDUPLICATION_MESSAGE_IDS:
return False
else:
DEDUPLICATION_MESSAGE_IDS.add(message_id)
    log(runlevel, message)
    return True
# shorter aliases for logging at a runlevel
def trace(message):
log(Runlevel.TRACE, message)
def debug(message):
log(Runlevel.DEBUG, message)
def info(message):
log(Runlevel.INFO, message)
def notice(message):
log(Runlevel.NOTICE, message)
def warn(message):
log(Runlevel.WARN, message)
def error(message):
log(Runlevel.ERROR, message)
class LogBuffer(logging.Handler):
"""
Basic log handler that listens for stem events and stores them so they can be
read later. Log entries are cleared as they are read.
"""
def __init__(self, runlevel):
# TODO: At least in python 2.6 logging.Handler has a bug in that it doesn't
# extend object, causing our super() call to fail. When we drop python 2.6
# support we should switch back to using super() instead.
#super(LogBuffer, self).__init__(level = logging_level(runlevel))
logging.Handler.__init__(self, level = logging_level(runlevel))
self.formatter = logging.Formatter(
fmt = '%(asctime)s [%(levelname)s] %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S')
self._buffer = []
def is_empty(self):
return not bool(self._buffer)
def __iter__(self):
while self._buffer:
yield self.formatter.format(self._buffer.pop(0))
def emit(self, record):
self._buffer.append(record)
class _StdoutLogger(logging.Handler):
def __init__(self, runlevel):
logging.Handler.__init__(self, level = logging_level(runlevel))
self.formatter = logging.Formatter(
fmt = '%(asctime)s [%(levelname)s] %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S')
def emit(self, record):
    print(self.formatter.format(record))
def log_to_stdout(runlevel):
"""
Logs further events to stdout.
:param stem.util.log.Runlevel runlevel: minimum runlevel a message needs to be to be logged
"""
get_logger().addHandler(_StdoutLogger(runlevel))
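# Minimal usage sketch (illustrative only; uses the helpers defined above):
#
#   from stem.util import log
#
#   log.log_to_stdout(log.Runlevel.NOTICE)   # echo NOTICE and above to stdout
#   log.notice("controller connected")
#
#   buffered = log.LogBuffer(log.Runlevel.DEBUG)
#   log.get_logger().addHandler(buffered)
#   log.debug("kept for later")
#
#   for entry in buffered:  # entries are cleared as they are read
#       print(entry)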
|
arlolra/stem
|
stem/util/log.py
|
Python
|
lgpl-3.0
| 6,833
|
# -*- coding: utf-8 -*-
import pytest
class TestSnippet:
def test_a(self):
assert(1 == 1)
def test_b(self):
assert(1 == 0)
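# Example invocation (assumes pytest is installed):
#   pytest extra/snippets/test.py -v
# test_a passes; test_b is written to fail.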
|
a1ezzz/wasp-launcher
|
extra/snippets/test.py
|
Python
|
lgpl-3.0
| 134
|
from typing import Any, Dict
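# Maps Unicode codepoints (lowercase hex strings, without a "U+" prefix) to the
# canonical emoji name plus any extra aliases; for example, the entry for
# "1f600" gives canonical_name "grinning" with the alias "happy".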
EMOJI_NAME_MAPS: Dict[str, Dict[str, Any]] = {
# seems like best emoji for happy
"1f600": {"canonical_name": "grinning", "aliases": ["happy"]},
"1f603": {"canonical_name": "smiley", "aliases": []},
# the Google emoji for this is not great, so made People/9 'smile' and
# renamed this one
"1f604": {"canonical_name": "big_smile", "aliases": []},
# from gemoji/Unicode
"1f601": {"canonical_name": "grinning_face_with_smiling_eyes", "aliases": []},
# satisfied doesn't seem like a good description of these images
"1f606": {"canonical_name": "laughing", "aliases": ["lol"]},
"1f605": {"canonical_name": "sweat_smile", "aliases": []},
# laughter_tears from https://beebom.com/emoji-meanings/
"1f602": {"canonical_name": "joy", "aliases": ["tears", "laughter_tears"]},
"1f923": {"canonical_name": "rolling_on_the_floor_laughing", "aliases": ["rofl"]},
# not sure how the glyphs match relaxed, but both iamcal and gemoji have it
"263a": {"canonical_name": "smiling_face", "aliases": ["relaxed"]},
"1f60a": {"canonical_name": "blush", "aliases": []},
# halo comes from gemoji/Unicode
"1f607": {"canonical_name": "innocent", "aliases": ["halo"]},
"1f642": {"canonical_name": "smile", "aliases": []},
"1f643": {"canonical_name": "upside_down", "aliases": ["oops"]},
"1f609": {"canonical_name": "wink", "aliases": []},
"1f60c": {"canonical_name": "relieved", "aliases": []},
# in_love from https://beebom.com/emoji-meanings/
"1f60d": {"canonical_name": "heart_eyes", "aliases": ["in_love"]},
# blow_a_kiss from https://beebom.com/emoji-meanings/
"1f618": {"canonical_name": "heart_kiss", "aliases": ["blow_a_kiss"]},
"1f617": {"canonical_name": "kiss", "aliases": []},
"1f619": {"canonical_name": "kiss_smiling_eyes", "aliases": []},
"1f61a": {"canonical_name": "kiss_with_blush", "aliases": []},
"1f60b": {"canonical_name": "yum", "aliases": []},
# crazy from https://beebom.com/emoji-meanings/, seems like best emoji for
# joking
"1f61b": {"canonical_name": "stuck_out_tongue", "aliases": ["mischievous"]},
"1f61c": {"canonical_name": "stuck_out_tongue_wink", "aliases": ["joking", "crazy"]},
"1f61d": {"canonical_name": "stuck_out_tongue_closed_eyes", "aliases": []},
# kaching suggested by user
"1f911": {"canonical_name": "money_face", "aliases": ["kaching"]},
# arms_open seems like a natural addition
"1f917": {"canonical_name": "hug", "aliases": ["arms_open"]},
"1f913": {"canonical_name": "nerd", "aliases": ["geek"]},
# several sites suggested this was used for "cool", but cool is taken by
# Symbols/137
"1f60e": {"canonical_name": "sunglasses", "aliases": []},
"1f921": {"canonical_name": "clown", "aliases": []},
"1f920": {"canonical_name": "cowboy", "aliases": []},
# https://emojipedia.org/smirking-face/
"1f60f": {"canonical_name": "smirk", "aliases": ["smug"]},
"1f612": {"canonical_name": "unamused", "aliases": []},
"1f61e": {"canonical_name": "disappointed", "aliases": []},
# see People/41
"1f614": {"canonical_name": "pensive", "aliases": ["tired"]},
"1f61f": {"canonical_name": "worried", "aliases": []},
# these seem to better capture the glyphs. This is also what :/ turns into
# in Google Hangouts
"1f615": {"canonical_name": "oh_no", "aliases": ["half_frown", "concerned", "confused"]},
"1f641": {"canonical_name": "frown", "aliases": ["slight_frown"]},
# sad seemed better than putting another frown as the primary name (see
# People/37)
"2639": {"canonical_name": "sad", "aliases": ["big_frown"]},
# helpless from https://emojipedia.org/persevering-face/
"1f623": {"canonical_name": "persevere", "aliases": ["helpless"]},
# agony seemed like a good addition
"1f616": {"canonical_name": "confounded", "aliases": ["agony"]},
# tired doesn't really match any of the 4 images, put it on People/34
"1f62b": {"canonical_name": "anguish", "aliases": []},
# distraught from https://beebom.com/emoji-meanings/
"1f629": {"canonical_name": "weary", "aliases": ["distraught"]},
"1f624": {"canonical_name": "triumph", "aliases": []},
"1f620": {"canonical_name": "angry", "aliases": []},
# mad and grumpy from https://beebom.com/emoji-meanings/, very_angry to
# parallel People/44 and show up in typeahead for "ang.."
"1f621": {"canonical_name": "rage", "aliases": ["mad", "grumpy", "very_angry"]},
# blank from https://beebom.com/emoji-meanings/, speechless and poker_face
# seemed like good ideas for this
"1f636": {"canonical_name": "speechless", "aliases": ["no_mouth", "blank", "poker_face"]},
"1f610": {"canonical_name": "neutral", "aliases": []},
"1f611": {"canonical_name": "expressionless", "aliases": []},
"1f62f": {"canonical_name": "hushed", "aliases": []},
"1f626": {"canonical_name": "frowning", "aliases": []},
# pained from https://beebom.com/emoji-meanings/
"1f627": {"canonical_name": "anguished", "aliases": ["pained"]},
# surprise from https://emojipedia.org/face-with-open-mouth/
"1f62e": {"canonical_name": "open_mouth", "aliases": ["surprise"]},
"1f632": {"canonical_name": "astonished", "aliases": []},
"1f635": {"canonical_name": "dizzy", "aliases": []},
# the alternates are from https://emojipedia.org/flushed-face/. shame
# doesn't work with the Google emoji
"1f633": {"canonical_name": "flushed", "aliases": ["embarrassed", "blushing"]},
"1f631": {"canonical_name": "scream", "aliases": []},
# scared from https://emojipedia.org/fearful-face/, shock seemed like a
# nice addition
"1f628": {"canonical_name": "fear", "aliases": ["scared", "shock"]},
"1f630": {"canonical_name": "cold_sweat", "aliases": []},
"1f622": {"canonical_name": "cry", "aliases": []},
# stressed from https://beebom.com/emoji-meanings/. The internet generally
    # didn't seem to know what to make of the disappointed_relieved name, and I
# got the sense it wasn't an emotion that was often used. Hence replaced it
# with exhausted.
"1f625": {"canonical_name": "exhausted", "aliases": ["disappointed_relieved", "stressed"]},
"1f924": {"canonical_name": "drooling", "aliases": []},
"1f62d": {"canonical_name": "sob", "aliases": []},
"1f613": {"canonical_name": "sweat", "aliases": []},
"1f62a": {"canonical_name": "sleepy", "aliases": []},
"1f634": {"canonical_name": "sleeping", "aliases": []},
"1f644": {"canonical_name": "rolling_eyes", "aliases": []},
"1f914": {"canonical_name": "thinking", "aliases": []},
"1f925": {"canonical_name": "lying", "aliases": []},
# seems like best emoji for nervous/anxious
"1f62c": {"canonical_name": "grimacing", "aliases": ["nervous", "anxious"]},
# zip_it from https://mashable.com/2015/10/23/ios-9-1-emoji-guide/,
# lips_sealed from https://emojipedia.org/zipper-mouth-face/, rest seemed
# like reasonable additions
"1f910": {
"canonical_name": "silence",
"aliases": ["quiet", "hush", "zip_it", "lips_are_sealed"],
},
# queasy seemed like a natural addition
"1f922": {"canonical_name": "nauseated", "aliases": ["queasy"]},
"1f927": {"canonical_name": "sneezing", "aliases": []},
"1f637": {"canonical_name": "mask", "aliases": []},
# flu from https://mashable.com/2015/10/23/ios-9-1-emoji-guide/, sick from
# https://emojipedia.org/face-with-thermometer/, face_with_thermometer so
# it shows up in typeahead (thermometer taken by Objects/82)
"1f912": {
"canonical_name": "sick",
"aliases": ["flu", "face_with_thermometer", "ill", "fever"],
},
# hurt and injured from https://beebom.com/emoji-meanings/. Chose hurt as
# primary since I think it can cover a wider set of things (e.g. emotional
# hurt)
"1f915": {"canonical_name": "hurt", "aliases": ["head_bandage", "injured"]},
# devil from https://emojipedia.org/smiling-face-with-horns/,
# smiling_face_with_horns from gemoji/Unicode
"1f608": {
"canonical_name": "smiling_devil",
"aliases": ["smiling_imp", "smiling_face_with_horns"],
},
# angry_devil from https://beebom.com/emoji-meanings/
"1f47f": {"canonical_name": "devil", "aliases": ["imp", "angry_devil"]},
"1f479": {"canonical_name": "ogre", "aliases": []},
"1f47a": {"canonical_name": "goblin", "aliases": []},
# pile_of_poo from gemoji/Unicode
"1f4a9": {"canonical_name": "poop", "aliases": ["pile_of_poo"]},
# alternates seemed like reasonable additions
"1f47b": {"canonical_name": "ghost", "aliases": ["boo", "spooky", "haunted"]},
"1f480": {"canonical_name": "skull", "aliases": []},
# alternates seemed like reasonable additions
"2620": {
"canonical_name": "skull_and_crossbones",
"aliases": ["pirate", "death", "hazard", "toxic", "poison"],
},
# ufo seemed like a natural addition
"1f47d": {"canonical_name": "alien", "aliases": ["ufo"]},
"1f47e": {"canonical_name": "space_invader", "aliases": []},
"1f916": {"canonical_name": "robot", "aliases": []},
# pumpkin seemed like a natural addition
"1f383": {"canonical_name": "jack-o-lantern", "aliases": ["pumpkin"]},
"1f63a": {"canonical_name": "smiley_cat", "aliases": []},
"1f638": {"canonical_name": "smile_cat", "aliases": []},
"1f639": {"canonical_name": "joy_cat", "aliases": []},
"1f63b": {"canonical_name": "heart_eyes_cat", "aliases": []},
# smug_cat to parallel People/31
"1f63c": {"canonical_name": "smirk_cat", "aliases": ["smug_cat"]},
"1f63d": {"canonical_name": "kissing_cat", "aliases": []},
# weary_cat from Unicode/gemoji
"1f640": {"canonical_name": "scream_cat", "aliases": ["weary_cat"]},
"1f63f": {"canonical_name": "crying_cat", "aliases": []},
# angry_cat to better parallel People/45
"1f63e": {"canonical_name": "angry_cat", "aliases": ["pouting_cat"]},
"1f450": {"canonical_name": "open_hands", "aliases": []},
# praise from
# https://emojipedia.org/person-raising-both-hands-in-celebration/
"1f64c": {"canonical_name": "raised_hands", "aliases": ["praise"]},
# applause from https://emojipedia.org/clapping-hands-sign/
"1f44f": {"canonical_name": "clap", "aliases": ["applause"]},
# welcome and thank_you from
# https://emojipedia.org/person-with-folded-hands/, namaste from indian
# culture
"1f64f": {"canonical_name": "pray", "aliases": ["welcome", "thank_you", "namaste"]},
# done_deal seems like a natural addition
"1f91d": {"canonical_name": "handshake", "aliases": ["done_deal"]},
"1f44d": {"canonical_name": "+1", "aliases": ["thumbs_up", "like"]},
"1f44e": {"canonical_name": "-1", "aliases": ["thumbs_down"]},
# fist_bump from https://beebom.com/emoji-meanings/
"1f44a": {"canonical_name": "fist_bump", "aliases": ["punch"]},
# used as power in social justice movements
"270a": {"canonical_name": "fist", "aliases": ["power"]},
"1f91b": {"canonical_name": "left_fist", "aliases": []},
"1f91c": {"canonical_name": "right_fist", "aliases": []},
"1f91e": {"canonical_name": "fingers_crossed", "aliases": []},
# seems to be mostly used as peace on twitter
"270c": {"canonical_name": "peace_sign", "aliases": ["victory"]},
# https://emojipedia.org/sign-of-the-horns/
"1f918": {"canonical_name": "rock_on", "aliases": ["sign_of_the_horns"]},
# got_it seems like a natural addition
"1f44c": {"canonical_name": "ok", "aliases": ["got_it"]},
"1f448": {"canonical_name": "point_left", "aliases": []},
"1f449": {"canonical_name": "point_right", "aliases": []},
# :this: is a way of emphasizing the previous message. point_up instead of
# point_up_2 so that point_up better matches the other point_*s
"1f446": {"canonical_name": "point_up", "aliases": ["this"]},
"1f447": {"canonical_name": "point_down", "aliases": []},
# People/114 is point_up. These seemed better than naming it point_up_2,
# and point_of_information means it will come up in typeahead for 'point'
"261d": {
"canonical_name": "wait_one_second",
"aliases": ["point_of_information", "asking_a_question"],
},
"270b": {"canonical_name": "hand", "aliases": ["raised_hand"]},
# seems like best emoji for stop, raised_back_of_hand doesn't seem that
# useful
"1f91a": {"canonical_name": "stop", "aliases": []},
# seems like best emoji for high_five, raised_hand_with_fingers_splayed
# doesn't seem that useful
"1f590": {"canonical_name": "high_five", "aliases": ["palm"]},
# https://mashable.com/2015/10/23/ios-9-1-emoji-guide/
"1f596": {"canonical_name": "spock", "aliases": ["live_long_and_prosper"]},
# People/119 is a better 'hi', but 'hi' will never show up in the typeahead
# due to 'high_five'
"1f44b": {"canonical_name": "wave", "aliases": ["hello", "hi"]},
"1f919": {"canonical_name": "call_me", "aliases": []},
# flexed_biceps from gemoji/Unicode, strong seemed like a good addition
"1f4aa": {"canonical_name": "muscle", "aliases": []},
"1f595": {"canonical_name": "middle_finger", "aliases": []},
"270d": {"canonical_name": "writing", "aliases": []},
"1f933": {"canonical_name": "selfie", "aliases": []},
# Couldn't figure out why iamcal chose nail_care. Unicode uses nail_polish,
# gemoji uses both
"1f485": {"canonical_name": "nail_polish", "aliases": ["nail_care"]},
"1f48d": {"canonical_name": "ring", "aliases": []},
"1f484": {"canonical_name": "lipstick", "aliases": []},
# People/18 seems like a better kiss for most circumstances
"1f48b": {"canonical_name": "lipstick_kiss", "aliases": []},
# mouth from gemoji/Unicode
"1f444": {"canonical_name": "lips", "aliases": ["mouth"]},
"1f445": {"canonical_name": "tongue", "aliases": []},
"1f442": {"canonical_name": "ear", "aliases": []},
"1f443": {"canonical_name": "nose", "aliases": []},
# seems a better feet than Nature/86 (paw_prints)
"1f463": {"canonical_name": "footprints", "aliases": ["feet"]},
"1f441": {"canonical_name": "eye", "aliases": []},
# seemed the best emoji for looking
"1f440": {"canonical_name": "eyes", "aliases": ["looking"]},
"1f5e3": {"canonical_name": "speaking_head", "aliases": []},
# shadow seems like a good addition
"1f464": {"canonical_name": "silhouette", "aliases": ["shadow"]},
# to parallel People/139
"1f465": {"canonical_name": "silhouettes", "aliases": ["shadows"]},
"1f476": {"canonical_name": "baby", "aliases": []},
"1f466": {"canonical_name": "boy", "aliases": []},
"1f467": {"canonical_name": "girl", "aliases": []},
"1f468": {"canonical_name": "man", "aliases": []},
"1f469": {"canonical_name": "woman", "aliases": []},
# It's used on twitter a bunch, either when showing off hair, or in a way
# where People/144 would substitute. It'd be nice if there were another
# emoji one could use for "good hair", but I think not a big loss to not
# have one for Zulip, and not worth the eurocentrism.
# '1f471': {'canonical_name': 'X', 'aliases': ['person_with_blond_hair']},
# Added elderly since I think some people prefer that term
"1f474": {"canonical_name": "older_man", "aliases": ["elderly_man"]},
# Added elderly since I think some people prefer that term
"1f475": {"canonical_name": "older_woman", "aliases": ["elderly_woman"]},
"1f472": {"canonical_name": "gua_pi_mao", "aliases": []},
"1f473": {"canonical_name": "turban", "aliases": []},
# police seems like a more polite term, and matches the Unicode
"1f46e": {"canonical_name": "police", "aliases": ["cop"]},
"1f477": {"canonical_name": "construction_worker", "aliases": []},
"1f482": {"canonical_name": "guard", "aliases": []},
# detective from gemoji, sneaky from
# https://mashable.com/2015/10/23/ios-9-1-emoji-guide/, agent seems a
# reasonable addition
"1f575": {"canonical_name": "detective", "aliases": ["spy", "sleuth", "agent", "sneaky"]},
# mrs_claus from https://emojipedia.org/mother-christmas/
"1f936": {"canonical_name": "mother_christmas", "aliases": ["mrs_claus"]},
"1f385": {"canonical_name": "santa", "aliases": []},
"1f478": {"canonical_name": "princess", "aliases": []},
"1f934": {"canonical_name": "prince", "aliases": []},
"1f470": {"canonical_name": "bride", "aliases": []},
"1f935": {"canonical_name": "tuxedo", "aliases": []},
"1f47c": {"canonical_name": "angel", "aliases": []},
# expecting seems like a good addition
"1f930": {"canonical_name": "pregnant", "aliases": ["expecting"]},
"1f647": {"canonical_name": "bow", "aliases": []},
# mostly used sassily. person_tipping_hand from
# https://emojipedia.org/information-desk-person/
"1f481": {"canonical_name": "information_desk_person", "aliases": ["person_tipping_hand"]},
# no_signal to parallel People/207. Nope seems like a reasonable addition
"1f645": {"canonical_name": "no_signal", "aliases": ["nope"]},
"1f646": {"canonical_name": "ok_signal", "aliases": []},
# pick_me seems like a good addition
"1f64b": {"canonical_name": "raising_hand", "aliases": ["pick_me"]},
"1f926": {"canonical_name": "face_palm", "aliases": []},
"1f937": {"canonical_name": "shrug", "aliases": []},
"1f64e": {"canonical_name": "person_pouting", "aliases": []},
"1f64d": {"canonical_name": "person_frowning", "aliases": []},
"1f487": {"canonical_name": "haircut", "aliases": []},
"1f486": {"canonical_name": "massage", "aliases": []},
# hover seems like a reasonable addition
"1f574": {"canonical_name": "levitating", "aliases": ["hover"]},
"1f483": {"canonical_name": "dancer", "aliases": []},
"1f57a": {"canonical_name": "dancing", "aliases": ["disco"]},
"1f46f": {"canonical_name": "dancers", "aliases": []},
# pedestrian seems like reasonable addition
"1f6b6": {"canonical_name": "walking", "aliases": ["pedestrian"]},
"1f3c3": {"canonical_name": "running", "aliases": ["runner"]},
"1f46b": {"canonical_name": "man_and_woman_holding_hands", "aliases": ["man_and_woman_couple"]},
# to parallel People/234
"1f46d": {"canonical_name": "two_women_holding_hands", "aliases": ["women_couple"]},
# to parallel People/234
"1f46c": {"canonical_name": "two_men_holding_hands", "aliases": ["men_couple"]},
# no need for man-woman-boy, since we aren't including the other family
# combos
"1f46a": {"canonical_name": "family", "aliases": []},
"1f45a": {"canonical_name": "clothing", "aliases": []},
"1f455": {"canonical_name": "shirt", "aliases": ["tshirt"]},
# denim seems like a good addition
"1f456": {"canonical_name": "jeans", "aliases": ["denim"]},
# tie is shorter, and a bit more general
"1f454": {"canonical_name": "tie", "aliases": []},
"1f457": {"canonical_name": "dress", "aliases": []},
"1f459": {"canonical_name": "bikini", "aliases": []},
"1f458": {"canonical_name": "kimono", "aliases": []},
# I feel like this is always used in the plural
"1f460": {"canonical_name": "high_heels", "aliases": []},
# flip_flops seems like a reasonable addition
"1f461": {"canonical_name": "sandal", "aliases": ["flip_flops"]},
"1f462": {"canonical_name": "boot", "aliases": []},
"1f45e": {"canonical_name": "shoe", "aliases": []},
# running_shoe is from gemoji, sneaker seems like a reasonable addition
"1f45f": {"canonical_name": "athletic_shoe", "aliases": ["sneaker", "running_shoe"]},
"1f452": {"canonical_name": "hat", "aliases": []},
"1f3a9": {"canonical_name": "top_hat", "aliases": []},
# graduate seems like a better word for this
"1f393": {"canonical_name": "graduate", "aliases": ["mortar_board"]},
# king and queen seem like good additions
"1f451": {"canonical_name": "crown", "aliases": ["queen", "king"]},
# safety and invincibility inspired by
# https://mashable.com/2015/10/23/ios-9-1-emoji-guide/. hard_hat and
# rescue_worker seem like good additions
"26d1": {
"canonical_name": "helmet",
"aliases": ["hard_hat", "rescue_worker", "safety_first", "invincible"],
},
# backpack from gemoji, dominates satchel on Google Trends
"1f392": {"canonical_name": "backpack", "aliases": ["satchel"]},
"1f45d": {"canonical_name": "pouch", "aliases": []},
"1f45b": {"canonical_name": "purse", "aliases": []},
"1f45c": {"canonical_name": "handbag", "aliases": []},
"1f4bc": {"canonical_name": "briefcase", "aliases": []},
# glasses seems a more common term than eyeglasses, spectacles seems like a
# reasonable synonym to add
"1f453": {"canonical_name": "glasses", "aliases": ["spectacles"]},
"1f576": {"canonical_name": "dark_sunglasses", "aliases": []},
"1f302": {"canonical_name": "closed_umbrella", "aliases": []},
"2602": {"canonical_name": "umbrella", "aliases": []},
# Some animals have a Unicode codepoint "<animal>", some have a codepoint
# "<animal> face", and some have both. If an animal has just a single
# codepoint, we call it <animal>, regardless of what the codepoint is. If
# an animal has both, we call the "<animal>" codepoint <animal>, and come
# up with something else useful-seeming for the "<animal> face" codepoint.
# The reason we chose "<animal> face" for the non-standard name (instead of
# giving "<animal>" the non-standard name, as iamcal does) is because the
# apple emoji for the "<animal>"s are too realistic. E.g. Apple's Nature/76
# is less plausibly a puppy than this one.
"1f436": {"canonical_name": "puppy", "aliases": []},
"1f431": {"canonical_name": "kitten", "aliases": []},
"1f42d": {"canonical_name": "dormouse", "aliases": []},
"1f439": {"canonical_name": "hamster", "aliases": []},
"1f430": {"canonical_name": "bunny", "aliases": []},
"1f98a": {"canonical_name": "fox", "aliases": []},
"1f43b": {"canonical_name": "bear", "aliases": []},
"1f43c": {"canonical_name": "panda", "aliases": []},
"1f428": {"canonical_name": "koala", "aliases": []},
"1f42f": {"canonical_name": "tiger_cub", "aliases": []},
"1f981": {"canonical_name": "lion", "aliases": []},
"1f42e": {"canonical_name": "calf", "aliases": []},
"1f437": {"canonical_name": "piglet", "aliases": []},
"1f43d": {"canonical_name": "pig_nose", "aliases": []},
"1f438": {"canonical_name": "frog", "aliases": []},
"1f435": {"canonical_name": "monkey_face", "aliases": []},
"1f648": {"canonical_name": "see_no_evil", "aliases": []},
"1f649": {"canonical_name": "hear_no_evil", "aliases": []},
"1f64a": {"canonical_name": "speak_no_evil", "aliases": []},
"1f412": {"canonical_name": "monkey", "aliases": []},
# cluck seemed like a good addition
"1f414": {"canonical_name": "chicken", "aliases": ["cluck"]},
"1f427": {"canonical_name": "penguin", "aliases": []},
"1f426": {"canonical_name": "bird", "aliases": []},
"1f424": {"canonical_name": "chick", "aliases": ["baby_chick"]},
"1f423": {"canonical_name": "hatching", "aliases": ["hatching_chick"]},
# https://www.iemoji.com/view/emoji/668/animals-nature/front-facing-baby-chick
"1f425": {"canonical_name": "new_baby", "aliases": []},
"1f986": {"canonical_name": "duck", "aliases": []},
"1f985": {"canonical_name": "eagle", "aliases": []},
"1f989": {"canonical_name": "owl", "aliases": []},
"1f987": {"canonical_name": "bat", "aliases": []},
"1f43a": {"canonical_name": "wolf", "aliases": []},
"1f417": {"canonical_name": "boar", "aliases": []},
"1f434": {"canonical_name": "pony", "aliases": []},
"1f984": {"canonical_name": "unicorn", "aliases": []},
# buzz seemed like a reasonable addition
"1f41d": {"canonical_name": "bee", "aliases": ["buzz", "honeybee"]},
# caterpillar seemed like a reasonable addition
"1f41b": {"canonical_name": "bug", "aliases": ["caterpillar"]},
"1f98b": {"canonical_name": "butterfly", "aliases": []},
"1f40c": {"canonical_name": "snail", "aliases": []},
# spiral_shell from Unicode/gemoji, the others seemed like reasonable
# additions
"1f41a": {"canonical_name": "shell", "aliases": ["seashell", "conch", "spiral_shell"]},
# Unicode/gemoji have lady_beetle; hopefully with ladybug we get both the
# people that prefer lady_beetle (with beetle) and ladybug. There is also
# ladybird, but seems a bit much for this to complete for bird.
"1f41e": {"canonical_name": "beetle", "aliases": ["ladybug"]},
"1f41c": {"canonical_name": "ant", "aliases": []},
"1f577": {"canonical_name": "spider", "aliases": []},
"1f578": {"canonical_name": "web", "aliases": ["spider_web"]},
# tortoise seemed like a reasonable addition
"1f422": {"canonical_name": "turtle", "aliases": ["tortoise"]},
# put in a few animal sounds, including this one
"1f40d": {"canonical_name": "snake", "aliases": ["hiss"]},
"1f98e": {"canonical_name": "lizard", "aliases": ["gecko"]},
"1f982": {"canonical_name": "scorpion", "aliases": []},
"1f980": {"canonical_name": "crab", "aliases": []},
"1f991": {"canonical_name": "squid", "aliases": []},
"1f419": {"canonical_name": "octopus", "aliases": []},
"1f990": {"canonical_name": "shrimp", "aliases": []},
"1f420": {"canonical_name": "tropical_fish", "aliases": []},
"1f41f": {"canonical_name": "fish", "aliases": []},
"1f421": {"canonical_name": "blowfish", "aliases": []},
"1f42c": {"canonical_name": "dolphin", "aliases": ["flipper"]},
"1f988": {"canonical_name": "shark", "aliases": []},
"1f433": {"canonical_name": "whale", "aliases": []},
# https://emojipedia.org/whale/
"1f40b": {"canonical_name": "humpback_whale", "aliases": []},
"1f40a": {"canonical_name": "crocodile", "aliases": []},
"1f406": {"canonical_name": "leopard", "aliases": []},
"1f405": {"canonical_name": "tiger", "aliases": []},
"1f403": {"canonical_name": "water_buffalo", "aliases": []},
"1f402": {"canonical_name": "ox", "aliases": ["bull"]},
"1f404": {"canonical_name": "cow", "aliases": []},
"1f98c": {"canonical_name": "deer", "aliases": []},
# https://emojipedia.org/dromedary-camel/
"1f42a": {"canonical_name": "arabian_camel", "aliases": []},
"1f42b": {"canonical_name": "camel", "aliases": []},
"1f418": {"canonical_name": "elephant", "aliases": []},
"1f98f": {"canonical_name": "rhinoceros", "aliases": []},
"1f98d": {"canonical_name": "gorilla", "aliases": []},
"1f40e": {"canonical_name": "horse", "aliases": []},
"1f416": {"canonical_name": "pig", "aliases": ["oink"]},
"1f410": {"canonical_name": "goat", "aliases": []},
"1f40f": {"canonical_name": "ram", "aliases": []},
"1f411": {"canonical_name": "sheep", "aliases": ["baa"]},
"1f415": {"canonical_name": "dog", "aliases": ["woof"]},
"1f429": {"canonical_name": "poodle", "aliases": []},
"1f408": {"canonical_name": "cat", "aliases": ["meow"]},
# alarm seemed like a fun addition
"1f413": {"canonical_name": "rooster", "aliases": ["alarm", "cock-a-doodle-doo"]},
"1f983": {"canonical_name": "turkey", "aliases": []},
"1f54a": {"canonical_name": "dove", "aliases": ["dove_of_peace"]},
"1f407": {"canonical_name": "rabbit", "aliases": []},
"1f401": {"canonical_name": "mouse", "aliases": []},
"1f400": {"canonical_name": "rat", "aliases": []},
"1f43f": {"canonical_name": "chipmunk", "aliases": []},
# paws seemed like reasonable addition. Put feet at People/135
"1f43e": {"canonical_name": "paw_prints", "aliases": ["paws"]},
"1f409": {"canonical_name": "dragon", "aliases": []},
"1f432": {"canonical_name": "dragon_face", "aliases": []},
"1f335": {"canonical_name": "cactus", "aliases": []},
"1f384": {"canonical_name": "holiday_tree", "aliases": []},
"1f332": {"canonical_name": "evergreen_tree", "aliases": []},
"1f333": {"canonical_name": "tree", "aliases": ["deciduous_tree"]},
"1f334": {"canonical_name": "palm_tree", "aliases": []},
# sprout seemed like a reasonable addition
"1f331": {"canonical_name": "seedling", "aliases": ["sprout"]},
# seemed like the best emoji for plant
"1f33f": {"canonical_name": "herb", "aliases": ["plant"]},
# clover seemed like a reasonable addition
"2618": {"canonical_name": "shamrock", "aliases": ["clover"]},
# lucky seems more useful
"1f340": {"canonical_name": "lucky", "aliases": ["four_leaf_clover"]},
"1f38d": {"canonical_name": "bamboo", "aliases": []},
# https://emojipedia.org/tanabata-tree/
"1f38b": {"canonical_name": "wish_tree", "aliases": ["tanabata_tree"]},
# seemed like good additions. Used fall instead of autumn, since don't have
# the rest of the seasons, and could imagine someone using both meanings of
# fall.
"1f343": {"canonical_name": "leaves", "aliases": ["wind", "fall"]},
"1f342": {"canonical_name": "fallen_leaf", "aliases": []},
"1f341": {"canonical_name": "maple_leaf", "aliases": []},
"1f344": {"canonical_name": "mushroom", "aliases": []},
# harvest seems more useful
"1f33e": {"canonical_name": "harvest", "aliases": ["ear_of_rice"]},
"1f490": {"canonical_name": "bouquet", "aliases": []},
# seems like the best emoji for flower
"1f337": {"canonical_name": "tulip", "aliases": ["flower"]},
"1f339": {"canonical_name": "rose", "aliases": []},
    # crushed suggested by a user
"1f940": {"canonical_name": "wilted_flower", "aliases": ["crushed"]},
"1f33b": {"canonical_name": "sunflower", "aliases": []},
"1f33c": {"canonical_name": "blossom", "aliases": []},
"1f338": {"canonical_name": "cherry_blossom", "aliases": []},
"1f33a": {"canonical_name": "hibiscus", "aliases": []},
"1f30e": {"canonical_name": "earth_americas", "aliases": []},
"1f30d": {"canonical_name": "earth_africa", "aliases": []},
"1f30f": {"canonical_name": "earth_asia", "aliases": []},
"1f315": {"canonical_name": "full_moon", "aliases": []},
# too many useless moons. Don't seem to get much use on twitter, and clog
# up typeahead for moon.
# '1f316': {'canonical_name': 'X', 'aliases': ['waning_crescent_moon']},
# '1f317': {'canonical_name': 'X', 'aliases': ['last_quarter_moon']},
# '1f318': {'canonical_name': 'X', 'aliases': ['waning_crescent_moon']},
"1f311": {"canonical_name": "new_moon", "aliases": []},
# '1f312': {'canonical_name': 'X', 'aliases': ['waxing_crescent_moon']},
# '1f313': {'canonical_name': 'X', 'aliases': ['first_quarter_moon']},
"1f314": {"canonical_name": "waxing_moon", "aliases": []},
"1f31a": {"canonical_name": "new_moon_face", "aliases": []},
"1f31d": {"canonical_name": "moon_face", "aliases": []},
"1f31e": {"canonical_name": "sun_face", "aliases": []},
# goodnight seems way more useful
"1f31b": {"canonical_name": "goodnight", "aliases": []},
# '1f31c': {'canonical_name': 'X', 'aliases': ['last_quarter_moon_with_face']},
# seems like the best emoji for moon
"1f319": {"canonical_name": "moon", "aliases": []},
# dizzy taken by People/54, had to come up with something else
"1f4ab": {"canonical_name": "seeing_stars", "aliases": []},
"2b50": {"canonical_name": "star", "aliases": []},
# glowing_star from gemoji/Unicode
"1f31f": {"canonical_name": "glowing_star", "aliases": []},
# glamour seems like a reasonable addition
"2728": {"canonical_name": "sparkles", "aliases": ["glamour"]},
# high_voltage from gemoji/Unicode
"26a1": {"canonical_name": "high_voltage", "aliases": ["zap"]},
# https://emojipedia.org/fire/
"1f525": {"canonical_name": "fire", "aliases": ["lit", "hot", "flame"]},
# explosion and crash seem like reasonable additions
"1f4a5": {"canonical_name": "boom", "aliases": ["explosion", "crash", "collision"]},
# meteor seems like a reasonable addition
"2604": {"canonical_name": "comet", "aliases": ["meteor"]},
"2600": {"canonical_name": "sunny", "aliases": []},
"1f324": {"canonical_name": "mostly_sunny", "aliases": []},
# partly_cloudy for the glass half empty people
"26c5": {"canonical_name": "partly_sunny", "aliases": ["partly_cloudy"]},
"1f325": {"canonical_name": "cloudy", "aliases": []},
# sunshowers seems like a more fun term
"1f326": {
"canonical_name": "sunshowers",
"aliases": ["sun_and_rain", "partly_sunny_with_rain"],
},
# pride and lgbtq seem like reasonable additions
"1f308": {"canonical_name": "rainbow", "aliases": ["pride", "lgbtq"]},
# overcast seems like a good addition
"2601": {"canonical_name": "cloud", "aliases": ["overcast"]},
# suggested by user typing these into their typeahead.
"1f327": {"canonical_name": "rainy", "aliases": ["soaked", "drenched"]},
# thunderstorm seems better for this emoji, and thunder_and_rain more
# evocative than thunder_cloud_and_rain
"26c8": {"canonical_name": "thunderstorm", "aliases": ["thunder_and_rain"]},
# lightning_storm seemed better than lightning_cloud
"1f329": {"canonical_name": "lightning", "aliases": ["lightning_storm"]},
# snowy to parallel sunny, cloudy, etc; snowstorm seems like a good
# addition
"1f328": {"canonical_name": "snowy", "aliases": ["snowstorm"]},
"2603": {"canonical_name": "snowman", "aliases": []},
# don't need two snowmen. frosty is nice because it's a weather (primary
# benefit) and also a snowman (one that suffered from not having snow, in
# fact)
"26c4": {"canonical_name": "frosty", "aliases": []},
"2744": {"canonical_name": "snowflake", "aliases": []},
# the internet didn't seem to have a good use for this emoji. windy is a
# good weather that is otherwise not represented. mother_nature from
# https://emojipedia.org/wind-blowing-face/
"1f32c": {"canonical_name": "windy", "aliases": ["mother_nature"]},
"1f4a8": {"canonical_name": "dash", "aliases": []},
# tornado_cloud comes from the Unicode, but e.g. gemoji drops the cloud
"1f32a": {"canonical_name": "tornado", "aliases": []},
# hazy seemed like a good addition
"1f32b": {"canonical_name": "fog", "aliases": ["hazy"]},
"1f30a": {"canonical_name": "ocean", "aliases": []},
# drop seems better than droplet, since could be used for its other
# meanings. water drop partly so that it shows up in typeahead for water
"1f4a7": {"canonical_name": "drop", "aliases": ["water_drop"]},
"1f4a6": {"canonical_name": "sweat_drops", "aliases": []},
"2614": {"canonical_name": "umbrella_with_rain", "aliases": []},
"1f34f": {"canonical_name": "green_apple", "aliases": []},
"1f34e": {"canonical_name": "apple", "aliases": []},
"1f350": {"canonical_name": "pear", "aliases": []},
# An argument for not calling this orange is to save the color for a color
# swatch, but we can deal with that when it happens. Mandarin is from
# https://emojipedia.org/tangerine/, also like that it has a second meaning
"1f34a": {"canonical_name": "orange", "aliases": ["tangerine", "mandarin"]},
"1f34b": {"canonical_name": "lemon", "aliases": []},
"1f34c": {"canonical_name": "banana", "aliases": []},
"1f349": {"canonical_name": "watermelon", "aliases": []},
"1f347": {"canonical_name": "grapes", "aliases": []},
"1f353": {"canonical_name": "strawberry", "aliases": []},
"1f348": {"canonical_name": "melon", "aliases": []},
"1f352": {"canonical_name": "cherries", "aliases": []},
"1f351": {"canonical_name": "peach", "aliases": []},
"1f34d": {"canonical_name": "pineapple", "aliases": []},
"1f95d": {"canonical_name": "kiwi", "aliases": []},
"1f951": {"canonical_name": "avocado", "aliases": []},
"1f345": {"canonical_name": "tomato", "aliases": []},
"1f346": {"canonical_name": "eggplant", "aliases": []},
"1f952": {"canonical_name": "cucumber", "aliases": []},
"1f955": {"canonical_name": "carrot", "aliases": []},
# maize is from Unicode
"1f33d": {"canonical_name": "corn", "aliases": ["maize"]},
# chili_pepper seems like a reasonable addition
"1f336": {"canonical_name": "hot_pepper", "aliases": ["chili_pepper"]},
"1f954": {"canonical_name": "potato", "aliases": []},
# yam seems better than sweet_potato, since we already have a potato (not a
# strong argument, but is better on the typeahead not to have emoji that
# share long prefixes)
"1f360": {"canonical_name": "yam", "aliases": ["sweet_potato"]},
"1f330": {"canonical_name": "chestnut", "aliases": []},
"1f95c": {"canonical_name": "peanuts", "aliases": []},
"1f36f": {"canonical_name": "honey", "aliases": []},
"1f950": {"canonical_name": "croissant", "aliases": []},
"1f35e": {"canonical_name": "bread", "aliases": []},
"1f956": {"canonical_name": "baguette", "aliases": []},
"1f9c0": {"canonical_name": "cheese", "aliases": []},
"1f95a": {"canonical_name": "egg", "aliases": []},
# already have an egg in Foods/31, though I guess wouldn't be a big deal to
# add it here.
"1f373": {"canonical_name": "cooking", "aliases": []},
"1f953": {"canonical_name": "bacon", "aliases": []},
# there's no lunch and dinner, which is a small negative against adding
# breakfast
"1f95e": {"canonical_name": "pancakes", "aliases": ["breakfast"]},
# There is already shrimp in Nature/51, and tempura seems like a better
# description
"1f364": {"canonical_name": "tempura", "aliases": []},
# drumstick seems like a better description
"1f357": {"canonical_name": "drumstick", "aliases": ["poultry"]},
"1f356": {"canonical_name": "meat", "aliases": []},
"1f355": {"canonical_name": "pizza", "aliases": []},
"1f32d": {"canonical_name": "hotdog", "aliases": []},
"1f354": {"canonical_name": "hamburger", "aliases": []},
"1f35f": {"canonical_name": "fries", "aliases": []},
# https://emojipedia.org/stuffed-flatbread/
"1f959": {
"canonical_name": "doner_kebab",
"aliases": ["shawarma", "souvlaki", "stuffed_flatbread"],
},
"1f32e": {"canonical_name": "taco", "aliases": []},
"1f32f": {"canonical_name": "burrito", "aliases": []},
"1f957": {"canonical_name": "salad", "aliases": []},
# I think Foods/49 is a better :food:
"1f958": {"canonical_name": "paella", "aliases": []},
"1f35d": {"canonical_name": "spaghetti", "aliases": []},
# seems like the best noodles? maybe this should be Foods/47? Noodles seem
# like a bigger thing in east asia than in europe, so going with that.
"1f35c": {"canonical_name": "ramen", "aliases": ["noodles"]},
# seems like the best :food:. Also a reasonable :soup:, though the Google
# one is indeed more a pot of food (the Unicode) than a soup
"1f372": {"canonical_name": "food", "aliases": ["soup", "stew"]},
# naruto is actual name, and I think don't need this to autocomplete for
# "fish"
"1f365": {"canonical_name": "naruto", "aliases": []},
"1f363": {"canonical_name": "sushi", "aliases": []},
"1f371": {"canonical_name": "bento", "aliases": []},
"1f35b": {"canonical_name": "curry", "aliases": []},
"1f35a": {"canonical_name": "rice", "aliases": []},
# onigiri is actual name, and I think don't need this to typeahead complete
# for "rice"
"1f359": {"canonical_name": "onigiri", "aliases": []},
# leaving rice_cracker in, so that we have something for cracker
"1f358": {"canonical_name": "senbei", "aliases": ["rice_cracker"]},
"1f362": {"canonical_name": "oden", "aliases": []},
"1f361": {"canonical_name": "dango", "aliases": []},
"1f367": {"canonical_name": "shaved_ice", "aliases": []},
# seemed like the best emoji for gelato
"1f368": {"canonical_name": "ice_cream", "aliases": ["gelato"]},
# already have ice_cream in Foods/60, and soft_serve seems like a
# potentially fun emoji to have in conjunction with ice_cream. Put in
# soft_ice_cream so it typeahead completes on ice_cream as well.
"1f366": {"canonical_name": "soft_serve", "aliases": ["soft_ice_cream"]},
"1f370": {"canonical_name": "cake", "aliases": []},
"1f382": {"canonical_name": "birthday", "aliases": []},
# flan seems like a reasonable addition
"1f36e": {"canonical_name": "custard", "aliases": ["flan"]},
"1f36d": {"canonical_name": "lollipop", "aliases": []},
"1f36c": {"canonical_name": "candy", "aliases": []},
"1f36b": {"canonical_name": "chocolate", "aliases": []},
"1f37f": {"canonical_name": "popcorn", "aliases": []},
# donut dominates doughnut on
# https://trends.google.com/trends/explore?q=doughnut,donut
"1f369": {"canonical_name": "donut", "aliases": ["doughnut"]},
"1f36a": {"canonical_name": "cookie", "aliases": []},
"1f95b": {"canonical_name": "milk", "aliases": ["glass_of_milk"]},
"1f37c": {"canonical_name": "baby_bottle", "aliases": []},
"2615": {"canonical_name": "coffee", "aliases": []},
"1f375": {"canonical_name": "tea", "aliases": []},
"1f376": {"canonical_name": "sake", "aliases": []},
"1f37a": {"canonical_name": "beer", "aliases": []},
"1f37b": {"canonical_name": "beers", "aliases": []},
"1f942": {"canonical_name": "clink", "aliases": ["toast"]},
"1f377": {"canonical_name": "wine", "aliases": []},
    # tumbler means something different in India, and don't want to use
# shot_glass given our policy of using school-age-appropriate terms
"1f943": {"canonical_name": "small_glass", "aliases": []},
"1f378": {"canonical_name": "cocktail", "aliases": []},
"1f379": {"canonical_name": "tropical_drink", "aliases": []},
"1f37e": {"canonical_name": "champagne", "aliases": []},
"1f944": {"canonical_name": "spoon", "aliases": []},
# Added eating_utensils so this would show up in typeahead for eat.
"1f374": {"canonical_name": "fork_and_knife", "aliases": ["eating_utensils"]},
# Seems like the best emoji for hungry and meal. fork_and_knife_and_plate
# is from gemoji/Unicode, and I think is better than the shorter iamcal
# version in this case. The rest just seemed like good additions.
"1f37d": {
"canonical_name": "hungry",
"aliases": ["meal", "table_setting", "fork_and_knife_with_plate", "lets_eat"],
},
# most people interested in this sport call it football
"26bd": {"canonical_name": "football", "aliases": ["soccer"]},
"1f3c0": {"canonical_name": "basketball", "aliases": []},
# to distinguish from Activity/1, but is also the Unicode name
"1f3c8": {"canonical_name": "american_football", "aliases": []},
"26be": {"canonical_name": "baseball", "aliases": []},
"1f3be": {"canonical_name": "tennis", "aliases": []},
"1f3d0": {"canonical_name": "volleyball", "aliases": []},
"1f3c9": {"canonical_name": "rugby", "aliases": []},
# https://emojipedia.org/billiards/ suggests this is actually used for
# billiards, not for "unlucky" or "losing" or some other connotation of
# 8ball. The Unicode name is billiards.
"1f3b1": {"canonical_name": "billiards", "aliases": ["pool", "8_ball"]},
# ping pong is the Unicode name, and seems slightly more popular on
# https://trends.google.com/trends/explore?q=table%20tennis,ping%20pong
"1f3d3": {"canonical_name": "ping_pong", "aliases": ["table_tennis"]},
"1f3f8": {"canonical_name": "badminton", "aliases": []},
# gooooooooal seems more useful of a name, though arguably this isn't the
# best emoji for it
"1f945": {"canonical_name": "gooooooooal", "aliases": ["goal"]},
"1f3d2": {"canonical_name": "ice_hockey", "aliases": []},
"1f3d1": {"canonical_name": "field_hockey", "aliases": []},
# would say bat, but taken by Nature/30
"1f3cf": {"canonical_name": "cricket", "aliases": ["cricket_bat"]},
# hole_in_one seems like a more useful name to have. Sent golf to
# Activity/39
"26f3": {"canonical_name": "hole_in_one", "aliases": []},
# archery seems like a reasonable addition
"1f3f9": {"canonical_name": "bow_and_arrow", "aliases": ["archery"]},
"1f3a3": {"canonical_name": "fishing", "aliases": []},
"1f94a": {"canonical_name": "boxing_glove", "aliases": []},
# keikogi and dogi are the actual names for this, I believe. black_belt is
# I think a more useful name here
"1f94b": {"canonical_name": "black_belt", "aliases": ["keikogi", "dogi", "martial_arts"]},
"26f8": {"canonical_name": "ice_skate", "aliases": []},
"1f3bf": {"canonical_name": "ski", "aliases": []},
"26f7": {"canonical_name": "skier", "aliases": []},
"1f3c2": {"canonical_name": "snowboarder", "aliases": []},
# lift is both what lifters call it, and potentially can be used more
# generally than weight_lift. The others seemed like good additions.
"1f3cb": {"canonical_name": "lift", "aliases": ["work_out", "weight_lift", "gym"]},
# The decisions on tenses here and in the rest of the sports section are
# mostly from gut feel. The Unicode itself is all over the place.
"1f93a": {"canonical_name": "fencing", "aliases": []},
"1f93c": {"canonical_name": "wrestling", "aliases": []},
# seemed like reasonable additions
"1f938": {"canonical_name": "cartwheel", "aliases": ["acrobatics", "gymnastics", "tumbling"]},
# seemed the best emoji for sports
"26f9": {"canonical_name": "ball", "aliases": ["sports"]},
"1f93e": {"canonical_name": "handball", "aliases": []},
"1f3cc": {"canonical_name": "golf", "aliases": []},
"1f3c4": {"canonical_name": "surf", "aliases": []},
"1f3ca": {"canonical_name": "swim", "aliases": []},
"1f93d": {"canonical_name": "water_polo", "aliases": []},
# rest seem like reasonable additions
"1f6a3": {"canonical_name": "rowboat", "aliases": ["crew", "sculling", "rowing"]},
# horse_riding seems like a reasonable addition
"1f3c7": {"canonical_name": "horse_racing", "aliases": ["horse_riding"]},
# at least in the US: this = cyclist, Activity/53 = mountain biker, and
# motorcyclist = biker. Mainly from googling around and personal
# experience. E.g. https://grammarist.com/usage/cyclist-biker/ for cyclist
# and biker,
# https://www.theguardian.com/lifeandstyle/2010/oct/24/bike-snobs-guide-cycling-tribes
# for mountain biker (I've never heard the term "mountain cyclist", and
# they are the only group on that page that gets "biker" instead of
# "cyclist")
"1f6b4": {"canonical_name": "cyclist", "aliases": []},
# see Activity/51
"1f6b5": {"canonical_name": "mountain_biker", "aliases": []},
"1f3bd": {"canonical_name": "running_shirt", "aliases": []},
# I feel like people call sports medals "medals", and military medals
# "military medals". Also see Activity/56
"1f3c5": {"canonical_name": "medal", "aliases": []},
# See Activity/55. military_medal is the gemoji/Unicode
"1f396": {"canonical_name": "military_medal", "aliases": []},
# gold and number_one seem like good additions
"1f947": {"canonical_name": "first_place", "aliases": ["gold", "number_one"]},
# to parallel Activity/57
"1f948": {"canonical_name": "second_place", "aliases": ["silver"]},
# to parallel Activity/57
"1f949": {"canonical_name": "third_place", "aliases": ["bronze"]},
# seemed the best emoji for winner
"1f3c6": {"canonical_name": "trophy", "aliases": ["winner"]},
"1f3f5": {"canonical_name": "rosette", "aliases": []},
"1f397": {"canonical_name": "reminder_ribbon", "aliases": []},
# don't need ticket and admission_ticket (see Activity/64), so made one of
# them :pass:.
"1f3ab": {"canonical_name": "pass", "aliases": []},
# see Activity/63
"1f39f": {"canonical_name": "ticket", "aliases": []},
"1f3aa": {"canonical_name": "circus", "aliases": []},
"1f939": {"canonical_name": "juggling", "aliases": []},
# rest seem like good additions
"1f3ad": {"canonical_name": "performing_arts", "aliases": ["drama", "theater"]},
# rest seem like good additions
"1f3a8": {"canonical_name": "art", "aliases": ["palette", "painting"]},
# action seems more useful than clapper, and clapper doesn't seem like that
# common of a term
"1f3ac": {"canonical_name": "action", "aliases": []},
# seem like good additions
"1f3a4": {"canonical_name": "microphone", "aliases": ["mike", "mic"]},
"1f3a7": {"canonical_name": "headphones", "aliases": []},
"1f3bc": {"canonical_name": "musical_score", "aliases": []},
# piano seems more useful than musical_keyboard
"1f3b9": {"canonical_name": "piano", "aliases": ["musical_keyboard"]},
"1f941": {"canonical_name": "drum", "aliases": []},
"1f3b7": {"canonical_name": "saxophone", "aliases": []},
"1f3ba": {"canonical_name": "trumpet", "aliases": []},
"1f3b8": {"canonical_name": "guitar", "aliases": []},
"1f3bb": {"canonical_name": "violin", "aliases": []},
# dice seems more useful
"1f3b2": {"canonical_name": "dice", "aliases": ["die"]},
# direct_hit from gemoji/Unicode, and seems more useful. bulls_eye seemed
# like a reasonable addition
"1f3af": {"canonical_name": "direct_hit", "aliases": ["darts", "bulls_eye"]},
# strike seemed more useful than bowling
"1f3b3": {"canonical_name": "strike", "aliases": ["bowling"]},
"1f3ae": {"canonical_name": "video_game", "aliases": []},
# gambling seemed more useful than slot_machine
"1f3b0": {"canonical_name": "slot_machine", "aliases": []},
# the Google emoji for this is not red
"1f697": {"canonical_name": "car", "aliases": []},
# rideshare seems like a reasonable addition
"1f695": {"canonical_name": "taxi", "aliases": ["rideshare"]},
# the Google emoji for this is not blue. recreational_vehicle is from
# gemoji/Unicode, jeep seemed like a good addition
"1f699": {"canonical_name": "recreational_vehicle", "aliases": ["jeep"]},
# school_bus seemed like a reasonable addition, even though the twitter
# glyph for this doesn't really look like a school bus
"1f68c": {"canonical_name": "bus", "aliases": ["school_bus"]},
"1f68e": {"canonical_name": "trolley", "aliases": []},
"1f3ce": {"canonical_name": "racecar", "aliases": []},
"1f693": {"canonical_name": "police_car", "aliases": []},
"1f691": {"canonical_name": "ambulance", "aliases": []},
# https://trends.google.com/trends/explore?q=fire%20truck,fire%20engine
"1f692": {"canonical_name": "fire_truck", "aliases": ["fire_engine"]},
"1f690": {"canonical_name": "minibus", "aliases": []},
# moving_truck and truck for Places/11 and Places/12 seem much better than
# the iamcal names
"1f69a": {"canonical_name": "moving_truck", "aliases": []},
# see Places/11 for truck. Rest seem reasonable additions.
"1f69b": {
"canonical_name": "truck",
"aliases": ["tractor-trailer", "big_rig", "semi_truck", "transport_truck"],
},
"1f69c": {"canonical_name": "tractor", "aliases": []},
# kick_scooter and scooter seem better for Places/14 and Places/16 than
# scooter and motor_scooter.
"1f6f4": {"canonical_name": "kick_scooter", "aliases": []},
"1f6b2": {"canonical_name": "bike", "aliases": ["bicycle"]},
# see Places/14. Called motor_bike (or bike) in India
"1f6f5": {"canonical_name": "scooter", "aliases": ["motor_bike"]},
"1f3cd": {"canonical_name": "motorcycle", "aliases": []},
# siren seems more useful. alert seems like a reasonable addition
"1f6a8": {"canonical_name": "siren", "aliases": ["rotating_light", "alert"]},
"1f694": {"canonical_name": "oncoming_police_car", "aliases": []},
"1f68d": {"canonical_name": "oncoming_bus", "aliases": []},
# car to parallel e.g. Places/1
"1f698": {"canonical_name": "oncoming_car", "aliases": ["oncoming_automobile"]},
"1f696": {"canonical_name": "oncoming_taxi", "aliases": []},
# ski_lift seems like a good addition
"1f6a1": {"canonical_name": "aerial_tramway", "aliases": ["ski_lift"]},
# gondola seems more useful
"1f6a0": {"canonical_name": "gondola", "aliases": ["mountain_cableway"]},
"1f69f": {"canonical_name": "suspension_railway", "aliases": []},
# train_car seems like a reasonable addition
"1f683": {"canonical_name": "railway_car", "aliases": ["train_car"]},
# this does not seem like a good emoji for train, especially compared to
# Places/33. streetcar seems like a good addition.
"1f68b": {"canonical_name": "tram", "aliases": ["streetcar"]},
"1f69e": {"canonical_name": "mountain_railway", "aliases": []},
# elevated_train seems like a reasonable addition
"1f69d": {"canonical_name": "monorail", "aliases": ["elevated_train"]},
# from gemoji/Unicode. Also, don't think we need two bullettrains
"1f684": {"canonical_name": "high_speed_train", "aliases": []},
# Google, Wikipedia, etc. prefer bullet train to bullettrain
"1f685": {"canonical_name": "bullet_train", "aliases": []},
"1f688": {"canonical_name": "light_rail", "aliases": []},
"1f682": {"canonical_name": "train", "aliases": ["steam_locomotive"]},
# oncoming_train seems better than train2
"1f686": {"canonical_name": "oncoming_train", "aliases": []},
# saving metro for Symbols/108. The tunnel makes subway more appropriate
# anyway.
"1f687": {"canonical_name": "subway", "aliases": []},
# all the glyphs of oncoming vehicles have names like oncoming_*. The
# alternate names are to parallel the alternates to Places/27.
"1f68a": {
"canonical_name": "oncoming_tram",
"aliases": ["oncoming_streetcar", "oncoming_trolley"],
},
"1f689": {"canonical_name": "station", "aliases": []},
"1f681": {"canonical_name": "helicopter", "aliases": []},
"1f6e9": {"canonical_name": "small_airplane", "aliases": []},
"2708": {"canonical_name": "airplane", "aliases": []},
# take_off seems more useful than airplane_departure. departure also seems
# more useful than airplane_departure. Arguably departure should be the
# primary, since arrival is probably more useful than landing in Places/42,
# but going with this for now.
"1f6eb": {"canonical_name": "take_off", "aliases": ["departure", "airplane_departure"]},
# parallel to Places/41
"1f6ec": {"canonical_name": "landing", "aliases": ["arrival", "airplane_arrival"]},
"1f680": {"canonical_name": "rocket", "aliases": []},
"1f6f0": {"canonical_name": "satellite", "aliases": []},
"1f4ba": {"canonical_name": "seat", "aliases": []},
"1f6f6": {"canonical_name": "canoe", "aliases": []},
"26f5": {"canonical_name": "boat", "aliases": ["sailboat"]},
"1f6e5": {"canonical_name": "motor_boat", "aliases": []},
"1f6a4": {"canonical_name": "speedboat", "aliases": []},
# yacht and cruise seem like reasonable additions
"1f6f3": {"canonical_name": "passenger_ship", "aliases": ["yacht", "cruise"]},
"26f4": {"canonical_name": "ferry", "aliases": []},
"1f6a2": {"canonical_name": "ship", "aliases": []},
"2693": {"canonical_name": "anchor", "aliases": []},
# there already is a construction in Places/82, and work_in_progress seems
# like a useful thing to have. Construction_zone seems better than the
# Unicode construction_sign, and is there partly so this autocompletes for
# construction.
"1f6a7": {"canonical_name": "work_in_progress", "aliases": ["construction_zone"]},
# alternates from https://emojipedia.org/fuel-pump/. Unicode is fuel_pump,
# not fuelpump
"26fd": {"canonical_name": "fuel_pump", "aliases": ["gas_pump", "petrol_pump"]},
# not sure why iamcal removed the space
"1f68f": {"canonical_name": "bus_stop", "aliases": []},
# https://emojipedia.org/vertical-traffic-light/ thinks this is the more
# common of the two traffic lights, so putting traffic_light on this one
"1f6a6": {"canonical_name": "traffic_light", "aliases": ["vertical_traffic_light"]},
# see Places/57
"1f6a5": {"canonical_name": "horizontal_traffic_light", "aliases": []},
# road_trip from https://mashable.com/2015/10/23/ios-9-1-emoji-guide/
"1f5fa": {"canonical_name": "map", "aliases": ["world_map", "road_trip"]},
# rock_carving, statue, and tower seem more general and less culturally
# specific, for Places/60, 61, and 63.
"1f5ff": {"canonical_name": "rock_carving", "aliases": ["moyai"]},
# new_york from https://emojipedia.org/statue-of-liberty/. see Places/60
# for statue
"1f5fd": {"canonical_name": "statue", "aliases": ["new_york", "statue_of_liberty"]},
"26f2": {"canonical_name": "fountain", "aliases": []},
# see Places/60
"1f5fc": {"canonical_name": "tower", "aliases": ["tokyo_tower"]},
# choosing this as the castle since castles are a way bigger thing in
# europe than japan, and shiro is a pretty reasonable name for Places/65
"1f3f0": {"canonical_name": "castle", "aliases": []},
# see Places/64
"1f3ef": {"canonical_name": "shiro", "aliases": []},
"1f3df": {"canonical_name": "stadium", "aliases": []},
"1f3a1": {"canonical_name": "ferris_wheel", "aliases": []},
"1f3a2": {"canonical_name": "roller_coaster", "aliases": []},
# merry_go_round seems like a good addition
"1f3a0": {"canonical_name": "carousel", "aliases": ["merry_go_round"]},
# beach_umbrella seems more useful
"26f1": {"canonical_name": "beach_umbrella", "aliases": []},
"1f3d6": {"canonical_name": "beach", "aliases": []},
"1f3dd": {"canonical_name": "island", "aliases": []},
"26f0": {"canonical_name": "mountain", "aliases": []},
"1f3d4": {"canonical_name": "snowy_mountain", "aliases": []},
# already lots of other mountains, otherwise would rename this like
# Places/60
"1f5fb": {"canonical_name": "mount_fuji", "aliases": []},
"1f30b": {"canonical_name": "volcano", "aliases": []},
"1f3dc": {"canonical_name": "desert", "aliases": []},
# campsite from https://emojipedia.org/camping/, I think Places/79 is a
# better camping
"1f3d5": {"canonical_name": "campsite", "aliases": []},
"26fa": {"canonical_name": "tent", "aliases": ["camping"]},
"1f6e4": {"canonical_name": "railway_track", "aliases": ["train_tracks"]},
# road is used much more frequently at
# https://trends.google.com/trends/explore?q=road,motorway
"1f6e3": {"canonical_name": "road", "aliases": ["motorway"]},
"1f3d7": {"canonical_name": "construction", "aliases": []},
"1f3ed": {"canonical_name": "factory", "aliases": []},
"1f3e0": {"canonical_name": "house", "aliases": []},
# suburb seems more useful
"1f3e1": {"canonical_name": "suburb", "aliases": []},
"1f3d8": {"canonical_name": "houses", "aliases": []},
# condemned seemed like a good addition
"1f3da": {"canonical_name": "derelict_house", "aliases": ["condemned"]},
"1f3e2": {"canonical_name": "office", "aliases": []},
"1f3ec": {"canonical_name": "department_store", "aliases": []},
"1f3e3": {"canonical_name": "japan_post", "aliases": []},
"1f3e4": {"canonical_name": "post_office", "aliases": []},
"1f3e5": {"canonical_name": "hospital", "aliases": []},
"1f3e6": {"canonical_name": "bank", "aliases": []},
"1f3e8": {"canonical_name": "hotel", "aliases": []},
"1f3ea": {"canonical_name": "convenience_store", "aliases": []},
"1f3eb": {"canonical_name": "school", "aliases": []},
"1f3e9": {"canonical_name": "love_hotel", "aliases": []},
"1f492": {"canonical_name": "wedding", "aliases": []},
"1f3db": {"canonical_name": "classical_building", "aliases": []},
"26ea": {"canonical_name": "church", "aliases": []},
"1f54c": {"canonical_name": "mosque", "aliases": []},
"1f54d": {"canonical_name": "synagogue", "aliases": []},
"1f54b": {"canonical_name": "kaaba", "aliases": []},
"26e9": {"canonical_name": "shinto_shrine", "aliases": []},
"1f5fe": {"canonical_name": "japan", "aliases": []},
# rice_scene seems like a strange name to have. gemoji alternate is
# moon_ceremony
"1f391": {"canonical_name": "moon_ceremony", "aliases": []},
"1f3de": {"canonical_name": "national_park", "aliases": []},
# ocean_sunrise to parallel Places/109
"1f305": {"canonical_name": "sunrise", "aliases": ["ocean_sunrise"]},
"1f304": {"canonical_name": "mountain_sunrise", "aliases": []},
# shooting_star and wish seem like way better descriptions. gemoji/Unicode
# is shooting_star
"1f320": {"canonical_name": "shooting_star", "aliases": ["wish"]},
"1f387": {"canonical_name": "sparkler", "aliases": []},
"1f386": {"canonical_name": "fireworks", "aliases": []},
"1f307": {"canonical_name": "city_sunrise", "aliases": []},
"1f306": {"canonical_name": "sunset", "aliases": []},
# city and skyline seem more useful than cityscape
"1f3d9": {"canonical_name": "city", "aliases": ["skyline"]},
"1f303": {"canonical_name": "night", "aliases": []},
# night_sky seems like a good addition
"1f30c": {"canonical_name": "milky_way", "aliases": ["night_sky"]},
"1f309": {"canonical_name": "bridge", "aliases": []},
"1f301": {"canonical_name": "foggy", "aliases": []},
"231a": {"canonical_name": "watch", "aliases": []},
# Unicode/gemoji is mobile_phone. The rest seem like good additions
"1f4f1": {"canonical_name": "mobile_phone", "aliases": ["smartphone", "iphone", "android"]},
"1f4f2": {"canonical_name": "calling", "aliases": []},
# gemoji has laptop, even though the Google emoji for this does not look
# like a laptop
"1f4bb": {"canonical_name": "computer", "aliases": ["laptop"]},
"2328": {"canonical_name": "keyboard", "aliases": []},
"1f5a5": {"canonical_name": "desktop_computer", "aliases": []},
"1f5a8": {"canonical_name": "printer", "aliases": []},
# gemoji/Unicode is computer_mouse
"1f5b1": {"canonical_name": "computer_mouse", "aliases": []},
"1f5b2": {"canonical_name": "trackball", "aliases": []},
# arcade seems like a reasonable addition
"1f579": {"canonical_name": "joystick", "aliases": ["arcade"]},
# vise seems like a reasonable addition
"1f5dc": {"canonical_name": "compression", "aliases": ["vise"]},
# gold record seems more useful, idea came from
# https://11points.com/11-emoji-different-meanings-think/
"1f4bd": {"canonical_name": "gold_record", "aliases": ["minidisc"]},
"1f4be": {"canonical_name": "floppy_disk", "aliases": []},
"1f4bf": {"canonical_name": "cd", "aliases": []},
"1f4c0": {"canonical_name": "dvd", "aliases": []},
# videocassette from gemoji/Unicode
"1f4fc": {"canonical_name": "vhs", "aliases": ["videocassette"]},
"1f4f7": {"canonical_name": "camera", "aliases": []},
# both of these seem more useful than camera_with_flash
"1f4f8": {"canonical_name": "taking_a_picture", "aliases": ["say_cheese"]},
# video_recorder seems like a reasonable addition
"1f4f9": {"canonical_name": "video_camera", "aliases": ["video_recorder"]},
"1f3a5": {"canonical_name": "movie_camera", "aliases": []},
# seems like the best emoji for movie
"1f4fd": {"canonical_name": "projector", "aliases": ["movie"]},
"1f39e": {"canonical_name": "film", "aliases": []},
# both of these seem more useful than telephone_receiver
"1f4de": {"canonical_name": "landline", "aliases": ["home_phone"]},
"260e": {"canonical_name": "phone", "aliases": ["telephone"]},
"1f4df": {"canonical_name": "pager", "aliases": []},
"1f4e0": {"canonical_name": "fax", "aliases": []},
"1f4fa": {"canonical_name": "tv", "aliases": ["television"]},
"1f4fb": {"canonical_name": "radio", "aliases": []},
"1f399": {"canonical_name": "studio_microphone", "aliases": []},
# volume seems more useful
"1f39a": {"canonical_name": "volume", "aliases": ["level_slider"]},
"1f39b": {"canonical_name": "control_knobs", "aliases": []},
"23f1": {"canonical_name": "stopwatch", "aliases": []},
"23f2": {"canonical_name": "timer", "aliases": []},
"23f0": {"canonical_name": "alarm_clock", "aliases": []},
"1f570": {"canonical_name": "mantelpiece_clock", "aliases": []},
# times_up and time_ticking seem more useful than the hourglass names
"231b": {"canonical_name": "times_up", "aliases": ["hourglass_done"]},
# seems like the better hourglass. Also see Objects/36
"23f3": {"canonical_name": "time_ticking", "aliases": ["hourglass"]},
"1f4e1": {"canonical_name": "satellite_antenna", "aliases": []},
# seems like a reasonable addition
"1f50b": {"canonical_name": "battery", "aliases": ["full_battery"]},
"1f50c": {"canonical_name": "electric_plug", "aliases": []},
# light_bulb seems better, and is from Unicode/gemoji. idea seems like a good
# addition
"1f4a1": {"canonical_name": "light_bulb", "aliases": ["bulb", "idea"]},
"1f526": {"canonical_name": "flashlight", "aliases": []},
"1f56f": {"canonical_name": "candle", "aliases": []},
# seems like a reasonable addition
"1f5d1": {"canonical_name": "wastebasket", "aliases": ["trash_can"]},
# https://www.iemoji.com/view/emoji/1173/objects/oil-drum
"1f6e2": {"canonical_name": "oil_drum", "aliases": ["commodities"]},
# losing money from https://emojipedia.org/money-with-wings/,
# easy_come_easy_go seems like a reasonable addition
"1f4b8": {
"canonical_name": "losing_money",
"aliases": ["easy_come_easy_go", "money_with_wings"],
},
# I think the _bills, _banknotes etc versions of these are arguably more
# fun to use in chat, and certainly match the glyphs better
"1f4b5": {"canonical_name": "dollar_bills", "aliases": []},
"1f4b4": {"canonical_name": "yen_banknotes", "aliases": []},
"1f4b6": {"canonical_name": "euro_banknotes", "aliases": []},
"1f4b7": {"canonical_name": "pound_notes", "aliases": []},
"1f4b0": {"canonical_name": "money", "aliases": []},
"1f4b3": {"canonical_name": "credit_card", "aliases": ["debit_card"]},
"1f48e": {"canonical_name": "gem", "aliases": ["crystal"]},
# justice seems more useful
"2696": {"canonical_name": "justice", "aliases": ["scales", "balance"]},
# fixing, at_work, and working_on_it seem like useful concepts for
# workplace chat
"1f527": {"canonical_name": "fixing", "aliases": ["wrench"]},
"1f528": {"canonical_name": "hammer", "aliases": ["maintenance", "handyman", "handywoman"]},
"2692": {"canonical_name": "at_work", "aliases": ["hammer_and_pick"]},
# something that might be useful for chat.zulip.org, even
"1f6e0": {"canonical_name": "working_on_it", "aliases": ["hammer_and_wrench", "tools"]},
"26cf": {"canonical_name": "mine", "aliases": ["pick"]},
# screw is somewhat inappropriate, but not openly so, so leaving it in
"1f529": {"canonical_name": "nut_and_bolt", "aliases": ["screw"]},
"2699": {"canonical_name": "gear", "aliases": ["settings", "mechanical", "engineer"]},
"26d3": {"canonical_name": "chains", "aliases": []},
"1f52b": {"canonical_name": "gun", "aliases": []},
"1f4a3": {"canonical_name": "bomb", "aliases": []},
# betrayed from https://www.iemoji.com/view/emoji/786/objects/kitchen-knife
"1f52a": {"canonical_name": "knife", "aliases": ["hocho", "betrayed"]},
# rated_for_violence from
# https://www.iemoji.com/view/emoji/1085/objects/dagger. hate (also
# suggested there) seems too strong, as does just "violence".
"1f5e1": {"canonical_name": "dagger", "aliases": ["rated_for_violence"]},
"2694": {"canonical_name": "duel", "aliases": ["swords"]},
"1f6e1": {"canonical_name": "shield", "aliases": []},
"1f6ac": {"canonical_name": "smoking", "aliases": []},
"26b0": {"canonical_name": "coffin", "aliases": ["burial", "grave"]},
"26b1": {"canonical_name": "funeral_urn", "aliases": ["cremation"]},
# amphora is too obscure, I think
"1f3fa": {"canonical_name": "vase", "aliases": ["amphora"]},
"1f52e": {"canonical_name": "crystal_ball", "aliases": ["oracle", "future", "fortune_telling"]},
"1f4ff": {"canonical_name": "prayer_beads", "aliases": []},
"1f488": {"canonical_name": "barber", "aliases": ["striped_pole"]},
# alchemy seems more useful and less obscure
"2697": {"canonical_name": "alchemy", "aliases": ["alembic"]},
"1f52d": {"canonical_name": "telescope", "aliases": []},
# science seems useful to have. scientist inspired by
# https://www.iemoji.com/view/emoji/787/objects/microscope
"1f52c": {"canonical_name": "science", "aliases": ["microscope", "scientist"]},
"1f573": {"canonical_name": "hole", "aliases": []},
"1f48a": {"canonical_name": "medicine", "aliases": ["pill"]},
"1f489": {"canonical_name": "injection", "aliases": ["syringe"]},
"1f321": {"canonical_name": "temperature", "aliases": ["thermometer", "warm"]},
"1f6bd": {"canonical_name": "toilet", "aliases": []},
"1f6b0": {"canonical_name": "potable_water", "aliases": ["tap_water", "drinking_water"]},
"1f6bf": {"canonical_name": "shower", "aliases": []},
"1f6c1": {"canonical_name": "bathtub", "aliases": []},
"1f6c0": {"canonical_name": "bath", "aliases": []},
# reception and services from
# https://www.iemoji.com/view/emoji/1169/objects/bellhop-bell
"1f6ce": {"canonical_name": "bellhop_bell", "aliases": ["reception", "services", "ding"]},
"1f511": {"canonical_name": "key", "aliases": []},
# encrypted from https://www.iemoji.com/view/emoji/1081/objects/old-key,
# secret from https://mashable.com/2015/10/23/ios-9-1-emoji-guide/
"1f5dd": {
"canonical_name": "secret",
"aliases": ["dungeon", "old_key", "encrypted", "clue", "hint"],
},
"1f6aa": {"canonical_name": "door", "aliases": []},
"1f6cb": {
"canonical_name": "living_room",
"aliases": ["furniture", "couch_and_lamp", "lifestyles"],
},
"1f6cf": {"canonical_name": "bed", "aliases": ["bedroom"]},
# guestrooms from iemoji, would add hotel but taken by Places/94
"1f6cc": {"canonical_name": "in_bed", "aliases": ["accommodations", "guestrooms"]},
"1f5bc": {"canonical_name": "picture", "aliases": ["framed_picture"]},
"1f6cd": {"canonical_name": "shopping_bags", "aliases": []},
# https://trends.google.com/trends/explore?q=shopping%20cart,shopping%20trolley
"1f6d2": {"canonical_name": "shopping_cart", "aliases": ["shopping_trolley"]},
"1f381": {"canonical_name": "gift", "aliases": ["present"]},
# seemed like the best celebration
"1f388": {"canonical_name": "balloon", "aliases": ["celebration"]},
# from gemoji/Unicode
"1f38f": {"canonical_name": "carp_streamer", "aliases": ["flags"]},
"1f380": {"canonical_name": "ribbon", "aliases": ["decoration"]},
"1f38a": {"canonical_name": "confetti", "aliases": ["party_ball"]},
# seemed like the best congratulations
"1f389": {"canonical_name": "tada", "aliases": ["congratulations"]},
"1f38e": {"canonical_name": "dolls", "aliases": []},
"1f3ee": {"canonical_name": "lantern", "aliases": ["izakaya_lantern"]},
"1f390": {"canonical_name": "wind_chime", "aliases": []},
"2709": {"canonical_name": "email", "aliases": ["envelope", "mail"]},
# seems useful for chat?
"1f4e9": {"canonical_name": "mail_sent", "aliases": ["sealed"]},
"1f4e8": {"canonical_name": "mail_received", "aliases": []},
"1f4e7": {"canonical_name": "e-mail", "aliases": []},
"1f48c": {"canonical_name": "love_letter", "aliases": []},
"1f4e5": {"canonical_name": "inbox", "aliases": []},
"1f4e4": {"canonical_name": "outbox", "aliases": []},
"1f4e6": {"canonical_name": "package", "aliases": []},
# price_tag from iemoji
"1f3f7": {"canonical_name": "label", "aliases": ["tag", "price_tag"]},
"1f4ea": {"canonical_name": "closed_mailbox", "aliases": []},
"1f4eb": {"canonical_name": "mailbox", "aliases": []},
"1f4ec": {"canonical_name": "unread_mail", "aliases": []},
"1f4ed": {"canonical_name": "inbox_zero", "aliases": ["empty_mailbox", "no_mail"]},
"1f4ee": {"canonical_name": "mail_dropoff", "aliases": []},
"1f4ef": {"canonical_name": "horn", "aliases": []},
"1f4dc": {"canonical_name": "scroll", "aliases": []},
# receipt seems more useful?
"1f4c3": {"canonical_name": "receipt", "aliases": []},
"1f4c4": {"canonical_name": "document", "aliases": ["paper", "file", "page"]},
"1f4d1": {"canonical_name": "place_holder", "aliases": []},
"1f4ca": {"canonical_name": "bar_chart", "aliases": []},
# seems like the best chart
"1f4c8": {"canonical_name": "chart", "aliases": ["upwards_trend", "growing", "increasing"]},
"1f4c9": {"canonical_name": "downwards_trend", "aliases": ["shrinking", "decreasing"]},
"1f5d2": {"canonical_name": "spiral_notepad", "aliases": []},
# '1f5d3': {'canonical_name': 'X', 'aliases': ['spiral_calendar_pad']},
# swapped the following two largely due to the emojione glyphs
"1f4c6": {"canonical_name": "date", "aliases": []},
"1f4c5": {"canonical_name": "calendar", "aliases": []},
"1f4c7": {"canonical_name": "rolodex", "aliases": ["card_index"]},
"1f5c3": {"canonical_name": "archive", "aliases": []},
"1f5f3": {"canonical_name": "ballot_box", "aliases": []},
"1f5c4": {"canonical_name": "file_cabinet", "aliases": []},
"1f4cb": {"canonical_name": "clipboard", "aliases": []},
# don't need two file_folders, so made this organize
"1f4c1": {"canonical_name": "organize", "aliases": ["file_folder"]},
"1f4c2": {"canonical_name": "folder", "aliases": []},
"1f5c2": {"canonical_name": "sort", "aliases": []},
"1f5de": {"canonical_name": "newspaper", "aliases": ["swat"]},
"1f4f0": {"canonical_name": "headlines", "aliases": []},
"1f4d3": {"canonical_name": "notebook", "aliases": ["composition_book"]},
"1f4d4": {"canonical_name": "decorative_notebook", "aliases": []},
"1f4d2": {"canonical_name": "ledger", "aliases": ["spiral_notebook"]},
# the glyphs here are the same as Objects/147-149 (with a different color),
# for all but Google
"1f4d5": {"canonical_name": "red_book", "aliases": ["closed_book"]},
"1f4d7": {"canonical_name": "green_book", "aliases": []},
"1f4d8": {"canonical_name": "blue_book", "aliases": []},
"1f4d9": {"canonical_name": "orange_book", "aliases": []},
"1f4da": {"canonical_name": "books", "aliases": []},
"1f4d6": {"canonical_name": "book", "aliases": ["open_book"]},
"1f516": {"canonical_name": "bookmark", "aliases": []},
"1f517": {"canonical_name": "link", "aliases": []},
"1f4ce": {"canonical_name": "paperclip", "aliases": ["attachment"]},
# office_supplies from https://mashable.com/2015/10/23/ios-9-1-emoji-guide/
"1f587": {"canonical_name": "office_supplies", "aliases": ["paperclip_chain", "linked"]},
"1f4d0": {"canonical_name": "carpenter_square", "aliases": ["triangular_ruler"]},
"1f4cf": {"canonical_name": "ruler", "aliases": ["straightedge"]},
"1f4cc": {"canonical_name": "push_pin", "aliases": ["thumb_tack"]},
"1f4cd": {"canonical_name": "pin", "aliases": ["sewing_pin"]},
"2702": {"canonical_name": "scissors", "aliases": []},
"1f58a": {"canonical_name": "pen", "aliases": ["ballpoint_pen"]},
"1f58b": {"canonical_name": "fountain_pen", "aliases": []},
# three of the four emoji sets just have a rightwards-facing objects/162
# '2712': {'canonical_name': 'X', 'aliases': ['black_nib']},
"1f58c": {"canonical_name": "paintbrush", "aliases": []},
"1f58d": {"canonical_name": "crayon", "aliases": []},
"1f4dd": {"canonical_name": "memo", "aliases": ["note"]},
"270f": {"canonical_name": "pencil", "aliases": []},
"1f50d": {"canonical_name": "search", "aliases": ["find", "magnifying_glass"]},
# '1f50e': {'canonical_name': 'X', 'aliases': ['mag_right']},
# https://emojipedia.org/lock-with-ink-pen/
"1f50f": {
"canonical_name": "privacy",
"aliases": ["key_signing", "digital_security", "protected"],
},
"1f510": {
"canonical_name": "secure",
"aliases": ["lock_with_key", "safe", "commitment", "loyalty"],
},
"1f512": {"canonical_name": "locked", "aliases": []},
"1f513": {"canonical_name": "unlocked", "aliases": []},
# seems the best glyph for love and love_you
"2764": {"canonical_name": "heart", "aliases": ["love", "love_you"]},
"1f49b": {"canonical_name": "yellow_heart", "aliases": ["heart_of_gold"]},
"1f49a": {"canonical_name": "green_heart", "aliases": ["envy"]},
"1f499": {"canonical_name": "blue_heart", "aliases": []},
"1f49c": {"canonical_name": "purple_heart", "aliases": ["bravery"]},
"1f5a4": {"canonical_name": "black_heart", "aliases": []},
"1f494": {"canonical_name": "broken_heart", "aliases": ["heartache"]},
"2763": {"canonical_name": "heart_exclamation", "aliases": []},
"1f495": {"canonical_name": "two_hearts", "aliases": []},
"1f49e": {"canonical_name": "revolving_hearts", "aliases": []},
"1f493": {"canonical_name": "heartbeat", "aliases": []},
"1f497": {"canonical_name": "heart_pulse", "aliases": ["growing_heart"]},
"1f496": {"canonical_name": "sparkling_heart", "aliases": []},
"1f498": {"canonical_name": "cupid", "aliases": ["smitten", "heart_arrow"]},
"1f49d": {"canonical_name": "gift_heart", "aliases": []},
"1f49f": {"canonical_name": "heart_box", "aliases": []},
"262e": {"canonical_name": "peace", "aliases": []},
"271d": {"canonical_name": "cross", "aliases": ["christianity"]},
"262a": {"canonical_name": "star_and_crescent", "aliases": ["islam"]},
"1f549": {"canonical_name": "om", "aliases": ["hinduism"]},
"2638": {"canonical_name": "wheel_of_dharma", "aliases": ["buddhism"]},
"2721": {"canonical_name": "star_of_david", "aliases": ["judaism"]},
# can't find any explanation of this at all. Is an alternate star of david?
# '1f52f': {'canonical_name': 'X', 'aliases': ['six_pointed_star']},
"1f54e": {"canonical_name": "menorah", "aliases": []},
"262f": {"canonical_name": "yin_yang", "aliases": []},
"2626": {"canonical_name": "orthodox_cross", "aliases": []},
"1f6d0": {"canonical_name": "place_of_worship", "aliases": []},
"26ce": {"canonical_name": "ophiuchus", "aliases": []},
"2648": {"canonical_name": "aries", "aliases": []},
"2649": {"canonical_name": "taurus", "aliases": []},
"264a": {"canonical_name": "gemini", "aliases": []},
"264b": {"canonical_name": "cancer", "aliases": []},
"264c": {"canonical_name": "leo", "aliases": []},
"264d": {"canonical_name": "virgo", "aliases": []},
"264e": {"canonical_name": "libra", "aliases": []},
"264f": {"canonical_name": "scorpius", "aliases": []},
"2650": {"canonical_name": "sagittarius", "aliases": []},
"2651": {"canonical_name": "capricorn", "aliases": []},
"2652": {"canonical_name": "aquarius", "aliases": []},
"2653": {"canonical_name": "pisces", "aliases": []},
"1f194": {"canonical_name": "id", "aliases": []},
"269b": {"canonical_name": "atom", "aliases": ["physics"]},
# japanese symbol
# '1f251': {'canonical_name': 'X', 'aliases': ['accept']},
"2622": {"canonical_name": "radioactive", "aliases": ["nuclear"]},
"2623": {"canonical_name": "biohazard", "aliases": []},
"1f4f4": {"canonical_name": "phone_off", "aliases": []},
"1f4f3": {"canonical_name": "vibration_mode", "aliases": []},
# '1f236': {'canonical_name': 'X', 'aliases': ['u6709']},
# '1f21a': {'canonical_name': 'X', 'aliases': ['u7121']},
# '1f238': {'canonical_name': 'X', 'aliases': ['u7533']},
# '1f23a': {'canonical_name': 'X', 'aliases': ['u55b6']},
# '1f237': {'canonical_name': 'X', 'aliases': ['u6708']},
"2734": {"canonical_name": "eight_pointed_star", "aliases": []},
"1f19a": {"canonical_name": "vs", "aliases": []},
"1f4ae": {"canonical_name": "white_flower", "aliases": []},
# '1f250': {'canonical_name': 'X', 'aliases': ['ideograph_advantage']},
# japanese character
# '3299': {'canonical_name': 'X', 'aliases': ['secret']},
# '3297': {'canonical_name': 'X', 'aliases': ['congratulations']},
# '1f234': {'canonical_name': 'X', 'aliases': ['u5408']},
# '1f235': {'canonical_name': 'X', 'aliases': ['u6e80']},
# '1f239': {'canonical_name': 'X', 'aliases': ['u5272']},
# '1f232': {'canonical_name': 'X', 'aliases': ['u7981']},
"1f170": {"canonical_name": "a", "aliases": []},
"1f171": {"canonical_name": "b", "aliases": []},
"1f18e": {"canonical_name": "ab", "aliases": []},
"1f191": {"canonical_name": "cl", "aliases": []},
"1f17e": {"canonical_name": "o", "aliases": []},
"1f198": {"canonical_name": "sos", "aliases": []},
# Symbols/105 seems like a better x, and looks more like the other letters
"274c": {"canonical_name": "cross_mark", "aliases": ["incorrect", "wrong"]},
"2b55": {"canonical_name": "circle", "aliases": []},
"1f6d1": {"canonical_name": "stop_sign", "aliases": ["octagonal_sign"]},
"26d4": {"canonical_name": "no_entry", "aliases": ["wrong_way"]},
"1f4db": {"canonical_name": "name_badge", "aliases": []},
"1f6ab": {"canonical_name": "prohibited", "aliases": ["not_allowed"]},
"1f4af": {"canonical_name": "100", "aliases": ["hundred"]},
"1f4a2": {"canonical_name": "anger", "aliases": ["bam", "pow"]},
"2668": {"canonical_name": "hot_springs", "aliases": []},
"1f6b7": {"canonical_name": "no_pedestrians", "aliases": []},
"1f6af": {"canonical_name": "do_not_litter", "aliases": []},
"1f6b3": {"canonical_name": "no_bicycles", "aliases": []},
"1f6b1": {"canonical_name": "non-potable_water", "aliases": []},
"1f51e": {"canonical_name": "underage", "aliases": ["nc17"]},
"1f4f5": {"canonical_name": "no_phones", "aliases": []},
"1f6ad": {"canonical_name": "no_smoking", "aliases": []},
"2757": {"canonical_name": "exclamation", "aliases": []},
"2755": {"canonical_name": "grey_exclamation", "aliases": []},
"2753": {"canonical_name": "question", "aliases": []},
"2754": {"canonical_name": "grey_question", "aliases": []},
"203c": {"canonical_name": "bangbang", "aliases": ["double_exclamation"]},
"2049": {"canonical_name": "interrobang", "aliases": []},
"1f505": {"canonical_name": "low_brightness", "aliases": ["dim"]},
"1f506": {"canonical_name": "brightness", "aliases": ["high_brightness"]},
"303d": {"canonical_name": "part_alternation", "aliases": []},
"26a0": {"canonical_name": "warning", "aliases": ["caution", "danger"]},
"1f6b8": {
"canonical_name": "children_crossing",
"aliases": ["school_crossing", "drive_with_care"],
},
"1f531": {"canonical_name": "trident", "aliases": []},
"269c": {"canonical_name": "fleur_de_lis", "aliases": []},
"1f530": {"canonical_name": "beginner", "aliases": []},
"267b": {"canonical_name": "recycle", "aliases": []},
# seems like the best check
"2705": {"canonical_name": "check", "aliases": ["all_good", "approved"]},
# '1f22f': {'canonical_name': 'X', 'aliases': ['u6307']},
# stock_market seemed more useful
"1f4b9": {"canonical_name": "stock_market", "aliases": []},
"2747": {"canonical_name": "sparkle", "aliases": []},
"2733": {"canonical_name": "eight_spoked_asterisk", "aliases": []},
"274e": {"canonical_name": "x", "aliases": []},
"1f310": {"canonical_name": "www", "aliases": ["globe"]},
"1f4a0": {"canonical_name": "cute", "aliases": ["kawaii", "diamond_with_a_dot"]},
"24c2": {"canonical_name": "metro", "aliases": ["m"]},
"1f300": {"canonical_name": "cyclone", "aliases": ["hurricane", "typhoon"]},
"1f4a4": {"canonical_name": "zzz", "aliases": []},
"1f3e7": {"canonical_name": "atm", "aliases": []},
"1f6be": {"canonical_name": "wc", "aliases": ["water_closet"]},
"267f": {"canonical_name": "accessible", "aliases": ["wheelchair", "disabled"]},
"1f17f": {"canonical_name": "parking", "aliases": ["p"]},
# '1f233': {'canonical_name': 'X', 'aliases': ['u7a7a']},
# '1f202': {'canonical_name': 'X', 'aliases': ['sa']},
"1f6c2": {"canonical_name": "passport_control", "aliases": ["immigration"]},
"1f6c3": {"canonical_name": "customs", "aliases": []},
"1f6c4": {"canonical_name": "baggage_claim", "aliases": []},
"1f6c5": {"canonical_name": "locker", "aliases": ["locked_bag"]},
"1f6b9": {"canonical_name": "mens", "aliases": []},
"1f6ba": {"canonical_name": "womens", "aliases": []},
# seems more in line with the surrounding bathroom symbols
"1f6bc": {"canonical_name": "baby_change_station", "aliases": ["nursery"]},
"1f6bb": {"canonical_name": "restroom", "aliases": []},
"1f6ae": {"canonical_name": "put_litter_in_its_place", "aliases": []},
"1f3a6": {"canonical_name": "cinema", "aliases": ["movie_theater"]},
"1f4f6": {"canonical_name": "cell_reception", "aliases": ["signal_strength", "signal_bars"]},
# '1f201': {'canonical_name': 'X', 'aliases': ['koko']},
"1f523": {"canonical_name": "symbols", "aliases": []},
"2139": {"canonical_name": "info", "aliases": []},
"1f524": {"canonical_name": "abc", "aliases": []},
"1f521": {"canonical_name": "abcd", "aliases": ["alphabet"]},
"1f520": {"canonical_name": "capital_abcd", "aliases": ["capital_letters"]},
"1f196": {"canonical_name": "ng", "aliases": []},
# from Unicode/gemoji. Saving ok for People/111
"1f197": {"canonical_name": "squared_ok", "aliases": []},
# from Unicode, and to parallel Symbols/135. Saving up for Symbols/171
"1f199": {"canonical_name": "squared_up", "aliases": []},
"1f192": {"canonical_name": "cool", "aliases": []},
"1f195": {"canonical_name": "new", "aliases": []},
"1f193": {"canonical_name": "free", "aliases": []},
"0030-20e3": {"canonical_name": "zero", "aliases": []},
"0031-20e3": {"canonical_name": "one", "aliases": []},
"0032-20e3": {"canonical_name": "two", "aliases": []},
"0033-20e3": {"canonical_name": "three", "aliases": []},
"0034-20e3": {"canonical_name": "four", "aliases": []},
"0035-20e3": {"canonical_name": "five", "aliases": []},
"0036-20e3": {"canonical_name": "six", "aliases": []},
"0037-20e3": {"canonical_name": "seven", "aliases": []},
"0038-20e3": {"canonical_name": "eight", "aliases": []},
"0039-20e3": {"canonical_name": "nine", "aliases": []},
"1f51f": {"canonical_name": "ten", "aliases": []},
"1f522": {"canonical_name": "1234", "aliases": ["numbers"]},
"0023-20e3": {"canonical_name": "hash", "aliases": []},
"002a-20e3": {"canonical_name": "asterisk", "aliases": []},
"25b6": {"canonical_name": "play", "aliases": []},
"23f8": {"canonical_name": "pause", "aliases": []},
"23ef": {"canonical_name": "play_pause", "aliases": []},
# stop taken by People/118
"23f9": {"canonical_name": "stop_button", "aliases": []},
"23fa": {"canonical_name": "record", "aliases": []},
"23ed": {"canonical_name": "next_track", "aliases": ["skip_forward"]},
"23ee": {"canonical_name": "previous_track", "aliases": ["skip_back"]},
"23e9": {"canonical_name": "fast_forward", "aliases": []},
"23ea": {"canonical_name": "rewind", "aliases": ["fast_reverse"]},
"23eb": {"canonical_name": "double_up", "aliases": ["fast_up"]},
"23ec": {"canonical_name": "double_down", "aliases": ["fast_down"]},
"25c0": {"canonical_name": "play_reverse", "aliases": []},
"1f53c": {"canonical_name": "upvote", "aliases": ["up_button", "increase"]},
"1f53d": {"canonical_name": "downvote", "aliases": ["down_button", "decrease"]},
"27a1": {"canonical_name": "right", "aliases": ["east"]},
"2b05": {"canonical_name": "left", "aliases": ["west"]},
"2b06": {"canonical_name": "up", "aliases": ["north"]},
"2b07": {"canonical_name": "down", "aliases": ["south"]},
"2197": {"canonical_name": "upper_right", "aliases": ["north_east"]},
"2198": {"canonical_name": "lower_right", "aliases": ["south_east"]},
"2199": {"canonical_name": "lower_left", "aliases": ["south_west"]},
"2196": {"canonical_name": "upper_left", "aliases": ["north_west"]},
"2195": {"canonical_name": "up_down", "aliases": []},
"2194": {"canonical_name": "left_right", "aliases": ["swap"]},
"21aa": {"canonical_name": "forward", "aliases": ["right_hook"]},
"21a9": {"canonical_name": "reply", "aliases": ["left_hook"]},
"2934": {"canonical_name": "heading_up", "aliases": []},
"2935": {"canonical_name": "heading_down", "aliases": []},
"1f500": {"canonical_name": "shuffle", "aliases": []},
"1f501": {"canonical_name": "repeat", "aliases": []},
"1f502": {"canonical_name": "repeat_one", "aliases": []},
"1f504": {"canonical_name": "counterclockwise", "aliases": ["return"]},
"1f503": {"canonical_name": "clockwise", "aliases": []},
"1f3b5": {"canonical_name": "music", "aliases": []},
"1f3b6": {"canonical_name": "musical_notes", "aliases": []},
"2795": {"canonical_name": "plus", "aliases": ["add"]},
"2796": {"canonical_name": "minus", "aliases": ["subtract"]},
"2797": {"canonical_name": "division", "aliases": ["divide"]},
"2716": {"canonical_name": "multiplication", "aliases": ["multiply"]},
"1f4b2": {"canonical_name": "dollars", "aliases": []},
# There is no other exchange, so might as well generalize this
"1f4b1": {"canonical_name": "exchange", "aliases": []},
"2122": {"canonical_name": "tm", "aliases": ["trademark"]},
"3030": {"canonical_name": "wavy_dash", "aliases": []},
"27b0": {"canonical_name": "loop", "aliases": []},
# https://emojipedia.org/double-curly-loop/
"27bf": {"canonical_name": "double_loop", "aliases": ["voicemail"]},
"1f51a": {"canonical_name": "end", "aliases": []},
"1f519": {"canonical_name": "back", "aliases": []},
"1f51b": {"canonical_name": "on", "aliases": []},
"1f51d": {"canonical_name": "top", "aliases": []},
"1f51c": {"canonical_name": "soon", "aliases": []},
"2714": {"canonical_name": "check_mark", "aliases": []},
"2611": {"canonical_name": "checkbox", "aliases": []},
"1f518": {"canonical_name": "radio_button", "aliases": []},
"26aa": {"canonical_name": "white_circle", "aliases": []},
"26ab": {"canonical_name": "black_circle", "aliases": []},
"1f534": {"canonical_name": "red_circle", "aliases": []},
"1f535": {"canonical_name": "blue_circle", "aliases": []},
"1f53a": {"canonical_name": "red_triangle_up", "aliases": []},
"1f53b": {"canonical_name": "red_triangle_down", "aliases": []},
"1f538": {"canonical_name": "small_orange_diamond", "aliases": []},
"1f539": {"canonical_name": "small_blue_diamond", "aliases": []},
"1f536": {"canonical_name": "large_orange_diamond", "aliases": []},
"1f537": {"canonical_name": "large_blue_diamond", "aliases": []},
"1f533": {"canonical_name": "black_and_white_square", "aliases": []},
"1f532": {"canonical_name": "white_and_black_square", "aliases": []},
"25aa": {"canonical_name": "black_small_square", "aliases": []},
"25ab": {"canonical_name": "white_small_square", "aliases": []},
"25fe": {"canonical_name": "black_medium_small_square", "aliases": []},
"25fd": {"canonical_name": "white_medium_small_square", "aliases": []},
"25fc": {"canonical_name": "black_medium_square", "aliases": []},
"25fb": {"canonical_name": "white_medium_square", "aliases": []},
"2b1b": {"canonical_name": "black_large_square", "aliases": []},
"2b1c": {"canonical_name": "white_large_square", "aliases": []},
"1f508": {"canonical_name": "speaker", "aliases": []},
"1f507": {"canonical_name": "mute", "aliases": ["no_sound"]},
"1f509": {"canonical_name": "softer", "aliases": []},
"1f50a": {"canonical_name": "louder", "aliases": ["sound"]},
"1f514": {"canonical_name": "notifications", "aliases": ["bell"]},
"1f515": {"canonical_name": "mute_notifications", "aliases": []},
"1f4e3": {"canonical_name": "megaphone", "aliases": ["shout"]},
"1f4e2": {"canonical_name": "loudspeaker", "aliases": ["bullhorn"]},
"1f4ac": {"canonical_name": "umm", "aliases": ["speech_balloon"]},
"1f5e8": {"canonical_name": "speech_bubble", "aliases": []},
"1f4ad": {"canonical_name": "thought", "aliases": ["dream"]},
"1f5ef": {"canonical_name": "anger_bubble", "aliases": []},
"2660": {"canonical_name": "spades", "aliases": []},
"2663": {"canonical_name": "clubs", "aliases": []},
"2665": {"canonical_name": "hearts", "aliases": []},
"2666": {"canonical_name": "diamonds", "aliases": []},
"1f0cf": {"canonical_name": "joker", "aliases": []},
"1f3b4": {"canonical_name": "playing_cards", "aliases": []},
"1f004": {"canonical_name": "mahjong", "aliases": []},
# The only use I can think of for so many clocks is to be able to use them
# to vote on times and such in emoji reactions. But a) the experience is
# not that great (the images are too small), b) there are issues with
# 24-hour time (used in many countries), like what is 00:30 or 01:00
# called, c) it's hard to make the compose typeahead experience great, and
# d) we should have a dedicated time voting widget that takes care of
# timezone and locale issues, and uses a digital representation.
# '1f550': {'canonical_name': 'X', 'aliases': ['clock1']},
# '1f551': {'canonical_name': 'X', 'aliases': ['clock2']},
# '1f552': {'canonical_name': 'X', 'aliases': ['clock3']},
# '1f553': {'canonical_name': 'X', 'aliases': ['clock4']},
# '1f554': {'canonical_name': 'X', 'aliases': ['clock5']},
# '1f555': {'canonical_name': 'X', 'aliases': ['clock6']},
# '1f556': {'canonical_name': 'X', 'aliases': ['clock7']},
# seems like the best choice for time
"1f557": {"canonical_name": "time", "aliases": ["clock"]},
# '1f558': {'canonical_name': 'X', 'aliases': ['clock9']},
# '1f559': {'canonical_name': 'X', 'aliases': ['clock10']},
# '1f55a': {'canonical_name': 'X', 'aliases': ['clock11']},
# '1f55b': {'canonical_name': 'X', 'aliases': ['clock12']},
# '1f55c': {'canonical_name': 'X', 'aliases': ['clock130']},
# '1f55d': {'canonical_name': 'X', 'aliases': ['clock230']},
# '1f55e': {'canonical_name': 'X', 'aliases': ['clock330']},
# '1f55f': {'canonical_name': 'X', 'aliases': ['clock430']},
# '1f560': {'canonical_name': 'X', 'aliases': ['clock530']},
# '1f561': {'canonical_name': 'X', 'aliases': ['clock630']},
# '1f562': {'canonical_name': 'X', 'aliases': ['clock730']},
# '1f563': {'canonical_name': 'X', 'aliases': ['clock830']},
# '1f564': {'canonical_name': 'X', 'aliases': ['clock930']},
# '1f565': {'canonical_name': 'X', 'aliases': ['clock1030']},
# '1f566': {'canonical_name': 'X', 'aliases': ['clock1130']},
# '1f567': {'canonical_name': 'X', 'aliases': ['clock1230']},
"1f3f3": {"canonical_name": "white_flag", "aliases": ["surrender"]},
"1f3f4": {"canonical_name": "black_flag", "aliases": []},
"1f3c1": {"canonical_name": "checkered_flag", "aliases": ["race", "go", "start"]},
"1f6a9": {"canonical_name": "triangular_flag", "aliases": []},
# solidarity from iemoji
"1f38c": {"canonical_name": "crossed_flags", "aliases": ["solidarity"]},
}
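# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original file): the mapping above keys
# each emoji by its Unicode codepoint string and records one canonical name
# plus optional aliases. A consumer usually needs the reverse lookup -- from
# a typed name such as "bullet_train" or "gold" back to the codepoint. The
# helper below is a minimal, hypothetical illustration of that shape; the
# binding EMOJI_NAME_MAPS mentioned in the usage note is assumed, since the
# dict's actual name is not visible in this excerpt.
def build_name_to_codepoint(emoji_name_maps):
    """Map every canonical name and alias to its emoji codepoint string."""
    name_to_codepoint = {}
    for codepoint, info in emoji_name_maps.items():
        # canonical names take priority; aliases never overwrite them
        name_to_codepoint[info["canonical_name"]] = codepoint
        for alias in info["aliases"]:
            name_to_codepoint.setdefault(alias, codepoint)
    return name_to_codepoint
# Usage (assumed binding): build_name_to_codepoint(EMOJI_NAME_MAPS)["bullet_train"] == "1f685"
# ---------------------------------------------------------------------------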
|
punchagan/zulip
|
tools/setup/emoji/emoji_names.py
|
Python
|
apache-2.0
| 95,618
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
from resource_management import *
from resource_management.core.system import System
import os
config = Script.get_config()
hdp_stack_version = str(config['hostLevelParams']['stack_version'])
hdp_stack_version = format_hdp_stack_version(hdp_stack_version)
stack_is_hdp22_or_further = hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0
#hadoop params
if stack_is_hdp22_or_further:
mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
hadoop_libexec_dir = "/usr/hdp/current/hadoop-client/libexec"
hadoop_lib_home = "/usr/hdp/current/hadoop-client/lib"
hadoop_bin = "/usr/hdp/current/hadoop-client/sbin"
hadoop_home = '/usr/hdp/current/hadoop-client'
else:
mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
hadoop_lib_home = "/usr/lib/hadoop/lib"
hadoop_bin = "/usr/lib/hadoop/sbin"
hadoop_home = '/usr'
current_service = config['serviceName']
hadoop_conf_dir = "/etc/hadoop/conf"
#security params
security_enabled = config['configurations']['cluster-env']['security_enabled']
#users and groups
mapred_user = config['configurations']['mapred-env']['mapred_user']
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
yarn_user = config['configurations']['yarn-env']['yarn_user']
user_group = config['configurations']['cluster-env']['user_group']
#hosts
hostname = config["hostname"]
ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
rm_host = default("/clusterHostInfo/rm_host", [])
slave_hosts = default("/clusterHostInfo/slave_hosts", [])
nagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
oozie_servers = default("/clusterHostInfo/oozie_server", [])
hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
hive_server_host = default("/clusterHostInfo/hive_server_host", [])
hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
hs_host = default("/clusterHostInfo/hs_host", [])
jtnode_host = default("/clusterHostInfo/jtnode_host", [])
namenode_host = default("/clusterHostInfo/namenode_host", [])
zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
has_namenode = not len(namenode_host) == 0
has_resourcemanager = not len(rm_host) == 0
has_slaves = not len(slave_hosts) == 0
has_nagios = not len(nagios_server_hosts) == 0
has_oozie_server = not len(oozie_servers) == 0
has_hcat_server_host = not len(hcat_server_hosts) == 0
has_hive_server_host = not len(hive_server_host) == 0
has_hbase_masters = not len(hbase_master_hosts) == 0
has_zk_host = not len(zk_hosts) == 0
has_ganglia_server = not len(ganglia_server_hosts) == 0
is_namenode_master = hostname in namenode_host
is_jtnode_master = hostname in jtnode_host
is_rmnode_master = hostname in rm_host
is_hsnode_master = hostname in hs_host
is_hbase_master = hostname in hbase_master_hosts
is_slave = hostname in slave_hosts
if has_ganglia_server:
ganglia_server_host = ganglia_server_hosts[0]
#hadoop params
if has_namenode:
hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties")
hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
hbase_tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
#db params
server_db_name = config['hostLevelParams']['db_name']
db_driver_filename = config['hostLevelParams']['db_driver_filename']
oracle_driver_url = config['hostLevelParams']['oracle_jdbc_url']
mysql_driver_url = config['hostLevelParams']['mysql_jdbc_url']
ambari_server_resources = config['hostLevelParams']['jdk_location']
oracle_driver_symlink_url = format("{ambari_server_resources}oracle-jdbc-driver.jar")
mysql_driver_symlink_url = format("{ambari_server_resources}mysql-jdbc-driver.jar")
ambari_db_rca_url = config['hostLevelParams']['ambari_db_rca_url'][0]
ambari_db_rca_driver = config['hostLevelParams']['ambari_db_rca_driver'][0]
ambari_db_rca_username = config['hostLevelParams']['ambari_db_rca_username'][0]
ambari_db_rca_password = config['hostLevelParams']['ambari_db_rca_password'][0]
if has_namenode and 'rca_enabled' in config['configurations']['hadoop-env']:
rca_enabled = config['configurations']['hadoop-env']['rca_enabled']
else:
rca_enabled = False
rca_disabled_prefix = "###"
if rca_enabled == True:
rca_prefix = ""
else:
rca_prefix = rca_disabled_prefix
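# Editorial note (illustration only; not part of the original file): rca_prefix
# is meant to be interpolated into a log4j template so that root-cause-analysis
# appender lines are commented out when RCA is disabled. With a hypothetical
# template line such as
#   {rca_prefix}log4j.appender.JHA=<rca appender class>
# the rendered output starts with "###" (i.e. is commented out) when
# rca_enabled is false, and is left active when it is true.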
#hadoop-env.sh
java_home = config['hostLevelParams']['java_home']
if str(config['hostLevelParams']['stack_version']).startswith('2.0') and System.get_instance().os_family != "suse":
# deprecated rhel jsvc_path
jsvc_path = "/usr/libexec/bigtop-utils"
else:
jsvc_path = "/usr/lib/jsvcdaemon"
hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
jtnode_opt_newsize = "200m"
jtnode_opt_maxnewsize = "200m"
jtnode_heapsize = "1024m"
ttnode_heapsize = "1024m"
dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
#yarn log dir and dfs hosts
yarn_log_dir_prefix = default("/configurations/yarn-env/yarn_log_dir_prefix","/var/log/hadoop-yarn")
dfs_hosts = default('/configurations/hdfs-site/dfs.hosts', None)
#log4j.properties
if 'hdfs-log4j' in config['configurations'] and 'content' in config['configurations']['hdfs-log4j']:
log4j_props = config['configurations']['hdfs-log4j']['content']
if 'yarn-log4j' in config['configurations'] and 'content' in config['configurations']['yarn-log4j']:
log4j_props += config['configurations']['yarn-log4j']['content']
else:
log4j_props = None
|
keedio/keedio-stacks
|
KEEDIO/1.3/hooks/before-START/scripts/params.py
|
Python
|
apache-2.0
| 7,254
|
#!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets the current user."""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service.
user_service = client.GetService('UserService', version='v201206')
# Get current user.
user = user_service.GetCurrentUser()[0]
# Display results.
print ('User with id \'%s\', email \'%s\', and role \'%s\' was found.'
% (user['id'], user['email'], user['roleName']))
|
donspaulding/adspygoogle
|
examples/adspygoogle/dfp/v201206/get_current_user.py
|
Python
|
apache-2.0
| 1,409
|
#!/usr/bin/python
"""
Copyright 2014 The Trustees of Princeton University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
import json
import time
import traceback
import base64
from collections import namedtuple
from Crypto.Hash import SHA256 as HashAlg
from Crypto.PublicKey import RSA as CryptoKey
from Crypto import Random
from Crypto.Signature import PKCS1_PSS as CryptoSigner
import logging
from logging import Logger
logging.basicConfig( format='[%(levelname)s] [%(module)s:%(lineno)d] %(message)s' )
logger = logging.getLogger()
logger.setLevel( logging.INFO )
import syndicate.observer.core as observer_core
import syndicate.observer.cred as observer_cred
import syndicate.observer.push as observer_push
CONFIG = observer_core.get_config()
# objects expected by these methods
SyndicatePrincipal = namedtuple("SyndicatePrincipal", ["principal_id", "public_key_pem", "sealed_private_key"])
Volume = namedtuple("Volume", ["name", "owner_id", "description", "blocksize", "private", "archive", "cap_read_data", "cap_write_data", "cap_host_data", "slice_id"] )
VolumeAccessRight = namedtuple( "VolumeAccessRight", ["owner_id", "volume", "cap_read_data", "cap_write_data", "cap_host_data"] )
SliceSecret = namedtuple( "SliceSecret", ["slice_id", "secret"] )
VolumeSlice = namedtuple( "VolumeSlice", ["volume_id", "slice_id", "cap_read_data", "cap_write_data", "cap_host_data", "UG_portnum", "RG_portnum", "credentials_blob"] )
#-------------------------------
def sync_volume_record( volume ):
"""
Synchronize a Volume record with Syndicate.
"""
logger.info( "Sync Volume = %s\n\n" % volume.name )
principal_id = volume.owner_id.email
config = observer_core.get_config()
max_UGs = None
max_RGs = None
volume_principal_id = observer_core.make_volume_principal_id( principal_id, volume.name )
# get the observer secret
try:
max_UGs = CONFIG.SYNDICATE_UG_QUOTA
max_RGs = CONFIG.SYNDICATE_RG_QUOTA
observer_secret = observer_core.get_syndicate_observer_secret( config.SYNDICATE_OBSERVER_SECRET )
except Exception, e:
traceback.print_exc()
logger.error("config is missing SYNDICATE_OBSERVER_SECRET, SYNDICATE_UG_QUOTA, SYNDICATE_RG_QUOTA")
raise e
# volume owner must exist as a Syndicate user...
try:
rc, user = observer_core.ensure_principal_exists( volume_principal_id, observer_secret, is_admin=False, max_UGs=max_UGs, max_RGs=max_RGs)
assert rc == True, "Failed to create or read volume principal '%s'" % volume_principal_id
except Exception, e:
traceback.print_exc()
logger.error("Failed to ensure principal '%s' exists" % volume_principal_id )
raise e
# volume must exist
# create or update the Volume
try:
new_volume = observer_core.ensure_volume_exists( volume_principal_id, volume, user=user )
except Exception, e:
traceback.print_exc()
logger.error("Failed to ensure volume '%s' exists" % volume.name )
raise e
# did we create the Volume?
if new_volume is not None:
# we're good
pass
# otherwise, just update it
else:
try:
rc = observer_core.update_volume( volume )
except Exception, e:
traceback.print_exc()
logger.error("Failed to update volume '%s', exception = %s" % (volume.name, e.message))
raise e
return True
#-------------------------------
def delete_volume_record( volume ):
"""
Delete a volume from Syndicate.
"""
    logger.info( "Delete Volume = %s\n\n" % volume.name )
volume_name = volume.name
config = observer_core.get_config()
# delete the Volume on Syndicate.
try:
rc = observer_core.ensure_volume_absent( volume_name )
except Exception, e:
traceback.print_exc()
logger.error("Failed to delete volume %s", volume_name )
raise e
return rc
#-------------------------------
def sync_volumeaccessright_record( vac ):
"""
Synchronize a volume access record
"""
syndicate_caps = "UNKNOWN" # for exception handling
# get arguments
config = observer_core.get_config()
principal_id = vac.owner_id.email
volume_name = vac.volume.name
syndicate_caps = observer_core.opencloud_caps_to_syndicate_caps( vac.cap_read_data, vac.cap_write_data, vac.cap_host_data )
logger.info( "Sync VolumeAccessRight for (%s, %s)" % (principal_id, volume_name) )
# validate config
try:
observer_secret = observer_core.get_syndicate_observer_secret( config.SYNDICATE_OBSERVER_SECRET )
except Exception, e:
traceback.print_exc()
        logger.error("syndicatelib config is missing SYNDICATE_OBSERVER_SECRET")
raise e
# ensure the user exists and has credentials
try:
rc, user = observer_core.ensure_principal_exists( principal_id, observer_secret, is_admin=False, max_UGs=1100, max_RGs=1 )
assert rc is True, "Failed to ensure principal %s exists (rc = %s,%s)" % (principal_id, rc, user)
except Exception, e:
traceback.print_exc()
logger.error("Failed to ensure user '%s' exists" % principal_id )
raise e
# grant the slice-owning user the ability to provision UGs in this Volume
try:
rc = observer_core.ensure_volume_access_right_exists( principal_id, volume_name, syndicate_caps )
assert rc is True, "Failed to set up Volume access right for slice %s in %s" % (principal_id, volume_name)
except Exception, e:
traceback.print_exc()
logger.error("Failed to set up Volume access right for slice %s in %s" % (principal_id, volume_name))
raise e
except Exception, e:
traceback.print_exc()
        logger.error("Failed to ensure user %s can access Volume %s with rights %s" % (principal_id, volume_name, syndicate_caps))
raise e
return True
#-------------------------------
def delete_volumeaccessright_record( vac ):
"""
Ensure that a principal no longer has access to a particular volume.
"""
principal_id = vac.owner_id.email
volume_name = vac.volume.name
try:
observer_core.ensure_volume_access_right_absent( principal_id, volume_name )
except Exception, e:
traceback.print_exc()
logger.error("Failed to revoke access from %s to %s" % (principal_id, volume_name))
raise e
return True
#-------------------------------
def sync_volumeslice_record( vs ):
"""
Synchronize a VolumeSlice record
"""
logger.info("Sync VolumeSlice for (%s, %s)" % (vs.volume_id.name, vs.slice_id.name))
# extract arguments...
principal_id = vs.slice_id.creator.email
slice_name = vs.slice_id.name
volume_name = vs.volume_id.name
syndicate_caps = observer_core.opencloud_caps_to_syndicate_caps( vs.cap_read_data, vs.cap_write_data, vs.cap_host_data )
RG_port = vs.RG_portnum
UG_port = vs.UG_portnum
slice_secret = None
gateway_name_prefix = None
config = observer_core.get_config()
try:
observer_secret = observer_core.get_syndicate_observer_secret( config.SYNDICATE_OBSERVER_SECRET )
RG_closure = config.SYNDICATE_RG_CLOSURE
observer_pkey_path = config.SYNDICATE_OBSERVER_PRIVATE_KEY
syndicate_url = config.SYNDICATE_SMI_URL
gateway_name_prefix = config.SYNDICATE_GATEWAY_NAME_PREFIX
except Exception, e:
traceback.print_exc()
        logger.error("syndicatelib config is missing one or more of the following: SYNDICATE_OBSERVER_SECRET, SYNDICATE_RG_CLOSURE, SYNDICATE_OBSERVER_PRIVATE_KEY, SYNDICATE_SMI_URL, SYNDICATE_GATEWAY_NAME_PREFIX")
raise e
# get secrets...
try:
observer_pkey_pem = observer_core.get_observer_private_key_pem( observer_pkey_path )
assert observer_pkey_pem is not None, "Failed to load Observer private key"
# get/create the slice secret
slice_secret = observer_core.get_or_create_slice_secret( observer_pkey_pem, slice_name )
assert slice_secret is not None, "Failed to get or create slice secret for %s" % slice_name
except Exception, e:
traceback.print_exc()
logger.error("Failed to load secret credentials")
raise e
# make sure there's a slice-controlled Syndicate user account for the slice owner
slice_principal_id = observer_core.make_slice_principal_id( principal_id, slice_name )
try:
rc, user = observer_core.ensure_principal_exists( slice_principal_id, observer_secret, is_admin=False, max_UGs=1100, max_RGs=1 )
assert rc is True, "Failed to ensure principal %s exists (rc = %s,%s)" % (slice_principal_id, rc, user)
except Exception, e:
traceback.print_exc()
logger.error('Failed to ensure slice user %s exists' % slice_principal_id)
raise e
# grant the slice-owning user the ability to provision UGs in this Volume
try:
rc = observer_core.ensure_volume_access_right_exists( slice_principal_id, volume_name, syndicate_caps )
assert rc is True, "Failed to set up Volume access right for slice %s in %s" % (slice_principal_id, volume_name)
except Exception, e:
traceback.print_exc()
logger.error("Failed to set up Volume access right for slice %s in %s" % (slice_principal_id, volume_name))
raise e
# provision for the user the (single) RG the slice will instantiate in each VM.
try:
rc = observer_core.setup_global_RG( slice_principal_id, volume_name, gateway_name_prefix, slice_secret, RG_port, RG_closure )
except Exception, e:
logger.exception(e)
return False
# generate and save slice credentials....
try:
slice_cred = observer_core.save_slice_credentials( observer_pkey_pem, syndicate_url, slice_principal_id, volume_name, slice_name, observer_secret, slice_secret,
instantiate_UG=True, run_UG=True, UG_port=UG_port, UG_closure=None,
instantiate_RG=None, run_RG=True, RG_port=RG_port, RG_closure=None, RG_global_hostname="localhost",
instantiate_AG=None, run_AG=None, AG_port=0, AG_closure=None,
gateway_name_prefix=gateway_name_prefix,
existing_user=user )
assert slice_cred is not None, "Failed to generate slice credential for %s in %s" % (slice_principal_id, volume_name )
except Exception, e:
traceback.print_exc()
logger.error("Failed to generate slice credential for %s in %s" % (slice_principal_id, volume_name))
raise e
# ... and push them all out.
try:
rc = observer_push.push_credentials_to_slice( slice_name, slice_cred )
assert rc is True, "Failed to push credentials to slice %s for volume %s" % (slice_name, volume_name)
except Exception, e:
traceback.print_exc()
logger.error("Failed to push slice credentials to %s for volume %s" % (slice_name, volume_name))
raise e
return True
#-------------------------------
def delete_volumeslice_record( vs ):
"""
Unmount a volume from a slice.
That is, prevent the slice from mounting it, by revoking the slice's principal's permissions and deleting its gateways.
"""
principal_id = vs.slice_id.creator.email
slice_name = vs.slice_id.name
volume_name = vs.volume_id.name
slice_principal_id = observer_core.make_slice_principal_id( principal_id, slice_name )
try:
observer_core.revoke_volume_access( slice_principal_id, volume_name )
except Exception, e:
traceback.print_exc()
logger.error("Failed to remove slice principal %s from %s" % (slice_principal_id, volume_name))
raise e
return True
|
jcnelson/syndicate
|
python/syndicate/observer/sync.py
|
Python
|
apache-2.0
| 12,416
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import os
import subprocess
import uuid
from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils
import saml2
from saml2 import client_base
from saml2 import md
from saml2.profile import ecp
from saml2 import saml
from saml2 import samlp
from saml2.schema import soapenv
from saml2 import sigver
import xmldsig
from keystone import exception
from keystone.i18n import _, _LE
from keystone.openstack.common import fileutils
LOG = log.getLogger(__name__)
CONF = cfg.CONF
class SAMLGenerator(object):
"""A class to generate SAML assertions."""
def __init__(self):
self.assertion_id = uuid.uuid4().hex
def samlize_token(self, issuer, recipient, user, roles, project,
expires_in=None):
"""Convert Keystone attributes to a SAML assertion.
:param issuer: URL of the issuing party
:type issuer: string
:param recipient: URL of the recipient
:type recipient: string
:param user: User name
:type user: string
:param roles: List of role names
:type roles: list
:param project: Project name
:type project: string
:param expires_in: Sets how long the assertion is valid for, in seconds
:type expires_in: int
:return: XML <Response> object
"""
expiration_time = self._determine_expiration_time(expires_in)
status = self._create_status()
saml_issuer = self._create_issuer(issuer)
subject = self._create_subject(user, expiration_time, recipient)
attribute_statement = self._create_attribute_statement(user, roles,
project)
authn_statement = self._create_authn_statement(issuer, expiration_time)
signature = self._create_signature()
assertion = self._create_assertion(saml_issuer, signature,
subject, authn_statement,
attribute_statement)
assertion = _sign_assertion(assertion)
response = self._create_response(saml_issuer, status, assertion,
recipient)
return response
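    # Illustrative usage (a minimal sketch -- the endpoint URLs, user, roles and
    # project values below are assumptions, not defaults defined in this module):
    #
    #   generator = SAMLGenerator()
    #   response = generator.samlize_token(
    #       issuer='https://idp.example.com/keystone/idp',
    #       recipient='https://sp.example.com/Shibboleth.sso/SAML2/ECP',
    #       user='test_user',
    #       roles=['admin', 'member'],
    #       project='development')
    #   saml_xml = response.to_string()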
def _determine_expiration_time(self, expires_in):
if expires_in is None:
expires_in = CONF.saml.assertion_expiration_time
now = timeutils.utcnow()
future = now + datetime.timedelta(seconds=expires_in)
return timeutils.isotime(future, subsecond=True)
def _create_status(self):
"""Create an object that represents a SAML Status.
<ns0:Status xmlns:ns0="urn:oasis:names:tc:SAML:2.0:protocol">
<ns0:StatusCode
Value="urn:oasis:names:tc:SAML:2.0:status:Success" />
</ns0:Status>
:return: XML <Status> object
"""
status = samlp.Status()
status_code = samlp.StatusCode()
status_code.value = samlp.STATUS_SUCCESS
status_code.set_text('')
status.status_code = status_code
return status
def _create_issuer(self, issuer_url):
"""Create an object that represents a SAML Issuer.
<ns0:Issuer
xmlns:ns0="urn:oasis:names:tc:SAML:2.0:assertion"
Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">
https://acme.com/FIM/sps/openstack/saml20</ns0:Issuer>
:return: XML <Issuer> object
"""
issuer = saml.Issuer()
issuer.format = saml.NAMEID_FORMAT_ENTITY
issuer.set_text(issuer_url)
return issuer
def _create_subject(self, user, expiration_time, recipient):
"""Create an object that represents a SAML Subject.
<ns0:Subject>
<ns0:NameID>
john@smith.com</ns0:NameID>
<ns0:SubjectConfirmation
Method="urn:oasis:names:tc:SAML:2.0:cm:bearer">
<ns0:SubjectConfirmationData
NotOnOrAfter="2014-08-19T11:53:57.243106Z"
Recipient="http://beta.com/Shibboleth.sso/SAML2/POST" />
</ns0:SubjectConfirmation>
</ns0:Subject>
:return: XML <Subject> object
"""
name_id = saml.NameID()
name_id.set_text(user)
subject_conf_data = saml.SubjectConfirmationData()
subject_conf_data.recipient = recipient
subject_conf_data.not_on_or_after = expiration_time
subject_conf = saml.SubjectConfirmation()
subject_conf.method = saml.SCM_BEARER
subject_conf.subject_confirmation_data = subject_conf_data
subject = saml.Subject()
subject.subject_confirmation = subject_conf
subject.name_id = name_id
return subject
def _create_attribute_statement(self, user, roles, project):
"""Create an object that represents a SAML AttributeStatement.
<ns0:AttributeStatement>
<ns0:Attribute Name="openstack_user">
<ns0:AttributeValue
xsi:type="xs:string">test_user</ns0:AttributeValue>
</ns0:Attribute>
<ns0:Attribute Name="openstack_roles">
<ns0:AttributeValue
xsi:type="xs:string">admin</ns0:AttributeValue>
<ns0:AttributeValue
xsi:type="xs:string">member</ns0:AttributeValue>
</ns0:Attribute>
<ns0:Attribute Name="openstack_project">
<ns0:AttributeValue
xsi:type="xs:string">development</ns0:AttributeValue>
</ns0:Attribute>
</ns0:AttributeStatement>
:return: XML <AttributeStatement> object
"""
openstack_user = 'openstack_user'
user_attribute = saml.Attribute()
user_attribute.name = openstack_user
user_value = saml.AttributeValue()
user_value.set_text(user)
user_attribute.attribute_value = user_value
openstack_roles = 'openstack_roles'
roles_attribute = saml.Attribute()
roles_attribute.name = openstack_roles
for role in roles:
role_value = saml.AttributeValue()
role_value.set_text(role)
roles_attribute.attribute_value.append(role_value)
openstack_project = 'openstack_project'
project_attribute = saml.Attribute()
project_attribute.name = openstack_project
project_value = saml.AttributeValue()
project_value.set_text(project)
project_attribute.attribute_value = project_value
attribute_statement = saml.AttributeStatement()
attribute_statement.attribute.append(user_attribute)
attribute_statement.attribute.append(roles_attribute)
attribute_statement.attribute.append(project_attribute)
return attribute_statement
def _create_authn_statement(self, issuer, expiration_time):
"""Create an object that represents a SAML AuthnStatement.
<ns0:AuthnStatement xmlns:ns0="urn:oasis:names:tc:SAML:2.0:assertion"
AuthnInstant="2014-07-30T03:04:25Z" SessionIndex="47335964efb"
SessionNotOnOrAfter="2014-07-30T03:04:26Z">
<ns0:AuthnContext>
<ns0:AuthnContextClassRef>
urn:oasis:names:tc:SAML:2.0:ac:classes:Password
</ns0:AuthnContextClassRef>
<ns0:AuthenticatingAuthority>
https://acme.com/FIM/sps/openstack/saml20
</ns0:AuthenticatingAuthority>
</ns0:AuthnContext>
</ns0:AuthnStatement>
:return: XML <AuthnStatement> object
"""
authn_statement = saml.AuthnStatement()
authn_statement.authn_instant = timeutils.isotime()
authn_statement.session_index = uuid.uuid4().hex
authn_statement.session_not_on_or_after = expiration_time
authn_context = saml.AuthnContext()
authn_context_class = saml.AuthnContextClassRef()
authn_context_class.set_text(saml.AUTHN_PASSWORD)
authn_authority = saml.AuthenticatingAuthority()
authn_authority.set_text(issuer)
authn_context.authn_context_class_ref = authn_context_class
authn_context.authenticating_authority = authn_authority
authn_statement.authn_context = authn_context
return authn_statement
def _create_assertion(self, issuer, signature, subject, authn_statement,
attribute_statement):
"""Create an object that represents a SAML Assertion.
<ns0:Assertion
ID="35daed258ba647ba8962e9baff4d6a46"
IssueInstant="2014-06-11T15:45:58Z"
Version="2.0">
<ns0:Issuer> ... </ns0:Issuer>
<ns1:Signature> ... </ns1:Signature>
<ns0:Subject> ... </ns0:Subject>
<ns0:AuthnStatement> ... </ns0:AuthnStatement>
<ns0:AttributeStatement> ... </ns0:AttributeStatement>
</ns0:Assertion>
:return: XML <Assertion> object
"""
assertion = saml.Assertion()
assertion.id = self.assertion_id
assertion.issue_instant = timeutils.isotime()
assertion.version = '2.0'
assertion.issuer = issuer
assertion.signature = signature
assertion.subject = subject
assertion.authn_statement = authn_statement
assertion.attribute_statement = attribute_statement
return assertion
def _create_response(self, issuer, status, assertion, recipient):
"""Create an object that represents a SAML Response.
<ns0:Response
Destination="http://beta.com/Shibboleth.sso/SAML2/POST"
ID="c5954543230e4e778bc5b92923a0512d"
IssueInstant="2014-07-30T03:19:45Z"
          Version="2.0">
<ns0:Issuer> ... </ns0:Issuer>
<ns0:Assertion> ... </ns0:Assertion>
<ns0:Status> ... </ns0:Status>
</ns0:Response>
:return: XML <Response> object
"""
response = samlp.Response()
response.id = uuid.uuid4().hex
response.destination = recipient
response.issue_instant = timeutils.isotime()
response.version = '2.0'
response.issuer = issuer
response.status = status
response.assertion = assertion
return response
def _create_signature(self):
"""Create an object that represents a SAML <Signature>.
This must be filled with algorithms that the signing binary will apply
in order to sign the whole message.
Currently we enforce X509 signing.
Example of the template::
<Signature xmlns="http://www.w3.org/2000/09/xmldsig#">
<SignedInfo>
<CanonicalizationMethod
Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#"/>
<SignatureMethod
Algorithm="http://www.w3.org/2000/09/xmldsig#rsa-sha1"/>
<Reference URI="#<Assertion ID>">
<Transforms>
<Transform
Algorithm="http://www.w3.org/2000/09/xmldsig#enveloped-signature"/>
<Transform Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#"/>
</Transforms>
<DigestMethod Algorithm="http://www.w3.org/2000/09/xmldsig#sha1"/>
<DigestValue />
</Reference>
</SignedInfo>
<SignatureValue />
<KeyInfo>
<X509Data />
</KeyInfo>
</Signature>
:return: XML <Signature> object
"""
canonicalization_method = xmldsig.CanonicalizationMethod()
canonicalization_method.algorithm = xmldsig.ALG_EXC_C14N
signature_method = xmldsig.SignatureMethod(
algorithm=xmldsig.SIG_RSA_SHA1)
transforms = xmldsig.Transforms()
envelope_transform = xmldsig.Transform(
algorithm=xmldsig.TRANSFORM_ENVELOPED)
c14_transform = xmldsig.Transform(algorithm=xmldsig.ALG_EXC_C14N)
transforms.transform = [envelope_transform, c14_transform]
digest_method = xmldsig.DigestMethod(algorithm=xmldsig.DIGEST_SHA1)
digest_value = xmldsig.DigestValue()
reference = xmldsig.Reference()
reference.uri = '#' + self.assertion_id
reference.digest_method = digest_method
reference.digest_value = digest_value
reference.transforms = transforms
signed_info = xmldsig.SignedInfo()
signed_info.canonicalization_method = canonicalization_method
signed_info.signature_method = signature_method
signed_info.reference = reference
key_info = xmldsig.KeyInfo()
key_info.x509_data = xmldsig.X509Data()
signature = xmldsig.Signature()
signature.signed_info = signed_info
signature.signature_value = xmldsig.SignatureValue()
signature.key_info = key_info
return signature
def _sign_assertion(assertion):
"""Sign a SAML assertion.
This method utilizes ``xmlsec1`` binary and signs SAML assertions in a
separate process. ``xmlsec1`` cannot read input data from stdin so the
prepared assertion needs to be serialized and stored in a temporary
file. This file will be deleted immediately after ``xmlsec1`` returns.
The signed assertion is redirected to a standard output and read using
subprocess.PIPE redirection. A ``saml.Assertion`` class is created
from the signed string again and returned.
Parameters that are required in the CONF::
* xmlsec_binary
* private key file path
* public key file path
:return: XML <Assertion> object
"""
xmlsec_binary = CONF.saml.xmlsec1_binary
idp_private_key = CONF.saml.keyfile
idp_public_key = CONF.saml.certfile
# xmlsec1 --sign --privkey-pem privkey,cert --id-attr:ID <tag> <file>
certificates = '%(idp_private_key)s,%(idp_public_key)s' % {
'idp_public_key': idp_public_key,
'idp_private_key': idp_private_key
}
command_list = [xmlsec_binary, '--sign', '--privkey-pem', certificates,
'--id-attr:ID', 'Assertion']
try:
# NOTE(gyee): need to make the namespace prefixes explicit so
# they won't get reassigned when we wrap the assertion into
# SAML2 response
file_path = fileutils.write_to_tempfile(assertion.to_string(
nspair={'saml': saml2.NAMESPACE,
'xmldsig': xmldsig.NAMESPACE}))
command_list.append(file_path)
stdout = subprocess.check_output(command_list)
except Exception as e:
msg = _LE('Error when signing assertion, reason: %(reason)s')
msg = msg % {'reason': e}
LOG.error(msg)
raise exception.SAMLSigningError(reason=e)
finally:
try:
os.remove(file_path)
except OSError:
pass
return saml2.create_class_from_xml_string(saml.Assertion, stdout)
class MetadataGenerator(object):
"""A class for generating SAML IdP Metadata."""
def generate_metadata(self):
"""Generate Identity Provider Metadata.
Generate and format metadata into XML that can be exposed and
consumed by a federated Service Provider.
:return: XML <EntityDescriptor> object.
:raises: keystone.exception.ValidationError: Raises if the required
config options aren't set.
"""
self._ensure_required_values_present()
entity_descriptor = self._create_entity_descriptor()
entity_descriptor.idpsso_descriptor = (
self._create_idp_sso_descriptor())
return entity_descriptor
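    # Illustrative usage (a minimal sketch; assumes the CONF.saml idp_* options
    # referenced below have already been configured):
    #
    #   generator = MetadataGenerator()
    #   metadata_xml = generator.generate_metadata().to_string()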
def _create_entity_descriptor(self):
ed = md.EntityDescriptor()
ed.entity_id = CONF.saml.idp_entity_id
return ed
def _create_idp_sso_descriptor(self):
def get_cert():
try:
return sigver.read_cert_from_file(CONF.saml.certfile, 'pem')
except (IOError, sigver.CertificateError) as e:
msg = _('Cannot open certificate %(cert_file)s. '
'Reason: %(reason)s')
msg = msg % {'cert_file': CONF.saml.certfile, 'reason': e}
LOG.error(msg)
raise IOError(msg)
def key_descriptor():
cert = get_cert()
return md.KeyDescriptor(
key_info=xmldsig.KeyInfo(
x509_data=xmldsig.X509Data(
x509_certificate=xmldsig.X509Certificate(text=cert)
)
), use='signing'
)
def single_sign_on_service():
idp_sso_endpoint = CONF.saml.idp_sso_endpoint
return md.SingleSignOnService(
binding=saml2.BINDING_URI,
location=idp_sso_endpoint)
def organization():
name = md.OrganizationName(lang=CONF.saml.idp_lang,
text=CONF.saml.idp_organization_name)
display_name = md.OrganizationDisplayName(
lang=CONF.saml.idp_lang,
text=CONF.saml.idp_organization_display_name)
url = md.OrganizationURL(lang=CONF.saml.idp_lang,
text=CONF.saml.idp_organization_url)
return md.Organization(
organization_display_name=display_name,
organization_url=url, organization_name=name)
def contact_person():
company = md.Company(text=CONF.saml.idp_contact_company)
given_name = md.GivenName(text=CONF.saml.idp_contact_name)
surname = md.SurName(text=CONF.saml.idp_contact_surname)
email = md.EmailAddress(text=CONF.saml.idp_contact_email)
telephone = md.TelephoneNumber(
text=CONF.saml.idp_contact_telephone)
contact_type = CONF.saml.idp_contact_type
return md.ContactPerson(
company=company, given_name=given_name, sur_name=surname,
email_address=email, telephone_number=telephone,
contact_type=contact_type)
def name_id_format():
return md.NameIDFormat(text=saml.NAMEID_FORMAT_TRANSIENT)
idpsso = md.IDPSSODescriptor()
idpsso.protocol_support_enumeration = samlp.NAMESPACE
idpsso.key_descriptor = key_descriptor()
idpsso.single_sign_on_service = single_sign_on_service()
idpsso.name_id_format = name_id_format()
if self._check_organization_values():
idpsso.organization = organization()
if self._check_contact_person_values():
idpsso.contact_person = contact_person()
return idpsso
def _ensure_required_values_present(self):
"""Ensure idp_sso_endpoint and idp_entity_id have values."""
if CONF.saml.idp_entity_id is None:
msg = _('Ensure configuration option idp_entity_id is set.')
raise exception.ValidationError(msg)
if CONF.saml.idp_sso_endpoint is None:
msg = _('Ensure configuration option idp_sso_endpoint is set.')
raise exception.ValidationError(msg)
def _check_contact_person_values(self):
"""Determine if contact information is included in metadata."""
# Check if we should include contact information
params = [CONF.saml.idp_contact_company,
CONF.saml.idp_contact_name,
CONF.saml.idp_contact_surname,
CONF.saml.idp_contact_email,
CONF.saml.idp_contact_telephone]
for value in params:
if value is None:
return False
# Check if contact type is an invalid value
valid_type_values = ['technical', 'other', 'support', 'administrative',
'billing']
if CONF.saml.idp_contact_type not in valid_type_values:
msg = _('idp_contact_type must be one of: [technical, other, '
                    'support, administrative or billing].')
raise exception.ValidationError(msg)
return True
def _check_organization_values(self):
"""Determine if organization information is included in metadata."""
params = [CONF.saml.idp_organization_name,
CONF.saml.idp_organization_display_name,
CONF.saml.idp_organization_url]
for value in params:
if value is None:
return False
return True
class ECPGenerator(object):
"""A class for generating an ECP assertion."""
@staticmethod
def generate_ecp(saml_assertion, relay_state_prefix):
ecp_generator = ECPGenerator()
header = ecp_generator._create_header(relay_state_prefix)
body = ecp_generator._create_body(saml_assertion)
envelope = soapenv.Envelope(header=header, body=body)
return envelope
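    # Illustrative usage (a minimal sketch; the relay_state_prefix value is an
    # assumption, not a default defined here):
    #
    #   envelope = ECPGenerator.generate_ecp(signed_assertion, 'ss:mem:')
    #   ecp_xml = envelope.to_string()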
def _create_header(self, relay_state_prefix):
relay_state_text = relay_state_prefix + uuid.uuid4().hex
relay_state = ecp.RelayState(actor=client_base.ACTOR,
must_understand='1',
text=relay_state_text)
header = soapenv.Header()
header.extension_elements = (
[saml2.element_to_extension_element(relay_state)])
return header
def _create_body(self, saml_assertion):
body = soapenv.Body()
body.extension_elements = (
[saml2.element_to_extension_element(saml_assertion)])
return body
|
jumpstarter-io/keystone
|
keystone/contrib/federation/idp.py
|
Python
|
apache-2.0
| 22,080
|
#!/router/bin/python
import trex_client
from jsonrpclib import ProtocolError, AppError
class CTRexAdvClient(trex_client.CTRexClient):
def __init__ (self, trex_host, max_history_size = 100, trex_daemon_port = 8090, trex_zmq_port = 4500, verbose = False):
super(CTRexAdvClient, self).__init__(trex_host, max_history_size, trex_daemon_port, trex_zmq_port, verbose)
pass
# TRex KIWI advanced methods
def start_quick_trex(self, pcap_file, d, delay, dual, ipv6, times, interfaces):
try:
return self.server.start_quick_trex(pcap_file = pcap_file, duration = d, dual = dual, delay = delay, ipv6 = ipv6, times = times, interfaces = interfaces)
except AppError as err:
self.__handle_AppError_exception(err.args[0])
except ProtocolError:
raise
finally:
self.prompt_verbose_data()
def stop_quick_trex(self):
try:
return self.server.stop_quick_trex()
except AppError as err:
self.__handle_AppError_exception(err.args[0])
except ProtocolError:
raise
finally:
self.prompt_verbose_data()
# def is_running(self):
# pass
def get_running_stats(self):
try:
return self.server.get_running_stats()
except AppError as err:
self.__handle_AppError_exception(err.args[0])
except ProtocolError:
raise
finally:
self.prompt_verbose_data()
def clear_counters(self):
try:
return self.server.clear_counters()
except AppError as err:
self.__handle_AppError_exception(err.args[0])
except ProtocolError:
raise
finally:
self.prompt_verbose_data()
if __name__ == "__main__":
trex = CTRexAdvClient('trex-dan', trex_daemon_port = 8383, verbose = True)
print trex.start_quick_trex(delay = 10,
dual = True,
d = 20,
interfaces = ["gig0/0/1", "gig0/0/2"],
ipv6 = False,
pcap_file="avl/http_browsing.pcap",
times=3)
print trex.stop_quick_trex()
print trex.get_running_stats()
print trex.clear_counters()
pass
|
dproc/trex_odp_porting_integration
|
scripts/automation/trex_control_plane/client/trex_adv_client.py
|
Python
|
apache-2.0
| 2,221
|
import random
import re
from email.headerregistry import Address
from typing import List, Sequence
from unittest.mock import patch
import ldap
import ujson
from django.conf import settings
from django.core import mail
from django.test import override_settings
from django_auth_ldap.config import LDAPSearch
from zerver.lib.actions import do_change_notification_settings, do_change_user_role
from zerver.lib.email_notifications import (
enqueue_welcome_emails,
fix_emojis,
handle_missedmessage_emails,
relative_to_full_url,
)
from zerver.lib.send_email import FromAddress, send_custom_email
from zerver.lib.test_classes import ZulipTestCase
from zerver.models import ScheduledEmail, UserProfile, get_realm, get_stream
class TestCustomEmails(ZulipTestCase):
def test_send_custom_email_argument(self) -> None:
hamlet = self.example_user('hamlet')
email_subject = 'subject_test'
reply_to = 'reply_to_test'
from_name = "from_name_test"
markdown_template_path = "templates/zerver/emails/email_base_default.source.html"
send_custom_email([hamlet], {
"markdown_template_path": markdown_template_path,
"reply_to": reply_to,
"subject": email_subject,
"from_name": from_name,
})
self.assertEqual(len(mail.outbox), 1)
msg = mail.outbox[0]
self.assertEqual(msg.subject, email_subject)
self.assertEqual(len(msg.reply_to), 1)
self.assertEqual(msg.reply_to[0], reply_to)
self.assertNotIn("{% block content %}", msg.body)
def test_send_custom_email_headers(self) -> None:
hamlet = self.example_user('hamlet')
markdown_template_path = "zerver/tests/fixtures/email/custom_emails/email_base_headers_test.source.html"
send_custom_email([hamlet], {
"markdown_template_path": markdown_template_path,
})
self.assertEqual(len(mail.outbox), 1)
msg = mail.outbox[0]
self.assertEqual(msg.subject, "Test Subject")
self.assertFalse(msg.reply_to)
self.assertEqual('Test body', msg.body)
def test_send_custom_email_no_argument(self) -> None:
hamlet = self.example_user('hamlet')
from_name = "from_name_test"
email_subject = 'subject_test'
markdown_template_path = "zerver/tests/fixtures/email/custom_emails/email_base_headers_no_headers_test.source.html"
from zerver.lib.send_email import NoEmailArgumentException
self.assertRaises(NoEmailArgumentException, send_custom_email, [hamlet], {
"markdown_template_path": markdown_template_path,
"from_name": from_name,
})
self.assertRaises(NoEmailArgumentException, send_custom_email, [hamlet], {
"markdown_template_path": markdown_template_path,
"subject": email_subject,
})
def test_send_custom_email_doubled_arguments(self) -> None:
hamlet = self.example_user('hamlet')
from_name = "from_name_test"
email_subject = 'subject_test'
markdown_template_path = "zerver/tests/fixtures/email/custom_emails/email_base_headers_test.source.html"
from zerver.lib.send_email import DoubledEmailArgumentException
self.assertRaises(DoubledEmailArgumentException, send_custom_email, [hamlet], {
"markdown_template_path": markdown_template_path,
"subject": email_subject,
})
self.assertRaises(DoubledEmailArgumentException, send_custom_email, [hamlet], {
"markdown_template_path": markdown_template_path,
"from_name": from_name,
})
def test_send_custom_email_admins_only(self) -> None:
admin_user = self.example_user('hamlet')
do_change_user_role(admin_user, UserProfile.ROLE_REALM_ADMINISTRATOR)
non_admin_user = self.example_user('cordelia')
markdown_template_path = "zerver/tests/fixtures/email/custom_emails/email_base_headers_test.source.html"
send_custom_email([admin_user, non_admin_user], {
"markdown_template_path": markdown_template_path,
"admins_only": True,
})
self.assertEqual(len(mail.outbox), 1)
self.assertIn(admin_user.delivery_email, mail.outbox[0].to[0])
class TestFollowupEmails(ZulipTestCase):
def test_day1_email_context(self) -> None:
hamlet = self.example_user("hamlet")
enqueue_welcome_emails(hamlet)
scheduled_emails = ScheduledEmail.objects.filter(users=hamlet)
email_data = ujson.loads(scheduled_emails[0].data)
self.assertEqual(email_data["context"]["email"], self.example_email("hamlet"))
self.assertEqual(email_data["context"]["is_realm_admin"], False)
self.assertEqual(email_data["context"]["getting_started_link"], "https://zulip.com")
self.assertNotIn("ldap_username", email_data["context"])
ScheduledEmail.objects.all().delete()
iago = self.example_user("iago")
enqueue_welcome_emails(iago)
scheduled_emails = ScheduledEmail.objects.filter(users=iago)
email_data = ujson.loads(scheduled_emails[0].data)
self.assertEqual(email_data["context"]["email"], self.example_email("iago"))
self.assertEqual(email_data["context"]["is_realm_admin"], True)
self.assertEqual(email_data["context"]["getting_started_link"],
"http://zulip.testserver/help/getting-your-organization-started-with-zulip")
self.assertNotIn("ldap_username", email_data["context"])
# See https://zulip.readthedocs.io/en/latest/production/authentication-methods.html#ldap-including-active-directory
# for case details.
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
'zproject.backends.ZulipDummyBackend'),
# configure email search for email address in the uid attribute:
AUTH_LDAP_REVERSE_EMAIL_SEARCH=LDAPSearch("ou=users,dc=zulip,dc=com",
ldap.SCOPE_ONELEVEL,
"(uid=%(email)s)"))
def test_day1_email_ldap_case_a_login_credentials(self) -> None:
self.init_default_ldap_database()
ldap_user_attr_map = {'full_name': 'cn', 'short_name': 'sn'}
with self.settings(AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map):
self.login_with_return("newuser_email_as_uid@zulip.com",
self.ldap_password("newuser_email_as_uid@zulip.com"))
user = UserProfile.objects.get(delivery_email="newuser_email_as_uid@zulip.com")
scheduled_emails = ScheduledEmail.objects.filter(users=user)
self.assertEqual(len(scheduled_emails), 2)
email_data = ujson.loads(scheduled_emails[0].data)
self.assertEqual(email_data["context"]["ldap"], True)
self.assertEqual(email_data["context"]["ldap_username"], "newuser_email_as_uid@zulip.com")
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
'zproject.backends.ZulipDummyBackend'))
def test_day1_email_ldap_case_b_login_credentials(self) -> None:
self.init_default_ldap_database()
ldap_user_attr_map = {'full_name': 'cn', 'short_name': 'sn'}
with self.settings(
LDAP_APPEND_DOMAIN='zulip.com',
AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
):
self.login_with_return("newuser@zulip.com", self.ldap_password("newuser"))
user = UserProfile.objects.get(delivery_email="newuser@zulip.com")
scheduled_emails = ScheduledEmail.objects.filter(users=user)
self.assertEqual(len(scheduled_emails), 2)
email_data = ujson.loads(scheduled_emails[0].data)
self.assertEqual(email_data["context"]["ldap"], True)
self.assertEqual(email_data["context"]["ldap_username"], "newuser")
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
'zproject.backends.ZulipDummyBackend'))
def test_day1_email_ldap_case_c_login_credentials(self) -> None:
self.init_default_ldap_database()
ldap_user_attr_map = {'full_name': 'cn', 'short_name': 'sn'}
with self.settings(
LDAP_EMAIL_ATTR='mail',
AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
):
self.login_with_return("newuser_with_email", self.ldap_password("newuser_with_email"))
user = UserProfile.objects.get(delivery_email="newuser_email@zulip.com")
scheduled_emails = ScheduledEmail.objects.filter(users=user)
self.assertEqual(len(scheduled_emails), 2)
email_data = ujson.loads(scheduled_emails[0].data)
self.assertEqual(email_data["context"]["ldap"], True)
self.assertEqual(email_data["context"]["ldap_username"], "newuser_with_email")
def test_followup_emails_count(self) -> None:
hamlet = self.example_user("hamlet")
cordelia = self.example_user("cordelia")
enqueue_welcome_emails(self.example_user("hamlet"))
        # Hamlet has an account only in the Zulip realm, so both day1 and day2 emails should be sent
scheduled_emails = ScheduledEmail.objects.filter(users=hamlet).order_by(
"scheduled_timestamp")
self.assertEqual(2, len(scheduled_emails))
self.assertEqual(ujson.loads(scheduled_emails[1].data)["template_prefix"], 'zerver/emails/followup_day2')
self.assertEqual(ujson.loads(scheduled_emails[0].data)["template_prefix"], 'zerver/emails/followup_day1')
ScheduledEmail.objects.all().delete()
enqueue_welcome_emails(cordelia)
scheduled_emails = ScheduledEmail.objects.filter(users=cordelia)
        # Cordelia has accounts in more than one realm, so the day2 email should not be sent
self.assertEqual(len(scheduled_emails), 1)
email_data = ujson.loads(scheduled_emails[0].data)
self.assertEqual(email_data["template_prefix"], 'zerver/emails/followup_day1')
class TestMissedMessages(ZulipTestCase):
def normalize_string(self, s: str) -> str:
s = s.strip()
return re.sub(r'\s+', ' ', s)
def _get_tokens(self) -> List[str]:
return ['mm' + str(random.getrandbits(32)) for _ in range(30)]
def _test_cases(self, msg_id: int, verify_body_include: List[str], email_subject: str,
send_as_user: bool, verify_html_body: bool=False,
show_message_content: bool=True,
verify_body_does_not_include: Sequence[str]=[],
trigger: str='') -> None:
othello = self.example_user('othello')
hamlet = self.example_user('hamlet')
tokens = self._get_tokens()
with patch('zerver.lib.email_mirror.generate_missed_message_token', side_effect=tokens):
handle_missedmessage_emails(hamlet.id, [{'message_id': msg_id, 'trigger': trigger}])
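        # When EMAIL_GATEWAY_PATTERN is configured, each missed-message email is given a
        # unique tokenized reply-to address; otherwise a plain noreply address is expected.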
if settings.EMAIL_GATEWAY_PATTERN != "":
reply_to_addresses = [settings.EMAIL_GATEWAY_PATTERN % (t,) for t in tokens]
reply_to_emails = [str(Address(display_name="Zulip", addr_spec=address)) for address in reply_to_addresses]
else:
reply_to_emails = ["noreply@testserver"]
msg = mail.outbox[0]
from_email = str(Address(display_name="Zulip missed messages", addr_spec=FromAddress.NOREPLY))
self.assertEqual(len(mail.outbox), 1)
if send_as_user:
from_email = f'"{othello.full_name}" <{othello.email}>'
self.assertEqual(msg.from_email, from_email)
self.assertEqual(msg.subject, email_subject)
self.assertEqual(len(msg.reply_to), 1)
self.assertIn(msg.reply_to[0], reply_to_emails)
if verify_html_body:
for text in verify_body_include:
self.assertIn(text, self.normalize_string(msg.alternatives[0][0]))
else:
for text in verify_body_include:
self.assertIn(text, self.normalize_string(msg.body))
for text in verify_body_does_not_include:
self.assertNotIn(text, self.normalize_string(msg.body))
def _realm_name_in_missed_message_email_subject(self, realm_name_in_notifications: bool) -> None:
msg_id = self.send_personal_message(
self.example_user('othello'),
self.example_user('hamlet'),
'Extremely personal message!',
)
verify_body_include = ['Extremely personal message!']
email_subject = 'PMs with Othello, the Moor of Venice'
if realm_name_in_notifications:
email_subject = 'PMs with Othello, the Moor of Venice [Zulip Dev]'
self._test_cases(msg_id, verify_body_include, email_subject, False)
def _extra_context_in_missed_stream_messages_mention(self, send_as_user: bool,
show_message_content: bool=True) -> None:
for i in range(0, 11):
self.send_stream_message(self.example_user('othello'), "Denmark", content=str(i))
self.send_stream_message(
self.example_user('othello'), "Denmark",
'11', topic_name='test2')
msg_id = self.send_stream_message(
self.example_user('othello'), "denmark",
'@**King Hamlet**')
if show_message_content:
verify_body_include = [
"Othello, the Moor of Venice: 1 2 3 4 5 6 7 8 9 10 @**King Hamlet** -- ",
"You are receiving this because you were mentioned in Zulip Dev.",
]
email_subject = '#Denmark > test'
verify_body_does_not_include: List[str] = []
else:
            # Test the case where message content in missed-message emails is disabled.
verify_body_include = [
"This email does not include message content because you have disabled message ",
"http://zulip.testserver/help/pm-mention-alert-notifications ",
"View or reply in Zulip",
" Manage email preferences: http://zulip.testserver/#settings/notifications",
]
email_subject = 'New missed messages'
verify_body_does_not_include = ['Denmark > test', 'Othello, the Moor of Venice',
'1 2 3 4 5 6 7 8 9 10 @**King Hamlet**', 'private', 'group',
'Reply to this email directly, or view it in Zulip']
self._test_cases(msg_id, verify_body_include, email_subject, send_as_user,
show_message_content=show_message_content,
verify_body_does_not_include=verify_body_does_not_include,
trigger='mentioned')
def _extra_context_in_missed_stream_messages_wildcard_mention(self, send_as_user: bool,
show_message_content: bool=True) -> None:
for i in range(1, 6):
self.send_stream_message(self.example_user('othello'), "Denmark", content=str(i))
self.send_stream_message(
self.example_user('othello'), "Denmark",
'11', topic_name='test2')
msg_id = self.send_stream_message(
self.example_user('othello'), "denmark",
'@**all**')
if show_message_content:
verify_body_include = [
"Othello, the Moor of Venice: 1 2 3 4 5 @**all** -- ",
"You are receiving this because you were mentioned in Zulip Dev.",
]
email_subject = '#Denmark > test'
verify_body_does_not_include: List[str] = []
else:
            # Test the case where message content in missed-message emails is disabled.
verify_body_include = [
"This email does not include message content because you have disabled message ",
"http://zulip.testserver/help/pm-mention-alert-notifications ",
"View or reply in Zulip",
" Manage email preferences: http://zulip.testserver/#settings/notifications",
]
email_subject = 'New missed messages'
verify_body_does_not_include = ['Denmark > test', 'Othello, the Moor of Venice',
'1 2 3 4 5 @**all**', 'private', 'group',
'Reply to this email directly, or view it in Zulip']
self._test_cases(msg_id, verify_body_include, email_subject, send_as_user,
show_message_content=show_message_content,
verify_body_does_not_include=verify_body_does_not_include,
trigger='wildcard_mentioned')
def _extra_context_in_missed_stream_messages_email_notify(self, send_as_user: bool) -> None:
for i in range(0, 11):
self.send_stream_message(self.example_user('othello'), "Denmark", content=str(i))
self.send_stream_message(
self.example_user('othello'), "Denmark",
'11', topic_name='test2')
msg_id = self.send_stream_message(
self.example_user('othello'), "denmark",
'12')
verify_body_include = [
"Othello, the Moor of Venice: 1 2 3 4 5 6 7 8 9 10 12 -- ",
"You are receiving this because you have email notifications enabled for this stream.",
]
email_subject = '#Denmark > test'
self._test_cases(msg_id, verify_body_include, email_subject, send_as_user, trigger='stream_email_notify')
def _extra_context_in_missed_stream_messages_mention_two_senders(self, send_as_user: bool) -> None:
for i in range(0, 3):
self.send_stream_message(self.example_user('cordelia'), "Denmark", str(i))
msg_id = self.send_stream_message(
self.example_user('othello'), "Denmark",
'@**King Hamlet**')
verify_body_include = [
"Cordelia Lear: 0 1 2 Othello, the Moor of Venice: @**King Hamlet** -- ",
"You are receiving this because you were mentioned in Zulip Dev.",
]
email_subject = '#Denmark > test'
self._test_cases(msg_id, verify_body_include, email_subject, send_as_user, trigger='mentioned')
def _extra_context_in_personal_missed_stream_messages(self, send_as_user: bool,
show_message_content: bool=True,
message_content_disabled_by_user: bool=False,
message_content_disabled_by_realm: bool=False) -> None:
msg_id = self.send_personal_message(
self.example_user('othello'),
self.example_user('hamlet'),
'Extremely personal message!',
)
if show_message_content:
verify_body_include = ['Extremely personal message!']
email_subject = 'PMs with Othello, the Moor of Venice'
verify_body_does_not_include: List[str] = []
else:
if message_content_disabled_by_realm:
verify_body_include = [
"This email does not include message content because your organization has disabled",
"http://zulip.testserver/help/hide-message-content-in-emails",
"View or reply in Zulip",
" Manage email preferences: http://zulip.testserver/#settings/notifications",
]
elif message_content_disabled_by_user:
verify_body_include = [
"This email does not include message content because you have disabled message ",
"http://zulip.testserver/help/pm-mention-alert-notifications ",
"View or reply in Zulip",
" Manage email preferences: http://zulip.testserver/#settings/notifications",
]
email_subject = 'New missed messages'
verify_body_does_not_include = ['Othello, the Moor of Venice', 'Extremely personal message!',
'mentioned', 'group', 'Reply to this email directly, or view it in Zulip']
self._test_cases(msg_id, verify_body_include, email_subject, send_as_user,
show_message_content=show_message_content,
verify_body_does_not_include=verify_body_does_not_include)
def _reply_to_email_in_personal_missed_stream_messages(self, send_as_user: bool) -> None:
msg_id = self.send_personal_message(
self.example_user('othello'),
self.example_user('hamlet'),
'Extremely personal message!',
)
verify_body_include = ['Reply to this email directly, or view it in Zulip']
email_subject = 'PMs with Othello, the Moor of Venice'
self._test_cases(msg_id, verify_body_include, email_subject, send_as_user)
def _reply_warning_in_personal_missed_stream_messages(self, send_as_user: bool) -> None:
msg_id = self.send_personal_message(
self.example_user('othello'),
self.example_user('hamlet'),
'Extremely personal message!',
)
verify_body_include = ['Do not reply to this email.']
email_subject = 'PMs with Othello, the Moor of Venice'
self._test_cases(msg_id, verify_body_include, email_subject, send_as_user)
def _extra_context_in_huddle_missed_stream_messages_two_others(self, send_as_user: bool,
show_message_content: bool=True) -> None:
msg_id = self.send_huddle_message(
self.example_user('othello'),
[
self.example_user('hamlet'),
self.example_user('iago'),
],
'Group personal message!',
)
if show_message_content:
verify_body_include = ['Othello, the Moor of Venice: Group personal message! -- Reply']
email_subject = 'Group PMs with Iago and Othello, the Moor of Venice'
verify_body_does_not_include: List[str] = []
else:
verify_body_include = [
"This email does not include message content because you have disabled message ",
"http://zulip.testserver/help/pm-mention-alert-notifications ",
"View or reply in Zulip",
" Manage email preferences: http://zulip.testserver/#settings/notifications",
]
email_subject = 'New missed messages'
verify_body_does_not_include = ['Iago', 'Othello, the Moor of Venice Othello, the Moor of Venice',
'Group personal message!', 'mentioned',
'Reply to this email directly, or view it in Zulip']
self._test_cases(msg_id, verify_body_include, email_subject, send_as_user,
show_message_content=show_message_content,
verify_body_does_not_include=verify_body_does_not_include)
def _extra_context_in_huddle_missed_stream_messages_three_others(self, send_as_user: bool) -> None:
msg_id = self.send_huddle_message(
self.example_user('othello'),
[
self.example_user('hamlet'),
self.example_user('iago'),
self.example_user('cordelia'),
],
'Group personal message!',
)
verify_body_include = ['Othello, the Moor of Venice: Group personal message! -- Reply']
email_subject = 'Group PMs with Cordelia Lear, Iago, and Othello, the Moor of Venice'
self._test_cases(msg_id, verify_body_include, email_subject, send_as_user)
def _extra_context_in_huddle_missed_stream_messages_many_others(self, send_as_user: bool) -> None:
msg_id = self.send_huddle_message(self.example_user('othello'),
[self.example_user('hamlet'),
self.example_user('iago'),
self.example_user('cordelia'),
self.example_user('prospero')],
'Group personal message!')
verify_body_include = ['Othello, the Moor of Venice: Group personal message! -- Reply']
email_subject = 'Group PMs with Cordelia Lear, Iago, and 2 others'
self._test_cases(msg_id, verify_body_include, email_subject, send_as_user)
def _deleted_message_in_missed_stream_messages(self, send_as_user: bool) -> None:
msg_id = self.send_stream_message(
self.example_user('othello'), "denmark",
'@**King Hamlet** to be deleted')
hamlet = self.example_user('hamlet')
self.login('othello')
result = self.client_patch('/json/messages/' + str(msg_id),
{'message_id': msg_id, 'content': ' '})
self.assert_json_success(result)
handle_missedmessage_emails(hamlet.id, [{'message_id': msg_id}])
self.assertEqual(len(mail.outbox), 0)
def _deleted_message_in_personal_missed_stream_messages(self, send_as_user: bool) -> None:
msg_id = self.send_personal_message(self.example_user('othello'),
self.example_user('hamlet'),
'Extremely personal message! to be deleted!')
hamlet = self.example_user('hamlet')
self.login('othello')
result = self.client_patch('/json/messages/' + str(msg_id),
{'message_id': msg_id, 'content': ' '})
self.assert_json_success(result)
handle_missedmessage_emails(hamlet.id, [{'message_id': msg_id}])
self.assertEqual(len(mail.outbox), 0)
def _deleted_message_in_huddle_missed_stream_messages(self, send_as_user: bool) -> None:
msg_id = self.send_huddle_message(
self.example_user('othello'),
[
self.example_user('hamlet'),
self.example_user('iago'),
],
'Group personal message!',
)
hamlet = self.example_user('hamlet')
iago = self.example_user('iago')
self.login('othello')
result = self.client_patch('/json/messages/' + str(msg_id),
{'message_id': msg_id, 'content': ' '})
self.assert_json_success(result)
handle_missedmessage_emails(hamlet.id, [{'message_id': msg_id}])
self.assertEqual(len(mail.outbox), 0)
handle_missedmessage_emails(iago.id, [{'message_id': msg_id}])
self.assertEqual(len(mail.outbox), 0)
def test_realm_name_in_notifications(self) -> None:
# Test with realm_name_in_notifications for hamlet disabled.
self._realm_name_in_missed_message_email_subject(False)
# Enable realm_name_in_notifications for hamlet and test again.
hamlet = self.example_user('hamlet')
hamlet.realm_name_in_notifications = True
hamlet.save(update_fields=['realm_name_in_notifications'])
# Empty the test outbox
mail.outbox = []
self._realm_name_in_missed_message_email_subject(True)
def test_message_content_disabled_in_missed_message_notifications(self) -> None:
# Test when user disabled message content in email notifications.
do_change_notification_settings(self.example_user("hamlet"),
"message_content_in_email_notifications", False)
self._extra_context_in_missed_stream_messages_mention(False, show_message_content=False)
mail.outbox = []
self._extra_context_in_missed_stream_messages_wildcard_mention(False, show_message_content=False)
mail.outbox = []
self._extra_context_in_personal_missed_stream_messages(False, show_message_content=False,
message_content_disabled_by_user=True)
mail.outbox = []
self._extra_context_in_huddle_missed_stream_messages_two_others(False, show_message_content=False)
@override_settings(SEND_MISSED_MESSAGE_EMAILS_AS_USER=True)
def test_extra_context_in_missed_stream_messages_as_user(self) -> None:
self._extra_context_in_missed_stream_messages_mention(True)
def test_extra_context_in_missed_stream_messages(self) -> None:
self._extra_context_in_missed_stream_messages_mention(False)
@override_settings(SEND_MISSED_MESSAGE_EMAILS_AS_USER=True)
def test_extra_context_in_missed_stream_messages_as_user_wildcard(self) -> None:
self._extra_context_in_missed_stream_messages_wildcard_mention(True)
def test_extra_context_in_missed_stream_messages_wildcard(self) -> None:
self._extra_context_in_missed_stream_messages_wildcard_mention(False)
@override_settings(SEND_MISSED_MESSAGE_EMAILS_AS_USER=True)
def test_extra_context_in_missed_stream_messages_as_user_two_senders(self) -> None:
self._extra_context_in_missed_stream_messages_mention_two_senders(True)
def test_extra_context_in_missed_stream_messages_two_senders(self) -> None:
self._extra_context_in_missed_stream_messages_mention_two_senders(False)
def test_reply_to_email_in_personal_missed_stream_messages(self) -> None:
self._reply_to_email_in_personal_missed_stream_messages(False)
@override_settings(SEND_MISSED_MESSAGE_EMAILS_AS_USER=True)
def test_extra_context_in_missed_stream_messages_email_notify_as_user(self) -> None:
self._extra_context_in_missed_stream_messages_email_notify(True)
def test_extra_context_in_missed_stream_messages_email_notify(self) -> None:
self._extra_context_in_missed_stream_messages_email_notify(False)
@override_settings(EMAIL_GATEWAY_PATTERN="")
def test_reply_warning_in_personal_missed_stream_messages(self) -> None:
self._reply_warning_in_personal_missed_stream_messages(False)
@override_settings(SEND_MISSED_MESSAGE_EMAILS_AS_USER=True)
def test_extra_context_in_personal_missed_stream_messages_as_user(self) -> None:
self._extra_context_in_personal_missed_stream_messages(True)
def test_extra_context_in_personal_missed_stream_messages(self) -> None:
self._extra_context_in_personal_missed_stream_messages(False)
@override_settings(SEND_MISSED_MESSAGE_EMAILS_AS_USER=True)
def test_extra_context_in_huddle_missed_stream_messages_two_others_as_user(self) -> None:
self._extra_context_in_huddle_missed_stream_messages_two_others(True)
def test_extra_context_in_huddle_missed_stream_messages_two_others(self) -> None:
self._extra_context_in_huddle_missed_stream_messages_two_others(False)
@override_settings(SEND_MISSED_MESSAGE_EMAILS_AS_USER=True)
def test_extra_context_in_huddle_missed_stream_messages_three_others_as_user(self) -> None:
self._extra_context_in_huddle_missed_stream_messages_three_others(True)
def test_extra_context_in_huddle_missed_stream_messages_three_others(self) -> None:
self._extra_context_in_huddle_missed_stream_messages_three_others(False)
@override_settings(SEND_MISSED_MESSAGE_EMAILS_AS_USER=True)
def test_extra_context_in_huddle_missed_stream_messages_many_others_as_user(self) -> None:
self._extra_context_in_huddle_missed_stream_messages_many_others(True)
def test_extra_context_in_huddle_missed_stream_messages_many_others(self) -> None:
self._extra_context_in_huddle_missed_stream_messages_many_others(False)
@override_settings(SEND_MISSED_MESSAGE_EMAILS_AS_USER=True)
def test_deleted_message_in_missed_stream_messages_as_user(self) -> None:
self._deleted_message_in_missed_stream_messages(True)
def test_deleted_message_in_missed_stream_messages(self) -> None:
self._deleted_message_in_missed_stream_messages(False)
@override_settings(SEND_MISSED_MESSAGE_EMAILS_AS_USER=True)
def test_deleted_message_in_personal_missed_stream_messages_as_user(self) -> None:
self._deleted_message_in_personal_missed_stream_messages(True)
def test_deleted_message_in_personal_missed_stream_messages(self) -> None:
self._deleted_message_in_personal_missed_stream_messages(False)
@override_settings(SEND_MISSED_MESSAGE_EMAILS_AS_USER=True)
def test_deleted_message_in_huddle_missed_stream_messages_as_user(self) -> None:
self._deleted_message_in_huddle_missed_stream_messages(True)
def test_deleted_message_in_huddle_missed_stream_messages(self) -> None:
self._deleted_message_in_huddle_missed_stream_messages(False)
def test_realm_message_content_allowed_in_email_notifications(self) -> None:
user = self.example_user("hamlet")
# When message content is allowed at realm level
realm = get_realm("zulip")
realm.message_content_allowed_in_email_notifications = True
realm.save(update_fields=['message_content_allowed_in_email_notifications'])
# Emails have missed message content when message content is enabled by the user
do_change_notification_settings(user, "message_content_in_email_notifications", True)
mail.outbox = []
self._extra_context_in_personal_missed_stream_messages(False, show_message_content=True)
# Emails don't have missed message content when message content is disabled by the user
do_change_notification_settings(user, "message_content_in_email_notifications", False)
mail.outbox = []
self._extra_context_in_personal_missed_stream_messages(False, show_message_content=False,
message_content_disabled_by_user=True)
# When message content is not allowed at realm level
# Emails don't have missed message irrespective of message content setting of the user
realm = get_realm("zulip")
realm.message_content_allowed_in_email_notifications = False
realm.save(update_fields=['message_content_allowed_in_email_notifications'])
do_change_notification_settings(user, "message_content_in_email_notifications", True)
mail.outbox = []
self._extra_context_in_personal_missed_stream_messages(False, show_message_content=False,
message_content_disabled_by_realm=True)
do_change_notification_settings(user, "message_content_in_email_notifications", False)
mail.outbox = []
self._extra_context_in_personal_missed_stream_messages(False, show_message_content=False,
message_content_disabled_by_user=True,
message_content_disabled_by_realm=True)
def test_realm_emoji_in_missed_message(self) -> None:
realm = get_realm("zulip")
msg_id = self.send_personal_message(
self.example_user('othello'), self.example_user('hamlet'),
'Extremely personal message with a realm emoji :green_tick:!')
realm_emoji_id = realm.get_active_emoji()['green_tick']['id']
realm_emoji_url = f"http://zulip.testserver/user_avatars/{realm.id}/emoji/images/{realm_emoji_id}.png"
verify_body_include = [f'<img alt=":green_tick:" src="{realm_emoji_url}" title="green tick" style="height: 20px;">']
email_subject = 'PMs with Othello, the Moor of Venice'
self._test_cases(msg_id, verify_body_include, email_subject, send_as_user=False, verify_html_body=True)
def test_emojiset_in_missed_message(self) -> None:
hamlet = self.example_user('hamlet')
hamlet.emojiset = 'twitter'
hamlet.save(update_fields=['emojiset'])
msg_id = self.send_personal_message(
self.example_user('othello'), self.example_user('hamlet'),
'Extremely personal message with a hamburger :hamburger:!')
verify_body_include = ['<img alt=":hamburger:" src="http://zulip.testserver/static/generated/emoji/images-twitter-64/1f354.png" title="hamburger" style="height: 20px;">']
email_subject = 'PMs with Othello, the Moor of Venice'
self._test_cases(msg_id, verify_body_include, email_subject, send_as_user=False, verify_html_body=True)
def test_stream_link_in_missed_message(self) -> None:
msg_id = self.send_personal_message(
self.example_user('othello'), self.example_user('hamlet'),
'Come and join us in #**Verona**.')
stream_id = get_stream('Verona', get_realm('zulip')).id
href = f"http://zulip.testserver/#narrow/stream/{stream_id}-Verona"
verify_body_include = [f'<a class="stream" data-stream-id="5" href="{href}">#Verona</a']
email_subject = 'PMs with Othello, the Moor of Venice'
self._test_cases(msg_id, verify_body_include, email_subject, send_as_user=False, verify_html_body=True)
def test_sender_name_in_missed_message(self) -> None:
hamlet = self.example_user('hamlet')
msg_id_1 = self.send_stream_message(self.example_user('iago'),
"Denmark",
'@**King Hamlet**')
msg_id_2 = self.send_stream_message(self.example_user('iago'),
"Verona",
'* 1\n *2')
msg_id_3 = self.send_personal_message(self.example_user('iago'),
hamlet,
'Hello')
handle_missedmessage_emails(hamlet.id, [
{'message_id': msg_id_1, "trigger": "mentioned"},
{'message_id': msg_id_2, "trigger": "stream_email_notify"},
{'message_id': msg_id_3},
])
self.assertIn('Iago: @**King Hamlet**\n\n--\nYou are', mail.outbox[0].body)
        # If message content starts with a <p> tag, the sender name is appended inside the <p> tag.
self.assertIn('<p><b>Iago</b>: <span class="user-mention"', mail.outbox[0].alternatives[0][0])
self.assertIn('Iago: * 1\n *2\n\n--\nYou are receiving', mail.outbox[1].body)
        # If message content does not start with a <p> tag, the sender name is added before the <p> tag
self.assertIn(' <b>Iago</b>: <ul>\n<li>1<br/>\n *2</li>\n</ul>\n',
mail.outbox[1].alternatives[0][0])
self.assertEqual('Hello\n\n--\n\nReply', mail.outbox[2].body[:16])
# Sender name is not appended to message for PM missed messages
self.assertIn('>\n \n <p>Hello</p>\n',
mail.outbox[2].alternatives[0][0])
def test_multiple_missed_personal_messages(self) -> None:
hamlet = self.example_user('hamlet')
msg_id_1 = self.send_personal_message(self.example_user('othello'),
hamlet,
'Personal Message 1')
msg_id_2 = self.send_personal_message(self.example_user('iago'),
hamlet,
'Personal Message 2')
handle_missedmessage_emails(hamlet.id, [
{'message_id': msg_id_1},
{'message_id': msg_id_2},
])
self.assertEqual(len(mail.outbox), 2)
email_subject = 'PMs with Othello, the Moor of Venice'
self.assertEqual(mail.outbox[0].subject, email_subject)
email_subject = 'PMs with Iago'
self.assertEqual(mail.outbox[1].subject, email_subject)
def test_multiple_stream_messages(self) -> None:
hamlet = self.example_user('hamlet')
msg_id_1 = self.send_stream_message(self.example_user('othello'),
"Denmark",
'Message1')
msg_id_2 = self.send_stream_message(self.example_user('iago'),
"Denmark",
'Message2')
handle_missedmessage_emails(hamlet.id, [
{'message_id': msg_id_1, "trigger": "stream_email_notify"},
{'message_id': msg_id_2, "trigger": "stream_email_notify"},
])
self.assertEqual(len(mail.outbox), 1)
email_subject = '#Denmark > test'
self.assertEqual(mail.outbox[0].subject, email_subject)
def test_multiple_stream_messages_and_mentions(self) -> None:
"""Subject should be stream name and topic as usual."""
hamlet = self.example_user('hamlet')
msg_id_1 = self.send_stream_message(self.example_user('iago'),
"Denmark",
'Regular message')
msg_id_2 = self.send_stream_message(self.example_user('othello'),
"Denmark",
'@**King Hamlet**')
handle_missedmessage_emails(hamlet.id, [
{'message_id': msg_id_1, "trigger": "stream_email_notify"},
{'message_id': msg_id_2, "trigger": "mentioned"},
])
self.assertEqual(len(mail.outbox), 1)
email_subject = '#Denmark > test'
self.assertEqual(mail.outbox[0].subject, email_subject)
def test_message_access_in_emails(self) -> None:
# Messages sent to a protected history-private stream shouldn't be
# accessible/available in emails before subscribing
stream_name = "private_stream"
self.make_stream(stream_name, invite_only=True,
history_public_to_subscribers=False)
user = self.example_user('iago')
self.subscribe(user, stream_name)
late_subscribed_user = self.example_user('hamlet')
self.send_stream_message(user,
stream_name,
'Before subscribing')
self.subscribe(late_subscribed_user, stream_name)
self.send_stream_message(user,
stream_name,
"After subscribing")
mention_msg_id = self.send_stream_message(user,
stream_name,
'@**King Hamlet**')
handle_missedmessage_emails(late_subscribed_user.id, [
{'message_id': mention_msg_id, "trigger": "mentioned"},
])
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, '#private_stream > test') # email subject
email_text = mail.outbox[0].message().as_string()
self.assertNotIn('Before subscribing', email_text)
self.assertIn('After subscribing', email_text)
self.assertIn('@**King Hamlet**', email_text)
def test_stream_mentions_multiple_people(self) -> None:
"""Subject should be stream name and topic as usual."""
hamlet = self.example_user('hamlet')
msg_id_1 = self.send_stream_message(self.example_user('iago'),
"Denmark",
'@**King Hamlet**')
msg_id_2 = self.send_stream_message(self.example_user('othello'),
"Denmark",
'@**King Hamlet**')
msg_id_3 = self.send_stream_message(self.example_user('cordelia'),
"Denmark",
'Regular message')
handle_missedmessage_emails(hamlet.id, [
{'message_id': msg_id_1, "trigger": "mentioned"},
{'message_id': msg_id_2, "trigger": "mentioned"},
{'message_id': msg_id_3, "trigger": "stream_email_notify"},
])
self.assertEqual(len(mail.outbox), 1)
email_subject = '#Denmark > test'
self.assertEqual(mail.outbox[0].subject, email_subject)
def test_multiple_stream_messages_different_topics(self) -> None:
"""Should receive separate emails for each topic within a stream."""
hamlet = self.example_user('hamlet')
msg_id_1 = self.send_stream_message(self.example_user('othello'),
"Denmark",
'Message1')
msg_id_2 = self.send_stream_message(self.example_user('iago'),
"Denmark",
'Message2',
topic_name="test2")
handle_missedmessage_emails(hamlet.id, [
{'message_id': msg_id_1, "trigger": "stream_email_notify"},
{'message_id': msg_id_2, "trigger": "stream_email_notify"},
])
self.assertEqual(len(mail.outbox), 2)
email_subjects = {mail.outbox[0].subject, mail.outbox[1].subject}
valid_email_subjects = {'#Denmark > test', '#Denmark > test2'}
self.assertEqual(email_subjects, valid_email_subjects)
def test_relative_to_full_url(self) -> None:
zulip_realm = get_realm("zulip")
zephyr_realm = get_realm("zephyr")
# Run `relative_to_full_url()` function over test fixtures present in
# 'markdown_test_cases.json' and check that it converts all the relative
# URLs to absolute URLs.
fixtures = ujson.loads(self.fixture_data("markdown_test_cases.json"))
test_fixtures = {}
for test in fixtures['regular_tests']:
test_fixtures[test['name']] = test
for test_name in test_fixtures:
test_data = test_fixtures[test_name]["expected_output"]
output_data = relative_to_full_url("http://example.com", test_data)
if re.search(r"""(?<=\=['"])/(?=[^<]+>)""", output_data) is not None:
raise AssertionError("Relative URL present in email: " + output_data +
"\nFailed test case's name is: " + test_name +
"\nIt is present in markdown_test_cases.json")
# Specific test cases.
# A path similar to our emoji path, but not in a link:
test_data = "<p>Check out the file at: '/static/generated/emoji/images/emoji/'</p>"
actual_output = relative_to_full_url("http://example.com", test_data)
expected_output = "<p>Check out the file at: '/static/generated/emoji/images/emoji/'</p>"
self.assertEqual(actual_output, expected_output)
# An uploaded file
test_data = '<a href="/user_uploads/{realm_id}/1f/some_random_value">/user_uploads/{realm_id}/1f/some_random_value</a>'
test_data = test_data.format(realm_id=zephyr_realm.id)
actual_output = relative_to_full_url("http://example.com", test_data)
expected_output = '<a href="http://example.com/user_uploads/{realm_id}/1f/some_random_value">' + \
'/user_uploads/{realm_id}/1f/some_random_value</a>'
expected_output = expected_output.format(realm_id=zephyr_realm.id)
self.assertEqual(actual_output, expected_output)
# A profile picture like syntax, but not actually in an HTML tag
test_data = '<p>Set src="/avatar/username@example.com?s=30"</p>'
actual_output = relative_to_full_url("http://example.com", test_data)
expected_output = '<p>Set src="/avatar/username@example.com?s=30"</p>'
self.assertEqual(actual_output, expected_output)
# A narrow URL which begins with a '#'.
test_data = '<p><a href="#narrow/stream/test/topic/test.20topic/near/142"' + \
'title="#narrow/stream/test/topic/test.20topic/near/142">Conversation</a></p>'
actual_output = relative_to_full_url("http://example.com", test_data)
expected_output = '<p><a href="http://example.com/#narrow/stream/test/topic/test.20topic/near/142" ' + \
'title="http://example.com/#narrow/stream/test/topic/test.20topic/near/142">Conversation</a></p>'
self.assertEqual(actual_output, expected_output)
# Scrub inline images.
test_data = '<p>See this <a href="/user_uploads/{realm_id}/52/fG7GM9e3afz_qsiUcSce2tl_/avatar_103.jpeg" target="_blank" ' + \
'title="avatar_103.jpeg">avatar_103.jpeg</a>.</p>' + \
'<div class="message_inline_image"><a href="/user_uploads/{realm_id}/52/fG7GM9e3afz_qsiUcSce2tl_/avatar_103.jpeg" ' + \
'target="_blank" title="avatar_103.jpeg"><img src="/user_uploads/{realm_id}/52/fG7GM9e3afz_qsiUcSce2tl_/avatar_103.jpeg"></a></div>'
test_data = test_data.format(realm_id=zulip_realm.id)
actual_output = relative_to_full_url("http://example.com", test_data)
expected_output = '<div><p>See this <a href="http://example.com/user_uploads/{realm_id}/52/fG7GM9e3afz_qsiUcSce2tl_/avatar_103.jpeg" target="_blank" ' + \
'title="avatar_103.jpeg">avatar_103.jpeg</a>.</p></div>'
expected_output = expected_output.format(realm_id=zulip_realm.id)
self.assertEqual(actual_output, expected_output)
        # For a message containing only an inline image URL preview, we do
# somewhat more extensive surgery.
test_data = '<div class="message_inline_image"><a href="https://www.google.com/images/srpr/logo4w.png" ' + \
'target="_blank" title="https://www.google.com/images/srpr/logo4w.png">' + \
'<img data-src-fullsize="/thumbnail/https%3A//www.google.com/images/srpr/logo4w.png?size=0x0" ' + \
'src="/thumbnail/https%3A//www.google.com/images/srpr/logo4w.png?size=0x100"></a></div>'
actual_output = relative_to_full_url("http://example.com", test_data)
expected_output = '<p><a href="https://www.google.com/images/srpr/logo4w.png" ' + \
'target="_blank" title="https://www.google.com/images/srpr/logo4w.png">' + \
'https://www.google.com/images/srpr/logo4w.png</a></p>'
self.assertEqual(actual_output, expected_output)
def test_fix_emoji(self) -> None:
# An emoji.
test_data = '<p>See <span aria-label="cloud with lightning and rain" class="emoji emoji-26c8" role="img" title="cloud with lightning and rain">' + \
':cloud_with_lightning_and_rain:</span>.</p>'
actual_output = fix_emojis(test_data, "http://example.com", "google")
expected_output = '<p>See <img alt=":cloud_with_lightning_and_rain:" src="http://example.com/static/generated/emoji/images-google-64/26c8.png" ' + \
'title="cloud with lightning and rain" style="height: 20px;">.</p>'
self.assertEqual(actual_output, expected_output)
|
timabbott/zulip
|
zerver/tests/test_email_notifications.py
|
Python
|
apache-2.0
| 51,309
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from oslo_config import fixture
from keystoneauth1 import exceptions as ka_exception
from keystoneauth1 import identity as ka_identity
import keystoneclient.exceptions as kc_exception
from magnum.common import exception
from magnum.common import keystone
import magnum.conf
from magnum.conf import keystone as ksconf
from magnum.tests import base
from magnum.tests import utils
CONF = magnum.conf.CONF
@mock.patch('keystoneclient.v3.client.Client')
class KeystoneClientTest(base.TestCase):
def setUp(self):
super(KeystoneClientTest, self).setUp()
dummy_url = 'http://server.test:5000/v3'
self.ctx = utils.dummy_context()
self.ctx.auth_url = dummy_url
self.ctx.auth_token = 'abcd1234'
plugin = keystone.ka_loading.get_plugin_loader('password')
opts = keystone.ka_loading.get_auth_plugin_conf_options(plugin)
cfg_fixture = self.useFixture(fixture.Config())
cfg_fixture.register_opts(opts, group=ksconf.CFG_GROUP)
self.config(auth_type='password',
auth_url=dummy_url,
username='fake_user',
password='fake_pass',
project_name='fake_project',
group=ksconf.CFG_GROUP)
self.config(auth_uri=dummy_url,
admin_user='magnum',
admin_password='varybadpass',
admin_tenant_name='service',
group=ksconf.CFG_LEGACY_GROUP)
# Disable global mocking for trustee_domain_id
self.stop_global(
'magnum.common.keystone.KeystoneClientV3.trustee_domain_id')
def tearDown(self):
# Re-enable global mocking for trustee_domain_id. We need this because
# mock blows up when trying to stop an already stopped patch (which it
# will do due to the addCleanup() in base.TestCase).
self.start_global(
'magnum.common.keystone.KeystoneClientV3.trustee_domain_id')
super(KeystoneClientTest, self).tearDown()
def test_client_with_password(self, mock_ks):
self.ctx.is_admin = True
self.ctx.auth_token_info = None
self.ctx.auth_token = None
self.ctx.trust_id = None
ks_client = keystone.KeystoneClientV3(self.ctx)
ks_client.client
session = ks_client.session
auth_plugin = session.auth
mock_ks.assert_called_once_with(session=session, trust_id=None)
self.assertIsInstance(auth_plugin, ka_identity.Password)
@mock.patch('magnum.common.keystone.ka_loading')
@mock.patch('magnum.common.keystone.ka_v3')
def test_client_with_password_legacy(self, mock_v3, mock_loading, mock_ks):
self.ctx.is_admin = True
self.ctx.auth_token_info = None
self.ctx.auth_token = None
self.ctx.trust_id = None
mock_loading.load_auth_from_conf_options.side_effect = \
ka_exception.MissingRequiredOptions(mock.MagicMock())
ks_client = keystone.KeystoneClientV3(self.ctx)
ks_client.client
session = ks_client.session
self.assertWarnsRegex(Warning,
'[keystone_authtoken] section is deprecated')
mock_v3.Password.assert_called_once_with(
auth_url='http://server.test:5000/v3', password='varybadpass',
project_domain_id='default', project_name='service',
user_domain_id='default', username='magnum')
mock_ks.assert_called_once_with(session=session, trust_id=None)
@mock.patch('magnum.common.keystone.ka_access')
def test_client_with_access_info(self, mock_access, mock_ks):
self.ctx.auth_token_info = mock.MagicMock()
ks_client = keystone.KeystoneClientV3(self.ctx)
ks_client.client
session = ks_client.session
auth_plugin = session.auth
mock_access.create.assert_called_once_with(body=mock.ANY,
auth_token='abcd1234')
mock_ks.assert_called_once_with(session=session, trust_id=None)
self.assertIsInstance(auth_plugin, ka_identity.access.AccessInfoPlugin)
@mock.patch('magnum.common.keystone.ka_v3')
def test_client_with_token(self, mock_v3, mock_ks):
ks_client = keystone.KeystoneClientV3(self.ctx)
ks_client.client
session = ks_client.session
mock_v3.Token.assert_called_once_with(
auth_url='http://server.test:5000/v3', token='abcd1234')
mock_ks.assert_called_once_with(session=session, trust_id=None)
def test_client_with_no_credentials(self, mock_ks):
self.ctx.auth_token = None
ks_client = keystone.KeystoneClientV3(self.ctx)
self.assertRaises(exception.AuthorizationFailure,
ks_client._get_auth)
mock_ks.assert_not_called()
def test_delete_trust(self, mock_ks):
mock_ks.return_value.trusts.delete.return_value = None
ks_client = keystone.KeystoneClientV3(self.ctx)
cluster = mock.MagicMock()
cluster.trust_id = 'atrust123'
self.assertIsNone(ks_client.delete_trust(self.ctx, cluster))
mock_ks.return_value.trusts.delete.assert_called_once_with('atrust123')
def test_delete_trust_not_found(self, mock_ks):
mock_delete = mock_ks.return_value.trusts.delete
mock_delete.side_effect = kc_exception.NotFound()
ks_client = keystone.KeystoneClientV3(self.ctx)
cluster = mock.MagicMock()
cluster.trust_id = 'atrust123'
self.assertIsNone(ks_client.delete_trust(self.ctx, cluster))
@mock.patch('keystoneauth1.session.Session')
def test_create_trust_with_all_roles(self, mock_session, mock_ks):
mock_session.return_value.get_user_id.return_value = '123456'
mock_session.return_value.get_project_id.return_value = '654321'
self.ctx.roles = ['role1', 'role2']
ks_client = keystone.KeystoneClientV3(self.ctx)
ks_client.create_trust(trustee_user='888888')
mock_ks.return_value.trusts.create.assert_called_once_with(
delegation_depth=0,
trustor_user='123456', project='654321',
trustee_user='888888', role_names=['role1', 'role2'],
impersonation=True)
@mock.patch('keystoneauth1.session.Session')
def test_create_trust_with_limit_roles(self, mock_session, mock_ks):
mock_session.return_value.get_user_id.return_value = '123456'
mock_session.return_value.get_project_id.return_value = '654321'
self.ctx.roles = ['role1', 'role2']
ks_client = keystone.KeystoneClientV3(self.ctx)
CONF.set_override('roles', ['role3'], group='trust')
ks_client.create_trust(trustee_user='888888')
mock_ks.return_value.trusts.create.assert_called_once_with(
delegation_depth=0,
trustor_user='123456', project='654321',
trustee_user='888888', role_names=['role3'],
impersonation=True)
@mock.patch('magnum.common.keystone.KeystoneClientV3.trustee_domain_id')
def test_create_trustee(self, mock_tdi, mock_ks):
expected_username = '_username'
expected_password = '_password'
expected_domain = '_expected_trustee_domain_id'
mock_tdi.__get__ = mock.MagicMock(return_value=expected_domain)
ks_client = keystone.KeystoneClientV3(self.ctx)
ks_client.create_trustee(
username=expected_username,
password=expected_password,
)
mock_ks.return_value.users.create.assert_called_once_with(
name=expected_username,
password=expected_password,
domain=expected_domain,
)
@mock.patch('magnum.common.keystone.KeystoneClientV3.domain_admin_auth')
@mock.patch('magnum.common.keystone.KeystoneClientV3.domain_admin_session')
def test_trustee_domain_id(self, mock_session, mock_auth, mock_ks):
expected_domain_id = '_expected_domain_id'
_mock_session = mock.MagicMock()
mock_session.__get__ = mock.MagicMock(return_value=_mock_session)
_mock_auth = mock.MagicMock()
mock_auth.__get__ = mock.MagicMock(return_value=_mock_auth)
mock_access = mock.MagicMock()
mock_access.domain_id = expected_domain_id
_mock_auth.get_access.return_value = mock_access
ks_client = keystone.KeystoneClientV3(self.ctx)
self.assertEqual(expected_domain_id, ks_client.trustee_domain_id)
_mock_auth.get_access.assert_called_once_with(
_mock_session
)
def test_get_validate_region_name(self, mock_ks):
key = 'region_name'
val = 'RegionOne'
CONF.set_override(key, val, 'cinder_client')
mock_region = mock.MagicMock()
mock_region.id = 'RegionOne'
mock_ks.return_value.regions.list.return_value = [mock_region]
ks_client = keystone.KeystoneClientV3(self.ctx)
region_name = ks_client.get_validate_region_name(val)
self.assertEqual('RegionOne', region_name)
def test_get_validate_region_name_not_found(self, mock_ks):
key = 'region_name'
val = 'region123'
CONF.set_override(key, val, 'cinder_client')
ks_client = keystone.KeystoneClientV3(self.ctx)
self.assertRaises(exception.InvalidParameterValue,
ks_client.get_validate_region_name, val)
def test_get_validate_region_name_is_None(self, mock_ks):
key = 'region_name'
val = None
CONF.set_override(key, val, 'cinder_client')
ks_client = keystone.KeystoneClientV3(self.ctx)
self.assertRaises(exception.InvalidParameterValue,
ks_client.get_validate_region_name, val)
|
ArchiFleKs/magnum
|
magnum/tests/unit/common/test_keystone.py
|
Python
|
apache-2.0
| 10,319
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<i@binux.me>
# http://binux.me
# Created on 2012-12-17 11:07:19
from __future__ import unicode_literals
import os
import sys
import six
import copy
import time
import json
import logging
import traceback
import functools
import threading
import tornado.ioloop
import tornado.httputil
import tornado.httpclient
import pyspider
from six.moves import queue, http_cookies
from six.moves.urllib.robotparser import RobotFileParser
from requests import cookies
from six.moves.urllib.parse import urljoin, urlsplit
from tornado import gen
from tornado.curl_httpclient import CurlAsyncHTTPClient
from tornado.simple_httpclient import SimpleAsyncHTTPClient
from pyspider.libs import utils, dataurl, counter
from pyspider.libs.url import quote_chinese
from .cookie_utils import extract_cookies_to_jar
logger = logging.getLogger('fetcher')
class MyCurlAsyncHTTPClient(CurlAsyncHTTPClient):
def free_size(self):
return len(self._free_list)
def size(self):
return len(self._curls) - self.free_size()
class MySimpleAsyncHTTPClient(SimpleAsyncHTTPClient):
def free_size(self):
return self.max_clients - self.size()
def size(self):
return len(self.active)
fetcher_output = {
"status_code": int,
"orig_url": str,
"url": str,
"headers": dict,
"content": str,
"cookies": dict,
}
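# A hedged, illustrative example matching the schema above (an editor-added
# sketch: the concrete values are made up, and real results produced by the
# fetch methods below additionally carry 'time' and 'save' keys).
_EXAMPLE_FETCHER_OUTPUT = {
    "status_code": 200,
    "orig_url": "http://example.com/",
    "url": "http://example.com/",
    "headers": {"Content-Type": "text/html"},
    "content": "<html>...</html>",
    "cookies": {},
}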
class Fetcher(object):
user_agent = "pyspider/%s (+http://pyspider.org/)" % pyspider.__version__
default_options = {
'method': 'GET',
'headers': {
},
'use_gzip': True,
'timeout': 120,
'connect_timeout': 20,
}
phantomjs_proxy = None
    splash_endpoint = None
    # puppeteer_proxy is referenced by puppeteer_fetch() below; default it to
    # None so the "puppeteer is not enabled" (501) path is taken when unset.
    puppeteer_proxy = None
splash_lua_source = open(os.path.join(os.path.dirname(__file__), "splash_fetcher.lua")).read()
robot_txt_age = 60*60 # 1h
def __init__(self, inqueue, outqueue, poolsize=100, proxy=None, async_mode=True):
self.inqueue = inqueue
self.outqueue = outqueue
self.poolsize = poolsize
self._running = False
self._quit = False
self.proxy = proxy
self.async_mode = async_mode
self.ioloop = tornado.ioloop.IOLoop()
self.robots_txt_cache = {}
# binding io_loop to http_client here
if self.async_mode:
self.http_client = MyCurlAsyncHTTPClient(max_clients=self.poolsize,
io_loop=self.ioloop)
else:
self.http_client = tornado.httpclient.HTTPClient(MyCurlAsyncHTTPClient, max_clients=self.poolsize)
self._cnt = {
'5m': counter.CounterManager(
lambda: counter.TimebaseAverageWindowCounter(30, 10)),
'1h': counter.CounterManager(
lambda: counter.TimebaseAverageWindowCounter(60, 60)),
}
def send_result(self, type, task, result):
'''Send fetch result to processor'''
if self.outqueue:
try:
self.outqueue.put((task, result))
except Exception as e:
logger.exception(e)
def fetch(self, task, callback=None):
if self.async_mode:
return self.async_fetch(task, callback)
else:
return self.async_fetch(task, callback).result()
@gen.coroutine
def async_fetch(self, task, callback=None):
'''Do one fetch'''
url = task.get('url', 'data:,')
if callback is None:
callback = self.send_result
type = 'None'
start_time = time.time()
try:
if url.startswith('data:'):
type = 'data'
result = yield gen.maybe_future(self.data_fetch(url, task))
elif task.get('fetch', {}).get('fetch_type') in ('js', 'phantomjs'):
type = 'phantomjs'
result = yield self.phantomjs_fetch(url, task)
elif task.get('fetch', {}).get('fetch_type') in ('splash', ):
type = 'splash'
result = yield self.splash_fetch(url, task)
elif task.get('fetch', {}).get('fetch_type') in ('puppeteer', ):
type = 'puppeteer'
result = yield self.puppeteer_fetch(url, task)
else:
type = 'http'
result = yield self.http_fetch(url, task)
except Exception as e:
logger.exception(e)
result = self.handle_error(type, url, task, start_time, e)
callback(type, task, result)
self.on_result(type, task, result)
raise gen.Return(result)
def sync_fetch(self, task):
        '''Synchronous fetch, usually used in the xmlrpc thread'''
if not self._running:
return self.ioloop.run_sync(functools.partial(self.async_fetch, task, lambda t, _, r: True))
wait_result = threading.Condition()
_result = {}
def callback(type, task, result):
wait_result.acquire()
_result['type'] = type
_result['task'] = task
_result['result'] = result
wait_result.notify()
wait_result.release()
wait_result.acquire()
self.ioloop.add_callback(self.fetch, task, callback)
while 'result' not in _result:
wait_result.wait()
wait_result.release()
return _result['result']
def data_fetch(self, url, task):
'''A fake fetcher for dataurl'''
self.on_fetch('data', task)
result = {}
result['orig_url'] = url
result['content'] = dataurl.decode(url)
result['headers'] = {}
result['status_code'] = 200
result['url'] = url
result['cookies'] = {}
result['time'] = 0
result['save'] = task.get('fetch', {}).get('save')
if len(result['content']) < 70:
logger.info("[200] %s:%s %s 0s", task.get('project'), task.get('taskid'), url)
else:
logger.info(
"[200] %s:%s data:,%s...[content:%d] 0s",
task.get('project'), task.get('taskid'),
result['content'][:70],
len(result['content'])
)
return result
def handle_error(self, type, url, task, start_time, error):
result = {
'status_code': getattr(error, 'code', 599),
'error': utils.text(error),
'traceback': traceback.format_exc() if sys.exc_info()[0] else None,
'content': "",
'time': time.time() - start_time,
'orig_url': url,
'url': url,
"save": task.get('fetch', {}).get('save')
}
logger.error("[%d] %s:%s %s, %r %.2fs",
result['status_code'], task.get('project'), task.get('taskid'),
url, error, result['time'])
return result
allowed_options = ['method', 'data', 'connect_timeout', 'timeout', 'cookies', 'use_gzip', 'validate_cert']
def pack_tornado_request_parameters(self, url, task):
fetch = copy.deepcopy(self.default_options)
fetch['url'] = url
fetch['headers'] = tornado.httputil.HTTPHeaders(fetch['headers'])
fetch['headers']['User-Agent'] = self.user_agent
task_fetch = task.get('fetch', {})
for each in self.allowed_options:
if each in task_fetch:
fetch[each] = task_fetch[each]
fetch['headers'].update(task_fetch.get('headers', {}))
if task.get('track'):
track_headers = tornado.httputil.HTTPHeaders(
task.get('track', {}).get('fetch', {}).get('headers') or {})
track_ok = task.get('track', {}).get('process', {}).get('ok', False)
else:
track_headers = {}
track_ok = False
# proxy
proxy_string = None
if isinstance(task_fetch.get('proxy'), six.string_types):
proxy_string = task_fetch['proxy']
elif self.proxy and task_fetch.get('proxy', True):
proxy_string = self.proxy
if proxy_string:
if '://' not in proxy_string:
proxy_string = 'http://' + proxy_string
proxy_splited = urlsplit(proxy_string)
fetch['proxy_host'] = proxy_splited.hostname
if proxy_splited.username:
fetch['proxy_username'] = proxy_splited.username
if proxy_splited.password:
fetch['proxy_password'] = proxy_splited.password
if six.PY2:
for key in ('proxy_host', 'proxy_username', 'proxy_password'):
if key in fetch:
fetch[key] = fetch[key].encode('utf8')
fetch['proxy_port'] = proxy_splited.port or 8080
# etag
if task_fetch.get('etag', True):
_t = None
if isinstance(task_fetch.get('etag'), six.string_types):
_t = task_fetch.get('etag')
elif track_ok:
_t = track_headers.get('etag')
if _t and 'If-None-Match' not in fetch['headers']:
fetch['headers']['If-None-Match'] = _t
        # last modified
if task_fetch.get('last_modified', task_fetch.get('last_modifed', True)):
last_modified = task_fetch.get('last_modified', task_fetch.get('last_modifed', True))
_t = None
if isinstance(last_modified, six.string_types):
_t = last_modified
elif track_ok:
_t = track_headers.get('last-modified')
if _t and 'If-Modified-Since' not in fetch['headers']:
fetch['headers']['If-Modified-Since'] = _t
# timeout
if 'timeout' in fetch:
fetch['request_timeout'] = fetch['timeout']
del fetch['timeout']
# data rename to body
if 'data' in fetch:
fetch['body'] = fetch['data']
del fetch['data']
return fetch
@gen.coroutine
def can_fetch(self, user_agent, url):
parsed = urlsplit(url)
domain = parsed.netloc
if domain in self.robots_txt_cache:
robot_txt = self.robots_txt_cache[domain]
if time.time() - robot_txt.mtime() > self.robot_txt_age:
robot_txt = None
else:
robot_txt = None
if robot_txt is None:
robot_txt = RobotFileParser()
try:
response = yield gen.maybe_future(self.http_client.fetch(
urljoin(url, '/robots.txt'), connect_timeout=10, request_timeout=30))
content = response.body
except tornado.httpclient.HTTPError as e:
logger.error('load robots.txt from %s error: %r', domain, e)
content = ''
try:
content = content.decode('utf8', 'ignore')
except UnicodeDecodeError:
content = ''
robot_txt.parse(content.splitlines())
self.robots_txt_cache[domain] = robot_txt
raise gen.Return(robot_txt.can_fetch(user_agent, url))
def clear_robot_txt_cache(self):
now = time.time()
for domain, robot_txt in self.robots_txt_cache.items():
if now - robot_txt.mtime() > self.robot_txt_age:
del self.robots_txt_cache[domain]
@gen.coroutine
def http_fetch(self, url, task):
'''HTTP fetcher'''
start_time = time.time()
self.on_fetch('http', task)
handle_error = lambda x: self.handle_error('http', url, task, start_time, x)
# setup request parameters
fetch = self.pack_tornado_request_parameters(url, task)
task_fetch = task.get('fetch', {})
session = cookies.RequestsCookieJar()
# fix for tornado request obj
if 'Cookie' in fetch['headers']:
c = http_cookies.SimpleCookie()
try:
c.load(fetch['headers']['Cookie'])
except AttributeError:
c.load(utils.utf8(fetch['headers']['Cookie']))
for key in c:
session.set(key, c[key])
del fetch['headers']['Cookie']
if 'cookies' in fetch:
session.update(fetch['cookies'])
del fetch['cookies']
max_redirects = task_fetch.get('max_redirects', 5)
# we will handle redirects by hand to capture cookies
fetch['follow_redirects'] = False
# making requests
while True:
# robots.txt
if task_fetch.get('robots_txt', False):
can_fetch = yield self.can_fetch(fetch['headers']['User-Agent'], fetch['url'])
if not can_fetch:
error = tornado.httpclient.HTTPError(403, 'Disallowed by robots.txt')
raise gen.Return(handle_error(error))
try:
request = tornado.httpclient.HTTPRequest(**fetch)
# if cookie already in header, get_cookie_header wouldn't work
old_cookie_header = request.headers.get('Cookie')
if old_cookie_header:
del request.headers['Cookie']
cookie_header = cookies.get_cookie_header(session, request)
if cookie_header:
request.headers['Cookie'] = cookie_header
elif old_cookie_header:
request.headers['Cookie'] = old_cookie_header
except Exception as e:
logger.exception(fetch)
raise gen.Return(handle_error(e))
try:
response = yield gen.maybe_future(self.http_client.fetch(request))
except tornado.httpclient.HTTPError as e:
if e.response:
response = e.response
else:
raise gen.Return(handle_error(e))
extract_cookies_to_jar(session, response.request, response.headers)
if (response.code in (301, 302, 303, 307)
and response.headers.get('Location')
and task_fetch.get('allow_redirects', True)):
if max_redirects <= 0:
error = tornado.httpclient.HTTPError(
599, 'Maximum (%d) redirects followed' % task_fetch.get('max_redirects', 5),
response)
raise gen.Return(handle_error(error))
if response.code in (302, 303):
fetch['method'] = 'GET'
if 'body' in fetch:
del fetch['body']
fetch['url'] = quote_chinese(urljoin(fetch['url'], response.headers['Location']))
fetch['request_timeout'] -= time.time() - start_time
if fetch['request_timeout'] < 0:
fetch['request_timeout'] = 0.1
max_redirects -= 1
continue
result = {}
result['orig_url'] = url
result['content'] = response.body or ''
result['headers'] = dict(response.headers)
result['status_code'] = response.code
result['url'] = response.effective_url or url
result['time'] = time.time() - start_time
result['cookies'] = session.get_dict()
result['save'] = task_fetch.get('save')
if response.error:
result['error'] = utils.text(response.error)
if 200 <= response.code < 300:
logger.info("[%d] %s:%s %s %.2fs", response.code,
task.get('project'), task.get('taskid'),
url, result['time'])
else:
logger.warning("[%d] %s:%s %s %.2fs", response.code,
task.get('project'), task.get('taskid'),
url, result['time'])
raise gen.Return(result)
@gen.coroutine
def phantomjs_fetch(self, url, task):
'''Fetch with phantomjs proxy'''
start_time = time.time()
self.on_fetch('phantomjs', task)
handle_error = lambda x: self.handle_error('phantomjs', url, task, start_time, x)
# check phantomjs proxy is enabled
if not self.phantomjs_proxy:
result = {
"orig_url": url,
"content": "phantomjs is not enabled.",
"headers": {},
"status_code": 501,
"url": url,
"time": time.time() - start_time,
"cookies": {},
"save": task.get('fetch', {}).get('save')
}
logger.warning("[501] %s:%s %s 0s", task.get('project'), task.get('taskid'), url)
raise gen.Return(result)
# setup request parameters
fetch = self.pack_tornado_request_parameters(url, task)
task_fetch = task.get('fetch', {})
for each in task_fetch:
if each not in fetch:
fetch[each] = task_fetch[each]
# robots.txt
if task_fetch.get('robots_txt', False):
user_agent = fetch['headers']['User-Agent']
can_fetch = yield self.can_fetch(user_agent, url)
if not can_fetch:
error = tornado.httpclient.HTTPError(403, 'Disallowed by robots.txt')
raise gen.Return(handle_error(error))
request_conf = {
'follow_redirects': False
}
request_conf['connect_timeout'] = fetch.get('connect_timeout', 20)
request_conf['request_timeout'] = fetch.get('request_timeout', 120) + 1
session = cookies.RequestsCookieJar()
if 'Cookie' in fetch['headers']:
c = http_cookies.SimpleCookie()
try:
c.load(fetch['headers']['Cookie'])
except AttributeError:
c.load(utils.utf8(fetch['headers']['Cookie']))
for key in c:
session.set(key, c[key])
del fetch['headers']['Cookie']
if 'cookies' in fetch:
session.update(fetch['cookies'])
del fetch['cookies']
request = tornado.httpclient.HTTPRequest(url=fetch['url'])
cookie_header = cookies.get_cookie_header(session, request)
if cookie_header:
fetch['headers']['Cookie'] = cookie_header
# making requests
fetch['headers'] = dict(fetch['headers'])
try:
request = tornado.httpclient.HTTPRequest(
url=self.phantomjs_proxy, method="POST",
body=json.dumps(fetch), **request_conf)
except Exception as e:
raise gen.Return(handle_error(e))
try:
response = yield gen.maybe_future(self.http_client.fetch(request))
except tornado.httpclient.HTTPError as e:
if e.response:
response = e.response
else:
raise gen.Return(handle_error(e))
if not response.body:
raise gen.Return(handle_error(Exception('no response from phantomjs: %r' % response)))
result = {}
try:
result = json.loads(utils.text(response.body))
assert 'status_code' in result, result
except Exception as e:
if response.error:
result['error'] = utils.text(response.error)
raise gen.Return(handle_error(e))
if result.get('status_code', 200):
logger.info("[%d] %s:%s %s %.2fs", result['status_code'],
task.get('project'), task.get('taskid'), url, result['time'])
else:
logger.error("[%d] %s:%s %s, %r %.2fs", result['status_code'],
task.get('project'), task.get('taskid'),
url, result['content'], result['time'])
raise gen.Return(result)
@gen.coroutine
def splash_fetch(self, url, task):
'''Fetch with splash'''
start_time = time.time()
self.on_fetch('splash', task)
handle_error = lambda x: self.handle_error('splash', url, task, start_time, x)
        # check splash endpoint is enabled
if not self.splash_endpoint:
result = {
"orig_url": url,
"content": "splash is not enabled.",
"headers": {},
"status_code": 501,
"url": url,
"time": time.time() - start_time,
"cookies": {},
"save": task.get('fetch', {}).get('save')
}
logger.warning("[501] %s:%s %s 0s", task.get('project'), task.get('taskid'), url)
raise gen.Return(result)
# setup request parameters
fetch = self.pack_tornado_request_parameters(url, task)
task_fetch = task.get('fetch', {})
for each in task_fetch:
if each not in fetch:
fetch[each] = task_fetch[each]
# robots.txt
if task_fetch.get('robots_txt', False):
user_agent = fetch['headers']['User-Agent']
can_fetch = yield self.can_fetch(user_agent, url)
if not can_fetch:
error = tornado.httpclient.HTTPError(403, 'Disallowed by robots.txt')
raise gen.Return(handle_error(error))
request_conf = {
'follow_redirects': False,
'headers': {
'Content-Type': 'application/json',
}
}
request_conf['connect_timeout'] = fetch.get('connect_timeout', 20)
request_conf['request_timeout'] = fetch.get('request_timeout', 120) + 1
session = cookies.RequestsCookieJar()
if 'Cookie' in fetch['headers']:
c = http_cookies.SimpleCookie()
try:
c.load(fetch['headers']['Cookie'])
except AttributeError:
c.load(utils.utf8(fetch['headers']['Cookie']))
for key in c:
session.set(key, c[key])
del fetch['headers']['Cookie']
if 'cookies' in fetch:
session.update(fetch['cookies'])
del fetch['cookies']
request = tornado.httpclient.HTTPRequest(url=fetch['url'])
cookie_header = cookies.get_cookie_header(session, request)
if cookie_header:
fetch['headers']['Cookie'] = cookie_header
# making requests
fetch['lua_source'] = self.splash_lua_source
fetch['headers'] = dict(fetch['headers'])
try:
request = tornado.httpclient.HTTPRequest(
url=self.splash_endpoint, method="POST",
body=json.dumps(fetch), **request_conf)
except Exception as e:
raise gen.Return(handle_error(e))
try:
response = yield gen.maybe_future(self.http_client.fetch(request))
except tornado.httpclient.HTTPError as e:
if e.response:
response = e.response
else:
raise gen.Return(handle_error(e))
if not response.body:
            raise gen.Return(handle_error(Exception('no response from splash')))
result = {}
try:
result = json.loads(utils.text(response.body))
assert 'status_code' in result, result
except ValueError as e:
logger.error("result is not json: %r", response.body[:500])
raise gen.Return(handle_error(e))
except Exception as e:
if response.error:
result['error'] = utils.text(response.error)
raise gen.Return(handle_error(e))
if result.get('status_code', 200):
logger.info("[%d] %s:%s %s %.2fs", result['status_code'],
task.get('project'), task.get('taskid'), url, result['time'])
else:
logger.error("[%d] %s:%s %s, %r %.2fs", result['status_code'],
task.get('project'), task.get('taskid'),
url, result['content'], result['time'])
raise gen.Return(result)
@gen.coroutine
def puppeteer_fetch(self, url, task):
'''Fetch with puppeteer proxy'''
start_time = time.time()
self.on_fetch('puppeteer', task)
handle_error = lambda x: self.handle_error('puppeteer', url, task, start_time, x)
# check puppeteer proxy is enabled
if not self.puppeteer_proxy:
result = {
"orig_url": url,
"content": "puppeteer is not enabled.",
"headers": {},
"status_code": 501,
"url": url,
"time": time.time() - start_time,
"cookies": {},
"save": task.get('fetch', {}).get('save')
}
logger.warning("[501] %s:%s %s 0s", task.get('project'), task.get('taskid'), url)
raise gen.Return(result)
# setup request parameters
fetch = self.pack_tornado_request_parameters(url, task)
task_fetch = task.get('fetch', {})
for each in task_fetch:
if each not in fetch:
fetch[each] = task_fetch[each]
# robots.txt
if task_fetch.get('robots_txt', False):
user_agent = fetch['headers']['User-Agent']
can_fetch = yield self.can_fetch(user_agent, url)
if not can_fetch:
error = tornado.httpclient.HTTPError(403, 'Disallowed by robots.txt')
raise gen.Return(handle_error(error))
request_conf = {
'follow_redirects': False
}
request_conf['connect_timeout'] = fetch.get('connect_timeout', 20)
request_conf['request_timeout'] = fetch.get('request_timeout', 120) + 1
session = cookies.RequestsCookieJar()
if 'Cookie' in fetch['headers']:
c = http_cookies.SimpleCookie()
try:
c.load(fetch['headers']['Cookie'])
except AttributeError:
c.load(utils.utf8(fetch['headers']['Cookie']))
for key in c:
session.set(key, c[key])
del fetch['headers']['Cookie']
if 'cookies' in fetch:
session.update(fetch['cookies'])
del fetch['cookies']
request = tornado.httpclient.HTTPRequest(url=fetch['url'])
cookie_header = cookies.get_cookie_header(session, request)
if cookie_header:
fetch['headers']['Cookie'] = cookie_header
logger.info("%s", self.puppeteer_proxy)
# making requests
fetch['headers'] = dict(fetch['headers'])
headers = {}
headers['Content-Type'] = 'application/json; charset=UTF-8'
try:
request = tornado.httpclient.HTTPRequest(
url=self.puppeteer_proxy, method="POST", headers=headers,
body=json.dumps(fetch), **request_conf)
except Exception as e:
raise gen.Return(handle_error(e))
try:
response = yield gen.maybe_future(self.http_client.fetch(request))
except tornado.httpclient.HTTPError as e:
if e.response:
response = e.response
else:
raise gen.Return(handle_error(e))
if not response.body:
raise gen.Return(handle_error(Exception('no response from puppeteer: %r' % response)))
result = {}
try:
result = json.loads(utils.text(response.body))
assert 'status_code' in result, result
except Exception as e:
if response.error:
result['error'] = utils.text(response.error)
raise gen.Return(handle_error(e))
if result.get('status_code', 200):
logger.info("[%d] %s:%s %s %.2fs", result['status_code'],
task.get('project'), task.get('taskid'), url, result['time'])
else:
logger.error("[%d] %s:%s %s, %r %.2fs", result['status_code'],
task.get('project'), task.get('taskid'),
url, result['content'], result['time'])
raise gen.Return(result)
def run(self):
'''Run loop'''
logger.info("fetcher starting...")
def queue_loop():
if not self.outqueue or not self.inqueue:
return
while not self._quit:
try:
if self.outqueue.full():
break
if self.http_client.free_size() <= 0:
break
task = self.inqueue.get_nowait()
                    # FIXME: decode_unicode_obj should be used after data is
                    # selected from the database; it's done here for performance
task = utils.decode_unicode_obj(task)
self.fetch(task)
except queue.Empty:
break
except KeyboardInterrupt:
break
except Exception as e:
logger.exception(e)
break
tornado.ioloop.PeriodicCallback(queue_loop, 100, io_loop=self.ioloop).start()
tornado.ioloop.PeriodicCallback(self.clear_robot_txt_cache, 10000, io_loop=self.ioloop).start()
self._running = True
try:
self.ioloop.start()
except KeyboardInterrupt:
pass
logger.info("fetcher exiting...")
def quit(self):
'''Quit fetcher'''
self._running = False
self._quit = True
self.ioloop.add_callback(self.ioloop.stop)
if hasattr(self, 'xmlrpc_server'):
self.xmlrpc_ioloop.add_callback(self.xmlrpc_server.stop)
self.xmlrpc_ioloop.add_callback(self.xmlrpc_ioloop.stop)
def size(self):
return self.http_client.size()
def xmlrpc_run(self, port=24444, bind='127.0.0.1', logRequests=False):
'''Run xmlrpc server'''
import umsgpack
from pyspider.libs.wsgi_xmlrpc import WSGIXMLRPCApplication
try:
from xmlrpc.client import Binary
except ImportError:
from xmlrpclib import Binary
application = WSGIXMLRPCApplication()
application.register_function(self.quit, '_quit')
application.register_function(self.size)
def sync_fetch(task):
result = self.sync_fetch(task)
result = Binary(umsgpack.packb(result))
return result
application.register_function(sync_fetch, 'fetch')
def dump_counter(_time, _type):
return self._cnt[_time].to_dict(_type)
application.register_function(dump_counter, 'counter')
import tornado.wsgi
import tornado.ioloop
import tornado.httpserver
container = tornado.wsgi.WSGIContainer(application)
self.xmlrpc_ioloop = tornado.ioloop.IOLoop()
self.xmlrpc_server = tornado.httpserver.HTTPServer(container, io_loop=self.xmlrpc_ioloop)
self.xmlrpc_server.listen(port=port, address=bind)
logger.info('fetcher.xmlrpc listening on %s:%s', bind, port)
self.xmlrpc_ioloop.start()
def on_fetch(self, type, task):
'''Called before task fetch'''
logger.info('on fetch %s:%s', type, task)
def on_result(self, type, task, result):
        '''Called after a task has been fetched'''
status_code = result.get('status_code', 599)
if status_code != 599:
            # use integer division so counter keys stay ints under Python 3
            status_code = (int(status_code) // 100 * 100)
self._cnt['5m'].event((task.get('project'), status_code), +1)
self._cnt['1h'].event((task.get('project'), status_code), +1)
if type in ('http', 'phantomjs') and result.get('time'):
content_len = len(result.get('content', ''))
self._cnt['5m'].event((task.get('project'), 'speed'),
float(content_len) / result.get('time'))
self._cnt['1h'].event((task.get('project'), 'speed'),
float(content_len) / result.get('time'))
self._cnt['5m'].event((task.get('project'), 'time'), result.get('time'))
self._cnt['1h'].event((task.get('project'), 'time'), result.get('time'))
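# ---------------------------------------------------------------------------
# A minimal, hedged usage sketch (editor addition, not part of the upstream
# module): it wires a Fetcher to plain in-memory queues and drives it directly.
# The queue sizes and the demo task below are illustrative assumptions; in a
# real deployment the scheduler and processor own these queues.
if __name__ == '__main__':
    demo_inqueue = queue.Queue(maxsize=100)
    demo_outqueue = queue.Queue(maxsize=100)
    demo_fetcher = Fetcher(demo_inqueue, demo_outqueue, poolsize=10, async_mode=True)
    demo_inqueue.put({'taskid': 'demo', 'project': 'demo',
                      'url': 'http://example.com/', 'fetch': {}})
    # run() blocks on the IOLoop; call demo_fetcher.quit() from another thread
    # (or press Ctrl-C) to stop it. Fetch results appear on demo_outqueue.
    demo_fetcher.run()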
|
wangjun/pyspider
|
pyspider/fetcher/tornado_fetcher.py
|
Python
|
apache-2.0
| 32,232
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from openstackclient.identity.v3 import consumer
from openstackclient.tests.unit import fakes
from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes
class TestOAuth1(identity_fakes.TestOAuth1):
def setUp(self):
super(TestOAuth1, self).setUp()
identity_client = self.app.client_manager.identity
self.consumers_mock = identity_client.oauth1.consumers
self.consumers_mock.reset_mock()
class TestConsumerCreate(TestOAuth1):
def setUp(self):
super(TestConsumerCreate, self).setUp()
self.consumers_mock.create.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.OAUTH_CONSUMER),
loaded=True,
)
self.cmd = consumer.CreateConsumer(self.app, None)
def test_create_consumer(self):
arglist = [
'--description', identity_fakes.consumer_description,
]
verifylist = [
('description', identity_fakes.consumer_description),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.consumers_mock.create.assert_called_with(
identity_fakes.consumer_description,
)
collist = ('description', 'id', 'secret')
self.assertEqual(collist, columns)
datalist = (
identity_fakes.consumer_description,
identity_fakes.consumer_id,
identity_fakes.consumer_secret,
)
self.assertEqual(datalist, data)
class TestConsumerDelete(TestOAuth1):
def setUp(self):
super(TestConsumerDelete, self).setUp()
# This is the return value for utils.find_resource()
self.consumers_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.OAUTH_CONSUMER),
loaded=True,
)
self.consumers_mock.delete.return_value = None
self.cmd = consumer.DeleteConsumer(self.app, None)
def test_delete_consumer(self):
arglist = [
identity_fakes.consumer_id,
]
verifylist = [
('consumer', [identity_fakes.consumer_id]),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
self.consumers_mock.delete.assert_called_with(
identity_fakes.consumer_id,
)
self.assertIsNone(result)
class TestConsumerList(TestOAuth1):
def setUp(self):
super(TestConsumerList, self).setUp()
self.consumers_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.OAUTH_CONSUMER),
loaded=True,
)
self.consumers_mock.list.return_value = [
fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.OAUTH_CONSUMER),
loaded=True,
),
]
# Get the command object to test
self.cmd = consumer.ListConsumer(self.app, None)
def test_consumer_list(self):
arglist = []
verifylist = []
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# In base command class Lister in cliff, abstract method take_action()
# returns a tuple containing the column names and an iterable
# containing the data to be listed.
columns, data = self.cmd.take_action(parsed_args)
self.consumers_mock.list.assert_called_with()
collist = ('ID', 'Description')
self.assertEqual(collist, columns)
datalist = ((
identity_fakes.consumer_id,
identity_fakes.consumer_description,
), )
self.assertEqual(datalist, tuple(data))
class TestConsumerSet(TestOAuth1):
def setUp(self):
super(TestConsumerSet, self).setUp()
self.consumers_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.OAUTH_CONSUMER),
loaded=True,
)
consumer_updated = copy.deepcopy(identity_fakes.OAUTH_CONSUMER)
consumer_updated['description'] = "consumer new description"
self.consumers_mock.update.return_value = fakes.FakeResource(
None,
consumer_updated,
loaded=True,
)
self.cmd = consumer.SetConsumer(self.app, None)
def test_consumer_update(self):
new_description = "consumer new description"
arglist = [
'--description', new_description,
identity_fakes.consumer_id,
]
verifylist = [
('description', new_description),
('consumer', identity_fakes.consumer_id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
kwargs = {'description': new_description}
self.consumers_mock.update.assert_called_with(
identity_fakes.consumer_id,
**kwargs
)
self.assertIsNone(result)
class TestConsumerShow(TestOAuth1):
def setUp(self):
super(TestConsumerShow, self).setUp()
consumer_no_secret = copy.deepcopy(identity_fakes.OAUTH_CONSUMER)
del consumer_no_secret['secret']
self.consumers_mock.get.return_value = fakes.FakeResource(
None,
consumer_no_secret,
loaded=True,
)
# Get the command object to test
self.cmd = consumer.ShowConsumer(self.app, None)
def test_consumer_show(self):
arglist = [
identity_fakes.consumer_id,
]
verifylist = [
('consumer', identity_fakes.consumer_id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.consumers_mock.get.assert_called_with(
identity_fakes.consumer_id,
)
collist = ('description', 'id')
self.assertEqual(collist, columns)
datalist = (
identity_fakes.consumer_description,
identity_fakes.consumer_id,
)
self.assertEqual(datalist, data)
|
openstack/python-openstackclient
|
openstackclient/tests/unit/identity/v3/test_consumer.py
|
Python
|
apache-2.0
| 6,837
|
#!/usr/bin/env python
from os.path import join, dirname
from cloudify import ctx
ctx.download_resource(
join('components', 'utils.py'),
join(dirname(__file__), 'utils.py'))
import utils # NOQA
PS_SERVICE_NAME = 'postgresql-9.5'
ctx_properties = utils.CtxPropertyFactory().get(PS_SERVICE_NAME)
utils.systemd.stop(PS_SERVICE_NAME, append_prefix=False)
|
Cloudify-PS/cloudify-manager-blueprints
|
components/postgresql/scripts/stop.py
|
Python
|
apache-2.0
| 364
|
"""Auto-generated file, do not edit by hand. AR metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_AR = PhoneMetadata(id='AR', country_code=54, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='11\\d{8}|[2368]\\d{9}|9\\d{10}', possible_number_pattern='\\d{6,11}'),
fixed_line=PhoneNumberDesc(national_number_pattern='11\\d{8}|(?:2(?:2(?:[013]\\d|2[13-79]|4[1-6]|5[2457]|6[124-8]|7[1-4]|8[13-6]|9[1267])|3(?:1[467]|2[03-6]|3[13-8]|[49][2-6]|5[2-8]|[067]\\d)|4(?:7[3-8]|9\\d)|6(?:[01346]\\d|2[24-6]|5[15-8])|80\\d|9(?:[0124789]\\d|3[1-6]|5[234]|6[2-46]))|3(?:3(?:2[79]|6\\d|8[2578])|4(?:[78]\\d|0[0124-9]|[1-35]\\d|4[24-7]|6[02-9]|9[123678])|5(?:[138]\\d|2[1245]|4[1-9]|6[2-4]|7[1-6])|6[24]\\d|7(?:[0469]\\d|1[1568]|2[013-9]|3[145]|5[14-8]|7[2-57]|8[0-24-9])|8(?:[013578]\\d|2[15-7]|4[13-6]|6[1-357-9]|9[124]))|670\\d)\\d{6}', possible_number_pattern='\\d{6,10}', example_number='1123456789'),
mobile=PhoneNumberDesc(national_number_pattern='675\\d{7}|9(?:11[2-9]\\d{7}|(?:2(?:2[013]|3[067]|49|6[01346]|80|9[147-9])|3(?:36|4[12358]|5[138]|6[24]|7[069]|8[013578]))[2-9]\\d{6}|\\d{4}[2-9]\\d{5})', possible_number_pattern='\\d{6,11}', example_number='91123456789'),
toll_free=PhoneNumberDesc(national_number_pattern='800\\d{7}', possible_number_pattern='\\d{10}', example_number='8001234567'),
premium_rate=PhoneNumberDesc(national_number_pattern='60[04579]\\d{7}', possible_number_pattern='\\d{10}', example_number='6001234567'),
shared_cost=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
personal_number=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voip=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
uan=PhoneNumberDesc(national_number_pattern='810\\d{7}', possible_number_pattern='\\d{10}', example_number='8101234567'),
voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
no_international_dialling=PhoneNumberDesc(national_number_pattern='810\\d{7}', possible_number_pattern='\\d{10}', example_number='8101234567'),
national_prefix='0',
national_prefix_for_parsing='0?(?:(11|2(?:2(?:02?|[13]|2[13-79]|4[1-6]|5[2457]|6[124-8]|7[1-4]|8[13-6]|9[1267])|3(?:02?|1[467]|2[03-6]|3[13-8]|[49][2-6]|5[2-8]|[67])|4(?:7[3-578]|9)|6(?:[0136]|2[24-6]|4[6-8]?|5[15-8])|80|9(?:0[1-3]|[19]|2\\d|3[1-6]|4[02568]?|5[2-4]|6[2-46]|72?|8[23]?))|3(?:3(?:2[79]|6|8[2578])|4(?:0[0-24-9]|[12]|3[5-8]?|4[24-7]|5[4-68]?|6[02-9]|7[126]|8[2379]?|9[1-36-8])|5(?:1|2[1245]|3[237]?|4[1-46-9]|6[2-4]|7[1-6]|8[2-5]?)|6[24]|7(?:[069]|1[1568]|2[15]|3[145]|4[13]|5[14-8]|7[2-57]|8[126])|8(?:[01]|2[15-7]|3[2578]?|4[13-6]|5[4-8]?|6[1-357-9]|7[36-8]?|8[5-8]?|9[124])))?15)?',
national_prefix_transform_rule='9\\1',
number_format=[NumberFormat(pattern='([68]\\d{2})(\\d{3})(\\d{4})', format='\\1-\\2-\\3', leading_digits_pattern=['[68]'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(\\d{2})(\\d{4})', format='\\1-\\2', leading_digits_pattern=['[2-9]'], national_prefix_formatting_rule='\\1'),
NumberFormat(pattern='(\\d{3})(\\d{4})', format='\\1-\\2', leading_digits_pattern=['[2-9]'], national_prefix_formatting_rule='\\1'),
NumberFormat(pattern='(\\d{4})(\\d{4})', format='\\1-\\2', leading_digits_pattern=['[2-9]'], national_prefix_formatting_rule='\\1'),
NumberFormat(pattern='(9)(11)(\\d{4})(\\d{4})', format='\\2 15-\\3-\\4', leading_digits_pattern=['911'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(9)(\\d{3})(\\d{3})(\\d{4})', format='\\2 15-\\3-\\4', leading_digits_pattern=['9(?:2[234689]|3[3-8])', '9(?:2(?:2[013]|3[067]|49|6[01346]|80|9[147-9])|3(?:36|4[1-358]|5[138]|6[24]|7[069]|8[013578]))', '9(?:2(?:2(?:0[013-9]|[13])|3(?:0[013-9]|[67])|49|6(?:[0136]|4[0-59])|8|9(?:[19]|44|7[013-9]|8[14]))|3(?:36|4(?:[12]|[358]4)|5(?:1|3[0-24-689]|8[46])|6|7[069]|8(?:[01]|34|[578][45])))'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(9)(\\d{4})(\\d{2})(\\d{4})', format='\\2 15-\\3-\\4', leading_digits_pattern=['9[23]'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(11)(\\d{4})(\\d{4})', format='\\1 \\2-\\3', leading_digits_pattern=['1'], national_prefix_formatting_rule='0\\1', national_prefix_optional_when_formatting=True),
NumberFormat(pattern='(\\d{3})(\\d{3})(\\d{4})', format='\\1 \\2-\\3', leading_digits_pattern=['2(?:2[013]|3[067]|49|6[01346]|80|9[147-9])|3(?:36|4[1-358]|5[138]|6[24]|7[069]|8[013578])', '2(?:2(?:0[013-9]|[13])|3(?:0[013-9]|[67])|49|6(?:[0136]|4[0-59])|8|9(?:[19]|44|7[013-9]|8[14]))|3(?:36|4(?:[12]|[358]4)|5(?:1|3[0-24-689]|8[46])|6|7[069]|8(?:[01]|34|[578][45]))'], national_prefix_formatting_rule='0\\1', national_prefix_optional_when_formatting=True),
NumberFormat(pattern='(\\d{4})(\\d{2})(\\d{4})', format='\\1 \\2-\\3', leading_digits_pattern=['[23]'], national_prefix_formatting_rule='0\\1', national_prefix_optional_when_formatting=True),
NumberFormat(pattern='(\\d{3})', format='\\1', leading_digits_pattern=['1[012]|911'], national_prefix_formatting_rule='\\1')],
intl_number_format=[NumberFormat(pattern='([68]\\d{2})(\\d{3})(\\d{4})', format='\\1-\\2-\\3', leading_digits_pattern=['[68]']),
NumberFormat(pattern='(9)(11)(\\d{4})(\\d{4})', format='\\1 \\2 \\3-\\4', leading_digits_pattern=['911']),
NumberFormat(pattern='(9)(\\d{3})(\\d{3})(\\d{4})', format='\\1 \\2 \\3-\\4', leading_digits_pattern=['9(?:2[234689]|3[3-8])', '9(?:2(?:2[013]|3[067]|49|6[01346]|80|9[147-9])|3(?:36|4[1-358]|5[138]|6[24]|7[069]|8[013578]))', '9(?:2(?:2(?:0[013-9]|[13])|3(?:0[013-9]|[67])|49|6(?:[0136]|4[0-59])|8|9(?:[19]|44|7[013-9]|8[14]))|3(?:36|4(?:[12]|[358]4)|5(?:1|3[0-24-689]|8[46])|6|7[069]|8(?:[01]|34|[578][45])))']),
NumberFormat(pattern='(9)(\\d{4})(\\d{2})(\\d{4})', format='\\1 \\2 \\3-\\4', leading_digits_pattern=['9[23]']),
NumberFormat(pattern='(11)(\\d{4})(\\d{4})', format='\\1 \\2-\\3', leading_digits_pattern=['1']),
NumberFormat(pattern='(\\d{3})(\\d{3})(\\d{4})', format='\\1 \\2-\\3', leading_digits_pattern=['2(?:2[013]|3[067]|49|6[01346]|80|9[147-9])|3(?:36|4[1-358]|5[138]|6[24]|7[069]|8[013578])', '2(?:2(?:0[013-9]|[13])|3(?:0[013-9]|[67])|49|6(?:[0136]|4[0-59])|8|9(?:[19]|44|7[013-9]|8[14]))|3(?:36|4(?:[12]|[358]4)|5(?:1|3[0-24-689]|8[46])|6|7[069]|8(?:[01]|34|[578][45]))']),
NumberFormat(pattern='(\\d{4})(\\d{2})(\\d{4})', format='\\1 \\2-\\3', leading_digits_pattern=['[23]'])],
mobile_number_portable_region=True)
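# ---------------------------------------------------------------------------
# A minimal, hedged usage sketch (editor addition): how metadata such as the
# above is consumed through the public phonenumbers API. The sample number is
# illustrative only and mirrors the AR mobile example pattern.
if __name__ == '__main__':
    import phonenumbers
    numobj = phonenumbers.parse("+54 9 11 2345-6789", None)
    print(phonenumbers.is_valid_number(numobj))
    print(phonenumbers.format_number(numobj, phonenumbers.PhoneNumberFormat.NATIONAL))
    print(phonenumbers.format_number(numobj, phonenumbers.PhoneNumberFormat.E164))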
|
shikigit/python-phonenumbers
|
python/phonenumbers/data/region_AR.py
|
Python
|
apache-2.0
| 6,716
|
import copy
import mock
import unittest
from see import Hook
from see import hooks
CONFIG = {'configuration': {'key': 'value'},
'hooks':
[{'name': 'see.test.hooks_manager_test.TestHook',
'configuration': {'foo': 'bar'}},
{'name': 'see.test.hooks_manager_test.TestHookCleanup'}]}
class TestHook(Hook):
def __init__(self, parameters):
super(TestHook, self).__init__(parameters)
self.cleaned = False
class TestHookCleanup(Hook):
def __init__(self, parameters):
super(TestHookCleanup, self).__init__(parameters)
self.cleaned = False
def cleanup(self):
self.cleaned = True
class HookManagerLoadTest(unittest.TestCase):
def setUp(self):
self.hook_manager = hooks.HookManager('foo', copy.deepcopy(CONFIG))
def test_load_hooks(self):
"""TestHook is loaded into HookManager."""
context = mock.MagicMock()
self.hook_manager.load_hooks(context)
self.assertEqual(self.hook_manager.hooks[0].__class__.__name__,
'TestHook')
def test_load_hooks_configuration(self):
"""Generic configuration are available in TestHook."""
context = mock.MagicMock()
self.hook_manager.load_hooks(context)
self.assertTrue('key' in self.hook_manager.hooks[0].configuration)
def test_load_hooks_specific_configuration(self):
"""Specific configuration are available in TestHook."""
context = mock.MagicMock()
self.hook_manager.load_hooks(context)
self.assertTrue('foo' in self.hook_manager.hooks[0].configuration)
def test_load_non_existing_hook(self):
"""Wrong Hooks are not loaded."""
context = mock.MagicMock()
config = copy.deepcopy(CONFIG)
config['hooks'][0]['name'] = 'foo'
config['hooks'][1]['name'] = 'bar'
hm = hooks.HookManager('foo', config)
hm.load_hooks(context)
self.assertEqual(len(hm.hooks), 0)
def test_load_missing_name(self):
"""Wrong Hooks are not loaded."""
context = mock.MagicMock()
config = copy.deepcopy(CONFIG)
del config['hooks'][0]['name']
hm = hooks.HookManager('foo', config)
hm.load_hooks(context)
self.assertEqual(len(hm.hooks), 1)
class HooksManagerCleanupTest(unittest.TestCase):
def setUp(self):
self.hook_manager = hooks.HookManager('foo', copy.deepcopy(CONFIG))
def test_cleanup(self):
"""Cleanup is performed if specified."""
context = mock.MagicMock()
self.hook_manager.load_hooks(context)
hook = self.hook_manager.hooks[1]
self.hook_manager.cleanup()
self.assertTrue(hook.cleaned)
def test_no_cleanup(self):
"""Cleanup is not performed if not specified."""
context = mock.MagicMock()
self.hook_manager.load_hooks(context)
hook = self.hook_manager.hooks[0]
self.hook_manager.cleanup()
self.assertFalse(hook.cleaned)
|
F-Secure/see
|
see/test/hooks_manager_test.py
|
Python
|
apache-2.0
| 3,020
|
# Copyright 2015-2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import pytest
def get_file_contents(file_path):
file = open(file_path)
file_contents = file.read()
file.close()
return file_contents
def cleanup_stack_if_exists(heat_client, template_name):
stacks = heat_client.stacks.list()
for stack in stacks:
if stack.stack_name == template_name:
heat_client.delete_stack(stack.id)
@pytest.fixture
def HeatStack(heatclientmanager, request):
'''Fixture for creating/deleting a heat stack.'''
def manage_stack(
template_file,
stack_name,
parameters={},
teardown=True,
expect_fail=False
):
def test_teardown():
heatclientmanager.delete_stack(stack.id)
template = get_file_contents(template_file)
config = {}
config['stack_name'] = stack_name
config['template'] = template
config['parameters'] = parameters
# Call delete before create, in case previous teardown failed
cleanup_stack_if_exists(heatclientmanager, stack_name)
target_status = 'CREATE_COMPLETE'
if expect_fail:
target_status = 'CREATE_FAILED'
stack = heatclientmanager.create_stack(
config,
target_status=target_status
)
if teardown:
request.addfinalizer(test_teardown)
return heatclientmanager, stack
return manage_stack
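# Hedged usage sketch; the template path, stack name and parameters are
# illustrative assumptions, not part of this module.
#   def test_base_stack(HeatStack):
#       hc, stack = HeatStack('templates/base.yaml', 'func_test_stack',
#                             parameters={'flavor': 'm1.small'})
#       assert stack.stack_status == 'CREATE_COMPLETE'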
|
pjbreaux/f5-openstack-test
|
f5_os_test/heat_client_utils.py
|
Python
|
apache-2.0
| 2,006
|
#
# Copyright 2013 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for ceilometer/alarm/partition/coordination.py
"""
import datetime
import logging
import uuid
import mock
from oslo_config import fixture as fixture_config
from oslo_utils import timeutils
from six import moves
from ceilometer.alarm.partition import coordination
from ceilometer.alarm.storage import models
from ceilometer.tests import base as tests_base
from ceilometer.tests import constants
class MockLoggingHandler(logging.Handler):
"""Mock logging handler to check for expected logs."""
def __init__(self, *args, **kwargs):
self.reset()
logging.Handler.__init__(self, *args, **kwargs)
def emit(self, record):
self.messages[record.levelname.lower()].append(record.getMessage())
def reset(self):
self.messages = {'debug': [],
'info': [],
'warning': [],
'error': [],
'critical': []}
class TestCoordinate(tests_base.BaseTestCase):
def setUp(self):
super(TestCoordinate, self).setUp()
self.CONF = self.useFixture(fixture_config.Config()).conf
self.setup_messaging(self.CONF)
self.test_interval = 120
self.CONF.import_opt('evaluation_interval',
'ceilometer.alarm.service',
group='alarm')
self.CONF.set_override('evaluation_interval',
self.test_interval,
group='alarm')
self.api_client = mock.Mock()
self.override_start = datetime.datetime(2012, 7, 2, 10, 45)
patcher = mock.patch.object(timeutils, 'utcnow')
self.addCleanup(patcher.stop)
self.mock_utcnow = patcher.start()
self.mock_utcnow.return_value = self.override_start
self.partition_coordinator = coordination.PartitionCoordinator()
self.partition_coordinator.coordination_rpc = mock.Mock()
# add extra logger to check exception conditions and logged content
self.str_handler = MockLoggingHandler()
coordination.LOG.logger.addHandler(self.str_handler)
def tearDown(self):
super(TestCoordinate, self).tearDown()
# clean up the logger
coordination.LOG.logger.removeHandler(self.str_handler)
self.str_handler.close()
def _no_alarms(self):
self.api_client.alarms.list.return_value = []
def _some_alarms(self, count):
alarm_ids = [str(uuid.uuid4()) for _ in moves.xrange(count)]
alarms = [self._make_alarm(aid) for aid in alarm_ids]
self.api_client.alarms.list.return_value = alarms
return alarm_ids
def _current_alarms(self):
return self.api_client.alarms.list.return_value
def _dump_alarms(self, shave):
alarms = self.api_client.alarms.list.return_value
alarms = alarms[:shave]
alarm_ids = [a.alarm_id for a in alarms]
self.api_client.alarms.list.return_value = alarms
return alarm_ids
def _add_alarms(self, boost):
new_alarm_ids = [str(uuid.uuid4()) for _ in moves.xrange(boost)]
alarms = self.api_client.alarms.list.return_value
for aid in new_alarm_ids:
alarms.append(self._make_alarm(aid))
self.api_client.alarms.list.return_value = alarms
return new_alarm_ids
@staticmethod
def _make_alarm(uuid):
return models.Alarm(name='instance_running_hot',
type='threshold',
user_id='foobar',
project_id='snafu',
enabled=True,
description='',
repeat_actions=False,
state='insufficient data',
state_timestamp=constants.MIN_DATETIME,
timestamp=constants.MIN_DATETIME,
ok_actions=[],
alarm_actions=[],
insufficient_data_actions=[],
alarm_id=uuid,
severity='critical',
time_constraints=[],
rule=dict(
statistic='avg',
comparison_operator='gt',
threshold=80.0,
evaluation_periods=5,
period=60,
query=[],
))
def _advance_time(self, factor):
delta = datetime.timedelta(seconds=self.test_interval * factor)
self.mock_utcnow.return_value = timeutils.utcnow() + delta
def _younger_by(self, offset):
return self.partition_coordinator.this.priority + offset
def _older_by(self, offset):
return self.partition_coordinator.this.priority - offset
def _check_mastership(self, expected):
self.partition_coordinator.check_mastership(self.test_interval,
self.api_client)
self.assertEqual(expected, self.partition_coordinator.is_master)
def _new_partition(self, offset):
younger = self._younger_by(offset)
pid = uuid.uuid4()
self.partition_coordinator.presence(pid, younger)
return pid, younger
def _check_assignments(self, others, alarm_ids, per_worker,
expect_uneffected=None):
rpc = self.partition_coordinator.coordination_rpc
calls = rpc.assign.call_args_list
return self._check_distribution(others, alarm_ids, per_worker, calls,
expect_uneffected or [])
def _check_allocation(self, others, alarm_ids, per_worker):
rpc = self.partition_coordinator.coordination_rpc
calls = rpc.allocate.call_args_list
return self._check_distribution(others, alarm_ids, per_worker, calls)
def _check_distribution(self, others, alarm_ids, per_worker, calls,
expect_uneffected=None):
expect_uneffected = expect_uneffected or []
uneffected = [pid for pid, _ in others]
uneffected.extend(expect_uneffected)
remainder = list(alarm_ids)
for call in calls:
args, _ = call
target, alarms = args
self.assertIn(target, uneffected)
uneffected.remove(target)
self.assertEqual(per_worker, len(alarms))
for aid in alarms:
self.assertIn(aid, remainder)
remainder.remove(aid)
self.assertEqual(set(expect_uneffected), set(uneffected))
return remainder
def _forget_assignments(self, expected_assignments):
rpc = self.partition_coordinator.coordination_rpc
self.assertEqual(expected_assignments, len(rpc.assign.call_args_list))
rpc.reset_mock()
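    # The warm-up behaviour checked below spans two evaluation intervals:
    # seven advances of 0.25 intervals (1.75 in total) still refuse
    # mastership, and the eighth advance (2.0 intervals) allows it.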
def test_mastership_not_assumed_during_warmup(self):
self._no_alarms()
for _ in moves.xrange(7):
# still warming up
self._advance_time(0.25)
self._check_mastership(False)
# now warmed up
self._advance_time(0.25)
self._check_mastership(True)
def test_uncontested_mastership_assumed(self):
self._no_alarms()
self._advance_time(3)
self._check_mastership(True)
def test_contested_mastership_assumed(self):
self._no_alarms()
self._advance_time(3)
for offset in moves.xrange(1, 5):
younger = self._younger_by(offset)
self.partition_coordinator.presence(uuid.uuid4(), younger)
self._check_mastership(True)
def test_bested_mastership_relinquished(self):
self._no_alarms()
self._advance_time(3)
self._check_mastership(True)
older = self._older_by(1)
self.partition_coordinator.presence(uuid.uuid4(), older)
self._check_mastership(False)
def _do_test_tie_broken_mastership(self, seed, expect_mastership):
self._no_alarms()
self.partition_coordinator.this.uuid = uuid.UUID(int=1)
self._advance_time(3)
self._check_mastership(True)
tied = self.partition_coordinator.this.priority
self.partition_coordinator.presence(uuid.UUID(int=seed), tied)
self._check_mastership(expect_mastership)
def test_tie_broken_mastership_assumed(self):
self._do_test_tie_broken_mastership(2, True)
def test_tie_broken_mastership_relinquished(self):
self._do_test_tie_broken_mastership(0, False)
def test_fair_distribution(self):
alarm_ids = self._some_alarms(49)
self._advance_time(3)
others = [self._new_partition(i) for i in moves.xrange(1, 5)]
self._check_mastership(True)
remainder = self._check_assignments(others, alarm_ids, 10)
self.assertEqual(set(self.partition_coordinator.assignment),
set(remainder))
def test_rebalance_on_partition_startup(self):
alarm_ids = self._some_alarms(49)
self._advance_time(3)
others = [self._new_partition(i) for i in moves.xrange(1, 5)]
self._check_mastership(True)
        self._forget_assignments(4)
others.append(self._new_partition(5))
self._check_mastership(True)
remainder = self._check_assignments(others, alarm_ids, 9)
self.assertEqual(set(self.partition_coordinator.assignment),
set(remainder))
def test_rebalance_on_partition_staleness(self):
alarm_ids = self._some_alarms(49)
self._advance_time(3)
others = [self._new_partition(i) for i in moves.xrange(1, 5)]
self._check_mastership(True)
        self._forget_assignments(4)
self._advance_time(4)
stale, _ = others.pop()
for pid, younger in others:
self.partition_coordinator.presence(pid, younger)
self._check_mastership(True)
remainder = self._check_assignments(others, alarm_ids, 13, [stale])
self.assertEqual(set(self.partition_coordinator.assignment),
set(remainder))
def test_rebalance_on_sufficient_deletion(self):
alarm_ids = self._some_alarms(49)
self._advance_time(3)
others = [self._new_partition(i) for i in moves.xrange(1, 5)]
self._check_mastership(True)
self._forget_assignments(4)
alarm_ids = self._dump_alarms(len(alarm_ids) / 2)
self._check_mastership(True)
remainder = self._check_assignments(others, alarm_ids, 5)
self.assertEqual(set(self.partition_coordinator.assignment),
set(remainder))
def test_no_rebalance_on_insufficient_deletion(self):
alarm_ids = self._some_alarms(49)
self._advance_time(3)
others = [self._new_partition(i) for i in moves.xrange(1, 5)]
self._check_mastership(True)
self._forget_assignments(4)
alarm_ids = self._dump_alarms(45)
self._check_mastership(True)
expect_uneffected = [pid for pid, _ in others]
self._check_assignments(others, alarm_ids, 10, expect_uneffected)
def test_no_rebalance_on_creation(self):
self._some_alarms(49)
self._advance_time(3)
others = [self._new_partition(i) for i in moves.xrange(1, 5)]
self._check_mastership(True)
self._forget_assignments(4)
new_alarm_ids = self._add_alarms(8)
master_assignment = set(self.partition_coordinator.assignment)
self._check_mastership(True)
remainder = self._check_allocation(others, new_alarm_ids, 2)
self.assertEqual(0, len(remainder))
self.assertEqual(set(self.partition_coordinator.assignment),
master_assignment)
def test_bail_when_overtaken_in_distribution(self):
self._some_alarms(49)
self._advance_time(3)
for i in moves.xrange(1, 5):
self._new_partition(i)
def overtake(*args):
self._new_partition(-1)
rpc = self.partition_coordinator.coordination_rpc
rpc.assign.side_effect = overtake
self._check_mastership(False)
self.assertEqual(1, len(rpc.assign.call_args_list))
def test_assigned_alarms_no_assignment(self):
alarms = self.partition_coordinator.assigned_alarms(self.api_client)
self.assertEqual(0, len(alarms))
def test_assigned_alarms_assignment(self):
alarm_ids = self._some_alarms(6)
uuid = self.partition_coordinator.this.uuid
self.partition_coordinator.assign(uuid, alarm_ids)
alarms = self.partition_coordinator.assigned_alarms(self.api_client)
self.assertEqual(self._current_alarms(), alarms)
def test_assigned_alarms_allocation(self):
alarm_ids = self._some_alarms(6)
uuid = self.partition_coordinator.this.uuid
self.partition_coordinator.assign(uuid, alarm_ids)
new_alarm_ids = self._add_alarms(2)
self.partition_coordinator.allocate(uuid, new_alarm_ids)
alarms = self.partition_coordinator.assigned_alarms(self.api_client)
self.assertEqual(self._current_alarms(), alarms)
def test_assigned_alarms_deleted_assignment(self):
alarm_ids = self._some_alarms(6)
uuid = self.partition_coordinator.this.uuid
self.partition_coordinator.assign(uuid, alarm_ids)
self._dump_alarms(len(alarm_ids) / 2)
alarms = self.partition_coordinator.assigned_alarms(self.api_client)
self.assertEqual(self._current_alarms(), alarms)
def test__record_oldest(self):
# Test when the partition to be recorded is the same as the oldest.
self.partition_coordinator._record_oldest(
self.partition_coordinator.oldest, True)
self.assertIsNone(self.partition_coordinator.oldest)
def test_check_mastership(self):
# Test the method exception condition.
self.partition_coordinator._is_master = mock.Mock(
side_effect=Exception('Boom!'))
self.partition_coordinator.check_mastership(10, None)
self.assertIn('mastership check failed',
self.str_handler.messages['error'])
def test_report_presence(self):
self.partition_coordinator.coordination_rpc.presence = mock.Mock(
side_effect=Exception('Boom!'))
self.partition_coordinator.report_presence()
self.assertIn('presence reporting failed',
self.str_handler.messages['error'])
def test_assigned_alarms(self):
api_client = mock.MagicMock()
api_client.alarms.list = mock.Mock(side_effect=Exception('Boom!'))
self.partition_coordinator.assignment = ['something']
self.partition_coordinator.assigned_alarms(api_client)
self.assertIn('assignment retrieval failed',
self.str_handler.messages['error'])
class TestPartitionIdentity(tests_base.BaseTestCase):
def setUp(self):
super(TestPartitionIdentity, self).setUp()
self.id_1st = coordination.PartitionIdentity(str(uuid.uuid4()), 1)
self.id_2nd = coordination.PartitionIdentity(str(uuid.uuid4()), 2)
def test_identity_ops(self):
self.assertNotEqual(self.id_1st, 'Nothing')
self.assertNotEqual(self.id_1st, self.id_2nd)
self.assertTrue(self.id_1st < None)
self.assertFalse(self.id_1st < 'Nothing')
self.assertTrue(self.id_2nd > self.id_1st)
|
Juniper/ceilometer
|
ceilometer/tests/alarm/partition/test_coordination.py
|
Python
|
apache-2.0
| 16,186
|
# Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import uuid
from oslo.config import cfg
import six
from sahara import conductor as c
from sahara import context
from sahara.service.edp import base_engine
from sahara.service.edp.binary_retrievers import dispatch
from sahara.service.edp import hdfs_helper as h
from sahara.service.edp import job_utils
from sahara.service.edp.oozie import oozie as o
from sahara.service.edp.oozie.workflow_creator import workflow_factory
from sahara.service.validations.edp import job_execution as j
from sahara.utils import edp
from sahara.utils import remote
from sahara.utils import xmlutils as x
CONF = cfg.CONF
conductor = c.API
@six.add_metaclass(abc.ABCMeta)
class OozieJobEngine(base_engine.JobEngine):
def __init__(self, cluster):
self.cluster = cluster
self.plugin = job_utils.get_plugin(self.cluster)
def _get_client(self):
return o.OozieClient(self.get_oozie_server_uri(self.cluster),
self.get_oozie_server(self.cluster))
def _get_oozie_job_params(self, hdfs_user, path_to_workflow):
rm_path = self.get_resource_manager_uri(self.cluster)
nn_path = self.get_name_node_uri(self.cluster)
job_parameters = {
"jobTracker": rm_path,
"nameNode": nn_path,
"user.name": hdfs_user,
"oozie.wf.application.path": "%s%s" % (nn_path, path_to_workflow),
"oozie.use.system.libpath": "true"}
return job_parameters
def _upload_workflow_file(self, where, job_dir, wf_xml, hdfs_user):
with remote.get_remote(where) as r:
h.put_file_to_hdfs(r, wf_xml, "workflow.xml", job_dir, hdfs_user)
return "%s/workflow.xml" % job_dir
def cancel_job(self, job_execution):
if job_execution.oozie_job_id is not None:
client = self._get_client()
client.kill_job(job_execution)
return client.get_job_status(job_execution)
def get_job_status(self, job_execution):
if job_execution.oozie_job_id is not None:
return self._get_client().get_job_status(job_execution)
def run_job(self, job_execution):
ctx = context.ctx()
job = conductor.job_get(ctx, job_execution.job_id)
input_source, output_source = job_utils.get_data_sources(job_execution,
job)
proxy_configs = job_execution.job_configs.get('proxy_configs')
for data_source in [input_source, output_source]:
if data_source and data_source.type == 'hdfs':
h.configure_cluster_for_hdfs(self.cluster, data_source)
break
hdfs_user = self.get_hdfs_user()
# TODO(tmckay): this should probably be "get_namenode"
# but that call does not exist in the oozie engine api now.
oozie_server = self.get_oozie_server(self.cluster)
wf_dir = self._create_hdfs_workflow_dir(oozie_server, job)
self._upload_job_files_to_hdfs(oozie_server, wf_dir, job,
proxy_configs)
wf_xml = workflow_factory.get_workflow_xml(
job, self.cluster, job_execution, input_source, output_source,
hdfs_user)
path_to_workflow = self._upload_workflow_file(oozie_server, wf_dir,
wf_xml, hdfs_user)
job_params = self._get_oozie_job_params(hdfs_user,
path_to_workflow)
client = self._get_client()
oozie_job_id = client.add_job(x.create_hadoop_xml(job_params),
job_execution)
job_execution = conductor.job_execution_get(ctx, job_execution.id)
if job_execution.info['status'] == edp.JOB_STATUS_TOBEKILLED:
return (None, edp.JOB_STATUS_KILLED, None)
client.run_job(job_execution, oozie_job_id)
try:
status = client.get_job_status(job_execution,
oozie_job_id)['status']
except Exception:
status = None
return (oozie_job_id, status, None)
@abc.abstractmethod
def get_hdfs_user(self):
pass
@abc.abstractmethod
def create_hdfs_dir(self, remote, dir_name):
pass
@abc.abstractmethod
def get_oozie_server_uri(self, cluster):
pass
@abc.abstractmethod
def get_oozie_server(self, cluster):
pass
@abc.abstractmethod
def get_name_node_uri(self, cluster):
pass
@abc.abstractmethod
def get_resource_manager_uri(self, cluster):
pass
def validate_job_execution(self, cluster, job, data):
# All types except Java require input and output objects
# and Java require main class
if job.type in [edp.JOB_TYPE_JAVA]:
j.check_main_class_present(data, job)
else:
j.check_data_sources(data, job)
job_type, subtype = edp.split_job_type(job.type)
if job_type == edp.JOB_TYPE_MAPREDUCE and (
subtype == edp.JOB_SUBTYPE_STREAMING):
j.check_streaming_present(data, job)
@staticmethod
def get_possible_job_config(job_type):
return workflow_factory.get_possible_job_config(job_type)
@staticmethod
def get_supported_job_types():
return [edp.JOB_TYPE_HIVE,
edp.JOB_TYPE_JAVA,
edp.JOB_TYPE_MAPREDUCE,
edp.JOB_TYPE_MAPREDUCE_STREAMING,
edp.JOB_TYPE_PIG]
def _upload_job_files_to_hdfs(self, where, job_dir, job,
proxy_configs=None):
mains = job.mains or []
libs = job.libs or []
uploaded_paths = []
hdfs_user = self.get_hdfs_user()
with remote.get_remote(where) as r:
for main in mains:
raw_data = dispatch.get_raw_binary(main, proxy_configs)
h.put_file_to_hdfs(r, raw_data, main.name, job_dir, hdfs_user)
uploaded_paths.append(job_dir + '/' + main.name)
for lib in libs:
raw_data = dispatch.get_raw_binary(lib, proxy_configs)
# HDFS 2.2.0 fails to put file if the lib dir does not exist
self.create_hdfs_dir(r, job_dir + "/lib")
h.put_file_to_hdfs(r, raw_data, lib.name, job_dir + "/lib",
hdfs_user)
uploaded_paths.append(job_dir + '/lib/' + lib.name)
return uploaded_paths
def _create_hdfs_workflow_dir(self, where, job):
constructed_dir = '/user/%s/' % self.get_hdfs_user()
constructed_dir = self._add_postfix(constructed_dir)
constructed_dir += '%s/%s' % (job.name, six.text_type(uuid.uuid4()))
with remote.get_remote(where) as r:
self.create_hdfs_dir(r, constructed_dir)
return constructed_dir
def _add_postfix(self, constructed_dir):
def _append_slash_if_needed(path):
if path[-1] != '/':
path += '/'
return path
constructed_dir = _append_slash_if_needed(constructed_dir)
if CONF.job_workflow_postfix:
constructed_dir = ''.join([str(constructed_dir),
str(CONF.job_workflow_postfix)])
return _append_slash_if_needed(constructed_dir)
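# Hedged sketch, not part of this module: a plugin supplies a concrete engine
# by implementing the abstract hooks above. The user name, port and attribute
# names below are illustrative assumptions.
#   class MyPluginOozieEngine(OozieJobEngine):
#       def get_hdfs_user(self):
#           return "hadoop"
#       def get_oozie_server_uri(self, cluster):
#           return "http://%s:11000/oozie" % (
#               self.get_oozie_server(cluster).management_ip)
#       # create_hdfs_dir, get_oozie_server, get_name_node_uri and
#       # get_resource_manager_uri would be filled in the same way.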
|
citrix-openstack-build/sahara
|
sahara/service/edp/oozie/engine.py
|
Python
|
apache-2.0
| 7,998
|
# Copyright 2017 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for interacting with the Google Cloud Firestore API.
This is the base from which all interactions with the API occur.
In the hierarchy of API concepts
* a :class:`~.firestore_v1beta1.client.Client` owns a
:class:`~.firestore_v1beta1.collection.CollectionReference`
* a :class:`~.firestore_v1beta1.client.Client` owns a
:class:`~.firestore_v1beta1.document.DocumentReference`
"""
from google.cloud.client import ClientWithProject
from google.cloud.firestore_v1beta1 import _helpers
from google.cloud.firestore_v1beta1 import types
from google.cloud.firestore_v1beta1.batch import WriteBatch
from google.cloud.firestore_v1beta1.collection import CollectionReference
from google.cloud.firestore_v1beta1.document import DocumentReference
from google.cloud.firestore_v1beta1.document import DocumentSnapshot
from google.cloud.firestore_v1beta1.gapic import firestore_client
from google.cloud.firestore_v1beta1.transaction import Transaction
DEFAULT_DATABASE = "(default)"
"""str: The default database used in a :class:`~.firestore.client.Client`."""
_BAD_OPTION_ERR = (
"Exactly one of ``last_update_time`` or ``exists`` " "must be provided."
)
_BAD_DOC_TEMPLATE = (
"Document {!r} appeared in response but was not present among references"
)
_ACTIVE_TXN = "There is already an active transaction."
_INACTIVE_TXN = "There is no active transaction."
class Client(ClientWithProject):
"""Client for interacting with Google Cloud Firestore API.
.. note::
Since the Cloud Firestore API requires the gRPC transport, no
``_http`` argument is accepted by this class.
Args:
project (Optional[str]): The project which the client acts on behalf
of. If not passed, falls back to the default inferred
from the environment.
credentials (Optional[~google.auth.credentials.Credentials]): The
OAuth2 Credentials to use for this client. If not passed, falls
back to the default inferred from the environment.
database (Optional[str]): The database name that the client targets.
For now, :attr:`DEFAULT_DATABASE` (the default value) is the
only valid database.
"""
SCOPE = (
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/datastore",
)
"""The scopes required for authenticating with the Firestore service."""
_firestore_api_internal = None
_database_string_internal = None
_rpc_metadata_internal = None
def __init__(self, project=None, credentials=None, database=DEFAULT_DATABASE):
# NOTE: This API has no use for the _http argument, but sending it
# will have no impact since the _http() @property only lazily
# creates a working HTTP object.
super(Client, self).__init__(
project=project, credentials=credentials, _http=None
)
self._database = database
@property
def _firestore_api(self):
"""Lazy-loading getter GAPIC Firestore API.
Returns:
~.gapic.firestore.v1beta1.firestore_client.FirestoreClient: The
GAPIC client with the credentials of the current client.
"""
if self._firestore_api_internal is None:
self._firestore_api_internal = firestore_client.FirestoreClient(
credentials=self._credentials
)
return self._firestore_api_internal
@property
def _database_string(self):
"""The database string corresponding to this client's project.
This value is lazy-loaded and cached.
Will be of the form
``projects/{project_id}/databases/{database_id}``
but ``database_id == '(default)'`` for the time being.
Returns:
str: The fully-qualified database string for the current
project. (The default database is also in this string.)
"""
if self._database_string_internal is None:
# NOTE: database_root_path() is a classmethod, so we don't use
# self._firestore_api (it isn't necessary).
db_str = firestore_client.FirestoreClient.database_root_path(
self.project, self._database
)
self._database_string_internal = db_str
return self._database_string_internal
@property
def _rpc_metadata(self):
"""The RPC metadata for this client's associated database.
Returns:
Sequence[Tuple(str, str)]: RPC metadata with resource prefix
for the database associated with this client.
"""
if self._rpc_metadata_internal is None:
self._rpc_metadata_internal = _helpers.metadata_with_prefix(
self._database_string
)
return self._rpc_metadata_internal
def collection(self, *collection_path):
"""Get a reference to a collection.
For a top-level collection:
.. code-block:: python
>>> client.collection('top')
For a sub-collection:
.. code-block:: python
>>> client.collection('mydocs/doc/subcol')
>>> # is the same as
>>> client.collection('mydocs', 'doc', 'subcol')
Sub-collections can be nested deeper in a similar fashion.
Args:
collection_path (Tuple[str, ...]): Can either be
* A single ``/``-delimited path to a collection
* A tuple of collection path segments
Returns:
~.firestore_v1beta1.collection.CollectionReference: A reference
to a collection in the Firestore database.
"""
if len(collection_path) == 1:
path = collection_path[0].split(_helpers.DOCUMENT_PATH_DELIMITER)
else:
path = collection_path
return CollectionReference(*path, client=self)
def document(self, *document_path):
"""Get a reference to a document in a collection.
For a top-level document:
.. code-block:: python
>>> client.document('collek/shun')
>>> # is the same as
>>> client.document('collek', 'shun')
For a document in a sub-collection:
.. code-block:: python
>>> client.document('mydocs/doc/subcol/child')
>>> # is the same as
>>> client.document('mydocs', 'doc', 'subcol', 'child')
Documents in sub-collections can be nested deeper in a similar fashion.
Args:
document_path (Tuple[str, ...]): Can either be
* A single ``/``-delimited path to a document
* A tuple of document path segments
Returns:
~.firestore_v1beta1.document.DocumentReference: A reference
to a document in a collection.
"""
if len(document_path) == 1:
path = document_path[0].split(_helpers.DOCUMENT_PATH_DELIMITER)
else:
path = document_path
return DocumentReference(*path, client=self)
@staticmethod
def field_path(*field_names):
"""Create a **field path** from a list of nested field names.
A **field path** is a ``.``-delimited concatenation of the field
names. It is used to represent a nested field. For example,
in the data
.. code-block:: python
data = {
'aa': {
'bb': {
'cc': 10,
},
},
}
the field path ``'aa.bb.cc'`` represents the data stored in
``data['aa']['bb']['cc']``.
Args:
field_names (Tuple[str, ...]): The list of field names.
Returns:
str: The ``.``-delimited field path.
"""
return _helpers.get_field_path(field_names)
@staticmethod
def write_option(**kwargs):
"""Create a write option for write operations.
Write operations include :meth:`~.DocumentReference.set`,
:meth:`~.DocumentReference.update` and
:meth:`~.DocumentReference.delete`.
One of the following keyword arguments must be provided:
* ``last_update_time`` (:class:`google.protobuf.timestamp_pb2.\
Timestamp`): A timestamp. When set, the target document must
exist and have been last updated at that time. Protobuf
``update_time`` timestamps are typically returned from methods
that perform write operations as part of a "write result"
protobuf or directly.
* ``exists`` (:class:`bool`): Indicates if the document being modified
should already exist.
Providing no argument would make the option have no effect (so
it is not allowed). Providing multiple would be an apparent
contradiction, since ``last_update_time`` assumes that the
document **was** updated (it can't have been updated if it
        doesn't exist) and ``exists`` indicates that it is unknown if the
document exists or not.
Args:
kwargs (Dict[str, Any]): The keyword arguments described above.
Raises:
TypeError: If anything other than exactly one argument is
provided by the caller.
"""
if len(kwargs) != 1:
raise TypeError(_BAD_OPTION_ERR)
name, value = kwargs.popitem()
if name == "last_update_time":
return _helpers.LastUpdateOption(value)
elif name == "exists":
return _helpers.ExistsOption(value)
else:
extra = "{!r} was provided".format(name)
raise TypeError(_BAD_OPTION_ERR, extra)
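    # Hedged usage note (illustrative, not part of the original docstring):
    #     option = client.write_option(exists=True)
    #     document_ref.update({"population": 100}, option=option)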
def get_all(self, references, field_paths=None, transaction=None):
"""Retrieve a batch of documents.
.. note::
Documents returned by this method are not guaranteed to be
returned in the same order that they are given in ``references``.
.. note::
If multiple ``references`` refer to the same document, the server
will only return one result.
See :meth:`~.firestore_v1beta1.client.Client.field_path` for
more information on **field paths**.
If a ``transaction`` is used and it already has write operations
added, this method cannot be used (i.e. read-after-write is not
allowed).
Args:
references (List[.DocumentReference, ...]): Iterable of document
references to be retrieved.
field_paths (Optional[Iterable[str, ...]]): An iterable of field
paths (``.``-delimited list of field names) to use as a
projection of document fields in the returned results. If
no value is provided, all fields will be returned.
transaction (Optional[~.firestore_v1beta1.transaction.\
Transaction]): An existing transaction that these
``references`` will be retrieved in.
Yields:
.DocumentSnapshot: The next document snapshot that fulfills the
query, or :data:`None` if the document does not exist.
"""
document_paths, reference_map = _reference_info(references)
mask = _get_doc_mask(field_paths)
response_iterator = self._firestore_api.batch_get_documents(
self._database_string,
document_paths,
mask,
transaction=_helpers.get_transaction_id(transaction),
metadata=self._rpc_metadata,
)
for get_doc_response in response_iterator:
yield _parse_batch_get(get_doc_response, reference_map, self)
def collections(self):
"""List top-level collections of the client's database.
Returns:
Sequence[~.firestore_v1beta1.collection.CollectionReference]:
iterator of subcollections of the current document.
"""
iterator = self._firestore_api.list_collection_ids(
self._database_string, metadata=self._rpc_metadata
)
iterator.client = self
iterator.item_to_value = _item_to_collection_ref
return iterator
def batch(self):
"""Get a batch instance from this client.
Returns:
~.firestore_v1beta1.batch.WriteBatch: A "write" batch to be
used for accumulating document changes and sending the changes
all at once.
"""
return WriteBatch(self)
def transaction(self, **kwargs):
"""Get a transaction that uses this client.
See :class:`~.firestore_v1beta1.transaction.Transaction` for
more information on transactions and the constructor arguments.
Args:
kwargs (Dict[str, Any]): The keyword arguments (other than
``client``) to pass along to the
:class:`~.firestore_v1beta1.transaction.Transaction`
constructor.
Returns:
~.firestore_v1beta1.transaction.Transaction: A transaction
attached to this client.
"""
return Transaction(self, **kwargs)
def _reference_info(references):
"""Get information about document references.
Helper for :meth:`~.firestore_v1beta1.client.Client.get_all`.
Args:
references (List[.DocumentReference, ...]): Iterable of document
references.
Returns:
Tuple[List[str, ...], Dict[str, .DocumentReference]]: A two-tuple of
* fully-qualified documents paths for each reference in ``references``
* a mapping from the paths to the original reference. (If multiple
``references`` contains multiple references to the same document,
that key will be overwritten in the result.)
"""
document_paths = []
reference_map = {}
for reference in references:
doc_path = reference._document_path
document_paths.append(doc_path)
reference_map[doc_path] = reference
return document_paths, reference_map
def _get_reference(document_path, reference_map):
"""Get a document reference from a dictionary.
This just wraps a simple dictionary look-up with a helpful error that is
specific to :meth:`~.firestore.client.Client.get_all`, the
**public** caller of this function.
Args:
document_path (str): A fully-qualified document path.
reference_map (Dict[str, .DocumentReference]): A mapping (produced
by :func:`_reference_info`) of fully-qualified document paths to
document references.
Returns:
.DocumentReference: The matching reference.
Raises:
ValueError: If ``document_path`` has not been encountered.
"""
try:
return reference_map[document_path]
except KeyError:
msg = _BAD_DOC_TEMPLATE.format(document_path)
raise ValueError(msg)
def _parse_batch_get(get_doc_response, reference_map, client):
"""Parse a `BatchGetDocumentsResponse` protobuf.
Args:
get_doc_response (~google.cloud.proto.firestore.v1beta1.\
firestore_pb2.BatchGetDocumentsResponse): A single response (from
a stream) containing the "get" response for a document.
reference_map (Dict[str, .DocumentReference]): A mapping (produced
by :func:`_reference_info`) of fully-qualified document paths to
document references.
client (~.firestore_v1beta1.client.Client): A client that has
a document factory.
Returns:
[.DocumentSnapshot]: The retrieved snapshot.
Raises:
ValueError: If the response has a ``result`` field (a oneof) other
than ``found`` or ``missing``.
"""
result_type = get_doc_response.WhichOneof("result")
if result_type == "found":
reference = _get_reference(get_doc_response.found.name, reference_map)
data = _helpers.decode_dict(get_doc_response.found.fields, client)
snapshot = DocumentSnapshot(
reference,
data,
exists=True,
read_time=get_doc_response.read_time,
create_time=get_doc_response.found.create_time,
update_time=get_doc_response.found.update_time,
)
elif result_type == "missing":
snapshot = DocumentSnapshot(
None,
None,
exists=False,
read_time=get_doc_response.read_time,
create_time=None,
update_time=None,
)
else:
raise ValueError(
"`BatchGetDocumentsResponse.result` (a oneof) had a field other "
"than `found` or `missing` set, or was unset"
)
return snapshot
def _get_doc_mask(field_paths):
"""Get a document mask if field paths are provided.
Args:
field_paths (Optional[Iterable[str, ...]]): An iterable of field
paths (``.``-delimited list of field names) to use as a
projection of document fields in the returned results.
Returns:
Optional[google.cloud.firestore_v1beta1.types.DocumentMask]: A mask
to project documents to a restricted set of field paths.
"""
if field_paths is None:
return None
else:
return types.DocumentMask(field_paths=field_paths)
def _item_to_collection_ref(iterator, item):
"""Convert collection ID to collection ref.
Args:
iterator (google.api_core.page_iterator.GRPCIterator):
iterator response
item (str): ID of the collection
"""
return iterator.client.collection(item)
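# Hedged end-to-end sketch; the project ID and document path are illustrative:
#   client = Client(project="my-project")
#   doc_ref = client.document("users", "alice")
#   for snapshot in client.get_all([doc_ref]):
#       print(snapshot.exists, snapshot.to_dict() if snapshot.exists else None)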
|
dhermes/gcloud-python
|
firestore/google/cloud/firestore_v1beta1/client.py
|
Python
|
apache-2.0
| 18,210
|
# coding: utf-8
"""
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import lib_openshift
from lib_openshift.rest import ApiException
from lib_openshift.models.v1beta1_ingress import V1beta1Ingress
class TestV1beta1Ingress(unittest.TestCase):
""" V1beta1Ingress unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1beta1Ingress(self):
"""
Test V1beta1Ingress
"""
model = lib_openshift.models.v1beta1_ingress.V1beta1Ingress()
if __name__ == '__main__':
unittest.main()
|
detiber/lib_openshift
|
test/test_v1beta1_ingress.py
|
Python
|
apache-2.0
| 1,276
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import socket
from oslo.config import cfg
from oslo.utils import netutils
CONF = cfg.CONF
netconf_opts = [
cfg.StrOpt('my_ip',
default=netutils.get_my_ipv4(),
help='IP address of this host'),
cfg.StrOpt('my_block_storage_ip',
default='$my_ip',
help='Block storage IP address of this host'),
cfg.StrOpt('host',
default=socket.gethostname(),
help='Name of this node. This can be an opaque identifier. '
'It is not necessarily a hostname, FQDN, or IP address. '
'However, the node name must be valid within '
'an AMQP key, and if using ZeroMQ, a valid '
'hostname, FQDN, or IP address'),
cfg.BoolOpt('use_ipv6',
default=False,
help='Use IPv6'),
]
CONF.register_opts(netconf_opts)
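# Hedged usage note: once the options above are registered and the config
# files are parsed, other modules read them as CONF.my_ip, CONF.host and
# CONF.use_ipv6; the '$my_ip' default of my_block_storage_ip is interpolated
# by oslo.config from the resolved my_ip value.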
|
shakamunyi/nova
|
nova/netconf.py
|
Python
|
apache-2.0
| 1,665
|
import asyncio
import aiohttp
import aiohttp_jinja2
import jinja2
from aiohttp import web
from aiohttp_debugtoolbar import (middleware, setup as tbsetup, APP_KEY)
from .base import BaseTest
class TestExceptionViews(BaseTest):
@asyncio.coroutine
def _setup_app(self, handler, **kw):
app = web.Application(loop=self.loop,
middlewares=[middleware])
tbsetup(app, **kw)
tplt = "<html><body><h1>{{ head }}</h1>{{ text }}</body></html>"
loader = jinja2.DictLoader({'tplt.html': tplt})
aiohttp_jinja2.setup(app, loader=loader)
app.router.add_route('GET', '/', handler)
handler = app.make_handler()
srv = yield from self.loop.create_server(
handler, '127.0.0.1', self.port)
return app, srv, handler
def test_view_source(self):
@asyncio.coroutine
def func(request):
raise NotImplementedError
@asyncio.coroutine
def go():
app, srv, handler = yield from self._setup_app(func)
# make sure that exception page rendered
resp = yield from aiohttp.request('GET', self.url, loop=self.loop)
txt = yield from resp.text()
self.assertEqual(500, resp.status)
self.assertTrue('<div class="debugger">' in txt)
token = app[APP_KEY]['pdtb_token']
exc_history = app[APP_KEY]['exc_history']
for frame_id in exc_history.frames:
url = '{}/_debugtoolbar/source?frm={}&token={}'.format(
self.url, frame_id, token)
exc_history = app[APP_KEY]['exc_history']
resp = yield from aiohttp.request('GET', url,
loop=self.loop)
yield from resp.text()
self.assertEqual(resp.status, 200)
yield from handler.finish_connections()
srv.close()
self.loop.run_until_complete(go())
def test_view_execute(self):
@asyncio.coroutine
def func(request):
raise NotImplementedError
@asyncio.coroutine
def go():
app, srv, handler = yield from self._setup_app(func)
# make sure that exception page rendered
resp = yield from aiohttp.request('GET', self.url, loop=self.loop)
txt = yield from resp.text()
self.assertEqual(500, resp.status)
self.assertTrue('<div class="debugger">' in txt)
token = app[APP_KEY]['pdtb_token']
exc_history = app[APP_KEY]['exc_history']
for frame_id in exc_history.frames:
params = {'frm': frame_id, 'token': token}
url = '{}/_debugtoolbar/source'.format(self.url)
resp = yield from aiohttp.request('GET', url, params=params,
loop=self.loop)
yield from resp.text()
self.assertEqual(resp.status, 200)
params = {'frm': frame_id, 'token': token,
'cmd': 'dump(object)'}
url = '{}/_debugtoolbar/execute'.format(self.url)
resp = yield from aiohttp.request('GET', url, params=params,
loop=self.loop)
yield from resp.text()
self.assertEqual(resp.status, 200)
# wrong token
params = {'frm': frame_id, 'token': 'x', 'cmd': 'dump(object)'}
resp = yield from aiohttp.request('GET', url, params=params,
loop=self.loop)
self.assertEqual(resp.status, 400)
# no token at all
params = {'frm': frame_id, 'cmd': 'dump(object)'}
resp = yield from aiohttp.request('GET', url, params=params,
loop=self.loop)
self.assertEqual(resp.status, 400)
yield from handler.finish_connections()
srv.close()
self.loop.run_until_complete(go())
def test_view_exception(self):
@asyncio.coroutine
def func(request):
raise NotImplementedError
@asyncio.coroutine
def go():
app, srv, handler = yield from self._setup_app(func)
# make sure that exception page rendered
resp = yield from aiohttp.request('GET', self.url, loop=self.loop)
txt = yield from resp.text()
self.assertEqual(500, resp.status)
self.assertTrue('<div class="debugger">' in txt)
token = app[APP_KEY]['pdtb_token']
exc_history = app[APP_KEY]['exc_history']
tb_id = list(exc_history.tracebacks.keys())[0]
url = '{}/_debugtoolbar/exception?tb={}&token={}'.format(
self.url, tb_id, token)
resp = yield from aiohttp.request('GET', url,
loop=self.loop)
yield from resp.text()
self.assertEqual(resp.status, 200)
self.assertTrue('<div class="debugger">' in txt)
yield from handler.finish_connections()
srv.close()
self.loop.run_until_complete(go())
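# The tests above exercise the toolbar's /_debugtoolbar/source, /execute and
# /exception endpoints; each request carries the application's 'pdtb_token',
# and for /execute a wrong or missing token is rejected with HTTP 400.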
|
massimiliano-della-rovere/aiohttp_debugtoolbar
|
tests/test_exception_views.py
|
Python
|
apache-2.0
| 5,294
|
"""
mbed SDK
Copyright (c) 2016 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from copy import deepcopy
import os
import sys
# Implementation of mbed configuration mechanism
from tools.utils import json_file_to_dict
from tools.targets import CUMULATIVE_ATTRIBUTES, TARGET_MAP, \
generate_py_target, get_resolution_order
# Base class for all configuration exceptions
class ConfigException(Exception):
"""Config system only exception. Makes it easier to distinguish config
errors"""
pass
class ConfigParameter(object):
"""This class keeps information about a single configuration parameter"""
def __init__(self, name, data, unit_name, unit_kind):
"""Construct a ConfigParameter
Positional arguments:
name - the name of the configuration parameter
data - the data associated with the configuration parameter
unit_name - the unit (target/library/application) that defines this
parameter
        unit_kind - the kind of the unit ("target", "library" or "application")
"""
self.name = self.get_full_name(name, unit_name, unit_kind,
allow_prefix=False)
self.defined_by = self.get_display_name(unit_name, unit_kind)
self.set_value(data.get("value", None), unit_name, unit_kind)
self.help_text = data.get("help", None)
self.required = data.get("required", False)
self.macro_name = data.get("macro_name", "MBED_CONF_%s" %
self.sanitize(self.name.upper()))
self.config_errors = []
@staticmethod
def get_full_name(name, unit_name, unit_kind, label=None,
allow_prefix=True):
"""Return the full (prefixed) name of a parameter. If the parameter
already has a prefix, check if it is valid
Positional arguments:
name - the simple (unqualified) name of the parameter
unit_name - the unit (target/library/application) that defines this
parameter
unit_kind - the kind of the unit ("target", "library" or "application")
Keyword arguments:
label - the name of the label in the 'target_config_overrides' section
allow_prefix - True to allow the original name to have a prefix, False
otherwise
"""
if name.find('.') == -1: # the name is not prefixed
if unit_kind == "target":
prefix = "target."
elif unit_kind == "application":
prefix = "app."
else:
prefix = unit_name + '.'
return prefix + name
# The name has a prefix, so check if it is valid
if not allow_prefix:
raise ConfigException("Invalid parameter name '%s' in '%s'" %
(name, ConfigParameter.get_display_name(
unit_name, unit_kind, label)))
temp = name.split(".")
# Check if the parameter syntax is correct (must be
# unit_name.parameter_name)
if len(temp) != 2:
raise ConfigException("Invalid parameter name '%s' in '%s'" %
(name, ConfigParameter.get_display_name(
unit_name, unit_kind, label)))
prefix = temp[0]
# Check if the given parameter prefix matches the expected prefix
if (unit_kind == "library" and prefix != unit_name) or \
(unit_kind == "target" and prefix != "target"):
raise ConfigException(
"Invalid prefix '%s' for parameter name '%s' in '%s'" %
(prefix, name, ConfigParameter.get_display_name(
unit_name, unit_kind, label)))
return name
@staticmethod
def get_display_name(unit_name, unit_kind, label=None):
"""Return the name displayed for a unit when interrogating the origin
and the last set place of a parameter
Positional arguments:
unit_name - the unit (target/library/application) that defines this
parameter
unit_kind - the kind of the unit ("target", "library" or "application")
Keyword arguments:
label - the name of the label in the 'target_config_overrides' section
"""
if unit_kind == "target":
return "target:" + unit_name
elif unit_kind == "application":
return "application%s" % ("[%s]" % label if label else "")
else: # library
return "library:%s%s" % (unit_name, "[%s]" % label if label else "")
@staticmethod
def sanitize(name):
""" "Sanitize" a name so that it is a valid C macro name. Currently it
simply replaces '.' and '-' with '_'.
Positional arguments:
name - the name to make into a valid C macro
"""
return name.replace('.', '_').replace('-', '_')
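    # For example, sanitize("app.my-param") returns "app_my_param"; the
    # upper-cased form feeds macro names such as MBED_CONF_APP_MY_PARAM.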
def set_value(self, value, unit_name, unit_kind, label=None):
""" Sets a value for this parameter, remember the place where it was
set. If the value is a Boolean, it is converted to 1 (for True) or
to 0 (for False).
Positional arguments:
value - the value of the parameter
unit_name - the unit (target/library/application) that defines this
parameter
unit_kind - the kind of the unit ("target", "library" or "application")
Keyword arguments:
label - the name of the label in the 'target_config_overrides' section
(optional)
"""
self.value = int(value) if isinstance(value, bool) else value
self.set_by = self.get_display_name(unit_name, unit_kind, label)
def __str__(self):
"""Return the string representation of this configuration parameter
Arguments: None
"""
if self.value is not None:
return '%s = %s (macro name: "%s")' % \
(self.name, self.value, self.macro_name)
else:
return '%s has no value' % self.name
def get_verbose_description(self):
"""Return a verbose description of this configuration parameter as a
string
Arguments: None
"""
desc = "Name: %s%s\n" % \
(self.name, " (required parameter)" if self.required else "")
if self.help_text:
desc += " Description: %s\n" % self.help_text
desc += " Defined by: %s\n" % self.defined_by
if not self.value:
return desc + " No value set"
desc += " Macro name: %s\n" % self.macro_name
desc += " Value: %s (set by %s)" % (self.value, self.set_by)
return desc
class ConfigMacro(object):
""" A representation of a configuration macro. It handles both macros
without a value (MACRO) and with a value (MACRO=VALUE)
"""
def __init__(self, name, unit_name, unit_kind):
"""Construct a ConfigMacro object
Positional arguments:
name - the macro's name
unit_name - the location where the macro was defined
unit_kind - the type of macro this is
"""
self.name = name
self.defined_by = ConfigParameter.get_display_name(unit_name, unit_kind)
if name.find("=") != -1:
tmp = name.split("=")
if len(tmp) != 2:
raise ValueError("Invalid macro definition '%s' in '%s'" %
(name, self.defined_by))
self.macro_name = tmp[0]
self.macro_value = tmp[1]
else:
self.macro_name = name
self.macro_value = None
class ConfigCumulativeOverride(object):
"""Representation of overrides for cumulative attributes"""
def __init__(self, name, additions=None, removals=None, strict=False):
"""Construct a ConfigCumulativeOverride object
Positional arguments:
name - the name of the config file this came from ?
Keyword arguments:
additions - macros to add to the overrides
removals - macros to remove from the overrides
strict - Boolean indicating that attempting to remove from an override
that does not exist should error
"""
self.name = name
if additions:
self.additions = set(additions)
else:
self.additions = set()
if removals:
self.removals = set(removals)
else:
self.removals = set()
self.strict = strict
def remove_cumulative_overrides(self, overrides):
"""Extend the list of override removals.
Positional arguments:
overrides - a list of names that, when the override is evaluated, will
be removed
"""
for override in overrides:
if override in self.additions:
raise ConfigException(
"Configuration conflict. The %s %s both added and removed."
% (self.name[:-1], override))
self.removals |= set(overrides)
def add_cumulative_overrides(self, overrides):
"""Extend the list of override additions.
Positional arguments:
overrides - a list of a names that, when the override is evaluated, will
be added to the list
"""
for override in overrides:
if override in self.removals or \
(self.strict and override not in self.additions):
raise ConfigException(
"Configuration conflict. The %s %s both added and removed."
% (self.name[:-1], override))
self.additions |= set(overrides)
def strict_cumulative_overrides(self, overrides):
"""Remove all overrides that are not the specified ones
Positional arguments:
overrides - a list of names that will replace the entire attribute when
this override is evaluated.
"""
self.remove_cumulative_overrides(self.additions - set(overrides))
self.add_cumulative_overrides(overrides)
self.strict = True
def update_target(self, target):
"""Update the attributes of a target based on this override"""
setattr(target, self.name,
list((set(getattr(target, self.name, []))
| self.additions) - self.removals))
def _process_config_parameters(data, params, unit_name, unit_kind):
"""Process a "config_parameters" section in either a target, a library,
or the application.
Positional arguments:
data - a dictionary with the configuration parameters
params - storage for the discovered configuration parameters
unit_name - the unit (target/library/application) that defines this
parameter
unit_kind - the kind of the unit ("target", "library" or "application")
"""
for name, val in data.items():
full_name = ConfigParameter.get_full_name(name, unit_name, unit_kind)
# If the parameter was already defined, raise an error
if full_name in params:
raise ConfigException(
"Parameter name '%s' defined in both '%s' and '%s'" %
(name, ConfigParameter.get_display_name(unit_name, unit_kind),
params[full_name].defined_by))
# Otherwise add it to the list of known parameters
# If "val" is not a dictionary, this is a shortcut definition,
# otherwise it is a full definition
params[full_name] = ConfigParameter(name, val if isinstance(val, dict)
else {"value": val}, unit_name,
unit_kind)
return params
def _process_macros(mlist, macros, unit_name, unit_kind):
"""Process a macro definition and check for incompatible duplicate
definitions.
Positional arguments:
mlist - list of macro names to process
macros - dictionary with currently discovered macros
unit_name - the unit (library/application) that defines this macro
unit_kind - the kind of the unit ("library" or "application")
"""
for mname in mlist:
macro = ConfigMacro(mname, unit_name, unit_kind)
if (macro.macro_name in macros) and \
(macros[macro.macro_name].name != mname):
# Found an incompatible definition of the macro in another module,
# so raise an error
full_unit_name = ConfigParameter.get_display_name(unit_name,
unit_kind)
raise ConfigException(
("Macro '%s' defined in both '%s' and '%s'"
% (macro.macro_name, macros[macro.macro_name].defined_by,
full_unit_name)) +
" with incompatible values")
macros[macro.macro_name] = macro
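# Hedged illustration; the JSON below is an assumption about typical input,
# not taken from this module. Config (defined next) consumes files shaped like:
#   mbed_lib.json: {"name": "mylib",
#                   "config": {"buffer-size": {"value": 128}},
#                   "macros": ["MYLIB_TRACE"]}
#   mbed_app.json: {"config": {"greeting": "hello"},
#                   "target_overrides": {"*": {"mylib.buffer-size": 256}}}
# These yield parameters "mylib.buffer-size" and "app.greeting", and macro
# names such as MBED_CONF_MYLIB_BUFFER_SIZE.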
class Config(object):
"""'Config' implements the mbed configuration mechanism"""
# Libraries and applications have different names for their configuration
# files
__mbed_app_config_name = "mbed_app.json"
__mbed_lib_config_name = "mbed_lib.json"
# Allowed keys in configuration dictionaries
# (targets can have any kind of keys, so this validation is not applicable
# to them)
__allowed_keys = {
"library": set(["name", "config", "target_overrides", "macros",
"__config_path"]),
"application": set(["config", "target_overrides",
"macros", "__config_path"])
}
# Allowed features in configurations
__allowed_features = [
"UVISOR", "BLE", "CLIENT", "IPV4", "LWIP", "COMMON_PAL", "STORAGE", "NANOSTACK",
# Nanostack configurations
"LOWPAN_BORDER_ROUTER", "LOWPAN_HOST", "LOWPAN_ROUTER", "NANOSTACK_FULL", "THREAD_BORDER_ROUTER", "THREAD_END_DEVICE", "THREAD_ROUTER", "ETHERNET_HOST"
]
def __init__(self, tgt, top_level_dirs=None, app_config=None):
"""Construct a mbed configuration
Positional arguments:
        tgt - the name of the mbed target used for this configuration
instance
        Keyword arguments:
top_level_dirs - a list of top level source directories (where
mbed_app_config.json could be found)
app_config - location of a chosen mbed_app.json file
NOTE: Construction of a Config object will look for the application
configuration file in top_level_dirs. If found once, it'll parse it.
top_level_dirs may be None (in this case, the constructor will not
search for a configuration file).
"""
app_config_location = app_config
if app_config_location is None:
for directory in top_level_dirs or []:
full_path = os.path.join(directory, self.__mbed_app_config_name)
if os.path.isfile(full_path):
if app_config_location is not None:
raise ConfigException("Duplicate '%s' file in '%s' and '%s'"
% (self.__mbed_app_config_name,
app_config_location, full_path))
else:
app_config_location = full_path
try:
self.app_config_data = json_file_to_dict(app_config_location) \
if app_config_location else {}
except ValueError as exc:
sys.stderr.write(str(exc) + "\n")
self.app_config_data = {}
# Check the keys in the application configuration data
unknown_keys = set(self.app_config_data.keys()) - \
self.__allowed_keys["application"]
if unknown_keys:
raise ConfigException("Unknown key(s) '%s' in %s" %
(",".join(unknown_keys),
self.__mbed_app_config_name))
# Update the list of targets with the ones defined in the application
# config, if applicable
self.lib_config_data = {}
# Make sure that each config is processed only once
self.processed_configs = {}
if isinstance(tgt, basestring):
if tgt in TARGET_MAP:
self.target = TARGET_MAP[tgt]
else:
self.target = generate_py_target(
self.app_config_data.get("custom_targets", {}), tgt)
else:
self.target = tgt
self.target = deepcopy(self.target)
self.target_labels = self.target.labels
self.cumulative_overrides = {key: ConfigCumulativeOverride(key)
for key in CUMULATIVE_ATTRIBUTES}
self._process_config_and_overrides(self.app_config_data, {}, "app",
"application")
self.config_errors = None
def add_config_files(self, flist):
"""Add configuration files
Positional arguments:
flist - a list of files to add to this configuration
"""
for config_file in flist:
if not config_file.endswith(self.__mbed_lib_config_name):
continue
full_path = os.path.normpath(os.path.abspath(config_file))
# Check that we didn't already process this file
if self.processed_configs.has_key(full_path):
continue
self.processed_configs[full_path] = True
# Read the library configuration and add a "__full_config_path"
# attribute to it
try:
cfg = json_file_to_dict(config_file)
except ValueError as exc:
sys.stderr.write(str(exc) + "\n")
continue
cfg["__config_path"] = full_path
if "name" not in cfg:
raise ConfigException(
"Library configured at %s has no name field." % full_path)
# If there's already a configuration for a module with the same
# name, exit with error
if self.lib_config_data.has_key(cfg["name"]):
raise ConfigException(
"Library name '%s' is not unique (defined in '%s' and '%s')"
% (cfg["name"], full_path,
self.lib_config_data[cfg["name"]]["__config_path"]))
self.lib_config_data[cfg["name"]] = cfg
def _process_config_and_overrides(self, data, params, unit_name, unit_kind):
"""Process "config_parameters" and "target_config_overrides" into a
given dictionary
Positional arguments:
        data - the configuration data of the library/application
params - storage for the discovered configuration parameters
unit_name - the unit (library/application) that defines this parameter
unit_kind - the kind of the unit ("library" or "application")
"""
self.config_errors = []
_process_config_parameters(data.get("config", {}), params, unit_name,
unit_kind)
for label, overrides in data.get("target_overrides", {}).items():
# If the label is defined by the target or it has the special value
# "*", process the overrides
if (label == '*') or (label in self.target_labels):
# Check for invalid cumulative overrides in libraries
if (unit_kind == 'library' and
any(attr.startswith('target.extra_labels') for attr
in overrides.iterkeys())):
raise ConfigException(
"Target override 'target.extra_labels' in " +
ConfigParameter.get_display_name(unit_name, unit_kind,
label) +
" is only allowed at the application level")
# Parse out cumulative overrides
for attr, cumulatives in self.cumulative_overrides.iteritems():
if 'target.'+attr in overrides:
cumulatives.strict_cumulative_overrides(
overrides['target.'+attr])
del overrides['target.'+attr]
if 'target.'+attr+'_add' in overrides:
cumulatives.add_cumulative_overrides(
overrides['target.'+attr+'_add'])
del overrides['target.'+attr+'_add']
if 'target.'+attr+'_remove' in overrides:
cumulatives.remove_cumulative_overrides(
overrides['target.'+attr+'_remove'])
del overrides['target.'+attr+'_remove']
# Consider the others as overrides
for name, val in overrides.items():
# Get the full name of the parameter
full_name = ConfigParameter.get_full_name(name, unit_name,
unit_kind, label)
if full_name in params:
params[full_name].set_value(val, unit_name, unit_kind,
label)
else:
self.config_errors.append(
ConfigException(
"Attempt to override undefined parameter" +
(" '%s' in '%s'"
% (full_name,
ConfigParameter.get_display_name(unit_name,
unit_kind,
label)))))
for cumulatives in self.cumulative_overrides.itervalues():
cumulatives.update_target(self.target)
return params
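    # Illustrative sketch (not from the original module): the method above consumes
    # "target_overrides" sections shaped roughly like the snippet below. Parameter
    # names and the "K64F" label are assumptions; plain keys override parameters,
    # while cumulative attributes use the "_add"/"_remove" suffixes handled above.
    #
    #   "target_overrides": {
    #       "*": {
    #           "platform.stdio-baud-rate": 115200,
    #           "target.features_add": ["IPV4"]
    #       },
    #       "K64F": {
    #           "target.macros_remove": ["SOME_MACRO"]
    #       }
    #   }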
def get_target_config_data(self):
"""Read and interpret configuration data defined by targets.
We consider the resolution order for our target and sort it by level
reversed, so that we first look at the top level target (the parent),
then its direct children, then the children of those children and so on,
until we reach self.target
TODO: this might not work so well in some multiple inheritance scenarios
At each step, look at two keys of the target data:
- config_parameters: used to define new configuration parameters
- config_overrides: used to override already defined configuration
parameters
Arguments: None
"""
params, json_data = {}, self.target.json_data
resolution_order = [e[0] for e
in sorted(
self.target.resolution_order,
key=lambda e: e[1], reverse=True)]
for tname in resolution_order:
# Read the target data directly from its description
target_data = json_data[tname]
# Process definitions first
_process_config_parameters(target_data.get("config", {}), params,
tname, "target")
# Then process overrides
for name, val in target_data.get("overrides", {}).items():
full_name = ConfigParameter.get_full_name(name, tname, "target")
# If the parameter name is not defined or if there isn't a path
# from this target to the target where the parameter was defined
                # in the target inheritance tree, raise an error. We need to use
# 'defined_by[7:]' to remove the "target:" prefix from
# defined_by
rel_names = [tgt for tgt, _ in
get_resolution_order(self.target.json_data, tname,
[])]
if (full_name not in params) or \
(params[full_name].defined_by[7:] not in rel_names):
raise ConfigException(
"Attempt to override undefined parameter '%s' in '%s'"
% (name,
ConfigParameter.get_display_name(tname, "target")))
# Otherwise update the value of the parameter
params[full_name].set_value(val, tname, "target")
return params
def get_lib_config_data(self):
""" Read and interpret configuration data defined by libraries. It is
assumed that "add_config_files" above was already called and the library
configuration data exists in self.lib_config_data
Arguments: None
"""
all_params, macros = {}, {}
for lib_name, lib_data in self.lib_config_data.items():
unknown_keys = set(lib_data.keys()) - self.__allowed_keys["library"]
if unknown_keys:
raise ConfigException("Unknown key(s) '%s' in %s" %
(",".join(unknown_keys), lib_name))
all_params.update(self._process_config_and_overrides(lib_data, {},
lib_name,
"library"))
_process_macros(lib_data.get("macros", []), macros, lib_name,
"library")
return all_params, macros
def get_app_config_data(self, params, macros):
""" Read and interpret the configuration data defined by the target. The
target can override any configuration parameter, as well as define its
own configuration data.
Positional arguments.
params - the dictionary with configuration parameters found so far (in
the target and in libraries)
macros - the list of macros defined in the configuration
"""
app_cfg = self.app_config_data
# The application can have a "config_parameters" and a
# "target_config_overrides" section just like a library
self._process_config_and_overrides(app_cfg, params, "app",
"application")
        # The application can also define macros
_process_macros(app_cfg.get("macros", []), macros, "app",
"application")
def get_config_data(self):
""" Return the configuration data in two parts: (params, macros)
        params - a dictionary mapping a name to a ConfigParam
macros - the list of macros defined with "macros" in libraries and in
the application (as ConfigMacro instances)
Arguments: None
"""
all_params = self.get_target_config_data()
lib_params, macros = self.get_lib_config_data()
all_params.update(lib_params)
self.get_app_config_data(all_params, macros)
return all_params, macros
@staticmethod
def _check_required_parameters(params):
"""Check that there are no required parameters without a value
Positional arguments:
params - the list of parameters to check
NOTE: This function does not return. Instead, it throws a
ConfigException when any of the required parameters are missing values
"""
for param in params.values():
if param.required and (param.value is None):
raise ConfigException("Required parameter '" + param.name +
"' defined by '" + param.defined_by +
"' doesn't have a value")
@staticmethod
def parameters_to_macros(params):
""" Encode the configuration parameters as C macro definitions.
Positional arguments:
params - a dictionary mapping a name to a ConfigParameter
Return: a list of strings that encode the configuration parameters as
C pre-processor macros
"""
return ['%s=%s' % (m.macro_name, m.value) for m in params.values()
if m.value is not None]
@staticmethod
def config_macros_to_macros(macros):
""" Return the macro definitions generated for a dictionary of
ConfigMacros (as returned by get_config_data).
Positional arguments:
        macros - a dictionary mapping a name to a ConfigMacro instance
Return: a list of strings that are the C pre-processor macros
"""
return [m.name for m in macros.values()]
@staticmethod
def config_to_macros(config):
"""Convert the configuration data to a list of C macros
Positional arguments:
config - configuration data as (ConfigParam instances, ConfigMacro
instances) tuple (as returned by get_config_data())
"""
params, macros = config[0], config[1]
Config._check_required_parameters(params)
return Config.config_macros_to_macros(macros) + \
Config.parameters_to_macros(params)
def get_config_data_macros(self):
""" Convert a Config object to a list of C macros
Arguments: None
"""
return self.config_to_macros(self.get_config_data())
def get_features(self):
""" Extract any features from the configuration data
Arguments: None
"""
params, _ = self.get_config_data()
self._check_required_parameters(params)
self.cumulative_overrides['features']\
.update_target(self.target)
for feature in self.target.features:
if feature not in self.__allowed_features:
raise ConfigException(
"Feature '%s' is not a supported features" % feature)
return self.target.features
def validate_config(self):
""" Validate configuration settings. This either returns True or
raises an exception
Arguments: None
"""
if self.config_errors:
raise self.config_errors[0]
return True
def load_resources(self, resources):
""" Load configuration data from a Resources instance and expand it
based on defined features.
Positional arguments:
resources - the resources object to load from and expand
"""
        # Update configuration files until added features create no changes
prev_features = set()
while True:
# Add/update the configuration with any .json files found while
# scanning
self.add_config_files(resources.json_files)
# Add features while we find new ones
features = set(self.get_features())
if features == prev_features:
break
for feature in features:
if feature in resources.features:
resources.add(resources.features[feature])
prev_features = features
self.validate_config()
return resources
@staticmethod
def config_to_header(config, fname=None):
""" Convert the configuration data to the content of a C header file,
        meant to be included in a C/C++ file. The content is returned as a
string.
Positional arguments:
config - configuration data as (ConfigParam instances, ConfigMacro
instances) tuple (as returned by get_config_data())
Keyword arguments:
        fname - also write the content to the file called "fname".
WARNING: if 'fname' names an existing file, it will be
overwritten!
"""
params, macros = config[0], config[1]
Config._check_required_parameters(params)
header_data = "// Automatically generated configuration file.\n"
header_data += "// DO NOT EDIT, content will be overwritten.\n\n"
header_data += "#ifndef __MBED_CONFIG_DATA__\n"
header_data += "#define __MBED_CONFIG_DATA__\n\n"
# Compute maximum length of macro names for proper alignment
max_param_macro_name_len = (max([len(m.macro_name) for m
in params.values()
if m.value is not None])
if params else 0)
max_direct_macro_name_len = (max([len(m.macro_name) for m
in macros.values()])
if macros else 0)
max_macro_name_len = max(max_param_macro_name_len,
max_direct_macro_name_len)
# Compute maximum length of macro values for proper alignment
max_param_macro_val_len = (max([len(str(m.value)) for m
in params.values()
if m.value is not None])
if params else 0)
max_direct_macro_val_len = max([len(m.macro_value or "") for m
in macros.values()]) if macros else 0
max_macro_val_len = max(max_param_macro_val_len,
max_direct_macro_val_len)
# Generate config parameters first
if params:
header_data += "// Configuration parameters\n"
for macro in params.values():
if macro.value is not None:
header_data += ("#define {0:<{1}} {2!s:<{3}} " +
"// set by {4}\n")\
.format(macro.macro_name, max_macro_name_len,
macro.value, max_macro_val_len, macro.set_by)
# Then macros
if macros:
header_data += "// Macros\n"
for macro in macros.values():
if macro.macro_value:
header_data += ("#define {0:<{1}} {2!s:<{3}}" +
" // defined by {4}\n")\
.format(macro.macro_name, max_macro_name_len,
macro.macro_value, max_macro_val_len,
macro.defined_by)
else:
header_data += ("#define {0:<{1}}" +
" // defined by {2}\n")\
.format(macro.macro_name,
max_macro_name_len + max_macro_val_len + 1,
macro.defined_by)
header_data += "\n#endif\n"
# If fname is given, write "header_data" to it
if fname:
with open(fname, "w+") as file_desc:
file_desc.write(header_data)
return header_data
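    # Illustrative sketch (not from the original module): the header produced above
    # looks roughly like the following (parameter names and values are made up):
    #
    #   // Automatically generated configuration file.
    #   // DO NOT EDIT, content will be overwritten.
    #
    #   #ifndef __MBED_CONFIG_DATA__
    #   #define __MBED_CONFIG_DATA__
    #
    #   // Configuration parameters
    #   #define MBED_CONF_PLATFORM_STDIO_BAUD_RATE 9600 // set by library:platform
    #   // Macros
    #   #define MY_FEATURE_FLAG                         // defined by application
    #
    #   #endif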
def get_config_data_header(self, fname=None):
""" Convert a Config instance to the content of a C header file, meant
        to be included in a C/C++ file. The content is returned as a string.
Keyword arguments:
fname - also write the content to the file called "fname".
WARNING: if 'fname' names an existing file, it will be
overwritten!
"""
return self.config_to_header(self.get_config_data(), fname)
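# Illustrative sketch (not from the original module): a minimal end-to-end flow,
# assuming "K64F" is a known target name and that the current directory may contain
# mbed_app.json / mbed_lib.json files discovered by the build scan.
def _example_config_flow(json_files):
    cfg = Config("K64F", top_level_dirs=["."])   # parses mbed_app.json if present
    cfg.add_config_files(json_files)             # register mbed_lib.json files
    params, macros = cfg.get_config_data()       # resolved parameters and macros
    return cfg.config_to_header((params, macros))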
|
maximmbed/mbed
|
tools/config.py
|
Python
|
apache-2.0
| 35,774
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class NetworkingApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def get_api_group(self, **kwargs):
"""
get information of a group
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_api_group(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: V1APIGroup
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_api_group_with_http_info(**kwargs)
else:
(data) = self.get_api_group_with_http_info(**kwargs)
return data
def get_api_group_with_http_info(self, **kwargs):
"""
get information of a group
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_api_group_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: V1APIGroup
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_group" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/apis/networking.k8s.io/'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIGroup',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
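# Illustrative sketch (not part of the generated client): synchronous and
# callback-based calls as described in the docstrings above. This assumes a
# kube config has already been loaded elsewhere (e.g. via kubernetes.config).
def _example_get_api_group(api_client=None):
    def _on_response(response):
        # response is a V1APIGroup instance
        pass
    api = NetworkingApi(api_client)
    group = api.get_api_group()                         # synchronous call
    thread = api.get_api_group(callback=_on_response)   # asynchronous call
    return group, thread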
|
djkonro/client-python
|
kubernetes/client/apis/networking_api.py
|
Python
|
apache-2.0
| 4,940
|
import datetime
import bisect
import json
import logging
import math
import numpy as np
import random
from learning.rlpower_controller import RLPowerController
from scipy.interpolate import splrep, splev
from hal.inputs.fitness_querier import FitnessQuerier
__author__ = 'matteo'
class RLPowerAlgorithm:
epsilon = 10 ** -10
def __init__(self, config_parameters):
self.RANKING_SIZE = config_parameters['ranking_size']
self.NUM_SERVOS = len(config_parameters['servo_pins'])
# In the original algorithm they used variance and square-rooted it every time. We're using standard deviation
# and decay parameter is also a square root of the parameter from original algorithm
self._sigma = math.sqrt(config_parameters['variance'])
self._sigma_decay = math.sqrt(config_parameters['sigma_decay_squared'])
self._initial_spline_size = config_parameters['initial_spline_size']
self._end_spline_size = config_parameters['end_spline_size']
self._number_of_fitness_evaluations = config_parameters['number_of_fitness_evaluations']
self._fitness_evaluation = config_parameters['fitness_evaluation_method']
self._light_fitness_weight = config_parameters['light_fitness_weight']
self._current_spline_size = self._initial_spline_size
self._current_evaluation = 0
self._runtime_data_file = config_parameters['runtime_data_file']
self._hist_filename = config_parameters['hist_filename']
self._hist_freq_save = config_parameters['hist_freq_save']
self._start_date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
# Create an instance of fitness querier
if self._fitness_evaluation == 'auto':
self._fitness_querier = FitnessQuerier(config_parameters)
self._fitness_querier.start()
# Recover evaluation data from tmp file
self._runtime_data = self._load_runtime_data_from_file(self._runtime_data_file)
if 'last_spline' in self._runtime_data:
self.ranking = self._runtime_data['ranking']
self._current_spline = self._runtime_data['last_spline']
self._sigma = self._runtime_data['sigma']
self._current_spline_size = len(self._current_spline[0])
self._current_evaluation = self._runtime_data['evaluation']
else:
self.ranking = []
# Spline initialisation
self._current_spline = np.array(
[[0.5 + random.normalvariate(0, self._sigma) for x in range(self._initial_spline_size)]
for y in range(self.NUM_SERVOS)])
self.controller = RLPowerController(self._current_spline)
def _generate_spline(self):
# Add a weighted average of the best splines seen so far
total = self.epsilon # something similar to 0, but not 0 ( division by 0 is evil )
modifier = np.zeros(self._current_spline.shape)
for (fitness, spline) in self.ranking:
total += fitness
modifier += (spline - self._current_spline) * fitness
# random noise for the spline
noise = np.array(
[[random.normalvariate(0, self._sigma) for x in range(self._current_spline_size)]
for y in range(self.NUM_SERVOS)])
return self._current_spline + noise + modifier / total
def skip_evaluation(self):
logging.info("Skipping evaluation, starting new one")
self._current_spline = self._generate_spline()
self.controller.set_spline(self._current_spline)
self._fitness_querier.start()
def next_evaluation(self, light_sensor_value=0):
self._current_evaluation += 1
logging.info("current spline size: {}".format(self._current_spline_size))
# generate fitness
movement_fitness = self.get_current_fitness()
light_fitness = self._light_fitness_weight * light_sensor_value
current_fitness = movement_fitness + light_fitness
logging.info("Last evaluation fitness: {} (movement: {} + light: {})".format(current_fitness, movement_fitness, light_fitness))
logging.info("Current position: {}".format(self._fitness_querier.get_position()))
# save old evaluation
self.save_in_ranking(current_fitness, self._current_spline)
        # check if it is time to increase the number of evaluations
if math.floor((self._end_spline_size - self._initial_spline_size)/self._number_of_fitness_evaluations *
self._current_evaluation) + 3 > self._current_spline_size:
self._current_spline_size += 1
self._current_spline = self.recalculate_spline(self._current_spline, self._current_spline_size)
for number, (fitness, rspline) in enumerate(self.ranking):
self.ranking[number] = _RankingEntry((fitness, self.recalculate_spline(rspline, self._current_spline_size)))
# update values
self._current_spline = self._generate_spline()
self.controller.set_spline(self._current_spline)
self._sigma *= self._sigma_decay
self._save_runtime_data_to_file(self._runtime_data_file)
self._save_history()
self._fitness_querier.start()
def recalculate_spline(self, spline, spline_size):
return np.apply_along_axis(self._interpolate, 1, spline, spline_size + 1)
def _interpolate(self, spline, spline_size):
spline = np.append(spline, spline[0])
x = np.linspace(0, 1, len(spline))
tck = splrep(x, spline, per=True)
x2 = np.linspace(0, 1, spline_size)
return splev(x2, tck)[:-1]
def get_current_fitness(self):
# Manual fitness evaluation
if self._fitness_evaluation == 'manual':
fitness = float(input("Enter fitness of current gait: "))
# Random fitness (for testing purposes)
elif self._fitness_evaluation == 'random':
fitness = 5 + random.normalvariate(0, 2)
elif self._fitness_evaluation == 'auto':
fitness = self._fitness_querier.get_fitness()
#logging.info("fitness: {}".format(fitness))
else:
logging.error("Unknown fitness evaluation method")
raise NameError("Unknown fitness evaluation method")
return fitness
def save_in_ranking(self, current_fitness, current_spline):
if len(self.ranking) < self.RANKING_SIZE:
bisect.insort(self.ranking, _RankingEntry((current_fitness, current_spline)))
elif current_fitness > self.ranking[0][0]:
bisect.insort(self.ranking, _RankingEntry((current_fitness, current_spline)))
self.ranking.pop(0)
self._save_runtime_data_to_file(self._runtime_data_file)
def _load_runtime_data_from_file(self, filename):
try:
with open(filename) as json_data:
d = json.load(json_data)
ranking_serialized = d['ranking']
ranking = [_RankingEntry((elem['fitness'], np.array(elem['spline']))) for elem in ranking_serialized]
d['ranking'] = ranking
d['last_spline'] = np.array(d['last_spline'])
return d
except IOError:
return {}
def _save_runtime_data_to_file(self, filename):
ranking_serialized = [{'fitness': f, 'spline': s.tolist()} for (f, s) in self.ranking]
data = {'ranking': ranking_serialized,
'last_spline': self._current_spline.tolist(),
'sigma': self._sigma,
'evaluation': self._current_evaluation
}
with open(filename, 'w') as outfile:
json.dump(data, outfile)
def _save_history(self):
"""
        Saves the current state of the RLPower algorithm into a new file,
        preserving the history of the evolution. If the configuration
        "hist_freq_save" is <= 0, this feature is disabled.
"""
if self._hist_freq_save <= 0:
return
if self._current_evaluation % self._hist_freq_save == 0:
self._save_runtime_data_to_file(
"{} {}_{}.json".format(self._start_date, self._hist_filename, self._current_evaluation)
)
class _RankingEntry(tuple):
def __lt__(self, other):
return other[0] > self[0]
def __gt__(self, other):
return not self.__lt__(other)
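# Illustrative sketch (not from the original module): the periodic re-sampling used
# by _interpolate/recalculate_spline above, written as a stand-alone function. The
# control-point values below are made up.
def _example_spline_resize(new_size=6):
    points = np.array([0.2, 0.8, 0.5, 0.3, 0.6, 0.4])
    closed = np.append(points, points[0])                # close the loop for a periodic fit
    x = np.linspace(0, 1, len(closed))
    tck = splrep(x, closed, per=True)                    # periodic cubic spline
    resampled = splev(np.linspace(0, 1, new_size + 1), tck)[:-1]
    return resampled                                     # new control points, length new_size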
|
portaloffreedom/robot-baby
|
RobotController/learning/rlpower_algorithm.py
|
Python
|
apache-2.0
| 8,399
|
import os
from plenum.common.util import hexToFriendly
from plenum.bls.bls_crypto_factory import create_default_bls_crypto_factory
from plenum.common.constants import CLIENT_STACK_SUFFIX
from plenum.common.stacks import nodeStackClass
from stp_core.crypto.util import randomSeed
def initLocalKeys(name, keys_dir, sigseed, *, use_bls, override=False):
# * forces usage of names for args on the right hand side
pubkey, verkey = nodeStackClass.initLocalKeys(name, keys_dir, sigseed, override=override)
print("Public key is", hexToFriendly(pubkey))
print("Verification key is", hexToFriendly(verkey))
blspk, key_proof = init_bls_keys(keys_dir, name, sigseed) if use_bls \
else (None, None)
return pubkey, verkey, blspk, key_proof
def initRemoteKeys(name, remote_name, keys_dir, verkey, override=False):
nodeStackClass.initRemoteKeys(name, remote_name, keys_dir, verkey,
override=override)
def init_bls_keys(keys_dir, node_name, seed=None):
# TODO: do we need keys based on transport keys?
bls_keys_dir = os.path.join(keys_dir, node_name)
bls_factory = create_default_bls_crypto_factory(keys_dir=bls_keys_dir)
stored_pk, key_proof = bls_factory.generate_and_store_bls_keys(seed)
print("BLS Public key is", stored_pk)
print("Proof of possession for BLS key is", key_proof)
return stored_pk, key_proof
def initNodeKeysForBothStacks(name, keys_dir, sigseed, *, use_bls=True,
override=False):
    # `sigseed` is initialised to keep the seed the same for both stacks.
    # Both node and client stacks need to have the same keys
    if not sigseed:
        sigseed = randomSeed()
print("Generating keys for random seed", sigseed)
else:
print("Generating keys for provided seed", sigseed)
node_stack_name = name
client_stack_name = node_stack_name + CLIENT_STACK_SUFFIX
print("Init local keys for client-stack")
initLocalKeys(client_stack_name, keys_dir, sigseed, use_bls=False, override=override)
print("Init local keys for node-stack")
keys = initLocalKeys(node_stack_name, keys_dir, sigseed, use_bls=use_bls, override=override)
return keys
def areKeysSetup(name, keys_dir):
return nodeStackClass.areKeysSetup(name, keys_dir)
def learnKeysFromOthers(keys_dir, nodeName, otherNodes):
otherNodeStacks = []
for otherNode in otherNodes:
if otherNode.name != nodeName:
otherNodeStacks.append(otherNode.nodestack)
otherNodeStacks.append(otherNode.clientstack)
nodeStackClass.learnKeysFromOthers(keys_dir, nodeName, otherNodeStacks)
def tellKeysToOthers(node, otherNodes):
otherNodeStacks = []
for otherNode in otherNodes:
if otherNode != node:
otherNodeStacks.append(otherNode.nodestack)
otherNodeStacks.append(otherNode.clientstack)
node.nodestack.tellKeysToOthers(otherNodeStacks)
node.clientstack.tellKeysToOthers(otherNodeStacks)
|
evernym/zeno
|
plenum/common/keygen_utils.py
|
Python
|
apache-2.0
| 3,012
|
"""
This dependency resolver resolves tool shed dependencies (those defined in
tool_dependencies.xml) installed using Platform Homebrew and converted
via shed2tap (e.g. https://github.com/jmchilton/homebrew-toolshed).
"""
import logging
import os
from xml.etree import ElementTree as ET
from .resolver_mixins import (
UsesHomebrewMixin,
UsesToolDependencyDirMixin,
UsesInstalledRepositoriesMixin,
)
from ..resolvers import DependencyResolver, INDETERMINATE_DEPENDENCY
log = logging.getLogger(__name__)
class HomebrewToolShedDependencyResolver(
DependencyResolver,
UsesHomebrewMixin,
UsesToolDependencyDirMixin,
UsesInstalledRepositoriesMixin,
):
resolver_type = "tool_shed_tap"
def __init__(self, dependency_manager, **kwds):
self._init_homebrew(**kwds)
self._init_base_path(dependency_manager, **kwds)
def resolve(self, name, version, type, **kwds):
if type != "package":
return INDETERMINATE_DEPENDENCY
if version is None:
return INDETERMINATE_DEPENDENCY
return self._find_tool_dependencies(name, version, type, **kwds)
def _find_tool_dependencies(self, name, version, type, **kwds):
installed_tool_dependency = self._get_installed_dependency(name, type, version=version, **kwds)
if installed_tool_dependency:
return self._resolve_from_installed_tool_dependency(name, version, installed_tool_dependency)
if "tool_dir" in kwds:
tool_directory = os.path.abspath(kwds["tool_dir"])
            tool_dependencies_path = os.path.join(tool_directory, "tool_dependencies.xml")
            if os.path.exists(tool_dependencies_path):
                return self._resolve_from_tool_dependencies_path(name, version, tool_dependencies_path)
return INDETERMINATE_DEPENDENCY
def _resolve_from_installed_tool_dependency(self, name, version, installed_tool_dependency):
tool_shed_repository = installed_tool_dependency.tool_shed_repository
recipe_name = build_recipe_name(
package_name=name,
package_version=version,
repository_owner=tool_shed_repository.owner,
repository_name=tool_shed_repository.name,
)
return self._find_dep_default(recipe_name, None)
def _resolve_from_tool_dependencies_path(self, name, version, tool_dependencies_path):
try:
raw_dependencies = RawDependencies(tool_dependencies_path)
except Exception:
log.debug("Failed to parse dependencies in file %s" % tool_dependencies_path)
return INDETERMINATE_DEPENDENCY
raw_dependency = raw_dependencies.find(name, version)
if not raw_dependency:
return INDETERMINATE_DEPENDENCY
recipe_name = build_recipe_name(
package_name=name,
package_version=version,
repository_owner=raw_dependency.repository_owner,
repository_name=raw_dependency.repository_name
)
dep = self._find_dep_default(recipe_name, None)
return dep
class RawDependencies(object):
def __init__(self, dependencies_file):
self.root = ET.parse(dependencies_file).getroot()
dependencies = []
package_els = self.root.findall("package") or []
for package_el in package_els:
repository_el = package_el.find("repository")
if repository_el is None:
continue
dependency = RawDependency(self, package_el, repository_el)
dependencies.append(dependency)
self.dependencies = dependencies
def find(self, package_name, package_version):
target_dependency = None
for dependency in self.dependencies:
if dependency.package_name == package_name and dependency.package_version == package_version:
target_dependency = dependency
break
return target_dependency
class RawDependency(object):
def __init__(self, dependencies, package_el, repository_el):
self.dependencies = dependencies
self.package_el = package_el
self.repository_el = repository_el
def __repr__(self):
temp = "Dependency[package_name=%s,version=%s,dependent_package=%s]"
return temp % (
self.package_el.attrib["name"],
self.package_el.attrib["version"],
self.repository_el.attrib["name"]
)
@property
def repository_owner(self):
return self.repository_el.attrib["owner"]
@property
def repository_name(self):
return self.repository_el.attrib["name"]
@property
def package_name(self):
return self.package_el.attrib["name"]
@property
def package_version(self):
return self.package_el.attrib["version"]
def build_recipe_name(package_name, package_version, repository_owner, repository_name):
# TODO: Consider baking package_name and package_version into name? (would be more "correct")
owner = repository_owner.replace("-", "")
name = repository_name
name = name.replace("_", "").replace("-", "")
base = "%s_%s" % (owner, name)
return base
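# Illustrative sketch (not from the original module): the normalisation performed
# above, with made-up repository metadata.
def _example_build_recipe_name():
    return build_recipe_name(
        package_name="bwa",
        package_version="0.7.10",
        repository_owner="my-owner",
        repository_name="package_bwa_0_7_10",
    )  # -> "myowner_packagebwa0710"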
__all__ = ['HomebrewToolShedDependencyResolver']
|
ssorgatem/pulsar
|
galaxy/tools/deps/resolvers/brewed_tool_shed_packages.py
|
Python
|
apache-2.0
| 5,246
|
# Copyright (c) 2016 OpenStack Foundation
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from oslo_log import log as logging
from magnum.common import service
from magnum.tests import base
class TestMagnumService(base.BaseTestCase):
@mock.patch.object(logging, 'register_options')
@mock.patch.object(logging, 'setup')
@mock.patch('magnum.common.config.set_config_defaults')
@mock.patch('magnum.common.config.parse_args')
def test_prepare_service_with_argv_not_none(self, mock_parse, mock_set,
mock_setup, mock_reg):
argv = 'foo'
mock_parse.side_effect = lambda *args, **kwargs: None
service.prepare_service(argv)
mock_parse.assert_called_once_with(argv)
mock_setup.assert_called_once_with(base.CONF, 'magnum')
mock_reg.assert_called_once_with(base.CONF)
mock_set.assert_called_once_with()
@mock.patch.object(logging, 'register_options')
@mock.patch.object(logging, 'setup')
@mock.patch('magnum.common.config.set_config_defaults')
@mock.patch('magnum.common.config.parse_args')
def test_prepare_service_with_argv_none(self, mock_parse, mock_set,
mock_setup, mock_reg):
argv = None
mock_parse.side_effect = lambda *args, **kwargs: None
service.prepare_service(argv)
mock_parse.assert_called_once_with([])
mock_setup.assert_called_once_with(base.CONF, 'magnum')
mock_reg.assert_called_once_with(base.CONF)
mock_set.assert_called_once_with()
|
ArchiFleKs/magnum
|
magnum/tests/unit/common/test_service.py
|
Python
|
apache-2.0
| 2,144
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
class DesignateDomain(resource.Resource):
"""Heat Template Resource for Designate Domain.
    Designate provides DNS-as-a-Service for OpenStack. A domain is a realm
    with an identification string that is unique in DNS.
"""
support_status = support.SupportStatus(
version='5.0.0')
entity = 'domains'
PROPERTIES = (
NAME, TTL, DESCRIPTION, EMAIL
) = (
'name', 'ttl', 'description', 'email'
)
ATTRIBUTES = (
SERIAL,
) = (
'serial',
)
properties_schema = {
# Based on RFC 1035, length of name is set to max of 255
NAME: properties.Schema(
properties.Schema.STRING,
_('Domain name.'),
required=True,
constraints=[constraints.Length(max=255)]
),
# Based on RFC 1035, range for ttl is set to 1 to signed 32 bit number
TTL: properties.Schema(
properties.Schema.INTEGER,
_('Time To Live (Seconds).'),
update_allowed=True,
constraints=[constraints.Range(min=1,
max=2147483647)]
),
# designate mandates to the max length of 160 for description
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description of domain.'),
update_allowed=True,
constraints=[constraints.Length(max=160)]
),
EMAIL: properties.Schema(
properties.Schema.STRING,
_('Domain email.'),
update_allowed=True,
required=True
)
}
attributes_schema = {
SERIAL: attributes.Schema(
_("DNS domain serial."),
type=attributes.Schema.STRING
),
}
default_client_name = 'designate'
entity = 'domains'
def handle_create(self):
args = dict((k, v) for k, v in six.iteritems(self.properties) if v)
domain = self.client_plugin().domain_create(**args)
self.resource_id_set(domain.id)
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
args = dict()
if prop_diff.get(self.EMAIL):
args['email'] = prop_diff.get(self.EMAIL)
if prop_diff.get(self.TTL):
args['ttl'] = prop_diff.get(self.TTL)
if prop_diff.get(self.DESCRIPTION):
args['description'] = prop_diff.get(self.DESCRIPTION)
        if args:
args['id'] = self.resource_id
self.client_plugin().domain_update(**args)
def _resolve_attribute(self, name):
if name == self.SERIAL:
domain = self.client().domains.get(self.resource_id)
return domain.serial
# FIXME(kanagaraj-manickam) Remove this method once designate defect
# 1485552 is fixed.
def _show_resource(self):
return dict(self.client().domains.get(self.resource_id).items())
def parse_live_resource_data(self, resource_properties, resource_data):
domain_reality = {}
for key in self.PROPERTIES:
domain_reality.update({key: resource_data.get(key)})
return domain_reality
def resource_mapping():
return {
'OS::Designate::Domain': DesignateDomain
}
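# Illustrative sketch (not from the original module): a minimal HOT template snippet
# using the properties defined above. All names and values are made up.
#
#   resources:
#     my_domain:
#       type: OS::Designate::Domain
#       properties:
#         name: example.org.
#         email: admin@example.org
#         ttl: 3600
#         description: Example zone managed by Heat
#
# The "serial" attribute can then be read back with {get_attr: [my_domain, serial]}.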
|
cwolferh/heat-scratch
|
heat/engine/resources/openstack/designate/domain.py
|
Python
|
apache-2.0
| 4,053
|
# ===============================================================================
# Copyright 2016 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from __future__ import print_function
from traits.api import HasTraits
# ============= standard library imports ========================
import os
# ============= local library imports ==========================
from pychron.dvc.dvc_database import DVCDatabase
from pychron.dvc.dvc_orm import (
ProjectTbl,
AnalysisTbl,
SampleTbl,
IrradiationPositionTbl,
)
from six.moves import map
TABLES = {"project": ProjectTbl, "sample": SampleTbl}
class CustomAnalysisQuery(HasTraits):
def execute_query(self, filters):
q = self.session.query(AnalysisTbl)
q = q.join(IrradiationPositionTbl)
q = q.join(SampleTbl)
q = q.join(ProjectTbl)
for fi in filters:
q = q.filter(fi)
results = self.db._query_all(q)
print(len(results))
def load_query(self):
pass
def generate_query(self, txt):
filters = []
for line in txt.split("\n"):
tbl, val = list(map(str.strip, line.split(":")))
if "." in tbl:
tbl, attr = tbl.split(".")
else:
tbl = tbl
attr = "name"
            # look up the table class but keep the original table name for error reporting
            tbl_cls = TABLES.get(tbl)
            if tbl_cls:
                attr = getattr(tbl_cls, attr)
                if "," in val:
                    # comma-separated values become an IN (...) filter
                    f = attr.in_(val.split(","))
                else:
                    f = attr == val
                filters.append(f)
            else:
                print("invalid table {}".format(tbl))
return filters
if __name__ == "__main__":
db = DVCDatabase(
host="localhost",
username=os.environ.get("LOCALHOST_DB_USER"),
password=os.environ.get("LOCALHOST_DB_PWD"),
kind="mysql",
# echo=True,
name="pychrondvc_dev",
)
txt = """project.name: Irradiation-NM-274
sample: FC-2"""
# txt = '''sample: FC-2'''
db.connect()
c = CustomAnalysisQuery(db=db)
q = c.generate_query(txt)
c.execute_query(q)
# ============= EOF =============================================
|
USGSDenverPychron/pychron
|
pychron/pipeline/custom_query.py
|
Python
|
apache-2.0
| 2,881
|
#
# Copyright 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ceilometer.dispatcher.resources import base
class Identity(base.ResourceBase):
@staticmethod
def get_resource_extra_attributes(sample):
return {}
@staticmethod
def get_metrics_names():
return ['identity.authenticate.success',
'identity.authenticate.pending',
'identity.authenticate.failure',
'identity.user.created',
'identity.user.deleted',
'identity.user.updated',
'identity.group.created',
'identity.group.deleted',
'identity.group.updated',
'identity.role.created',
'identity.role.deleted',
'identity.role.updated',
'identity.project.created',
'identity.project.deleted',
'identity.project.updated',
'identity.trust.created',
'identity.trust.deleted',
'identity.role_assignment.created',
'identity.role_assignment.deleted',
]
|
pkilambi/ceilometer
|
ceilometer/dispatcher/resources/identity.py
|
Python
|
apache-2.0
| 1,654
|
"""Clean up some schema artifacts
Revision ID: 4a72628695ff
Revises: 420f0f384465
Create Date: 2014-04-29 23:51:44.045058
"""
# revision identifiers, used by Alembic.
revision = '4a72628695ff'
down_revision = '420f0f384465'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
op.drop_column('object_events', 'permissions_json')
op.drop_column('object_files', 'permissions_json')
op.drop_column('object_folders', 'permissions_json')
def downgrade():
op.add_column('object_folders', sa.Column('permissions_json', mysql.TEXT(), nullable=False))
op.add_column('object_files', sa.Column('permissions_json', mysql.TEXT(), nullable=False))
op.add_column('object_events', sa.Column('permissions_json', mysql.TEXT(), nullable=False))
|
vladan-m/ggrc-core
|
src/ggrc_gdrive_integration/migrations/versions/20140429235144_4a72628695ff_clean_up_some_schema.py
|
Python
|
apache-2.0
| 806
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module provides an interface between the previous Pod
API and outputs a kubernetes.client.models.V1Pod.
The advantage is that the full Kubernetes API
is supported and no serialization needs to be written.
"""
import copy
import datetime
import hashlib
import os
import re
import uuid
import warnings
from functools import reduce
from typing import List, Optional, Union
from dateutil import parser
from kubernetes.client import models as k8s
from kubernetes.client.api_client import ApiClient
from airflow.exceptions import AirflowConfigException
from airflow.kubernetes.pod_generator_deprecated import PodDefaults, PodGenerator as PodGeneratorDeprecated
from airflow.utils import yaml
from airflow.version import version as airflow_version
MAX_LABEL_LEN = 63
def make_safe_label_value(string):
"""
Valid label values must be 63 characters or less and must be empty or begin and
end with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_),
dots (.), and alphanumerics between.
If the label value is greater than 63 chars once made safe, or differs in any
way from the original value sent to this function, then we need to truncate to
53 chars, and append it with a unique hash.
"""
safe_label = re.sub(r"^[^a-z0-9A-Z]*|[^a-zA-Z0-9_\-\.]|[^a-z0-9A-Z]*$", "", string)
if len(safe_label) > MAX_LABEL_LEN or string != safe_label:
safe_hash = hashlib.md5(string.encode()).hexdigest()[:9]
safe_label = safe_label[: MAX_LABEL_LEN - len(safe_hash) - 1] + "-" + safe_hash
return safe_label
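# Illustrative sketch (not from the original module): already-valid values pass
# through unchanged; anything else is sanitised, truncated and suffixed with a short
# md5 hash as described above. The inputs below are made up.
def _example_make_safe_label_value():
    unchanged = make_safe_label_value("my-dag")        # already valid, returned as-is
    hashed = make_safe_label_value("my dag / run:1")   # sanitised + "-<9-char hash>"
    truncated = make_safe_label_value("x" * 100)       # cut to 53 chars + "-<9-char hash>"
    return unchanged, hashed, truncated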
def datetime_to_label_safe_datestring(datetime_obj: datetime.datetime) -> str:
"""
Kubernetes doesn't like ":" in labels, since ISO datetime format uses ":" but
not "_" let's
replace ":" with "_"
:param datetime_obj: datetime.datetime object
:return: ISO-like string representing the datetime
"""
return datetime_obj.isoformat().replace(":", "_").replace('+', '_plus_')
def label_safe_datestring_to_datetime(string: str) -> datetime.datetime:
"""
Kubernetes doesn't permit ":" in labels. ISO datetime format uses ":" but not
"_", let's
replace ":" with "_"
:param string: str
:return: datetime.datetime object
"""
return parser.parse(string.replace('_plus_', '+').replace("_", ":"))
class PodGenerator:
"""
Contains Kubernetes Airflow Worker configuration logic
Represents a kubernetes pod and manages execution of a single pod.
Any configuration that is container specific gets applied to
the first container in the list of containers.
:param pod: The fully specified pod. Mutually exclusive with `path_or_string`
:param pod_template_file: Path to YAML file. Mutually exclusive with `pod`
:param extract_xcom: Whether to bring up a container for xcom
"""
def __init__(
self,
pod: Optional[k8s.V1Pod] = None,
pod_template_file: Optional[str] = None,
extract_xcom: bool = True,
):
if not pod_template_file and not pod:
raise AirflowConfigException(
"Podgenerator requires either a `pod` or a `pod_template_file` argument"
)
if pod_template_file and pod:
raise AirflowConfigException("Cannot pass both `pod` and `pod_template_file` arguments")
if pod_template_file:
self.ud_pod = self.deserialize_model_file(pod_template_file)
else:
self.ud_pod = pod
# Attach sidecar
self.extract_xcom = extract_xcom
def gen_pod(self) -> k8s.V1Pod:
"""Generates pod"""
result = self.ud_pod
result.metadata.name = self.make_unique_pod_id(result.metadata.name)
if self.extract_xcom:
result = self.add_xcom_sidecar(result)
return result
@staticmethod
def add_xcom_sidecar(pod: k8s.V1Pod) -> k8s.V1Pod:
"""Adds sidecar"""
warnings.warn(
"This function is deprecated. "
"Please use airflow.providers.cncf.kubernetes.utils.xcom_sidecar.add_xcom_sidecar instead"
)
pod_cp = copy.deepcopy(pod)
pod_cp.spec.volumes = pod.spec.volumes or []
pod_cp.spec.volumes.insert(0, PodDefaults.VOLUME)
pod_cp.spec.containers[0].volume_mounts = pod_cp.spec.containers[0].volume_mounts or []
pod_cp.spec.containers[0].volume_mounts.insert(0, PodDefaults.VOLUME_MOUNT)
pod_cp.spec.containers.append(PodDefaults.SIDECAR_CONTAINER)
return pod_cp
@staticmethod
def from_obj(obj) -> Optional[Union[dict, k8s.V1Pod]]:
"""Converts to pod from obj"""
if obj is None:
return None
k8s_legacy_object = obj.get("KubernetesExecutor", None)
k8s_object = obj.get("pod_override", None)
if k8s_legacy_object and k8s_object:
raise AirflowConfigException(
"Can not have both a legacy and new"
"executor_config object. Please delete the KubernetesExecutor"
"dict and only use the pod_override kubernetes.client.models.V1Pod"
"object."
)
if not k8s_object and not k8s_legacy_object:
return None
if isinstance(k8s_object, k8s.V1Pod):
return k8s_object
elif isinstance(k8s_legacy_object, dict):
warnings.warn(
                'Using a dictionary for the executor_config is deprecated and will soon be removed. '
                'Please use a `kubernetes.client.models.V1Pod` class with a "pod_override" key'
                ' instead.',
category=DeprecationWarning,
)
return PodGenerator.from_legacy_obj(obj)
else:
raise TypeError(
'Cannot convert a non-kubernetes.client.models.V1Pod object into a KubernetesExecutorConfig'
)
@staticmethod
def from_legacy_obj(obj) -> Optional[k8s.V1Pod]:
"""Converts to pod from obj"""
if obj is None:
return None
        # We do not want to extract the constant from ExecutorLoader here because it is just
        # a name in a dictionary rather than an executor selection mechanism, and it causes a cyclic import
namespaced = obj.get("KubernetesExecutor", {})
if not namespaced:
return None
resources = namespaced.get('resources')
if resources is None:
requests = {
'cpu': namespaced.pop('request_cpu', None),
'memory': namespaced.pop('request_memory', None),
'ephemeral-storage': namespaced.get('ephemeral-storage'), # We pop this one in limits
}
limits = {
'cpu': namespaced.pop('limit_cpu', None),
'memory': namespaced.pop('limit_memory', None),
'ephemeral-storage': namespaced.pop('ephemeral-storage', None),
}
all_resources = list(requests.values()) + list(limits.values())
if all(r is None for r in all_resources):
resources = None
else:
# remove None's so they don't become 0's
requests = {k: v for k, v in requests.items() if v is not None}
limits = {k: v for k, v in limits.items() if v is not None}
resources = k8s.V1ResourceRequirements(requests=requests, limits=limits)
namespaced['resources'] = resources
return PodGeneratorDeprecated(**namespaced).gen_pod()
@staticmethod
def reconcile_pods(base_pod: k8s.V1Pod, client_pod: Optional[k8s.V1Pod]) -> k8s.V1Pod:
"""
:param base_pod: has the base attributes which are overwritten if they exist
in the client pod and remain if they do not exist in the client_pod
:param client_pod: the pod that the client wants to create.
:return: the merged pods
        This can't be done recursively, as certain fields are overwritten and some are concatenated.
"""
if client_pod is None:
return base_pod
client_pod_cp = copy.deepcopy(client_pod)
client_pod_cp.spec = PodGenerator.reconcile_specs(base_pod.spec, client_pod_cp.spec)
client_pod_cp.metadata = PodGenerator.reconcile_metadata(base_pod.metadata, client_pod_cp.metadata)
client_pod_cp = merge_objects(base_pod, client_pod_cp)
return client_pod_cp
@staticmethod
def reconcile_metadata(base_meta, client_meta):
"""
Merge kubernetes Metadata objects
:param base_meta: has the base attributes which are overwritten if they exist
in the client_meta and remain if they do not exist in the client_meta
:param client_meta: the spec that the client wants to create.
:return: the merged specs
"""
if base_meta and not client_meta:
return base_meta
if not base_meta and client_meta:
return client_meta
elif client_meta and base_meta:
client_meta.labels = merge_objects(base_meta.labels, client_meta.labels)
client_meta.annotations = merge_objects(base_meta.annotations, client_meta.annotations)
extend_object_field(base_meta, client_meta, 'managed_fields')
extend_object_field(base_meta, client_meta, 'finalizers')
extend_object_field(base_meta, client_meta, 'owner_references')
return merge_objects(base_meta, client_meta)
return None
@staticmethod
def reconcile_specs(
base_spec: Optional[k8s.V1PodSpec], client_spec: Optional[k8s.V1PodSpec]
) -> Optional[k8s.V1PodSpec]:
"""
:param base_spec: has the base attributes which are overwritten if they exist
in the client_spec and remain if they do not exist in the client_spec
:param client_spec: the spec that the client wants to create.
:return: the merged specs
"""
if base_spec and not client_spec:
return base_spec
if not base_spec and client_spec:
return client_spec
elif client_spec and base_spec:
client_spec.containers = PodGenerator.reconcile_containers(
base_spec.containers, client_spec.containers
)
merged_spec = extend_object_field(base_spec, client_spec, 'init_containers')
merged_spec = extend_object_field(base_spec, merged_spec, 'volumes')
return merge_objects(base_spec, merged_spec)
return None
@staticmethod
def reconcile_containers(
base_containers: List[k8s.V1Container], client_containers: List[k8s.V1Container]
) -> List[k8s.V1Container]:
"""
:param base_containers: has the base attributes which are overwritten if they exist
in the client_containers and remain if they do not exist in the client_containers
:param client_containers: the containers that the client wants to create.
:return: the merged containers
        This runs recursively over the list of containers.
"""
if not base_containers:
return client_containers
if not client_containers:
return base_containers
client_container = client_containers[0]
base_container = base_containers[0]
client_container = extend_object_field(base_container, client_container, 'volume_mounts')
client_container = extend_object_field(base_container, client_container, 'env')
client_container = extend_object_field(base_container, client_container, 'env_from')
client_container = extend_object_field(base_container, client_container, 'ports')
client_container = extend_object_field(base_container, client_container, 'volume_devices')
client_container = merge_objects(base_container, client_container)
return [client_container] + PodGenerator.reconcile_containers(
base_containers[1:], client_containers[1:]
)
@staticmethod
def construct_pod(
dag_id: str,
task_id: str,
pod_id: str,
try_number: int,
kube_image: str,
date: Optional[datetime.datetime],
args: List[str],
pod_override_object: Optional[k8s.V1Pod],
base_worker_pod: k8s.V1Pod,
namespace: str,
scheduler_job_id: str,
run_id: Optional[str] = None,
) -> k8s.V1Pod:
"""
Construct a pod by gathering and consolidating the configuration from 3 places:
- airflow.cfg
- executor_config
- dynamic arguments
"""
try:
image = pod_override_object.spec.containers[0].image # type: ignore
if not image:
image = kube_image
except Exception:
image = kube_image
annotations = {
'dag_id': dag_id,
'task_id': task_id,
'try_number': str(try_number),
}
labels = {
'airflow-worker': make_safe_label_value(scheduler_job_id),
'dag_id': make_safe_label_value(dag_id),
'task_id': make_safe_label_value(task_id),
'try_number': str(try_number),
'airflow_version': airflow_version.replace('+', '-'),
'kubernetes_executor': 'True',
}
if date:
annotations['execution_date'] = date.isoformat()
labels['execution_date'] = datetime_to_label_safe_datestring(date)
if run_id:
annotations['run_id'] = run_id
labels['run_id'] = make_safe_label_value(run_id)
dynamic_pod = k8s.V1Pod(
metadata=k8s.V1ObjectMeta(
namespace=namespace,
annotations=annotations,
name=PodGenerator.make_unique_pod_id(pod_id),
labels=labels,
),
spec=k8s.V1PodSpec(
containers=[
k8s.V1Container(
name="base",
args=args,
image=image,
env=[k8s.V1EnvVar(name="AIRFLOW_IS_K8S_EXECUTOR_POD", value="True")],
)
]
),
)
# Reconcile the pods starting with the first chronologically,
        # Pod from the pod_template_file -> Pod from executor_config arg -> Pod from the K8s executor
pod_list = [base_worker_pod, pod_override_object, dynamic_pod]
return reduce(PodGenerator.reconcile_pods, pod_list)
@staticmethod
def serialize_pod(pod: k8s.V1Pod) -> dict:
"""
Converts a k8s.V1Pod into a jsonified object
:param pod: k8s.V1Pod object
:return: Serialized version of the pod returned as dict
"""
api_client = ApiClient()
return api_client.sanitize_for_serialization(pod)
@staticmethod
def deserialize_model_file(path: str) -> k8s.V1Pod:
"""
:param path: Path to the file
:return: a kubernetes.client.models.V1Pod
Unfortunately we need access to the private method
``_ApiClient__deserialize_model`` from the kubernetes client.
        This issue is tracked here: https://github.com/kubernetes-client/python/issues/977.
"""
if os.path.exists(path):
with open(path) as stream:
pod = yaml.safe_load(stream)
else:
pod = yaml.safe_load(path)
return PodGenerator.deserialize_model_dict(pod)
@staticmethod
def deserialize_model_dict(pod_dict: dict) -> k8s.V1Pod:
"""
Deserializes python dictionary to k8s.V1Pod
:param pod_dict: Serialized dict of k8s.V1Pod object
:return: De-serialized k8s.V1Pod
"""
api_client = ApiClient()
return api_client._ApiClient__deserialize_model(pod_dict, k8s.V1Pod)
@staticmethod
def make_unique_pod_id(pod_id: str) -> Optional[str]:
r"""
Kubernetes pod names must consist of one or more lowercase
rfc1035/rfc1123 labels separated by '.' with a maximum length of 253
characters. Each label has a maximum length of 63 characters.
Name must pass the following regex for validation
``^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$``
For more details, see:
https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/design/identifiers.md
        :param pod_id: requested pod name, used as the prefix of the generated name
:return: ``str`` valid Pod name of appropriate length
"""
if not pod_id:
return None
safe_uuid = uuid.uuid4().hex # safe uuid will always be less than 63 chars
# Get prefix length after subtracting the uuid length. Clean up '.' and '-' from
# end of podID ('.' can't be followed by '-').
label_prefix_length = MAX_LABEL_LEN - len(safe_uuid) - 1 # -1 for separator
trimmed_pod_id = pod_id[:label_prefix_length].rstrip('-.')
# previously used a '.' as the separator, but this could create errors in some situations
return f"{trimmed_pod_id}-{safe_uuid}"
def merge_objects(base_obj, client_obj):
"""
:param base_obj: has the base attributes which are overwritten if they exist
in the client_obj and remain if they do not exist in the client_obj
:param client_obj: the object that the client wants to create.
:return: the merged objects
"""
if not base_obj:
return client_obj
if not client_obj:
return base_obj
client_obj_cp = copy.deepcopy(client_obj)
if isinstance(base_obj, dict) and isinstance(client_obj_cp, dict):
base_obj_cp = copy.deepcopy(base_obj)
base_obj_cp.update(client_obj_cp)
return base_obj_cp
for base_key in base_obj.to_dict().keys():
base_val = getattr(base_obj, base_key, None)
if not getattr(client_obj, base_key, None) and base_val:
if not isinstance(client_obj_cp, dict):
setattr(client_obj_cp, base_key, base_val)
else:
client_obj_cp[base_key] = base_val
return client_obj_cp
def extend_object_field(base_obj, client_obj, field_name):
"""
:param base_obj: an object which has a property `field_name` that is a list
:param client_obj: an object which has a property `field_name` that is a list.
A copy of this object is returned with `field_name` modified
:param field_name: the name of the list field
:return: the client_obj with the property `field_name` being the two properties appended
"""
client_obj_cp = copy.deepcopy(client_obj)
base_obj_field = getattr(base_obj, field_name, None)
client_obj_field = getattr(client_obj, field_name, None)
if (not isinstance(base_obj_field, list) and base_obj_field is not None) or (
not isinstance(client_obj_field, list) and client_obj_field is not None
):
raise ValueError("The chosen field must be a list.")
if not base_obj_field:
return client_obj_cp
if not client_obj_field:
setattr(client_obj_cp, field_name, base_obj_field)
return client_obj_cp
appended_fields = base_obj_field + client_obj_field
setattr(client_obj_cp, field_name, appended_fields)
return client_obj_cp
|
Acehaidrey/incubator-airflow
|
airflow/kubernetes/pod_generator.py
|
Python
|
apache-2.0
| 20,138
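# A minimal usage sketch, not part of the file above: it assumes the kubernetes
# client package is installed and that the module shown above is importable as
# airflow.kubernetes.pod_generator (per the path listed). It only illustrates how
# extend_object_field and merge_objects combine a base container with an override;
# the image tag and env values are placeholders.
from kubernetes.client import models as k8s

from airflow.kubernetes.pod_generator import extend_object_field, merge_objects

base = k8s.V1Container(
    name="base",
    image="apache/airflow:2.2.0",
    env=[k8s.V1EnvVar(name="A", value="1")],
)
override = k8s.V1Container(
    name="base",
    env=[k8s.V1EnvVar(name="B", value="2")],
)

# extend_object_field appends the two env lists; merge_objects then fills any
# attribute the override left unset (here, the image) from the base container.
merged = merge_objects(base, extend_object_field(base, override, "env"))
print(merged.image, [e.name for e in merged.env])  # apache/airflow:2.2.0 ['A', 'B']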
|
# -*- coding: utf-8 -*-
#
# Apache Open Climate Workbench documentation build configuration file, created by
# sphinx-quickstart on Fri Oct 25 07:58:45 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
#
# Assuming a fresh checkout of trunk, this will point us to the OCW package
sys.path.insert(0, os.path.abspath('../../ocw'))
sys.path.insert(0, os.path.abspath('../../ocw/data_source'))
sys.path.insert(0, os.path.abspath('../../ocw-ui/backend'))
sys.path.insert(0, os.path.abspath('../../ocw-config-runner'))
# -- General configuration -----------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.ifconfig',
'sphinxcontrib.autohttp.bottle',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Apache Open Climate Workbench'
copyright = u'2016, Apache Software Foundation'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.1.0'
# The full version, including alpha/beta/rc tags.
release = '1.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'ocw-logo-variant-sm-01-01-new.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ApacheOpenClimateWorkbenchdoc'
# -- Options for LaTeX output --------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'ApacheOpenClimateWorkbench.tex', u'Apache Open Climate Workbench Documentation',
u'Michael Joyce', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'apacheopenclimateworkbench', u'Apache Open Climate Workbench Documentation',
[u'Michael Joyce'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ApacheOpenClimateWorkbench', u'Apache Open Climate Workbench Documentation',
u'Michael Joyce', 'ApacheOpenClimateWorkbench', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('http://docs.python.org/2', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
'matplotlib': ('http://matplotlib.sourceforge.net/', None)}
# Autodoc config
#
# Select which content is inserted into the main body of an autoclass directive
# "class" - Only the class' docstring
# "both" - The class' and __init__ method's docstring
# "init" - Only __init__'s docstring
autoclass_content = "both"
|
huikyole/climate
|
docs/source/conf.py
|
Python
|
apache-2.0
| 9,272
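# A minimal sketch, not part of the conf.py above: it builds these docs
# programmatically with the configuration shown, assuming it is run from the
# docs/ directory of an OCW checkout and that Sphinx plus the
# sphinxcontrib-httpdomain package (which provides sphinxcontrib.autohttp.bottle)
# are installed. The directory names follow the usual Sphinx layout and are
# assumptions here.
from sphinx.application import Sphinx

app = Sphinx(
    srcdir='source',            # directory holding conf.py and index.rst
    confdir='source',
    outdir='build/html',
    doctreedir='build/doctrees',
    buildername='html',
)
app.build()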
|
# Copyright 2018 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates autoscaled, network LB IGM running specified docker image."""
def GenerateConfig(context):
"""Generate YAML resource configuration."""
resources = [{
'name': context.env['name'],
'type': 'compute.v1.instanceTemplate',
'properties': {
'properties': {
'machineType': context.properties['machineType'],
'disks': [{
'deviceName': 'boot',
'boot': True,
'type': 'PERSISTENT',
'autoDelete': True,
'mode': 'READ_WRITE',
'initializeParams': {
'sourceImage': context.properties['image']
}
}],
'networkInterfaces': [{
'accessConfigs': [{
'name': 'external-nat',
'type': 'ONE_TO_ONE_NAT'
}],
'network': 'global/networks/default',
}],
'metadata': {
'items': [{
'key': 'startup-script',
'value': ''.join(['#!/bin/bash\n',
'python -m SimpleHTTPServer 8080'])
}]
}
}
}
}]
return {'resources': resources}
|
aljim/deploymentmanager-samples
|
examples/v2/igm-updater/python/instance-template.py
|
Python
|
apache-2.0
| 1,916
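# A minimal local sketch, not how Deployment Manager actually invokes templates:
# it fakes the `context` object so the dict returned by GenerateConfig above can
# be previewed, assuming GenerateConfig from the template above is in scope
# (e.g. this snippet is appended to that file). The name, machine type and
# image values are placeholders.
import json


class FakeContext(object):
    env = {'name': 'web-igm-template'}
    properties = {
        'machineType': 'n1-standard-1',
        'image': 'projects/debian-cloud/global/images/family/debian-11',
    }


print(json.dumps(GenerateConfig(FakeContext()), indent=2))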
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DJANGO_APPS = ['filebrowser']
NICE_NAME = "File Browser"
REQUIRES_HADOOP = False
ICON = "filebrowser/art/icon_filebrowser_48.png"
MENU_INDEX = 20
IS_URL_NAMESPACED = True
from aws.conf import PERMISSION_ACTION_S3
from azure.conf import PERMISSION_ACTION_ADLS, PERMISSION_ACTION_ABFS
from desktop.conf import PERMISSION_ACTION_GS
PERMISSION_ACTIONS = (
(PERMISSION_ACTION_S3, "Access to S3 from filebrowser and filepicker."),
(PERMISSION_ACTION_ADLS, "Access to ADLS from filebrowser and filepicker."),
(PERMISSION_ACTION_ABFS, "Access to ABFS from filebrowser and filepicker."),
(PERMISSION_ACTION_GS, "Access to GS from filebrowser and filepicker.")
)
|
kawamon/hue
|
apps/filebrowser/src/filebrowser/settings.py
|
Python
|
apache-2.0
| 1,456
|
import sys
from setuptools import setup, find_packages
with open("README.rst") as fp:
long_description = fp.read()
install_requires = [
"requests>=2.12",
"PyYAML",
"six>=1.10.0",
"tzlocal",
]
if sys.version_info < (3,):
install_requires.extend([
"ipaddress",
])
setup(
name="pykube",
version="0.16a1",
description="Python client library for Kubernetes",
long_description=long_description,
author="Eldarion, Inc.",
author_email="development@eldarion.com",
license="Apache",
url="https://github.com/kelproject/pykube",
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python",
],
zip_safe=False,
packages=find_packages(),
entry_points={
"httpie.plugins.transport.v1": [
"httpie_pykube = pykube.contrib.httpie_plugin:PyKubeTransportPlugin"
],
},
install_requires=install_requires,
extras_require={
"gcp": [
"google-auth",
"jsonpath-ng",
]
},
)
|
eldarion-gondor/pykube
|
setup.py
|
Python
|
apache-2.0
| 1,253
|
import base
from client.rest import ApiException
class Chart(base.Base, object):
def __init__(self):
super(Chart,self).__init__(api_type = "chart")
def upload_chart(self, repository, chart, prov = None, expect_status_code = 201, **kwargs):
client = self._get_client(**kwargs)
try:
_, status_code, _ = client.chartrepo_repo_charts_post_with_http_info(repository, chart)
except ApiException as e:
base._assert_status_code(expect_status_code, e.status)
else:
base._assert_status_code(expect_status_code, status_code)
base._assert_status_code(201, status_code)
def get_charts(self, repository, expect_status_code = 200, **kwargs):
client = self._get_client(**kwargs)
try:
body, status_code, _ = client.chartrepo_repo_charts_get_with_http_info(repository)
except ApiException as e:
base._assert_status_code(expect_status_code, e.status)
return []
else:
base._assert_status_code(expect_status_code, status_code)
base._assert_status_code(200, status_code)
return body
def chart_should_exist(self, repository, chart_name, expect_status_code = 200, **kwargs):
charts_data = self.get_charts(repository, expect_status_code = expect_status_code, **kwargs)
for chart in charts_data:
if chart.name == chart_name:
return True
if expect_status_code == 200:
raise Exception(r"Chart {} does not exist in project {}.".format(chart_name, repository))
def delete_chart_with_version(self, repository, chart_name, version, expect_status_code = 200, **kwargs):
client = self._get_client(**kwargs)
try:
_, status_code, _ = client.chartrepo_repo_charts_name_version_delete_with_http_info(repository, chart_name, version)
except ApiException as e:
base._assert_status_code(expect_status_code, e.status)
else:
base._assert_status_code(expect_status_code, status_code)
base._assert_status_code(200, status_code)
|
wy65701436/harbor
|
tests/apitests/python/library/chart.py
|
Python
|
apache-2.0
| 2,143
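# A minimal usage sketch, not part of the library above: it assumes the Harbor
# API test harness is on the path so that `library.chart`, the `base` module and
# the generated `client` package import cleanly, and that credentials are passed
# through **kwargs to the base class. Project name, chart archive and version
# are placeholders.
from library.chart import Chart

chart_api = Chart()
chart_api.upload_chart("library", "/tmp/mychart-0.1.0.tgz", expect_status_code=201)
assert chart_api.chart_should_exist("library", "mychart")
chart_api.delete_chart_with_version("library", "mychart", "0.1.0")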
|